Merge branch 'develop' into feature/TD-2581

wpan 2021-08-09 09:23:12 +08:00
commit 28692a51cc
214 changed files with 15909 additions and 2960 deletions

View File

@@ -25,15 +25,14 @@ steps:
- master
---
kind: pipeline
name: test_arm64
name: test_arm64_bionic
platform:
os: linux
arch: arm64
steps:
- name: build
image: gcc
image: arm64v8/ubuntu:bionic
commands:
- apt-get update
- apt-get install -y cmake build-essential
@@ -48,9 +47,87 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: test_arm
name: test_arm64_focal
platform:
os: linux
arch: arm64
steps:
- name: build
image: arm64v8/ubuntu:focal
commands:
- echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
- apt-get update
- apt-get install -y -qq cmake build-essential
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: test_arm64_centos7
platform:
os: linux
arch: arm64
steps:
- name: build
image: arm64v8/centos:7
commands:
- yum install -y gcc gcc-c++ make cmake git
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: test_arm64_centos8
platform:
os: linux
arch: arm64
steps:
- name: build
image: arm64v8/centos:8
commands:
- dnf install -y gcc gcc-c++ make cmake epel-release git libarchive
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: test_arm_bionic
platform:
os: linux
@@ -73,7 +150,6 @@ steps:
branch:
- develop
- master
---
kind: pipeline
name: build_trusty
@@ -174,25 +250,3 @@ steps:
- develop
- master
---
kind: pipeline
name: goodbye
platform:
os: linux
arch: amd64
steps:
- name: 64-bit
image: alpine
commands:
- echo 64-bit is good.
when:
branch:
- develop
- master
depends_on:
- test_arm64
- test_amd64

Jenkinsfile vendored
View File

@@ -41,6 +41,7 @@ def pre_test(){
sh '''
killall -9 taosd ||echo "no taosd running"
killall -9 gdb || echo "no gdb running"
killall -9 python3.8 || echo "no python program running"
cd ${WKC}
git reset --hard HEAD~10 >/dev/null
'''
@@ -223,24 +224,27 @@ pipeline {
steps {
pre_test()
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
timeout(time: 60, unit: 'MINUTES'){
sh '''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
}
timeout(time: 60, unit: 'MINUTES'){
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
timeout(time: 45, unit: 'MINUTES'){
sh '''
date

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app
import (

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package expr
import (

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package expr
import "testing"

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package expr
import (

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package expr
import (

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app
import (

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app
import (

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app
import (

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package main
import (

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package models
import (

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package models
import "time"

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils
import (

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package log
import (

View File

@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.1.5.0")
SET(TD_VER_NUMBER "2.1.6.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@@ -126,7 +126,7 @@ taos> source <filename>;
$ taosdemo
```
This command automatically creates a super table meters under the database test, with 10,000 tables under it named "d0" through "d9999". Each table has 10,000 records; each record has four fields (ts, current, voltage, phase); timestamps range from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999"; and each table carries the tags location and groupdId, where groupdId is set to 1 through 10 and location is set to "beijing" or "shanghai".
This command automatically creates a super table meters under the database test, with 10,000 tables under it named "d0" through "d9999". Each table has 10,000 records; each record has four fields (ts, current, voltage, phase); timestamps range from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999"; and each table carries the tags location and groupId, where groupId is set to 1 through 10 and location is set to "beijing" or "shanghai".
Running this command takes roughly a few minutes and inserts a total of 100 million records.
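Once the load finishes, the total can be sanity-checked with a single count (a minimal verification sketch, assuming the test database created above):
```mysql
-- should report 100,000,000 rows after taosdemo completes
SELECT COUNT(*) FROM test.meters;
```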
@@ -150,10 +150,10 @@ taos> select avg(current), max(voltage), min(phase) from test.meters;
taos> select count(*) from test.meters where location="beijing";
```
- Query the average, maximum, minimum, etc. of all records with groupdId=10:
- Query the average, maximum, minimum, etc. of all records with groupId=10:
```mysql
taos> select avg(current), max(voltage), min(phase) from test.meters where groupdId=10;
taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
```
- Compute average, maximum, and minimum aggregates over table d10 in 10s windows:

View File

@@ -33,7 +33,7 @@ USE power;
An IoT system often contains many types of devices; a power grid, for example, has smart meters, transformers, buses, switches, and so on. To facilitate aggregation across multiple tables in TDengine, a super table (STable) needs to be created for each type of data collection point. Taking the smart meters in Table 1 as an example, the STable can be created with the following SQL command:
```mysql
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
```
**Note:** In versions before 2.0.15, the STABLE keyword in this statement must be written as TABLE.
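As a follow-on sketch, individual meters can then be registered as child tables of this STable (the table name and tag values below are illustrative):
```mysql
-- bind one meter to the meters STable defined above
CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
```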

View File

@@ -17,7 +17,7 @@ The continuous query provided by TDengine differs from the time window calculation in ordinary stream computing in the fol…
The following uses the smart meter scenario as an example to introduce the use of continuous queries. Suppose we created the super table and sub-tables through the following SQL statements:
```sql
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupdId int);
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
create table D1001 using meters tags ("Beijing.Chaoyang", 2);
create table D1002 using meters tags ("Beijing.Haidian", 2);
...
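-- With the tables above in place, the continuous query itself can be sketched as follows
-- (the window and sliding values are illustrative, not part of the original example):
create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s);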

View File

@@ -3,17 +3,17 @@
## <a class="anchor" id="grafana"></a>Grafana
TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process requires no code development, and the contents of TDengine data tables can be presented visually on dashboards (DashBoard).
### Install Grafana
Currently TDengine supports Grafana 5.2.4 and above. Users can download the installation package for their operating system from the Grafana website and install it. Download link: https://grafana.com/grafana/download.
Currently TDengine supports Grafana 6.2 and above. Users can download the installation package for their operating system from the Grafana website and install it. Download link: https://grafana.com/grafana/download.
### Configure Grafana
The TDengine Grafana plugin is located in the /usr/local/taos/connector/grafanaplugin directory of the installation package.
Taking CentOS 7.2 as an example, copy the grafanaplugin directory to /var/lib/grafana/plugins and restart grafana.
```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine

View File

@@ -143,7 +143,7 @@ taosd -C
When a new dnode joins a TDengine cluster, several cluster-related parameters must match the configuration of the existing cluster, otherwise it cannot join successfully. The following parameters are checked:
- numOfMnodes: the number of management nodes in the system. Default: 3.
- numOfMnodes: the number of management nodes in the system. Default: 3. (Starting from 2.0.20.11 in the 2.0 line and from 2.1.6.0 in 2.1 and above, the default value of numOfMnodes is changed to 1.)
- mnodeEqualVnodeNum: the number of vnodes that one mnode is counted as. Default: 4.
- offlineThreshold: the offline threshold for a dnode; once exceeded, the dnode is removed from the cluster. Unit: seconds; default: 86400*10 (i.e. 10 days).
- statusInterval: the interval at which a dnode reports its status to the mnode. Unit: seconds; default: 1.
@@ -427,7 +427,7 @@ After TDengine starts, it automatically creates a monitoring database log and automatically writes the serv…
COMPACT VNODES IN (vg_id1, vg_id2, ...)
```
The COMPACT command starts defragmentation for one or more specified VGroups; the system schedules the actual work through the task queue as soon as possible. The VGroup ids required by the COMPACT statement can be obtained from the output of `SHOW VGROUPS;`; in addition, `SHOW VGROUPS;` includes a compacting column, where a value of 1 means the corresponding VGroup is being defragmented and 0 means it is not.
The COMPACT command starts defragmentation for one or more specified VGroups; the system schedules the actual work through the task queue as soon as possible. The VGroup ids required by the COMPACT statement can be obtained from the output of `SHOW VGROUPS;`; in addition, `SHOW VGROUPS;` includes a compacting column, where a value of 2 means the corresponding VGroup is queued waiting for defragmentation, 1 means it is being defragmented, and 0 means it is not being defragmented (either defragmentation was not requested or it has already finished).
Note that defragmentation consumes a large amount of disk I/O, so while it is running it may affect the node's write and query performance, and in extreme cases may even block writes briefly.
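The workflow described above can be sketched in two statements (the vgroup ids are illustrative):
```
SHOW VGROUPS;                -- read the vgroup ids and the compacting column (2 = queued, 1 = running, 0 = idle)
COMPACT VNODES IN (2, 3);    -- request defragmentation for the chosen vgroups
```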

View File

@@ -34,7 +34,7 @@ taos> DESCRIBE meters;
- The time format is ```YYYY-MM-DD HH:mm:ss.MS```, with a default time resolution of milliseconds. For example: ```2017-08-12 18:25:58.128```
- The built-in function now is the current time of the client
- When inserting a record, if the timestamp is now, the current time of the client submitting the record is used
- Epoch Time: a timestamp can also be a long integer, meaning the number of milliseconds since 1970-01-01 08:00:00.000
- Epoch Time: a timestamp can also be a long integer, meaning the number of milliseconds since Greenwich time 1970-01-01 00:00:00.000 (UTC/GMT) (correspondingly, if the time precision of the Database is set to "microseconds", a long-integer timestamp means the number of microseconds since Greenwich time 1970-01-01 00:00:00.000 (UTC/GMT))
- Time values can be added and subtracted; for example, now-2h pushes the query time 2 hours back (the last 2 hours). The time unit after the number can be u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), or w (weeks). For example, `select * from t1 where ts > now-2w and ts <= now-1w` queries the data of the whole week that ended one week ago. When specifying the time window (interval) of a down-sampling operation, the time units n (calendar month) and y (calendar year) can also be used.
TDengine's default timestamp is millisecond precision, but microseconds are supported through the PRECISION parameter passed at CREATE DATABASE time.
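For example, both notations can be used in ordinary statements (a hypothetical sub-table d1001 with the meters schema is assumed, and 1626156393196 is an arbitrary epoch-millisecond value):
```mysql
INSERT INTO d1001 VALUES (1626156393196, 10.3, 219, 0.31);  -- long-integer epoch timestamp
SELECT * FROM d1001 WHERE ts > now-2h;                      -- relative time arithmetic
```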
@@ -182,7 +182,7 @@ TDengine's default timestamp is millisecond precision, but the PRECISION parameter passed at CREATE DATABASE…
- **Create tables in batches**
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
```
Create a large number of tables in batches at a faster speed (server side 2.0.14 and above).
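A minimal sketch of the extended syntax, assuming the meters STable from earlier (the table names and tag values are illustrative):
```mysql
CREATE TABLE IF NOT EXISTS d21001 USING meters TAGS ('Beijing.Chaoyang', 2)
             IF NOT EXISTS d21002 USING meters TAGS ('Beijing.Haidian', 2);
```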
@@ -414,13 +414,13 @@ INSERT INTO
```
When creating tables automatically, it is also possible to specify values for only some of the TAGS columns; the unspecified TAGS columns are set to NULL. For example:
```mysql
INSERT INTO d21001 USING meters (groupdId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33);
INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33);
```
The auto-create-table syntax also supports inserting records into multiple tables in a single statement. For example:
```mysql
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
d21002 USING meters (groupdId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
d21003 USING meters (groupdId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
```
**Note:** Before version 2.0.20.5, when using the auto-create-table syntax with a column list, the sub-table's column names had to immediately follow the sub-table name and could not be placed between TAGS and VALUES as in the examples above. Starting from version 2.0.20.5 both forms are accepted, but they must not be mixed in one SQL statement, otherwise a syntax error is reported.
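The two orderings accepted from 2.0.20.5 on can be illustrated as follows (hypothetical sub-table names; remember not to mix the two forms in one statement):
```mysql
-- column list between TAGS and VALUES (allowed from 2.0.20.5)
INSERT INTO d21004 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:36.000', 10.1, 0.30);
-- column list immediately after the sub-table name (the pre-2.0.20.5 form)
INSERT INTO d21005 (ts, current, phase) USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:36.000', 10.1, 0.30);
```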

View File

@@ -32,7 +32,7 @@ Replace the database operating in the current connection with “power”, other
An IoT system often has many types of devices, such as smart meters, transformers, buses, and switches in a power grid. To facilitate aggregation across multiple tables in TDengine, it is necessary to create an STable for each type of data collection point. Taking the smart meter in Table 1 as an example, you can use the following SQL command to create an STable:
```mysql
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
```
**Note:** The STABLE keyword in this instruction needs to be written as TABLE in versions before 2.0.15.
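As a follow-on sketch, a child table for one specific meter can then be created from this STable (the table name and tag values are illustrative):
```mysql
CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
```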

View File

@@ -17,7 +17,7 @@ The continuous query provided by TDengine differs from the time window calculati
The following uses the smart meter scenario as an example to introduce the use of continuous queries. Suppose we create an STable and sub-tables through the following SQL statements:
```sql
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupdId int);
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
create table D1001 using meters tags ("Beijing.Chaoyang", 2);
create table D1002 using meters tags ("Beijing.Haidian", 2);
...
@@ -357,4 +357,4 @@ This SQL statement will obtain the last recorded voltage value of all smart mete
In TDengine scenarios, alarm monitoring is a common requirement. Conceptually, it requires the program to filter out data that meet certain conditions from the data of the latest period of time, and to calculate a result from these data according to a defined formula. When the result meets certain conditions and lasts for a certain period of time, the user is notified in some form.
In order to meet the needs of users for alarm monitoring, TDengine provides this function in the form of an independent module. For its installation and use, please refer to the blog [How to Use TDengine for Alarm Monitoring](https://www.taosdata.com/blog/2020/04/14/1438.html).

View File

@@ -132,7 +132,7 @@ The SQL creates a database demo, each data file stores 10 days of data, the memo
When adding a new dnode to the TDengine cluster, some parameters related to the cluster must be the same as the configuration of the existing cluster, otherwise it cannot be successfully added to the cluster. The parameters that will be verified are as follows:
- numOfMnodes: the number of management nodes in the system. Default: 3.
- numOfMnodes: the number of management nodes in the system. Default: 3. (Since version 2.0.20.11 and version 2.1.6.0, the default value of "numOfMnodes" has been changed to 1.)
- balance: whether to enable load balancing. 0: No, 1: Yes. Default: 1.
- mnodeEqualVnodeNum: an mnode is equal to the number of vnodes consumed. Default: 4.
- offlineThreshold: the threshold for a dnode to be offline, exceeding which the dnode will be removed from the cluster. The unit is seconds, and the default value is 86400*10 (that is, 10 days).

View File

@@ -165,7 +165,7 @@ Note:
- **Create tables in batches**
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
```
Create a large number of data tables in batches at a faster speed. (Server side 2.0.14 and above)
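A minimal sketch of the extended syntax, assuming a meters STable (names and tags are illustrative):
```mysql
CREATE TABLE IF NOT EXISTS d1001 USING meters TAGS ('Beijing.Chaoyang', 2)
             IF NOT EXISTS d1002 USING meters TAGS ('Beijing.Haidian', 2);
```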

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package main
import (

View File

@@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package dataimport
import (

View File

@@ -44,7 +44,8 @@ echo "version=${version}"
#docker manifest rm tdengine/tdengine
#docker manifest rm tdengine/tdengine:${version}
if [ "$verType" == "beta" ]; then
docker manifest rm tdengine/tdengine:latest
docker manifest inspect tdengine/tdengine-beta:latest
docker manifest rm tdengine/tdengine-beta:latest
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
@@ -52,6 +53,7 @@ if [ "$verType" == "beta" ]; then
docker manifest push tdengine/tdengine-beta:${version}
elif [ "$verType" == "stable" ]; then
docker manifest inspect tdengine/tdengine:latest
docker manifest rm tdengine/tdengine:latest
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest

View File

@@ -35,7 +35,7 @@ fi
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh"
else
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"

View File

@@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.1.5.0'
version: '2.1.6.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.1.5.0
- usr/lib/libtaos.so.2.1.6.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so

View File

@@ -342,10 +342,11 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf);
int32_t tscCreateTableMetaFromSTableMeta(STableMeta** pChild, const char* name, size_t *tableMetaCapacity);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);
int32_t tscGetTagFilterSerializeLen(SQueryInfo* pQueryInfo);
int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo);
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage, uint64_t qId);

View File

@@ -295,7 +295,7 @@ typedef struct SSqlObj {
SSqlCmd cmd;
SSqlRes res;
bool isBind;
SSubqueryState subState;
struct SSqlObj **pSubs;

View File

@@ -84,8 +84,6 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
int64_t useconds = 0;
char * pTokenEnd = *next;
index = 0;
if (pToken->type == TK_NOW) {
useconds = taosGetTimestamp(timePrec);
} else if (strncmp(pToken->z, "0", 1) == 0 && pToken->n == 1) {
@@ -130,7 +128,8 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
return tscInvalidOperationMsg(error, "value expected in timestamp", sToken.z);
}
if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval, timePrec) != TSDB_CODE_SUCCESS) {
char unit = 0;
if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval, &unit, timePrec) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}

View File

@@ -458,9 +458,9 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema, SSm
schema->tagHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
schema->fieldHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
uint32_t size = tscGetTableMetaMaxSize();
STableMeta* tableMeta = calloc(1, size);
taosHashGetClone(tscTableMetaMap, fullTableName, strlen(fullTableName), NULL, tableMeta);
size_t size = 0;
STableMeta* tableMeta = NULL;
taosHashGetCloneExt(tscTableMetaMap, fullTableName, strlen(fullTableName), NULL, (void **)&tableMeta, &size);
tstrncpy(schema->sTableName, tableName, strlen(tableName)+1);
schema->precision = tableMeta->tableInfo.precision;
@@ -484,7 +484,7 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema, SSm
size_t tagIndex = taosArrayGetSize(schema->tags) - 1;
taosHashPut(schema->tagHash, field.name, strlen(field.name), &tagIndex, sizeof(tagIndex));
}
tscDebug("SML:0x%"PRIx64 "load table meta succeed. %s, columns number: %d, tag number: %d, precision: %d",
tscDebug("SML:0x%"PRIx64 " load table meta succeed. table name: %s, columns number: %d, tag number: %d, precision: %d",
info->id, tableName, tableMeta->tableInfo.numOfColumns, tableMeta->tableInfo.numOfTags, schema->precision);
free(tableMeta); tableMeta = NULL;
return code;
@@ -1417,7 +1417,7 @@ static bool isTimeStamp(char *pVal, uint16_t len, SMLTimeStampType *tsType) {
return false;
}
static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) {
static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str, SSmlLinesInfo* info) {
errno = 0;
uint8_t type = pVal->type;
int16_t length = pVal->length;
@@ -1436,7 +1436,7 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) {
}
if (errno == ERANGE) {
tscError("Converted number out of range");
tscError("SML:0x%"PRIx64" Convert number(%s) out of range", info->id, str);
return false;
}
@@ -1518,7 +1518,7 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) {
}
//len does not include '\0' from value.
static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
uint16_t len) {
uint16_t len, SSmlLinesInfo* info) {
if (len <= 0) {
return false;
}
@@ -1528,7 +1528,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
pVal->type = TSDB_DATA_TYPE_TINYINT;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
value[len - 2] = '\0';
if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
return false;
}
return true;
@@ -1537,7 +1537,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
pVal->type = TSDB_DATA_TYPE_UTINYINT;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
value[len - 2] = '\0';
if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
return false;
}
return true;
@@ -1546,7 +1546,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
pVal->type = TSDB_DATA_TYPE_SMALLINT;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
value[len - 3] = '\0';
if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
return false;
}
return true;
@@ -1555,7 +1555,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
pVal->type = TSDB_DATA_TYPE_USMALLINT;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
value[len - 3] = '\0';
if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
return false;
}
return true;
@@ -1564,7 +1564,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
pVal->type = TSDB_DATA_TYPE_INT;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
value[len - 3] = '\0';
if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
return false;
}
return true;
@@ -1573,7 +1573,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
pVal->type = TSDB_DATA_TYPE_UINT;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
value[len - 3] = '\0';
if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
return false;
}
return true;
@@ -1582,7 +1582,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
pVal->type = TSDB_DATA_TYPE_BIGINT;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
value[len - 3] = '\0';
if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
return false;
}
return true;
@@ -1591,7 +1591,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
pVal->type = TSDB_DATA_TYPE_UBIGINT;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
value[len - 3] = '\0';
if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
return false;
}
return true;
@@ -1601,7 +1601,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
pVal->type = TSDB_DATA_TYPE_FLOAT;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
value[len - 3] = '\0';
if (!isValidFloat(value) || !convertStrToNumber(pVal, value)) {
if (!isValidFloat(value) || !convertStrToNumber(pVal, value, info)) {
return false;
}
return true;
@@ -1610,7 +1610,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
pVal->type = TSDB_DATA_TYPE_DOUBLE;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
value[len - 3] = '\0';
if (!isValidFloat(value) || !convertStrToNumber(pVal, value)) {
if (!isValidFloat(value) || !convertStrToNumber(pVal, value, info)) {
return false;
}
return true;
@@ -1646,7 +1646,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
if (isValidInteger(value) || isValidFloat(value)) {
pVal->type = TSDB_DATA_TYPE_FLOAT;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
if (!convertStrToNumber(pVal, value)) {
if (!convertStrToNumber(pVal, value, info)) {
return false;
}
return true;
@@ -1702,7 +1702,7 @@ static int32_t getTimeStampValue(char *value, uint16_t len,
}
static int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
uint16_t len) {
uint16_t len, SSmlLinesInfo* info) {
int32_t ret;
SMLTimeStampType type;
int64_t tsVal;
@@ -1715,7 +1715,7 @@ static int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
if (ret) {
return ret;
}
tscDebug("Timestamp after conversion:%"PRId64, tsVal);
tscDebug("SML:0x%"PRIx64"Timestamp after conversion:%"PRId64, info->id, tsVal);
pVal->type = TSDB_DATA_TYPE_TIMESTAMP;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
@@ -1724,7 +1724,7 @@ static int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
return TSDB_CODE_SUCCESS;
}
static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index) {
static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index, SSmlLinesInfo* info) {
const char *start, *cur;
int32_t ret = TSDB_CODE_SUCCESS;
int len = 0;
@@ -1744,7 +1744,7 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index) {
memcpy(value, start, len);
}
ret = convertSmlTimeStamp(*pTS, value, len);
ret = convertSmlTimeStamp(*pTS, value, len, info);
if (ret) {
free(value);
free(*pTS);
@@ -1757,7 +1757,7 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index) {
return ret;
}
static bool checkDuplicateKey(char *key, SHashObj *pHash) {
static bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) {
char *val = NULL;
char *cur = key;
char keyLower[TSDB_COL_NAME_LEN];
@@ -1771,7 +1771,7 @@ static bool checkDuplicateKey(char *key, SHashObj *pHash) {
val = taosHashGet(pHash, keyLower, keyLen);
if (val) {
tscError("Duplicate key:%s", keyLower);
tscError("SML:0x%"PRIx64" Duplicate key detected:%s", info->id, keyLower);
return true;
}
@@ -1781,19 +1781,19 @@ static bool checkDuplicateKey(char *key, SHashObj *pHash) {
return false;
}
static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash) {
static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) {
const char *cur = *index;
char key[TSDB_COL_NAME_LEN + 1]; // +1 to avoid key[len] over write
uint16_t len = 0;
//key field cannot start with digit
if (isdigit(*cur)) {
tscError("Tag key cannnot start with digit\n");
tscError("SML:0x%"PRIx64" Tag key cannnot start with digit", info->id);
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
}
while (*cur != '\0') {
if (len > TSDB_COL_NAME_LEN) {
tscDebug("Key field cannot exceeds 65 characters");
tscError("SML:0x%"PRIx64" Key field cannot exceeds 65 characters", info->id);
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
}
//unescaped '=' identifies a tag key
@@ -1810,20 +1810,20 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash
}
key[len] = '\0';
if (checkDuplicateKey(key, pHash)) {
if (checkDuplicateKey(key, pHash, info)) {
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
}
pKV->key = calloc(len + 1, 1);
memcpy(pKV->key, key, len + 1);
//tscDebug("Key:%s|len:%d", pKV->key, len);
//tscDebug("SML:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
*index = cur + 1;
return TSDB_CODE_SUCCESS;
}
static bool parseSmlValue(TAOS_SML_KV *pKV, const char **index,
bool *is_last_kv) {
bool *is_last_kv, SSmlLinesInfo* info) {
const char *start, *cur;
char *value = NULL;
uint16_t len = 0;
@@ -1847,7 +1847,9 @@ static bool parseSmlValue(TAOS_SML_KV *pKV, const char **index,
value = calloc(len + 1, 1);
memcpy(value, start, len);
value[len] = '\0';
if (!convertSmlValueType(pKV, value, len)) {
if (!convertSmlValueType(pKV, value, len, info)) {
tscError("SML:0x%"PRIx64" Failed to convert sml value string(%s) to any type",
info->id, value);
//free previous alocated key field
free(pKV->key);
pKV->key = NULL;
@@ -1861,7 +1863,7 @@ static bool parseSmlValue(TAOS_SML_KV *pKV, const char **index,
}
static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index,
uint8_t *has_tags) {
uint8_t *has_tags, SSmlLinesInfo* info) {
const char *cur = *index;
uint16_t len = 0;
@@ -1870,7 +1872,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
if (isdigit(*cur)) {
tscError("Measurement field cannnot start with digit");
tscError("SML:0x%"PRIx64" Measurement field cannnot start with digit", info->id);
free(pSml->stableName);
pSml->stableName = NULL;
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
@@ -1878,7 +1880,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
while (*cur != '\0') {
if (len > TSDB_TABLE_NAME_LEN) {
tscError("Measurement field cannot exceeds 193 characters");
tscError("SML:0x%"PRIx64" Measurement field cannot exceeds 193 characters", info->id);
free(pSml->stableName);
pSml->stableName = NULL;
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
@@ -1902,7 +1904,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
}
pSml->stableName[len] = '\0';
*index = cur + 1;
tscDebug("Stable name in measurement:%s|len:%d", pSml->stableName, len);
tscDebug("SML:0x%"PRIx64" Stable name in measurement:%s|len:%d", info->id, pSml->stableName, len);
return TSDB_CODE_SUCCESS;
}
@@ -1921,7 +1923,8 @@ static int32_t isValidChildTableName(const char *pTbName, int16_t len) {
static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
const char **index, bool isField,
TAOS_SML_DATA_POINT* smlData, SHashObj *pHash) {
TAOS_SML_DATA_POINT* smlData, SHashObj *pHash,
SSmlLinesInfo* info) {
const char *cur = *index;
int32_t ret = TSDB_CODE_SUCCESS;
TAOS_SML_KV *pkv;
@@ -1941,14 +1944,14 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
}
while (*cur != '\0') {
ret = parseSmlKey(pkv, &cur, pHash);
ret = parseSmlKey(pkv, &cur, pHash, info);
if (ret) {
tscError("Unable to parse key field");
tscError("SML:0x%"PRIx64" Unable to parse key", info->id);
goto error;
}
ret = parseSmlValue(pkv, &cur, &is_last_kv);
ret = parseSmlValue(pkv, &cur, &is_last_kv, info);
if (ret) {
tscError("Unable to parse value field");
tscError("SML:0x%"PRIx64" Unable to parse value", info->id);
goto error;
}
if (!isField &&
@@ -1966,7 +1969,6 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
*num_kvs += 1;
}
if (is_last_kv) {
//tscDebug("last key-value field detected");
goto done;
}
@@ -2024,50 +2026,50 @@ static void moveTimeStampToFirstKv(TAOS_SML_DATA_POINT** smlData, TAOS_SML_KV *t
free(ts);
}
int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData) {
int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) {
const char* index = sql;
int32_t ret = TSDB_CODE_SUCCESS;
uint8_t has_tags = 0;
TAOS_SML_KV *timestamp = NULL;
SHashObj *keyHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
ret = parseSmlMeasurement(smlData, &index, &has_tags);
ret = parseSmlMeasurement(smlData, &index, &has_tags, info);
if (ret) {
tscError("Unable to parse measurement");
tscError("SML:0x%"PRIx64" Unable to parse measurement", info->id);
taosHashCleanup(keyHashTable);
return ret;
}
tscDebug("Parse measurement finished, has_tags:%d", has_tags);
tscDebug("SML:0x%"PRIx64" Parse measurement finished, has_tags:%d", info->id, has_tags);
//Parse Tags
if (has_tags) {
ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &index, false, smlData, keyHashTable);
ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &index, false, smlData, keyHashTable, info);
if (ret) {
tscError("Unable to parse tag");
tscError("SML:0x%"PRIx64" Unable to parse tag", info->id);
taosHashCleanup(keyHashTable);
return ret;
}
}
tscDebug("Parse tags finished, num of tags:%d", smlData->tagNum);
tscDebug("SML:0x%"PRIx64" Parse tags finished, num of tags:%d", info->id, smlData->tagNum);
//Parse fields
ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &index, true, smlData, keyHashTable);
ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &index, true, smlData, keyHashTable, info);
if (ret) {
tscError("Unable to parse field");
tscError("SML:0x%"PRIx64" Unable to parse field", info->id);
taosHashCleanup(keyHashTable);
return ret;
}
tscDebug("Parse fields finished, num of fields:%d", smlData->fieldNum);
tscDebug("SML:0x%"PRIx64" Parse fields finished, num of fields:%d", info->id, smlData->fieldNum);
taosHashCleanup(keyHashTable);
//Parse timestamp
ret = parseSmlTimeStamp(&timestamp, &index);
ret = parseSmlTimeStamp(&timestamp, &index, info);
if (ret) {
tscError("Unable to parse timestamp");
tscError("SML:0x%"PRIx64" Unable to parse timestamp", info->id);
return ret;
}
moveTimeStampToFirstKv(&smlData, timestamp);
tscDebug("Parse timestamp finished");
tscDebug("SML:0x%"PRIx64" Parse timestamp finished", info->id);
return TSDB_CODE_SUCCESS;
}
@@ -2104,7 +2106,7 @@ void destroySmlDataPoint(TAOS_SML_DATA_POINT* point) {
int32_t tscParseLines(char* lines[], int numLines, SArray* points, SArray* failedLines, SSmlLinesInfo* info) {
for (int32_t i = 0; i < numLines; ++i) {
TAOS_SML_DATA_POINT point = {0};
int32_t code = tscParseLine(lines[i], &point);
int32_t code = tscParseLine(lines[i], &point, info);
if (code != TSDB_CODE_SUCCESS) {
tscError("SML:0x%"PRIx64" data point line parse failed. line %d : %s", info->id, i, lines[i]);
destroySmlDataPoint(&point);

View File

@@ -255,10 +255,25 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
pQdesc->qId = htobe64(pSql->res.qId);
pQdesc->sqlObjId = htobe64(pSql->self);
pQdesc->pid = pHeartbeat->pid;
if (pSql->cmd.pQueryInfo->stableQuery == true) {
pQdesc->numOfSub = pSql->subState.numOfSub;
pQdesc->stableQuery = pSql->cmd.pQueryInfo->stableQuery;
pQdesc->numOfSub = pSql->subState.numOfSub;
char *p = pQdesc->subSqlInfo;
int32_t remainLen = sizeof(pQdesc->subSqlInfo);
if (pQdesc->numOfSub == 0) {
snprintf(p, remainLen, "N/A");
} else {
pQdesc->numOfSub = 1;
int32_t len;
for (int32_t i = 0; i < pQdesc->numOfSub; ++i) {
len = snprintf(p, remainLen, "[%d]0x%" PRIx64 "(%c) ", i,
pSql->pSubs[i]->self,
pSql->subState.states[i] ? 'C' : 'I');
if (len > remainLen) {
break;
}
remainLen -= len;
p += len;
}
}
pQdesc->numOfSub = htonl(pQdesc->numOfSub);

View File

@@ -905,6 +905,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SSqlNode* pSqlNode = taosArrayGetP(pInfo->list, i);
tscTrace("0x%"PRIx64" start to parse the %dth subclause, total:%"PRIzu, pSql->self, i, size);
// normalizeSqlNode(pSqlNode); // normalize the column name in each function
if ((code = validateSqlNode(pSql, pSqlNode, pQueryInfo)) != TSDB_CODE_SUCCESS) {
return code;
@@ -1293,35 +1294,31 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSl
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
SInterval* pInterval = &pQueryInfo->interval;
if (pSliding->n == 0) {
pQueryInfo->interval.slidingUnit = pQueryInfo->interval.intervalUnit;
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
pInterval->slidingUnit = pInterval->intervalUnit;
pInterval->sliding = pInterval->interval;
return TSDB_CODE_SUCCESS;
}
if (pQueryInfo->interval.intervalUnit == 'n' || pQueryInfo->interval.intervalUnit == 'y') {
if (pInterval->intervalUnit == 'n' || pInterval->intervalUnit == 'y') {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
parseAbsoluteDuration(pSliding->z, pSliding->n, &pQueryInfo->interval.sliding, tinfo.precision);
parseAbsoluteDuration(pSliding->z, pSliding->n, &pInterval->sliding, &pInterval->slidingUnit, tinfo.precision);
if (pQueryInfo->interval.sliding <
convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, tinfo.precision)) {
if (pInterval->sliding < convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, tinfo.precision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
if (pInterval->sliding > pInterval->interval) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if ((pQueryInfo->interval.interval != 0) && (pQueryInfo->interval.interval/pQueryInfo->interval.sliding > INTERVAL_SLIDING_FACTOR)) {
if ((pInterval->interval != 0) && (pInterval->interval/pInterval->sliding > INTERVAL_SLIDING_FACTOR)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// if (pQueryInfo->interval.sliding != pQueryInfo->interval.interval && pSql->pStream == NULL) {
// return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
// }
return TSDB_CODE_SUCCESS;
}
@@ -2033,7 +2030,6 @@ static SUdfInfo* isValidUdf(SArray* pUdfInfo, const char* name, int32_t len) {
tscError("udfinfo is null");
return NULL;
}
size_t t = taosArrayGetSize(pUdfInfo);
for(int32_t i = 0; i < t; ++i) {
SUdfInfo* pUdf = taosArrayGet(pUdfInfo, i);
@@ -6166,6 +6162,11 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tVariantListItem* pItem = taosArrayGet(pVarList, 1);
SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes * TSDB_NCHAR_SIZE)) {
return invalidOperationMsg(pMsg, msg14);
}
pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE);
if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) {
@@ -6356,10 +6357,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
int16_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta);
int16_t i;
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
int32_t tagIndex = columnIndex.columnIndex - numOfCols;
assert(tagIndex>=0);
uint32_t nLen = 0;
for (i = 0; i < numOfTags; ++i) {
nLen += (i != columnIndex.columnIndex) ? pSchema[i].bytes : pItem->bytes;
for (int i = 0; i < numOfTags; ++i) {
nLen += (i != tagIndex) ? pSchema[i].bytes : pItem->bytes;
}
if (nLen >= TSDB_MAX_TAGS_LEN) {
return invalidOperationMsg(pMsg, msg24);
@@ -8369,6 +8372,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SArray* pVgroupList = NULL;
SArray* plist = NULL;
STableMeta* pTableMeta = NULL;
size_t tableMetaCapacity = 0;
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
pCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
@@ -8395,18 +8399,14 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
}
uint32_t maxSize = tscGetTableMetaMaxSize();
char name[TSDB_TABLE_FNAME_LEN] = {0};
assert(maxSize < 80 * TSDB_MAX_COLUMNS);
if (!pSql->pBuf) {
if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _end;
}
}
pTableMeta = calloc(1, maxSize);
//if (!pSql->pBuf) {
// if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) {
// code = TSDB_CODE_TSC_OUT_OF_MEMORY;
// goto _end;
// }
//}
plist = taosArrayInit(4, POINTER_BYTES);
pVgroupList = taosArrayInit(4, POINTER_BYTES);
@@ -8420,16 +8420,16 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tNameExtractFullName(pname, name);
size_t len = strlen(name);
memset(pTableMeta, 0, maxSize);
taosHashGetClone(tscTableMetaMap, name, len, NULL, pTableMeta);
taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&pTableMeta, &tableMetaCapacity);
if (pTableMeta->id.uid > 0) {
if (pTableMeta && pTableMeta->id.uid > 0) {
tscDebug("0x%"PRIx64" retrieve table meta %s from local buf", pSql->self, name);
// avoid mem leak, may should update pTableMeta
void* pVgroupIdList = NULL;
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
code = tscCreateTableMetaFromSTableMeta(pTableMeta, name, pSql->pBuf);
code = tscCreateTableMetaFromSTableMeta((STableMeta **)(&pTableMeta), name, &tableMetaCapacity);
// create the child table meta from super table failed, try load it from mnode
if (code != TSDB_CODE_SUCCESS) {
@@ -8584,7 +8584,8 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod
tNameExtractFullName(&pTableMetaInfo->name, fname);
STableMetaVgroupInfo* p = taosHashGet(pCmd->pTableMetaMap, fname, strnlen(fname, TSDB_TABLE_FNAME_LEN));
pTableMetaInfo->pTableMeta = tscTableMetaDup(p->pTableMeta);
pTableMetaInfo->pTableMeta = tscTableMetaDup(p->pTableMeta);
pTableMetaInfo->tableMetaCapacity = tscGetTableMetaSize(pTableMetaInfo->pTableMeta);
assert(pTableMetaInfo->pTableMeta != NULL);
if (p->vgroupIdList != NULL) {
@@ -8684,7 +8685,8 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS
if (pTableMetaInfo1 == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pTableMetaInfo1->pTableMeta = extractTempTableMetaFromSubquery(pSub);
pTableMetaInfo1->pTableMeta = extractTempTableMetaFromSubquery(pSub);
pTableMetaInfo1->tableMetaCapacity = tscGetTableMetaSize(pTableMetaInfo1->pTableMeta);
if (subInfo->aliasName.n > 0) {
if (subInfo->aliasName.n >= TSDB_TABLE_FNAME_LEN) {
@@ -8733,6 +8735,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg6 = "not support stddev/percentile/interp in the outer query yet";
const char* msg7 = "derivative/twa/irate requires timestamp column exists in subquery";
const char* msg8 = "condition missing for join query";
const char* msg9 = "not support 3 level select";
int32_t code = TSDB_CODE_SUCCESS;
@@ -8763,6 +8766,13 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
// parse the subquery in the first place
int32_t numOfSub = (int32_t)taosArrayGetSize(pSqlNode->from->list);
for (int32_t i = 0; i < numOfSub; ++i) {
// check if there is 3 level select
SRelElementPair* subInfo = taosArrayGet(pSqlNode->from->list, i);
SSqlNode* p = taosArrayGetP(subInfo->pSubquery, 0);
if (p->from->type == SQL_NODE_FROM_SUBQUERY){
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
code = doValidateSubquery(pSqlNode, i, pSql, pQueryInfo, tscGetErrorMsgPayload(pCmd));
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -9039,8 +9049,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
#if 0
SQueryNode* p = qCreateQueryPlan(pQueryInfo);
char* s = queryPlanToString(p);
printf("%s\n", s);
tfree(s);
qDestroyQueryPlan(p);
#endif

View File

@@ -337,11 +337,16 @@ int tscSendMsgToServer(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
SRpcMsg* rpcMsg = pSchedMsg->ahandle;
SRpcEpSet* pEpSet = pSchedMsg->thandle;
TSDB_CACHE_PTR_TYPE handle = (TSDB_CACHE_PTR_TYPE) rpcMsg->ahandle;
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle);
if (pSql == NULL) {
rpcFreeCont(rpcMsg->pCont);
free(rpcMsg);
free(pEpSet);
return;
}
@@ -359,6 +364,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
taosRemoveRef(tscObjRef, handle);
taosReleaseRef(tscObjRef, handle);
rpcFreeCont(rpcMsg->pCont);
free(rpcMsg);
free(pEpSet);
return;
}
@@ -370,6 +377,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
taosRemoveRef(tscObjRef, handle);
taosReleaseRef(tscObjRef, handle);
rpcFreeCont(rpcMsg->pCont);
free(rpcMsg);
free(pEpSet);
return;
}
@@ -392,10 +401,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
// single table query error need to be handled here.
if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) &&
(((rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID ||
rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID)) ||
rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL ||
rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
(((rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID)) ||
rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
// 1. super table subquery
// 2. nest queries are all not updated the tablemeta and retry parse the sql after cleanup local tablemeta/vgroup id buffer
@@ -425,6 +432,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, handle);
rpcFreeCont(rpcMsg->pCont);
free(rpcMsg);
free(pEpSet);
return;
}
}
@@ -432,7 +441,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
}
pRes->rspLen = 0;
if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
tscDebug("0x%"PRIx64" query is cancelled, code:%s", pSql->self, tstrerror(pRes->code));
} else {
@@ -481,7 +490,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
tscDebug("0x%"PRIx64" SQL cmd:%s, code:%s rspLen:%d", pSql->self, sqlCmd[pCmd->command], tstrerror(pRes->code), pRes->rspLen);
}
}
if (pRes->code == TSDB_CODE_SUCCESS && tscProcessMsgRsp[pCmd->command]) {
rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);
}
@@ -502,6 +511,29 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
taosReleaseRef(tscObjRef, handle);
rpcFreeCont(rpcMsg->pCont);
free(rpcMsg);
free(pEpSet);
}
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
SSchedMsg schedMsg = {0};
schedMsg.fp = doProcessMsgFromServer;
SRpcMsg* rpcMsgCopy = calloc(1, sizeof(SRpcMsg));
memcpy(rpcMsgCopy, rpcMsg, sizeof(struct SRpcMsg));
schedMsg.ahandle = (void*)rpcMsgCopy;
SRpcEpSet* pEpSetCopy = NULL;
if (pEpSet != NULL) {
pEpSetCopy = calloc(1, sizeof(SRpcEpSet));
memcpy(pEpSetCopy, pEpSet, sizeof(SRpcEpSet));
}
schedMsg.thandle = (void*)pEpSetCopy;
schedMsg.msg = NULL;
taosScheduleTask(tscQhandle, &schedMsg);
}
int doBuildAndSendMsg(SSqlObj *pSql) {
@@ -644,6 +676,8 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo));
int32_t srcColFilterSize = 0;
int32_t srcTagFilterSize = tscGetTagFilterSerializeLen(pQueryInfo);
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
int32_t exprSize = (int32_t)(sizeof(SSqlExpr) * numOfExprs * 2);
@@ -672,7 +706,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
}
}
return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + exprSize + tsBufSize +
return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + srcTagFilterSize + exprSize + tsBufSize +
tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
}
@@ -1020,7 +1054,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->id.uid);
if (pCond != NULL && pCond->cond != NULL) {
pQueryMsg->tagCondLen = htons(pCond->len);
pQueryMsg->tagCondLen = htonl(pCond->len);
memcpy(pMsg, pCond->cond, pCond->len);
pMsg += pCond->len;
@@ -2825,43 +2859,22 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool autocreate, bool onlyLocal) {
assert(tIsValidName(&pTableMetaInfo->name));
uint32_t size = tscGetTableMetaMaxSize();
if (pTableMetaInfo->pTableMeta == NULL) {
pTableMetaInfo->pTableMeta = malloc(size);
pTableMetaInfo->tableMetaSize = size;
} else if (pTableMetaInfo->tableMetaSize < size) {
char *tmp = realloc(pTableMetaInfo->pTableMeta, size);
if (tmp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pTableMetaInfo->pTableMeta = (STableMeta *)tmp;
}
memset(pTableMetaInfo->pTableMeta, 0, size);
pTableMetaInfo->tableMetaSize = size;
pTableMetaInfo->pTableMeta->tableType = -1;
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
size_t len = strlen(name);
taosHashGetClone(tscTableMetaMap, name, len, NULL, pTableMetaInfo->pTableMeta);
// TODO resize the tableMeta
assert(size < 80 * TSDB_MAX_COLUMNS);
if (!pSql->pBuf) {
if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
}
if (pTableMetaInfo->tableMetaCapacity != 0) {
if (pTableMetaInfo->pTableMeta != NULL) {
memset(pTableMetaInfo->pTableMeta, 0, pTableMetaInfo->tableMetaCapacity);
}
}
taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&(pTableMetaInfo->pTableMeta), &pTableMetaInfo->tableMetaCapacity);
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
if (pMeta && pMeta->id.uid > 0) {
// in case of child table, here only get the
if (pMeta->tableType == TSDB_CHILD_TABLE) {
int32_t code = tscCreateTableMetaFromSTableMeta(pTableMetaInfo->pTableMeta, name, pSql->pBuf);
int32_t code = tscCreateTableMetaFromSTableMeta(&pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
}
View File
@ -1636,6 +1636,8 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
continue;
}
SSqlRes* pRes1 = &pSql1->res;
if (pRes1->row >= pRes1->numOfRows) {
subquerySetState(pSql1, &pSql->subState, i, 0);
View File
@ -1308,10 +1308,10 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) {
for (int i = 0; i < pRes->numOfCols; i++) {
tfree(pRes->buffer[i]);
}
pRes->numOfCols = 0;
}
tfree(pRes->pRsp);
tfree(pRes->tsrow);
@ -1689,6 +1689,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pDataBlock->pTableMeta);
pTableMetaInfo->tableMetaCapacity = (size_t)(pTableMetaInfo->tableMetaSize);
}
/*
@ -1751,7 +1752,7 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff
if (dataBuf->nAllocSize <= dataBuf->headerSize) {
dataBuf->nAllocSize = dataBuf->headerSize * 2;
}
//dataBuf->pData = calloc(1, dataBuf->nAllocSize);
dataBuf->pData = malloc(dataBuf->nAllocSize);
if (dataBuf->pData == NULL) {
@ -1867,7 +1868,7 @@ static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) {
toffset += TYPE_BYTES[pSchema[j].type];
++j;
}
#if 0 // no need anymore
while (i < nColsBound) {
p = payloadNextCol(p);
@ -2037,7 +2038,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
// the maximum expanded size in byte when a row-wise data is converted to SDataRow format
int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
STableDataBlocks* dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
if (ret != TSDB_CODE_SUCCESS) {
@ -2091,7 +2092,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid,
pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey);
}
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
pBlocks->tid = htonl(pBlocks->tid);
@ -2115,7 +2116,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
}else {
tscDebug("0x%"PRIx64" table %s data block is empty", pInsertParam->objectId, pOneTableBlock->tableName.tname);
}
p = taosHashIterate(pInsertParam->pTableBlockHashList, p);
if (p == NULL) {
break;
@ -2179,7 +2180,7 @@ int tscAllocPayload(SSqlCmd* pCmd, int size) {
pCmd->payload = b;
pCmd->allocSize = size;
}
memset(pCmd->payload, 0, pCmd->allocSize);
}
@ -2196,7 +2197,7 @@ TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes) {
SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) {
assert(pFieldInfo != NULL);
pFieldInfo->numOfOutput++;
struct SInternalField info = { .pExpr = NULL, .visible = true };
info.field = *pField;
@ -2288,13 +2289,13 @@ int32_t tscGetResRowLength(SArray* pExprList) {
if (num == 0) {
return 0;
}
int32_t size = 0;
for(int32_t i = 0; i < num; ++i) {
SExprInfo* pExpr = taosArrayGetP(pExprList, i);
size += pExpr->base.resBytes;
}
return size;
}
@ -2439,7 +2440,7 @@ SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SCo
snprintf(p->colInfo.name, len, "%s.%s", pTableMetaInfo->aliasName, pSchema->name);
}
}
p->colInfo.flag = colType;
p->colInfo.colIndex = pColIndex->columnIndex;
@ -2451,7 +2452,7 @@ SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SCo
if (pTableMetaInfo->pTableMeta) {
p->uid = pTableMetaInfo->pTableMeta->id.uid;
}
return pExpr;
}
@ -2537,18 +2538,18 @@ SExprInfo* tscExprGet(SQueryInfo* pQueryInfo, int32_t index) {
*/
void tscExprDestroy(SArray* pExprInfo) {
size_t size = taosArrayGetSize(pExprInfo);
for(int32_t i = 0; i < size; ++i) {
SExprInfo* pExpr = taosArrayGetP(pExprInfo, i);
sqlExprDestroy(pExpr);
}
taosArrayDestroy(pExprInfo);
}
int32_t tscExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy) {
assert(src != NULL && dst != NULL);
size_t size = taosArrayGetSize(src);
for (int32_t i = 0; i < size; ++i) {
SExprInfo* pExpr = taosArrayGetP(src, i);
@ -2633,7 +2634,7 @@ SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t
if (columnIndex < 0) {
return NULL;
}
size_t numOfCols = taosArrayGetSize(pColumnList);
int32_t i = 0;
@ -2663,7 +2664,7 @@ SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t
taosArrayInsert(pColumnList, i, &b);
} else {
SColumn* pCol = taosArrayGetP(pColumnList, i);
if (i < numOfCols && (pCol->columnIndex > columnIndex || pCol->tableUid != uid)) {
SColumn* b = calloc(1, sizeof(SColumn));
if (b == NULL) {
@ -2687,7 +2688,7 @@ SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t
SColumn* tscColumnClone(const SColumn* src) {
assert(src != NULL);
SColumn* dst = calloc(1, sizeof(SColumn));
if (dst == NULL) {
return NULL;
@ -2716,7 +2717,7 @@ void tscColumnCopy(SColumn* pDest, const SColumn* pSrc) {
void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid) {
assert(src != NULL && dst != NULL);
size_t num = taosArrayGetSize(src);
for (int32_t i = 0; i < num; ++i) {
SColumn* pCol = taosArrayGetP(src, i);
@ -2827,18 +2828,19 @@ void tscDequoteAndTrimToken(SStrToken* pToken) {
}
int32_t tscValidateName(SStrToken* pToken) {
if (pToken->type != TK_STRING && pToken->type != TK_ID) {
if (pToken == NULL || pToken->z == NULL ||
(pToken->type != TK_STRING && pToken->type != TK_ID)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
char* sep = strnchr(pToken->z, TS_PATH_DELIMITER[0], pToken->n, true);
if (sep == NULL) { // single part
if (pToken->type == TK_STRING) {
tscDequoteAndTrimToken(pToken);
tscStrToLower(pToken->z, pToken->n);
//pToken->n = (uint32_t)strtrim(pToken->z);
int len = tGetToken(pToken->z, &pToken->type);
// single token, validate it
@ -2890,7 +2892,7 @@ int32_t tscValidateName(SStrToken* pToken) {
if (pToken->type == TK_STRING && validateQuoteToken(pToken) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
// re-build the whole name string
if (pStr[firstPartLen] == TS_PATH_DELIMITER[0]) {
// first part do not have quote do nothing
@ -2929,7 +2931,7 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
int32_t numOfTotal = tinfo.numOfTags + tinfo.numOfColumns;
for (int32_t i = 0; i < numOfTotal; ++i) {
@ -2974,21 +2976,21 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
dest->relType = src->relType;
if (src->pCond == NULL) {
return 0;
}
size_t s = taosArrayGetSize(src->pCond);
dest->pCond = taosArrayInit(s, sizeof(SCond));
for (int32_t i = 0; i < s; ++i) {
SCond* pCond = taosArrayGet(src->pCond, i);
SCond c = {0};
c.len = pCond->len;
c.uid = pCond->uid;
if (pCond->len > 0) {
assert(pCond->cond != NULL);
c.cond = malloc(c.len);
@ -2998,7 +3000,7 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
memcpy(c.cond, pCond->cond, c.len);
}
taosArrayPush(dest->pCond, &c);
}
@ -3065,14 +3067,14 @@ void tscColCondRelease(SArray** pCond) {
void tscTagCondRelease(STagCond* pTagCond) {
free(pTagCond->tbnameCond.cond);
if (pTagCond->pCond != NULL) {
size_t s = taosArrayGetSize(pTagCond->pCond);
for (int32_t i = 0; i < s; ++i) {
SCond* p = taosArrayGet(pTagCond->pCond, i);
tfree(p->cond);
}
taosArrayDestroy(pTagCond->pCond);
}
@ -3099,7 +3101,7 @@ void tscTagCondRelease(STagCond* pTagCond) {
void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
@ -3107,7 +3109,7 @@ void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo) {
if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) {
SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
int16_t index = pExpr->base.colInfo.colIndex;
pColInfo[i].type = (index != -1) ? pTagSchema[index].type : TSDB_DATA_TYPE_BINARY;
} else {
@ -3132,7 +3134,7 @@ bool tscShouldBeFreed(SSqlObj* pSql) {
if (pSql == NULL || pSql->signature != pSql) {
return false;
}
STscObj* pTscObj = pSql->pTscObj;
if (pSql->pStream != NULL || pTscObj->hbrid == pSql->self || pSql->pSubscription != NULL) {
return false;
@ -3211,7 +3213,7 @@ STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, i
void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
assert(pQueryInfo->fieldsInfo.internalField == NULL);
pQueryInfo->fieldsInfo.internalField = taosArrayInit(4, sizeof(SInternalField));
assert(pQueryInfo->exprList == NULL);
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
@ -3274,7 +3276,7 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) {
pQueryInfo->groupbyExpr.columnInfo = NULL;
pQueryInfo->groupbyExpr.numOfGroupCols = 0;
}
pQueryInfo->tsBuf = tsBufDestroy(pQueryInfo->tsBuf);
pQueryInfo->fillType = 0;
@ -3476,7 +3478,7 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta) {
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables);
tscClearTableMetaInfo(pTableMetaInfo);
@ -3510,11 +3512,13 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
pTableMetaInfo->pTableMeta = pTableMeta;
if (pTableMetaInfo->pTableMeta == NULL) {
pTableMetaInfo->tableMetaSize = 0;
} else {
pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta);
}
pTableMetaInfo->tableMetaCapacity = (size_t)(pTableMetaInfo->tableMetaSize);
if (vgroupList != NULL) {
pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList);
}
@ -3530,7 +3534,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
}
pTableMetaInfo->pVgroupTables = tscVgroupTableInfoDup(pVgroupTables);
pQueryInfo->numOfTables += 1;
return pTableMetaInfo;
}
@ -3738,7 +3742,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
goto _error;
}
}
if (tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond) != 0) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
@ -3751,7 +3755,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
//just make memory sanitizer happy
//refactor later
pNewQueryInfo->fillVal = calloc(1, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
if (pNewQueryInfo->fillVal == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -3799,14 +3803,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, 0);
if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) {
terrno = TSDB_CODE_TSC_APP_ERROR;
goto _error;
}
STableMeta* pPrevTableMeta = tscTableMetaDup(pPrevInfo->pTableMeta);
SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
@ -3826,9 +3830,9 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
goto _error;
}
assert(pNewQueryInfo->numOfTables == 1);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
assert(pFinalInfo->vgroupList != NULL);
}
@ -3837,13 +3841,13 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
if (cmd == TSDB_SQL_SELECT) {
size_t size = taosArrayGetSize(pNewQueryInfo->colList);
tscDebug("0x%"PRIx64" new subquery:0x%"PRIx64", tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ","
"fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64,
pSql->self, pNew->self, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscNumOfExprs(pNewQueryInfo),
size, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pFinalInfo->name), pNewQueryInfo->window.skey,
pNewQueryInfo->window.ekey, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit);
tscPrintSelNodeList(pNew, 0);
} else {
tscDebug("0x%"PRIx64" new sub insertion: %p, vnodeIdx:%d", pSql->self, pNew, pTableMetaInfo->vgroupIndex);
@ -4151,7 +4155,7 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
const char* msgFormat2 = "syntax error near \'%s\' (%s)";
const char* msgFormat3 = "%s";
const char* prefix = "syntax error";
const int32_t BACKWARD_CHAR_STEP = 0;
if (sql == NULL) {
@ -4166,7 +4170,7 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
if (additionalInfo != NULL) {
sprintf(msg, msgFormat2, buf, additionalInfo);
} else {
const char* msgFormat = (0 == strncmp(sql, prefix, strlen(prefix))) ? msgFormat3 : msgFormat1;
sprintf(msg, msgFormat, buf);
}
@ -4224,7 +4228,7 @@ bool hasMoreVnodesToTry(SSqlObj* pSql) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) || (pTableMetaInfo->vgroupList == NULL)) {
return false;
}
int32_t numOfVgroups = pTableMetaInfo->vgroupList->numOfVgroups;
if (pTableMetaInfo->pVgroupTables != NULL) {
numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
@ -4251,7 +4255,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
*/
assert(pRes->numOfRows == 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && !tscHasReachLimitation(pQueryInfo, pRes));
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
int32_t totalVgroups = pTableMetaInfo->vgroupList->numOfVgroups;
if (++pTableMetaInfo->vgroupIndex < totalVgroups) {
tscDebug("0x%"PRIx64" results from vgroup index:%d completed, try next:%d. total vgroups:%d. current numOfRes:%" PRId64, pSql->self,
@ -4272,7 +4276,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
pQueryInfo->limit.offset = pRes->offset;
assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0));
tscDebug("0x%"PRIx64" new query to next vgroup, index:%d, limit:%" PRId64 ", offset:%" PRId64 ", glimit:%" PRId64,
pSql->self, pTableMetaInfo->vgroupIndex, pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->clauseLimit);
@ -4357,7 +4361,7 @@ char* strdup_throw(const char* str) {
int tscSetMgmtEpSetFromCfg(const char *first, const char *second, SRpcCorEpSet *corMgmtEpSet) {
corMgmtEpSet->version = 0;
// init mgmt ip set
SRpcEpSet *mgmtEpSet = &(corMgmtEpSet->epSet);
mgmtEpSet->numOfEps = 0;
mgmtEpSet->inUse = 0;
@ -4532,7 +4536,7 @@ uint32_t tscGetTableMetaSize(STableMeta* pTableMeta) {
if (pTableMeta->tableInfo.numOfColumns >= 0) {
totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags;
}
return sizeof(STableMeta) + totalCols * sizeof(SSchema);
}
@ -4550,24 +4554,36 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
return cMeta;
}
int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf) {
assert(pChild != NULL && buf != NULL);
int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity) {
assert(*ppChild != NULL);
STableMeta* p = buf;
taosHashGetClone(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p);
STableMeta* p = NULL;
size_t sz = 0;
STableMeta* pChild = *ppChild;
taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz);
// tableMeta exists, build child table meta according to the super table meta
// the uid needs to be checked in addition to the general name of the super table.
if (p->id.uid > 0 && pChild->suid == p->id.uid) {
if (p && p->id.uid > 0 && pChild->suid == p->id.uid) {
int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema);
int32_t tableMetaSize = sizeof(STableMeta) + totalBytes;
if (*tableMetaCapacity < tableMetaSize) {
pChild = realloc(pChild, tableMetaSize);
*tableMetaCapacity = (size_t)tableMetaSize;
}
pChild->sversion = p->sversion;
pChild->tversion = p->tversion;
memcpy(&pChild->tableInfo, &p->tableInfo, sizeof(STableComInfo));
int32_t total = pChild->tableInfo.numOfColumns + pChild->tableInfo.numOfTags;
memcpy(pChild->schema, p->schema, totalBytes);
memcpy(pChild->schema, p->schema, sizeof(SSchema) *total);
*ppChild = pChild;
tfree(p);
return TSDB_CODE_SUCCESS;
} else { // super table has been removed, current tableMeta is also expired. remove it here
tfree(p);
taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
return -1;
}
@ -4790,6 +4806,21 @@ int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo) {
return len;
}
int32_t tscGetTagFilterSerializeLen(SQueryInfo* pQueryInfo) {
// serialize tag column query condition
if (pQueryInfo->tagCond.pCond != NULL && taosArrayGetSize(pQueryInfo->tagCond.pCond) > 0) {
STagCond* pTagCond = &pQueryInfo->tagCond;
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->id.uid);
if (pCond != NULL && pCond->cond != NULL) {
return pCond->len;
}
}
return 0;
}
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr) {
memset(pQueryAttr, 0, sizeof(SQueryAttr));
@ -5082,4 +5113,4 @@ void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) {
taosHashRemove(tscTableMetaMap, fname, len);
tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap));
}
}

View File

@ -325,7 +325,9 @@ typedef struct SDataCol {
#define isAllRowsNull(pCol) ((pCol)->len == 0)
static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints);
int tdAllocMemForCol(SDataCol *pCol, int maxPoints);
void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints);
void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints);
void dataColSetOffset(SDataCol *pCol, int nEle);
@ -358,12 +360,10 @@ typedef struct {
int maxRowSize;
int maxCols; // max number of columns
int maxPoints; // max number of points
int bufSize;
int numOfRows;
int numOfCols; // Total number of cols
int sversion; // TODO: set sversion
void * buf;
SDataCol *cols;
} SDataCols;
View File
@ -22,6 +22,29 @@
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
int limit2, int tRows, bool forceSetNull);
//TODO: change caller to use return val
int tdAllocMemForCol(SDataCol *pCol, int maxPoints) {
int spaceNeeded = pCol->bytes * maxPoints;
if(IS_VAR_DATA_TYPE(pCol->type)) {
spaceNeeded += sizeof(VarDataOffsetT) * maxPoints;
}
if(pCol->spaceSize < spaceNeeded) {
void* ptr = realloc(pCol->pData, spaceNeeded);
if(ptr == NULL) {
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)pCol->spaceSize,
strerror(errno));
return -1;
} else {
pCol->pData = ptr;
pCol->spaceSize = spaceNeeded;
}
}
if(IS_VAR_DATA_TYPE(pCol->type)) {
pCol->dataOff = POINTER_SHIFT(pCol->pData, pCol->bytes * maxPoints);
}
return 0;
}
/**
* Duplicate the schema and return a new object
*/
@ -207,24 +230,13 @@ SMemRow tdMemRowDup(SMemRow row) {
return trow;
}
void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints) {
void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) {
pDataCol->type = colType(pCol);
pDataCol->colId = colColId(pCol);
pDataCol->bytes = colBytes(pCol);
pDataCol->offset = colOffset(pCol) + TD_DATA_ROW_HEAD_SIZE;
pDataCol->len = 0;
if (IS_VAR_DATA_TYPE(pDataCol->type)) {
pDataCol->dataOff = (VarDataOffsetT *)(*pBuf);
pDataCol->pData = POINTER_SHIFT(*pBuf, sizeof(VarDataOffsetT) * maxPoints);
pDataCol->spaceSize = pDataCol->bytes * maxPoints;
*pBuf = POINTER_SHIFT(*pBuf, pDataCol->spaceSize + sizeof(VarDataOffsetT) * maxPoints);
} else {
pDataCol->spaceSize = pDataCol->bytes * maxPoints;
pDataCol->dataOff = NULL;
pDataCol->pData = *pBuf;
*pBuf = POINTER_SHIFT(*pBuf, pDataCol->spaceSize);
}
}
// value from timestamp should be TKEY here instead of TSKEY
void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) {
@ -239,6 +251,8 @@ void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxP
if (numOfRows > 0) {
// Find the first non-null value and fill all previous values as NULL
dataColSetNEleNull(pCol, numOfRows, maxPoints);
} else {
tdAllocMemForCol(pCol, maxPoints);
}
}
@ -257,13 +271,14 @@ void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxP
}
bool isNEleNull(SDataCol *pCol, int nEle) {
if(isAllRowsNull(pCol)) return true;
for (int i = 0; i < nEle; i++) {
if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
}
return true;
}
FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) {
static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->dataOff[index] = pCol->len;
char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
@ -276,6 +291,7 @@ FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) {
}
void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) {
tdAllocMemForCol(pCol, maxPoints);
if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->len = 0;
@ -319,56 +335,63 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
tdFreeDataCols(pCols);
return NULL;
}
int i;
for(i = 0; i < maxCols; i++) {
pCols->cols[i].spaceSize = 0;
pCols->cols[i].len = 0;
pCols->cols[i].pData = NULL;
pCols->cols[i].dataOff = NULL;
}
pCols->maxCols = maxCols;
}
pCols->maxRowSize = maxRowSize;
pCols->bufSize = maxRowSize * maxRows;
if (pCols->bufSize > 0) {
pCols->buf = malloc(pCols->bufSize);
if (pCols->buf == NULL) {
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols,
strerror(errno));
tdFreeDataCols(pCols);
return NULL;
}
}
return pCols;
}
int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) {
if (schemaNCols(pSchema) > pCols->maxCols) {
int i;
int oldMaxCols = pCols->maxCols;
if (schemaNCols(pSchema) > oldMaxCols) {
pCols->maxCols = schemaNCols(pSchema);
pCols->cols = (SDataCol *)realloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols);
if (pCols->cols == NULL) return -1;
for(i = oldMaxCols; i < pCols->maxCols; i++) {
pCols->cols[i].pData = NULL;
pCols->cols[i].dataOff = NULL;
pCols->cols[i].spaceSize = 0;
}
}
if (schemaTLen(pSchema) > pCols->maxRowSize) {
pCols->maxRowSize = schemaTLen(pSchema);
pCols->bufSize = schemaTLen(pSchema) * pCols->maxPoints;
pCols->buf = realloc(pCols->buf, pCols->bufSize);
if (pCols->buf == NULL) return -1;
}
tdResetDataCols(pCols);
pCols->numOfCols = schemaNCols(pSchema);
void *ptr = pCols->buf;
for (int i = 0; i < schemaNCols(pSchema); i++) {
dataColInit(pCols->cols + i, schemaColAt(pSchema, i), &ptr, pCols->maxPoints);
ASSERT((char *)ptr - (char *)(pCols->buf) <= pCols->bufSize);
for (i = 0; i < schemaNCols(pSchema); i++) {
dataColInit(pCols->cols + i, schemaColAt(pSchema, i), pCols->maxPoints);
}
return 0;
}
SDataCols *tdFreeDataCols(SDataCols *pCols) {
int i;
if (pCols) {
tfree(pCols->buf);
tfree(pCols->cols);
if(pCols->cols) {
int maxCols = pCols->maxCols;
for(i = 0; i < maxCols; i++) {
SDataCol *pCol = &pCols->cols[i];
tfree(pCol->pData);
}
free(pCols->cols);
pCols->cols = NULL;
}
free(pCols);
}
return NULL;
@ -388,21 +411,14 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
pRet->cols[i].bytes = pDataCols->cols[i].bytes;
pRet->cols[i].offset = pDataCols->cols[i].offset;
pRet->cols[i].spaceSize = pDataCols->cols[i].spaceSize;
pRet->cols[i].pData = (void *)((char *)pRet->buf + ((char *)(pDataCols->cols[i].pData) - (char *)(pDataCols->buf)));
if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
ASSERT(pDataCols->cols[i].dataOff != NULL);
pRet->cols[i].dataOff =
(int32_t *)((char *)pRet->buf + ((char *)(pDataCols->cols[i].dataOff) - (char *)(pDataCols->buf)));
}
if (keepData) {
pRet->cols[i].len = pDataCols->cols[i].len;
if (pDataCols->cols[i].len > 0) {
tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints);
pRet->cols[i].len = pDataCols->cols[i].len;
memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len);
if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
memcpy(pRet->cols[i].dataOff, pDataCols->cols[i].dataOff, sizeof(VarDataOffsetT) * pDataCols->maxPoints);
int dataOffSize = sizeof(VarDataOffsetT) * pDataCols->maxPoints;
memcpy(pRet->cols[i].dataOff, pDataCols->cols[i].dataOff, dataOffSize);
}
}
}
@ -426,40 +442,27 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
int rcol = 0;
int dcol = 0;
if (dataRowDeleted(row)) {
for (; dcol < pCols->numOfCols; dcol++) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (dcol == 0) {
dataColAppendVal(pDataCol, dataRowTuple(row), pCols->numOfRows, pCols->maxPoints);
} else {
dataColSetNullAt(pDataCol, pCols->numOfRows);
}
while (dcol < pCols->numOfCols) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
dcol++;
continue;
}
} else {
while (dcol < pCols->numOfCols) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
// dataColSetNullAt(pDataCol, pCols->numOfRows);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
dcol++;
continue;
}
STColumn *pRowCol = schemaColAt(pSchema, rcol);
if (pRowCol->colId == pDataCol->colId) {
void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
dcol++;
rcol++;
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
if(forceSetNull) {
//dataColSetNullAt(pDataCol, pCols->numOfRows);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
}
dcol++;
STColumn *pRowCol = schemaColAt(pSchema, rcol);
if (pRowCol->colId == pDataCol->colId) {
void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
dcol++;
rcol++;
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
if(forceSetNull) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
}
dcol++;
}
}
pCols->numOfRows++;
@ -471,43 +474,30 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
int rcol = 0;
int dcol = 0;
if (kvRowDeleted(row)) {
for (; dcol < pCols->numOfCols; dcol++) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (dcol == 0) {
dataColAppendVal(pDataCol, kvRowValues(row), pCols->numOfRows, pCols->maxPoints);
} else {
dataColSetNullAt(pDataCol, pCols->numOfRows);
}
int nRowCols = kvRowNCols(row);
while (dcol < pCols->numOfCols) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
++dcol;
continue;
}
} else {
int nRowCols = kvRowNCols(row);
while (dcol < pCols->numOfCols) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
// dataColSetNullAt(pDataCol, pCols->numOfRows);
SColIdx *colIdx = kvRowColIdxAt(row, rcol);
if (colIdx->colId == pDataCol->colId) {
void *value = tdGetKvRowDataOfCol(row, colIdx->offset);
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
++dcol;
++rcol;
} else if (colIdx->colId < pDataCol->colId) {
++rcol;
} else {
if (forceSetNull) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
++dcol;
continue;
}
SColIdx *colIdx = kvRowColIdxAt(row, rcol);
if (colIdx->colId == pDataCol->colId) {
void *value = tdGetKvRowDataOfCol(row, colIdx->offset);
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
++dcol;
++rcol;
} else if (colIdx->colId < pDataCol->colId) {
++rcol;
} else {
if (forceSetNull) {
// dataColSetNullAt(pDataCol, pCols->numOfRows);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
}
++dcol;
}
++dcol;
}
}
pCols->numOfRows++;
View File
@ -1 +1 @@
Subproject commit 4a4d79099b076b8ff12d5b4fdbcba54049a6866d
Subproject commit a44ec1ca493ad01b2bf825b6418f69e11f548206
View File
@ -122,6 +122,7 @@
<exclude>**/TSDBJNIConnectorTest.java</exclude>
<exclude>**/TaosInfoMonitorTest.java</exclude>
<exclude>**/UnsignedNumberJniTest.java</exclude>
<exclude>**/TimeZoneTest.java</exclude>
</excludes>
<testFailureIgnore>true</testFailureIgnore>
</configuration>
View File
@ -77,8 +77,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
}
public boolean supportsMixedCaseIdentifiers() throws SQLException {
// whether identifiers of objects such as database and table are stored in mixed case
return false;
return false; // whether identifiers of objects such as database and table are stored in mixed case
}
public boolean storesUpperCaseIdentifiers() throws SQLException {
@ -514,7 +513,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col6 = new ColumnMetaData();
col6.setColIndex(colIndex);
col6.setColName("TYPE_CAT");
col6.setColType(Types.NCHAR);
col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col6;
}
@ -522,7 +521,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col7 = new ColumnMetaData();
col7.setColIndex(colIndex);
col7.setColName("TYPE_SCHEM");
col7.setColType(Types.NCHAR);
col7.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col7;
}
@ -530,7 +529,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col8 = new ColumnMetaData();
col8.setColIndex(colIndex);
col8.setColName("TYPE_NAME");
col8.setColType(Types.NCHAR);
col8.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col8;
}
@ -538,7 +537,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col9 = new ColumnMetaData();
col9.setColIndex(colIndex);
col9.setColName("SELF_REFERENCING_COL_NAME");
col9.setColType(Types.NCHAR);
col9.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col9;
}
@ -546,7 +545,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col10 = new ColumnMetaData();
col10.setColIndex(colIndex);
col10.setColName("REF_GENERATION");
col10.setColType(Types.NCHAR);
col10.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col10;
}
@ -592,7 +591,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(colIndex);
col4.setColName("TABLE_TYPE");
col4.setColType(Types.NCHAR);
col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col4;
}
@ -734,7 +733,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col1 = new ColumnMetaData();
col1.setColIndex(colIndex);
col1.setColName("TABLE_CAT");
col1.setColType(Types.NCHAR);
col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col1;
}
@ -742,7 +741,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col2 = new ColumnMetaData();
col2.setColIndex(colIndex);
col2.setColName("TABLE_SCHEM");
col2.setColType(Types.NCHAR);
col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col2;
}
@ -751,7 +750,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
col3.setColIndex(colIndex);
col3.setColName("TABLE_NAME");
col3.setColSize(193);
col3.setColType(Types.NCHAR);
col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col3;
}
@ -760,7 +759,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
col4.setColIndex(colIndex);
col4.setColName("COLUMN_NAME");
col4.setColSize(65);
col4.setColType(Types.NCHAR);
col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col4;
}
@ -768,7 +767,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col5 = new ColumnMetaData();
col5.setColIndex(colIndex);
col5.setColName("DATA_TYPE");
col5.setColType(Types.INTEGER);
col5.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col5;
}
@ -776,7 +775,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col7 = new ColumnMetaData();
col7.setColIndex(7);
col7.setColName("COLUMN_SIZE");
col7.setColType(Types.INTEGER);
col7.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col7;
}
@ -791,7 +790,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col9 = new ColumnMetaData();
col9.setColIndex(9);
col9.setColName("DECIMAL_DIGITS");
col9.setColType(Types.INTEGER);
col9.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col9;
}
@ -799,7 +798,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col10 = new ColumnMetaData();
col10.setColIndex(10);
col10.setColName("NUM_PREC_RADIX");
col10.setColType(Types.INTEGER);
col10.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col10;
}
@ -807,7 +806,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col11 = new ColumnMetaData();
col11.setColIndex(11);
col11.setColName("NULLABLE");
col11.setColType(Types.INTEGER);
col11.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col11;
}
@ -815,7 +814,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col12 = new ColumnMetaData();
col12.setColIndex(colIndex);
col12.setColName("REMARKS");
col12.setColType(Types.NCHAR);
col12.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col12;
}
@ -823,7 +822,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col13 = new ColumnMetaData();
col13.setColIndex(13);
col13.setColName("COLUMN_DEF");
col13.setColType(Types.NCHAR);
col13.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col13;
}
@ -831,7 +830,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col14 = new ColumnMetaData();
col14.setColIndex(14);
col14.setColName("SQL_DATA_TYPE");
col14.setColType(Types.INTEGER);
col14.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col14;
}
@ -839,7 +838,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col15 = new ColumnMetaData();
col15.setColIndex(15);
col15.setColName("SQL_DATETIME_SUB");
col15.setColType(Types.INTEGER);
col15.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col15;
}
@ -847,7 +846,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col16 = new ColumnMetaData();
col16.setColIndex(16);
col16.setColName("CHAR_OCTET_LENGTH");
col16.setColType(Types.INTEGER);
col16.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col16;
}
@ -855,7 +854,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col17 = new ColumnMetaData();
col17.setColIndex(17);
col17.setColName("ORDINAL_POSITION");
col17.setColType(Types.INTEGER);
col17.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col17;
}
@ -863,7 +862,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col18 = new ColumnMetaData();
col18.setColIndex(18);
col18.setColName("IS_NULLABLE");
col18.setColType(Types.NCHAR);
col18.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col18;
}
@ -871,7 +870,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col19 = new ColumnMetaData();
col19.setColIndex(19);
col19.setColName("SCOPE_CATALOG");
col19.setColType(Types.NCHAR);
col19.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col19;
}
@ -879,7 +878,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col20 = new ColumnMetaData();
col20.setColIndex(20);
col20.setColName("SCOPE_SCHEMA");
col20.setColType(Types.NCHAR);
col20.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col20;
}
@ -887,7 +886,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col21 = new ColumnMetaData();
col21.setColIndex(21);
col21.setColName("SCOPE_TABLE");
col21.setColType(Types.NCHAR);
col21.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col21;
}
@ -903,7 +902,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col23 = new ColumnMetaData();
col23.setColIndex(23);
col23.setColName("IS_AUTOINCREMENT");
col23.setColType(Types.NCHAR);
col23.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col23;
}
@ -911,7 +910,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col24 = new ColumnMetaData();
col24.setColIndex(24);
col24.setColName("IS_GENERATEDCOLUMN");
col24.setColType(Types.NCHAR);
col24.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col24;
}
@ -1205,7 +1204,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col5 = new ColumnMetaData();
col5.setColIndex(colIndex);
col5.setColName("KEY_SEQ");
col5.setColType(Types.SMALLINT);
col5.setColType(TSDBConstants.TSDB_DATA_TYPE_SMALLINT);
return col5;
}
@ -1213,7 +1212,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col6 = new ColumnMetaData();
col6.setColIndex(colIndex);
col6.setColName("PK_NAME");
col6.setColType(Types.NCHAR);
col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col6;
}
@ -1275,7 +1274,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(colIndex);
col4.setColName("SUPERTABLE_NAME");
col4.setColType(Types.NCHAR);
col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col4;
}
View File
@ -16,7 +16,7 @@ package com.taosdata.jdbc;
public class ColumnMetaData {
private int colType = 0;
private int colType = 0; //taosType
private String colName = null;
private int colSize = -1;
private int colIndex = 0;
View File
@ -68,71 +68,61 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet {
@Override
public String getString(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType);
return rowCursor.getString(columnIndex, nativeType);
return rowCursor.getString(columnIndex, colType);
}
@Override
public boolean getBoolean(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType);
return rowCursor.getBoolean(columnIndex, nativeType);
return rowCursor.getBoolean(columnIndex, colType);
}
@Override
public byte getByte(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType);
return (byte) rowCursor.getInt(columnIndex, nativeType);
return (byte) rowCursor.getInt(columnIndex, colType);
}
@Override
public short getShort(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType);
return (short) rowCursor.getInt(columnIndex, nativeType);
return (short) rowCursor.getInt(columnIndex, colType);
}
@Override
public int getInt(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType);
return rowCursor.getInt(columnIndex, nativeType);
return rowCursor.getInt(columnIndex, colType);
}
@Override
public long getLong(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType);
return rowCursor.getLong(columnIndex, nativeType);
return rowCursor.getLong(columnIndex, colType);
}
@Override
public float getFloat(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType);
return rowCursor.getFloat(columnIndex, nativeType);
return rowCursor.getFloat(columnIndex, colType);
}
@Override
public double getDouble(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType);
return rowCursor.getDouble(columnIndex, nativeType);
return rowCursor.getDouble(columnIndex, colType);
}
@Override
public byte[] getBytes(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType);
return (rowCursor.getString(columnIndex, nativeType)).getBytes();
return (rowCursor.getString(columnIndex, colType)).getBytes();
}
@Override
public Timestamp getTimestamp(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType);
return rowCursor.getTimestamp(columnIndex, nativeType);
return rowCursor.getTimestamp(columnIndex, colType);
}
@Override
@ -158,8 +148,7 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet {
@Override
public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType);
double value = rowCursor.getDouble(columnIndex, nativeType);
double value = rowCursor.getDouble(columnIndex, colType);
return new BigDecimal(value);
}
View File
@ -129,8 +129,9 @@ public abstract class TSDBConstants {
return Types.TIMESTAMP;
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return Types.NCHAR;
default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type: " + taosType + " in tdengine");
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
}
public static String taosType2JdbcTypeName(int taosType) throws SQLException {
@ -160,7 +161,7 @@ public abstract class TSDBConstants {
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type: " + taosType + " in tdengine");
}
}
@ -187,7 +188,7 @@ public abstract class TSDBConstants {
case Types.NCHAR:
return TSDBConstants.TSDB_DATA_TYPE_NCHAR;
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE, "unknown sql type: " + jdbcType + " in tdengine");
}
public static String jdbcType2TaosTypeName(int jdbcType) throws SQLException {
@ -213,7 +214,7 @@ public abstract class TSDBConstants {
case Types.NCHAR:
return "NCHAR";
default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE, "unknown sql type: " + jdbcType + " in tdengine");
}
}
View File
@ -80,7 +80,8 @@ public class TSDBJNIConnector {
this.taos = this.connectImp(host, port, dbName, user, password);
if (this.taos == TSDBConstants.JNI_NULL_POINTER) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
String errMsg = this.getErrMsg(0);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, errMsg);
}
// invoke connectImp only here
taosInfo.conn_open_increment();
View File
@ -110,7 +110,7 @@ public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaD
ColumnMetaData columnMetaData = this.colMetaDataList.get(column - 1);
switch (columnMetaData.getColType()) {
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
return 5;
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
View File
@ -34,9 +34,8 @@ public class QueryDataTest {
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(64))";
statement.executeUpdate(createTableSql);
} catch (SQLException e) {
return;
e.printStackTrace();
}
}
View File
@ -0,0 +1,71 @@
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBDriver;
import org.junit.Test;
import java.sql.*;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.util.Properties;
public class TimeZoneTest {
private String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
@Test
public void javaTimeZone() {
LocalDateTime localDateTime = LocalDateTime.of(1970, 1, 1, 0, 0, 0);
Instant instant = localDateTime.atZone(ZoneId.of("UTC-8")).toInstant();
System.out.println("UTC-8: " + instant.getEpochSecond() + "," + instant);
instant = localDateTime.atZone(ZoneId.of("UT")).toInstant();
System.out.println("UTC: " + instant.getEpochSecond() + "," + instant);
instant = localDateTime.atZone(ZoneId.of("UTC+8")).toInstant();
System.out.println("UTC+8: " + instant.getEpochSecond() + "," + instant);
}
@Test
public void taosTimeZone() {
// given
Properties props = new Properties();
props.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
// when and then
try (Connection connection = DriverManager.getConnection(url, props)) {
Statement stmt = connection.createStatement();
stmt.execute("drop database if exists timezone_test");
stmt.execute("create database if not exists timezone_test keep 365000");
stmt.execute("use timezone_test");
stmt.execute("create table weather(ts timestamp, temperature float)");
stmt.execute("insert into timezone_test.weather(ts, temperature) values('1970-01-01 00:00:00', 1.0)");
ResultSet rs = stmt.executeQuery("select * from timezone_test.weather");
while (rs.next()) {
Timestamp ts = rs.getTimestamp("ts");
System.out.println("ts: " + ts.getTime() + "," + ts);
}
stmt.execute("insert into timezone_test.weather(ts, temperature, humidity) values('1970-01-02 00:00:00', 1.0, 2.0)");
rs = stmt.executeQuery("select * from timezone_test.weather");
while (rs.next()) {
Timestamp ts = rs.getTimestamp("ts");
System.out.println("ts: " + ts.getTime() + "," + ts);
}
stmt.execute("drop database if exists timezone_test");
stmt.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
View File
@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package main
import (
View File
@ -1,6 +1,7 @@
# TDengine Connector for Python
[TDengine] connector for Python enables python programs to access TDengine, using an API which is compliant with the Python DB API 2.0 (PEP-249). It uses TDengine C client library for client server communications.
[TDengine](https://github.com/taosdata/TDengine) connector for Python enables python programs to access TDengine,
using an API which is compliant with the Python DB API 2.0 (PEP-249). It uses TDengine C client library for client server communications.
## Install
@ -11,8 +12,417 @@ pip install ./TDengine/src/connector/python
## Source Code
[TDengine] connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python).
[TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python).
## License - AGPL
## Examples
### Query with PEP-249 API
```python
import taos
conn = taos.connect()
cursor = conn.cursor()
cursor.execute("show databases")
results = cursor.fetchall()
for row in results:
    print(row)
cursor.close()
conn.close()
```
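For completeness, the same PEP-249 flow can label each row with its column names via `cursor.description`, the metadata every DB API 2.0 cursor carries after a query; a minimal sketch, not an official example from this connector:

```python
import taos

conn = taos.connect()
cursor = conn.cursor()
cursor.execute("show databases")
# cursor.description holds one 7-item sequence per column (PEP-249);
# the first item of each entry is the column name
names = [meta[0] for meta in cursor.description]
for row in cursor.fetchall():
    print(dict(zip(names, row)))
cursor.close()
conn.close()
```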
### Query with objective API
```python
import taos
conn = taos.connect()
conn.exec("create database if not exists pytest")
result = conn.query("show databases")
num_of_fields = result.field_count
for field in result.fields:
    print(field)
for row in result:
    print(row)
result.close()
conn.exec("drop database pytest")
conn.close()
```
### Query with async API
```python
from taos import *
from ctypes import *
import time
def fetch_callback(p_param, p_result, num_of_rows):
    print("fetched ", num_of_rows, "rows")
    p = cast(p_param, POINTER(Counter))
    result = TaosResult(p_result)

    if num_of_rows == 0:
        print("fetching completed")
        p.contents.done = True
        result.close()
        return
    if num_of_rows < 0:
        p.contents.done = True
        result.check_error(num_of_rows)
        result.close()
        return None

    for row in result.rows_iter(num_of_rows):
        # print(row)
        pass
    p.contents.count += result.row_count
    result.fetch_rows_a(fetch_callback, p_param)


def query_callback(p_param, p_result, code):
    # type: (c_void_p, c_void_p, c_int) -> None
    if p_result == None:
        return
    result = TaosResult(p_result)
    if code == 0:
        result.fetch_rows_a(fetch_callback, p_param)
    result.check_error(code)


class Counter(Structure):
    _fields_ = [("count", c_int), ("done", c_bool)]

    def __str__(self):
        return "{ count: %d, done: %s }" % (self.count, self.done)


def test_query(conn):
    # type: (TaosConnection) -> None
    counter = Counter(count=0)
    conn.query_a("select * from log.log", query_callback, byref(counter))

    while not counter.done:
        print("wait query callback")
        time.sleep(1)
    print(counter)
    conn.close()


if __name__ == "__main__":
    test_query(connect())
```
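The polling loop above is only to keep the demo short; because the callbacks fire on the client's internal thread, the caller can block on a standard-library `threading.Event` instead. A sketch of that wiring, where the `done` event and the `on_fetch_finished` helper are hypothetical additions rather than connector API:

```python
import threading

done = threading.Event()

def on_fetch_finished(num_of_rows):
    # call this from fetch_callback once num_of_rows <= 0
    done.set()

# caller side, after conn.query_a(...):
#     done.wait(timeout=30)  # block until the fetch callback signals completion
```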
### Statement API - Bind row after row
```python
from taos import *
conn = connect()
dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
su smallint unsigned, iu int unsigned, bu bigint unsigned, \
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_bind_params(16)
params[0].timestamp(1626861392589)
params[1].bool(True)
params[2].null()
params[3].tinyint(2)
params[4].smallint(3)
params[5].int(4)
params[6].bigint(5)
params[7].tinyint_unsigned(6)
params[8].smallint_unsigned(7)
params[9].int_unsigned(8)
params[10].bigint_unsigned(9)
params[11].float(10.1)
params[12].double(10.11)
params[13].binary("hello")
params[14].nchar("stmt")
params[15].timestamp(1626861392589)
stmt.bind_param(params)
params[0].timestamp(1626861392590)
params[15].null()
stmt.bind_param(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 2
result.close()
result = conn.query("select * from log")
for row in result:
    print(row)
result.close()
stmt.close()
conn.close()
```
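The example binds two rows before a single `execute()`; the same pattern extends to a loop. A short sketch continuing the example above (it reuses the `stmt` and `params` objects; the base timestamp is made up for illustration):

```python
# sketch: queue several rows with bind_param, then flush them in one execute
base_ts = 1626861392600  # hypothetical starting timestamp
for i in range(10):
    params[0].timestamp(base_ts + i)  # only the timestamp column changes
    stmt.bind_param(params)           # queues one more row
stmt.execute()                        # sends all queued rows in one round trip
```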
### Statement API - Bind multi rows
```python
from taos import *
conn = connect()
dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
su smallint unsigned, iu int unsigned, bu bigint unsigned, \
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
stmt.bind_param_batch(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 3
result.close()
result = conn.query("select * from log")
for row in result:
    print(row)
result.close()
stmt.close()
conn.close()
```
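As the inline comment above notes, `-128` is the raw NULL sentinel for `tinyint`; for ordinary use, a `None` entry in any of the typed lists is bound as SQL NULL. A minimal sketch isolating that behaviour for a single column, using the same `new_multi_binds` API:

```python
from taos import new_multi_binds

# sketch: one int column bound for three rows, the middle row NULL
params = new_multi_binds(16)
params[5].int([3, None, 5])  # binds 3, NULL, 5 for column ii
```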
### Subscribe
```python
import taos
from random import random

conn = taos.connect()
dbname = "pytest_taos_subscribe_callback"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)")
for i in range(10):
    conn.exec("insert into log values(now, %d)" % i)

sub = conn.subscribe(True, "test", "select * from log", 1000)

print("# consume from begin")
for ts, n in sub.consume():
    print(ts, n)

print("# consume new data")
for i in range(5):
    conn.exec("insert into log values(now, %d)(now+1s, %d)" % (i, i))
    result = sub.consume()
    for ts, n in result:
        print(ts, n)

print("# consume with a stop condition")
for i in range(10):
    conn.exec("insert into log values(now, %d)" % int(random() * 10))
    result = sub.consume()
    try:
        ts, n = next(result)
        print(ts, n)
        if n > 5:
            result.stop_query()
            print("## stopped")
            break
    except StopIteration:
        continue

sub.close()

conn.exec("drop database if exists %s" % dbname)
conn.close()
```
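The first argument of `conn.subscribe` controls whether consumption restarts from the beginning (`True` above). A sketch of reattaching to the same topic with `restart=False`, so consumption resumes where it previously stopped (this assumes the database and topic created in the example still exist):

```python
import taos

conn = taos.connect()
conn.select_db("pytest_taos_subscribe_callback")
# restart=False: resume the topic "test" from its last consumed position
sub = conn.subscribe(False, "test", "select * from log", 1000)
for ts, n in sub.consume():
    print(ts, n)
sub.close()
conn.close()
```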
### Subscribe asynchronously with callback
```python
from taos import *
from ctypes import *
import time


def subscribe_callback(p_sub, p_result, p_param, errno):
    # type: (c_void_p, c_void_p, c_void_p, c_int) -> None
    print("# fetch in callback")
    result = TaosResult(p_result)
    result.check_error(errno)
    for row in result.rows_iter():
        ts, n = row()
        print(ts, n)


def test_subscribe_callback(conn):
    # type: (TaosConnection) -> None
    dbname = "pytest_taos_subscribe_callback"
    try:
        conn.exec("drop database if exists %s" % dbname)
        conn.exec("create database if not exists %s" % dbname)
        conn.select_db(dbname)
        conn.exec("create table if not exists log(ts timestamp, n int)")

        print("# subscribe with callback")
        sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)

        for i in range(10):
            conn.exec("insert into log values(now, %d)" % i)
            time.sleep(0.7)
        sub.close()

        conn.exec("drop database if exists %s" % dbname)
        conn.close()
    except Exception as err:
        conn.exec("drop database if exists %s" % dbname)
        conn.close()
        raise err


if __name__ == "__main__":
    test_subscribe_callback(connect())
```
### Stream
```python
from taos import *
from ctypes import *
import time
def stream_callback(p_param, p_result, p_row):
# type: (c_void_p, c_void_p, c_void_p) -> None
if p_result is None or p_row is None:
return
result = TaosResult(p_result)
row = TaosRow(result, p_row)
try:
ts, count = row()
p = cast(p_param, POINTER(Counter))
p.contents.count += count
print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count))
except Exception as err:
print(err)
raise err
class Counter(Structure):
_fields_ = [
("count", c_int),
]
def __str__(self):
return "%d" % self.count
def test_stream(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_stream"
try:
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)")
result = conn.query("select count(*) from log interval(5s)")
assert result.field_count == 2
counter = Counter()
counter.count = 0
stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))
for _ in range(0, 20):
conn.exec("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
time.sleep(2)
stream.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.exec("drop database if exists %s" % dbname)
conn.close()
raise err
if __name__ == "__main__":
test_stream(connect())
```
### Insert with line protocol
```python
import taos
conn = taos.connect()
dbname = "pytest_line"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000ns',
'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns',
'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)
print("inserted")
lines = [
'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)
result = conn.query("show tables")
for row in result:
print(row)
result.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
```
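Malformed lines are rejected by the server, and the connector surfaces this through the error hierarchy in `taos.error`. A minimal sketch of defensive usage, assuming the `LinesError` class that wraps `taos_insert_lines` failures (the broken line below is deliberate):

```python
import taos
from taos.error import LinesError

conn = taos.connect()
conn.execute("create database if not exists pytest_line precision 'us'")
conn.select_db("pytest_line")
try:
    # deliberately malformed: the timestamp field is not a number
    conn.insert_lines(['st,t1=3i64 c1=3i64 not-a-timestamp'])
except LinesError as err:
    print(err.errno, err)
finally:
    conn.execute("drop database if exists pytest_line")
    conn.close()
```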
## License - AGPL-3.0
Keep same with [TDengine](https://github.com/taosdata/TDengine).

View File

@ -0,0 +1,50 @@
# encoding:UTF-8
from taos import *
conn = connect()
dbname = "pytest_taos_stmt_multi"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
su smallint unsigned, iu int unsigned, bu bigint unsigned, \
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
stmt.bind_param_batch(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 3
result.close()
result = conn.query("select * from log")
for row in result:
print(row)
result.close()
stmt.close()
conn.close()

View File

@ -0,0 +1,57 @@
from taos import *
conn = connect()
dbname = "pytest_taos_stmt"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
su smallint unsigned, iu int unsigned, bu bigint unsigned, \
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_bind_params(16)
params[0].timestamp(1626861392589)
params[1].bool(True)
params[2].null()
params[3].tinyint(2)
params[4].smallint(3)
params[5].int(4)
params[6].bigint(5)
params[7].tinyint_unsigned(6)
params[8].smallint_unsigned(7)
params[9].int_unsigned(8)
params[10].bigint_unsigned(9)
params[11].float(10.1)
params[12].double(10.11)
params[13].binary("hello")
params[14].nchar("stmt")
params[15].timestamp(1626861392589)
stmt.bind_param(params)
params[0].timestamp(1626861392590)
params[15].null()
stmt.bind_param(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 2
# Closing explicitly is optional; close() also runs when the object is garbage collected
# result.close()
result = conn.query("select * from log")
for row in result:
print(row)
# Closing explicitly is optional; close() also runs when the object is garbage collected
# result.close()
# stmt.close()
# conn.close()

View File

@ -0,0 +1,22 @@
import taos
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000ns',
]
conn.insert_lines(lines)
print("inserted")
conn.insert_lines(lines)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)

View File

@ -0,0 +1,9 @@
import taos
conn = taos.connect()
cursor = conn.cursor()
cursor.execute("show databases")
results = cursor.fetchall()
for row in results:
print(row)

View File

@ -0,0 +1,62 @@
from taos import *
from ctypes import *
import time
def fetch_callback(p_param, p_result, num_of_rows):
print("fetched ", num_of_rows, "rows")
p = cast(p_param, POINTER(Counter))
result = TaosResult(p_result)
if num_of_rows == 0:
print("fetching completed")
p.contents.done = True
# the result must be closed explicitly once fetching completes, otherwise an error occurs
result.close()
return
if num_of_rows < 0:
p.contents.done = True
result.check_error(num_of_rows)
result.close()
return None
for row in result.rows_iter(num_of_rows):
# print(row)
pass
p.contents.count += result.row_count
result.fetch_rows_a(fetch_callback, p_param)
def query_callback(p_param, p_result, code):
# type: (c_void_p, c_void_p, c_int) -> None
if p_result is None:
return
result = TaosResult(p_result)
if code == 0:
result.fetch_rows_a(fetch_callback, p_param)
result.check_error(code)
# explicitly close the result when the query failed
result.close()
class Counter(Structure):
_fields_ = [("count", c_int), ("done", c_bool)]
def __str__(self):
return "{ count: %d, done: %s }" % (self.count, self.done)
def test_query(conn):
# type: (TaosConnection) -> None
counter = Counter(count=0)
conn.query_a("select * from log.log", query_callback, byref(counter))
while not counter.done:
print("wait query callback")
time.sleep(1)
print(counter)
# conn.close()
if __name__ == "__main__":
test_query(connect())

View File

@ -0,0 +1,12 @@
import taos
conn = taos.connect()
conn.execute("create database if not exists pytest")
result = conn.query("show databases")
num_of_fields = result.field_count
for field in result.fields:
print(field)
for row in result:
print(row)
conn.execute("drop database pytest")

View File

@ -0,0 +1,43 @@
from taos import *
from ctypes import *
import time
def subscribe_callback(p_sub, p_result, p_param, errno):
# type: (c_void_p, c_void_p, c_void_p, c_int) -> None
print("# fetch in callback")
result = TaosResult(p_result)
result.check_error(errno)
for row in result.rows_iter():
ts, n = row()
print(ts, n)
def test_subscribe_callback(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_subscribe_callback"
try:
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute("create table if not exists log(ts timestamp, n int)")
print("# subscribe with callback")
sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)
for i in range(10):
conn.execute("insert into log values(now, %d)" % i)
time.sleep(0.7)
# sub.close()
conn.execute("drop database if exists %s" % dbname)
# conn.close()
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
# conn.close()
raise err
if __name__ == "__main__":
test_subscribe_callback(connect())

View File

@ -0,0 +1,53 @@
import taos
import random
conn = taos.connect()
dbname = "pytest_taos_subscribe"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute("create table if not exists log(ts timestamp, n int)")
for i in range(10):
conn.execute("insert into log values(now, %d)" % i)
sub = conn.subscribe(False, "test", "select * from log", 1000)
print("# consume from begin")
for ts, n in sub.consume():
print(ts, n)
print("# consume new data")
for i in range(5):
conn.execute("insert into log values(now, %d)(now+1s, %d)" % (i, i))
result = sub.consume()
for ts, n in result:
print(ts, n)
sub.close(True)
print("# keep progress consume")
sub = conn.subscribe(False, "test", "select * from log", 1000)
result = sub.consume()
rows = result.fetch_all()
# consuming from the latest subscription point requires root privilege (for /var/lib/taos).
assert result.row_count == 0
print("## consumed ", len(rows), "rows")
print("# consume with a stop condition")
for i in range(10):
conn.execute("insert into log values(now, %d)" % random.randint(0, 10))
result = sub.consume()
try:
ts, n = next(result)
print(ts, n)
if n > 5:
result.stop_query()
print("## stopped")
break
except StopIteration:
continue
sub.close()
# sub.close()
conn.execute("drop database if exists %s" % dbname)
# conn.close()

View File

@ -0,0 +1,27 @@
[tool.poetry]
name = "taos"
version = "2.1.0"
description = "TDengine connector for python"
authors = ["Taosdata Inc. <support@taosdata.com>"]
license = "AGPL-3.0"
readme = "README.md"
[tool.poetry.dependencies]
python = "^2.7 || ^3.4"
typing = "*"
[tool.poetry.dev-dependencies]
pytest = [
{ version = "^4.6", python = "^2.7" },
{ version = "^6.2", python = "^3.7" }
]
pdoc = { version = "^7.1.1", python = "^3.7" }
mypy = { version = "^0.910", python = "^3.6" }
black = { version = "^21.7b0", python = "^3.6" }
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.black]
line-length = 119

View File

@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
version="2.0.11",
version="2.1.0",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",

View File

@ -1,20 +1,478 @@
# encoding:UTF-8
"""
# TDengine Connector for Python
from .connection import TDengineConnection
from .cursor import TDengineCursor
[TDengine](https://github.com/taosdata/TDengine) connector for Python enables Python programs to access TDengine
through an API compliant with the Python DB API 2.0 (PEP-249). It uses the TDengine C client library for client-server communication.
# For some reason, the following is needed for VS Code (through PyLance) to
## Install
```sh
git clone --depth 1 https://github.com/taosdata/TDengine.git
pip install ./TDengine/src/connector/python
```
## Source Code
[TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python).
## Examples
### Query with PEP-249 API
```python
import taos
conn = taos.connect()
cursor = conn.cursor()
cursor.execute("show databases")
results = cursor.fetchall()
for row in results:
print(row)
cursor.close()
conn.close()
```
### Query with objective API
```python
import taos
conn = taos.connect()
conn.exec("create database if not exists pytest")
result = conn.query("show databases")
num_of_fields = result.field_count
for field in result.fields:
print(field)
for row in result:
print(row)
result.close()
conn.exec("drop database pytest")
conn.close()
```
### Query with async API
```python
from taos import *
from ctypes import *
import time
def fetch_callback(p_param, p_result, num_of_rows):
print("fetched ", num_of_rows, "rows")
p = cast(p_param, POINTER(Counter))
result = TaosResult(p_result)
if num_of_rows == 0:
print("fetching completed")
p.contents.done = True
result.close()
return
if num_of_rows < 0:
p.contents.done = True
result.check_error(num_of_rows)
result.close()
return None
for row in result.rows_iter(num_of_rows):
# print(row)
pass
p.contents.count += result.row_count
result.fetch_rows_a(fetch_callback, p_param)
def query_callback(p_param, p_result, code):
# type: (c_void_p, c_void_p, c_int) -> None
if p_result is None:
return
result = TaosResult(p_result)
if code == 0:
result.fetch_rows_a(fetch_callback, p_param)
result.check_error(code)
class Counter(Structure):
_fields_ = [("count", c_int), ("done", c_bool)]
def __str__(self):
return "{ count: %d, done: %s }" % (self.count, self.done)
def test_query(conn):
# type: (TaosConnection) -> None
counter = Counter(count=0)
conn.query_a("select * from log.log", query_callback, byref(counter))
while not counter.done:
print("wait query callback")
time.sleep(1)
print(counter)
conn.close()
if __name__ == "__main__":
test_query(connect())
```
### Statement API - Bind row after row
```python
from taos import *
conn = connect()
dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \\
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \\
su smallint unsigned, iu int unsigned, bu bigint unsigned, \\
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_bind_params(16)
params[0].timestamp(1626861392589)
params[1].bool(True)
params[2].null()
params[3].tinyint(2)
params[4].smallint(3)
params[5].int(4)
params[6].bigint(5)
params[7].tinyint_unsigned(6)
params[8].smallint_unsigned(7)
params[9].int_unsigned(8)
params[10].bigint_unsigned(9)
params[11].float(10.1)
params[12].double(10.11)
params[13].binary("hello")
params[14].nchar("stmt")
params[15].timestamp(1626861392589)
stmt.bind_param(params)
params[0].timestamp(1626861392590)
params[15].null()
stmt.bind_param(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 2
result.close()
result = conn.query("select * from log")
for row in result:
print(row)
result.close()
stmt.close()
conn.close()
```
### Statement API - Bind multi rows
```python
from taos import *
conn = connect()
dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \\
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \\
su smallint unsigned, iu int unsigned, bu bigint unsigned, \\
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
stmt.bind_param_batch(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 3
result.close()
result = conn.query("select * from log")
for row in result:
print(row)
result.close()
stmt.close()
conn.close()
```
### Subscribe
```python
import taos
from random import random
conn = taos.connect()
dbname = "pytest_taos_subscribe_callback"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute("create table if not exists log(ts timestamp, n int)")
for i in range(10):
conn.execute("insert into log values(now, %d)" % i)
sub = conn.subscribe(True, "test", "select * from log", 1000)
print("# consume from begin")
for ts, n in sub.consume():
print(ts, n)
print("# consume new data")
for i in range(5):
conn.execute("insert into log values(now, %d)(now+1s, %d)" % (i, i))
result = sub.consume()
for ts, n in result:
print(ts, n)
print("# consume with a stop condition")
for i in range(10):
conn.execute("insert into log values(now, %d)" % int(random() * 10))
result = sub.consume()
try:
ts, n = next(result)
print(ts, n)
if n > 5:
result.stop_query()
print("## stopped")
break
except StopIteration:
continue
sub.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
```
### Subscribe asynchronously with callback
```python
from taos import *
from ctypes import *
import time
def subscribe_callback(p_sub, p_result, p_param, errno):
# type: (c_void_p, c_void_p, c_void_p, c_int) -> None
print("# fetch in callback")
result = TaosResult(p_result)
result.check_error(errno)
for row in result.rows_iter():
ts, n = row()
print(ts, n)
def test_subscribe_callback(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_subscribe_callback"
try:
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)")
print("# subscribe with callback")
sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)
for i in range(10):
conn.exec("insert into log values(now, %d)" % i)
time.sleep(0.7)
sub.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.exec("drop database if exists %s" % dbname)
conn.close()
raise err
if __name__ == "__main__":
test_subscribe_callback(connect())
```
### Stream
```python
from taos import *
from ctypes import *
import time
def stream_callback(p_param, p_result, p_row):
# type: (c_void_p, c_void_p, c_void_p) -> None
if p_result is None or p_row is None:
return
result = TaosResult(p_result)
row = TaosRow(result, p_row)
try:
ts, count = row()
p = cast(p_param, POINTER(Counter))
p.contents.count += count
print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count))
except Exception as err:
print(err)
raise err
class Counter(Structure):
_fields_ = [
("count", c_int),
]
def __str__(self):
return "%d" % self.count
def test_stream(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_stream"
try:
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)")
result = conn.query("select count(*) from log interval(5s)")
assert result.field_count == 2
counter = Counter()
counter.count = 0
stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))
for _ in range(0, 20):
conn.exec("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
time.sleep(2)
stream.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.exec("drop database if exists %s" % dbname)
conn.close()
raise err
if __name__ == "__main__":
test_stream(connect())
```
### Insert with line protocol
```python
import taos
conn = taos.connect()
dbname = "pytest_line"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns',
'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns',
'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)
print("inserted")
lines = [
'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)
result = conn.query("show tables")
for row in result:
print(row)
result.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
```
## License - AGPL-3.0
Keep same with [TDengine](https://github.com/taosdata/TDengine).
"""
from .connection import TaosConnection
# For some reason, the following is needed for VS Code (through PyLance) to
# recognize that "error" is a valid module of the "taos" package.
from .error import ProgrammingError
from .error import *
from .bind import *
from .field import *
from .cursor import *
from .result import *
from .statement import *
from .subscription import *
try:
import importlib.metadata
__version__ = importlib.metadata.version("taos")
except:
None
# Globals
threadsafety = 0
paramstyle = 'pyformat'
__all__ = ['connection', 'cursor']
paramstyle = "pyformat"
__all__ = [
# functions
"connect",
"new_bind_param",
"new_bind_params",
"new_multi_binds",
"new_multi_bind",
# objects
"TaosBind",
"TaosConnection",
"TaosCursor",
"TaosResult",
"TaosRows",
"TaosRow",
"TaosStmt",
"PrecisionEnum",
]
def connect(*args, **kwargs):
""" Function to return a TDengine connector object
# type: (..., ...) -> TaosConnection
"""Function to return a TDengine connector object
Current supporting keyword parameters:
@dsn: Data source name as string
@ -25,4 +483,4 @@ def connect(*args, **kwargs):
@rtype: TDengineConnector
"""
return TDengineConnection(*args, **kwargs)
return TaosConnection(*args, **kwargs)
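For reference, a minimal sketch of connecting with explicit keyword parameters; the values shown are conventional defaults and are assumptions, not requirements:

```python
import taos

conn = taos.connect(
    host="localhost",     # server host
    user="root",          # user name
    password="taosdata",  # password
    database="log",       # default database
    port=6030,            # server port
)
print(conn.client_info)  # client library version
print(conn.server_info)  # server version
conn.close()
```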

View File

@ -0,0 +1,432 @@
# encoding:UTF-8
import ctypes
from .constants import FieldType
from .error import *
from .precision import *
from datetime import datetime
from ctypes import *
import sys
_datetime_epoch = datetime.utcfromtimestamp(0)
def _is_not_none(obj):
return obj is not None
class TaosBind(ctypes.Structure):
_fields_ = [
("buffer_type", c_int),
("buffer", c_void_p),
("buffer_length", c_size_t),
("length", POINTER(c_size_t)),
("is_null", POINTER(c_int)),
("is_unsigned", c_int),
("error", POINTER(c_int)),
("u", c_int64),
("allocated", c_int),
]
def null(self):
self.buffer_type = FieldType.C_NULL
self.is_null = pointer(c_int(1))
def bool(self, value):
self.buffer_type = FieldType.C_BOOL
self.buffer = cast(pointer(c_bool(value)), c_void_p)
self.buffer_length = sizeof(c_bool)
def tinyint(self, value):
self.buffer_type = FieldType.C_TINYINT
self.buffer = cast(pointer(c_int8(value)), c_void_p)
self.buffer_length = sizeof(c_int8)
def smallint(self, value):
self.buffer_type = FieldType.C_SMALLINT
self.buffer = cast(pointer(c_int16(value)), c_void_p)
self.buffer_length = sizeof(c_int16)
def int(self, value):
self.buffer_type = FieldType.C_INT
self.buffer = cast(pointer(c_int32(value)), c_void_p)
self.buffer_length = sizeof(c_int32)
def bigint(self, value):
self.buffer_type = FieldType.C_BIGINT
self.buffer = cast(pointer(c_int64(value)), c_void_p)
self.buffer_length = sizeof(c_int64)
def float(self, value):
self.buffer_type = FieldType.C_FLOAT
self.buffer = cast(pointer(c_float(value)), c_void_p)
self.buffer_length = sizeof(c_float)
def double(self, value):
self.buffer_type = FieldType.C_DOUBLE
self.buffer = cast(pointer(c_double(value)), c_void_p)
self.buffer_length = sizeof(c_double)
def binary(self, value):
buffer = None
length = 0
if isinstance(value, str):
bytes = value.encode("utf-8")
buffer = create_string_buffer(bytes)
length = len(bytes)
else:
buffer = value
length = len(value)
self.buffer_type = FieldType.C_BINARY
self.buffer = cast(buffer, c_void_p)
self.buffer_length = length
self.length = pointer(c_size_t(self.buffer_length))
def timestamp(self, value, precision=PrecisionEnum.Milliseconds):
if type(value) is datetime:
if precision == PrecisionEnum.Milliseconds:
ts = int(round((value - _datetime_epoch).total_seconds() * 1000))
elif precision == PrecisionEnum.Microseconds:
ts = int(round((value - _datetime_epoch).total_seconds() * 1000000))
else:
raise PrecisionError("datetime does not support nanosecond precision")
elif type(value) is float:
if precision == PrecisionEnum.Milliseconds:
ts = int(round(value * 1000))
elif precision == PrecisionEnum.Microseconds:
ts = int(round(value * 1000000))
else:
raise PrecisionError("float timestamps do not support nanosecond precision")
elif isinstance(value, int) and not isinstance(value, bool):
ts = value
elif isinstance(value, str):
value = datetime.fromisoformat(value)
if precision == PrecisionEnum.Milliseconds:
ts = int(round((value - _datetime_epoch).total_seconds() * 1000))
elif precision == PrecisionEnum.Microseconds:
ts = int(round((value - _datetime_epoch).total_seconds() * 1000000))
else:
raise PrecisionError("datetime does not support nanosecond precision")
self.buffer_type = FieldType.C_TIMESTAMP
self.buffer = cast(pointer(c_int64(ts)), c_void_p)
self.buffer_length = sizeof(c_int64)
def nchar(self, value):
buffer = None
length = 0
if isinstance(value, str):
bytes = value.encode("utf-8")
buffer = create_string_buffer(bytes)
length = len(bytes)
else:
buffer = value
length = len(value)
self.buffer_type = FieldType.C_NCHAR
self.buffer = cast(buffer, c_void_p)
self.buffer_length = length
self.length = pointer(c_size_t(self.buffer_length))
def tinyint_unsigned(self, value):
self.buffer_type = FieldType.C_TINYINT_UNSIGNED
self.buffer = cast(pointer(c_uint8(value)), c_void_p)
self.buffer_length = sizeof(c_uint8)
def smallint_unsigned(self, value):
self.buffer_type = FieldType.C_SMALLINT_UNSIGNED
self.buffer = cast(pointer(c_uint16(value)), c_void_p)
self.buffer_length = sizeof(c_uint16)
def int_unsigned(self, value):
self.buffer_type = FieldType.C_INT_UNSIGNED
self.buffer = cast(pointer(c_uint32(value)), c_void_p)
self.buffer_length = sizeof(c_uint32)
def bigint_unsigned(self, value):
self.buffer_type = FieldType.C_BIGINT_UNSIGNED
self.buffer = cast(pointer(c_uint64(value)), c_void_p)
self.buffer_length = sizeof(c_uint64)
def _datetime_to_timestamp(value, precision):
# type: (datetime | float | int | str | c_int64, PrecisionEnum) -> c_int64
if value is None:
return FieldType.C_BIGINT_NULL
if type(value) is datetime:
if precision == PrecisionEnum.Milliseconds:
return int(round((value - _datetime_epoch).total_seconds() * 1000))
elif precision == PrecisionEnum.Microseconds:
return int(round((value - _datetime_epoch).total_seconds() * 1000000))
else:
raise PrecisionError("datetime does not support nanosecond precision")
elif type(value) is float:
if precision == PrecisionEnum.Milliseconds:
return int(round(value * 1000))
elif precision == PrecisionEnum.Microseconds:
return int(round(value * 1000000))
else:
raise PrecisionError("float timestamps do not support nanosecond precision")
elif isinstance(value, int) and not isinstance(value, bool):
return c_int64(value)
elif isinstance(value, str):
value = datetime.fromisoformat(value)
if precision == PrecisionEnum.Milliseconds:
return int(round((value - _datetime_epoch).total_seconds() * 1000))
elif precision == PrecisionEnum.Microseconds:
return int(round((value - _datetime_epoch).total_seconds() * 1000000))
else:
raise PrecisionError("datetime does not support nanosecond precision")
elif isinstance(value, c_int64):
return value
return FieldType.C_BIGINT_NULL
class TaosMultiBind(ctypes.Structure):
_fields_ = [
("buffer_type", c_int),
("buffer", c_void_p),
("buffer_length", c_size_t),
("length", POINTER(c_int32)),
("is_null", c_char_p),
("num", c_int),
]
def null(self, num):
self.buffer_type = FieldType.C_NULL
self.is_null = cast((c_char * num)(*[1 for _ in range(num)]), c_char_p)
self.buffer = c_void_p(None)
self.num = num
def bool(self, values):
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_int8 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_BOOL_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
self.buffer_type = FieldType.C_BOOL
self.buffer_length = sizeof(c_bool)
def tinyint(self, values):
self.buffer_type = FieldType.C_TINYINT
self.buffer_length = sizeof(c_int8)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_int8 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_TINYINT_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def smallint(self, values):
self.buffer_type = FieldType.C_SMALLINT
self.buffer_length = sizeof(c_int16)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_int16 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_SMALLINT_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def int(self, values):
self.buffer_type = FieldType.C_INT
self.buffer_length = sizeof(c_int32)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_int32 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_INT_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def bigint(self, values):
self.buffer_type = FieldType.C_BIGINT
self.buffer_length = sizeof(c_int64)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_int64 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_BIGINT_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def float(self, values):
self.buffer_type = FieldType.C_FLOAT
self.buffer_length = sizeof(c_float)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_float * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_FLOAT_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def double(self, values):
self.buffer_type = FieldType.C_DOUBLE
self.buffer_length = sizeof(c_double)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_double * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_DOUBLE_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def binary(self, values):
self.num = len(values)
self.buffer = cast(c_char_p("".join(filter(_is_not_none, values)).encode("utf-8")), c_void_p)
self.length = (c_int * len(values))(*[len(value) if value is not None else 0 for value in values])
self.buffer_type = FieldType.C_BINARY
self.is_null = cast((c_byte * self.num)(*[1 if v is None else 0 for v in values]), c_char_p)
def timestamp(self, values, precision=PrecisionEnum.Milliseconds):
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_int64 * len(values)
buffer = buffer_type(*[_datetime_to_timestamp(value, precision) for value in values])
self.buffer_type = FieldType.C_TIMESTAMP
self.buffer = cast(buffer, c_void_p)
self.buffer_length = sizeof(c_int64)
self.num = len(values)
def nchar(self, values):
# type: (list[str]) -> None
if sys.version_info < (3, 0):
_bytes = [bytes(value) if value is not None else None for value in values]
buffer_length = max(len(b) + 1 for b in _bytes if b is not None)
buffers = [
create_string_buffer(b, buffer_length) if b is not None else create_string_buffer(buffer_length)
for b in _bytes
]
buffer_all = b''.join(v[:] for v in buffers)
self.buffer = cast(c_char_p(buffer_all), c_void_p)
else:
_bytes = [value.encode("utf-8") if value is not None else None for value in values]
buffer_length = max(len(b) for b in _bytes if b is not None)
self.buffer = cast(
c_char_p(
b"".join(
[
create_string_buffer(b, buffer_length)
if b is not None
else create_string_buffer(buffer_length)
for b in _bytes
]
)
),
c_void_p,
)
self.length = (c_int32 * len(values))(*[len(b) if b is not None else 0 for b in _bytes])
self.buffer_length = buffer_length
self.num = len(values)
self.is_null = cast((c_byte * self.num)(*[1 if v is None else 0 for v in values]), c_char_p)
self.buffer_type = FieldType.C_NCHAR
def tinyint_unsigned(self, values):
self.buffer_type = FieldType.C_TINYINT_UNSIGNED
self.buffer_length = sizeof(c_uint8)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_uint8 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_TINYINT_UNSIGNED_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def smallint_unsigned(self, values):
self.buffer_type = FieldType.C_SMALLINT_UNSIGNED
self.buffer_length = sizeof(c_uint16)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_uint16 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_SMALLINT_UNSIGNED_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def int_unsigned(self, values):
self.buffer_type = FieldType.C_INT_UNSIGNED
self.buffer_length = sizeof(c_uint32)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_uint32 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_INT_UNSIGNED_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def bigint_unsigned(self, values):
self.buffer_type = FieldType.C_BIGINT_UNSIGNED
self.buffer_length = sizeof(c_uint64)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_uint64 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_BIGINT_UNSIGNED_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def new_bind_param():
# type: () -> TaosBind
return TaosBind()
def new_bind_params(size):
# type: (int) -> Array[TaosBind]
return (TaosBind * size)()
def new_multi_bind():
# type: () -> TaosMultiBind
return TaosMultiBind()
def new_multi_binds(size):
# type: (int) -> Array[TaosMultiBind]
return (TaosMultiBind * size)()
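As a quick sanity check of the precision arithmetic above, a minimal sketch; the `taos.bind` and `taos.precision` import paths are assumptions based on this file layout:

```python
from datetime import datetime
from taos.bind import _datetime_to_timestamp  # assumed module path
from taos.precision import PrecisionEnum      # assumed module path

dt = datetime(2021, 7, 21, 8, 1, 25)
ms = _datetime_to_timestamp(dt, PrecisionEnum.Milliseconds)
us = _datetime_to_timestamp(dt, PrecisionEnum.Microseconds)
# millisecond timestamps scale epoch seconds by 1e3, microsecond ones by 1e6
assert us == ms * 1000
```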

File diff suppressed because it is too large

View File

@ -1,11 +1,15 @@
from .cursor import TDengineCursor
from .subscription import TDengineSubscription
from .cinterface import CTaosInterface
# encoding:UTF-8
from types import FunctionType
from .cinterface import *
from .cursor import TaosCursor
from .subscription import TaosSubscription
from .statement import TaosStmt
from .stream import TaosStream
from .result import *
class TDengineConnection(object):
""" TDengine connection object
"""
class TaosConnection(object):
"""TDengine connection object"""
def __init__(self, *args, **kwargs):
self._conn = None
@ -21,63 +25,130 @@ class TDengineConnection(object):
def config(self, **kwargs):
# host
if 'host' in kwargs:
self._host = kwargs['host']
if "host" in kwargs:
self._host = kwargs["host"]
# user
if 'user' in kwargs:
self._user = kwargs['user']
if "user" in kwargs:
self._user = kwargs["user"]
# password
if 'password' in kwargs:
self._password = kwargs['password']
if "password" in kwargs:
self._password = kwargs["password"]
# database
if 'database' in kwargs:
self._database = kwargs['database']
if "database" in kwargs:
self._database = kwargs["database"]
# port
if 'port' in kwargs:
self._port = kwargs['port']
if "port" in kwargs:
self._port = kwargs["port"]
# config
if 'config' in kwargs:
self._config = kwargs['config']
if "config" in kwargs:
self._config = kwargs["config"]
self._chandle = CTaosInterface(self._config)
self._conn = self._chandle.connect(
self._host,
self._user,
self._password,
self._database,
self._port)
self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port)
def close(self):
"""Close current connection.
"""
return CTaosInterface.close(self._conn)
"""Close current connection."""
if self._conn:
taos_close(self._conn)
self._conn = None
def subscribe(self, restart, topic, sql, interval):
"""Create a subscription.
"""
@property
def client_info(self):
# type: () -> str
return taos_get_client_info()
@property
def server_info(self):
# type: () -> str
return taos_get_server_info(self._conn)
def select_db(self, database):
# type: (str) -> None
taos_select_db(self._conn, database)
def execute(self, sql):
# type: (str) -> None
"""Simplely execute sql ignoring the results"""
res = taos_query(self._conn, sql)
taos_free_result(res)
def query(self, sql):
# type: (str) -> TaosResult
result = taos_query(self._conn, sql)
return TaosResult(result, True, self)
def query_a(self, sql, callback, param):
# type: (str, async_query_callback_type, c_void_p) -> None
"""Asynchronously query a sql with callback function"""
taos_query_a(self._conn, sql, callback, param)
def subscribe(self, restart, topic, sql, interval, callback=None, param=None):
# type: (bool, str, str, int, subscribe_callback_type, c_void_p) -> TaosSubscription
"""Create a subscription."""
if self._conn is None:
return None
sub = CTaosInterface.subscribe(
self._conn, restart, topic, sql, interval)
return TDengineSubscription(sub)
sub = taos_subscribe(self._conn, restart, topic, sql, interval, callback, param)
return TaosSubscription(sub, callback is not None)
def insertLines(self, lines):
"""
insert lines through line protocol
"""
def statement(self, sql=None):
# type: (str | None) -> TaosStmt
if self._conn is None:
return None
return CTaosInterface.insertLines(self._conn, lines)
stmt = taos_stmt_init(self._conn)
if sql is not None:
taos_stmt_prepare(stmt, sql)
return TaosStmt(stmt)
def load_table_info(self, tables):
# type: (str) -> None
taos_load_table_info(self._conn, tables)
def stream(self, sql, callback, stime=0, param=None, callback2=None):
# type: (str, Callable[[Any, TaosResult, TaosRows], None], int, Any, c_void_p) -> TaosStream
# cb = cast(callback, stream_callback_type)
# ref = byref(cb)
stream = taos_open_stream(self._conn, sql, callback, stime, param, callback2)
return TaosStream(stream)
def insert_lines(self, lines):
# type: (list[str]) -> None
"""Line protocol and schemaless support
## Example
```python
import taos
conn = taos.connect()
conn.exec("drop database if exists test")
conn.select_db("test")
lines = [
'ste,t2=5,t3=L"ste" c1=true,c2=4,c3="string" 1626056811855516532',
]
conn.insert_lines(lines)
```
## Exception
```python
try:
conn.insert_lines(lines)
except LinesError as err:
print(err)
```
"""
return taos_insert_lines(self._conn, lines)
def cursor(self):
"""Return a new Cursor object using the connection.
"""
return TDengineCursor(self)
# type: () -> TaosCursor
"""Return a new Cursor object using the connection."""
return TaosCursor(self)
def commit(self):
"""Commit any pending transaction to the database.
@ -87,17 +158,18 @@ class TDengineConnection(object):
pass
def rollback(self):
"""Void functionality
"""
"""Void functionality"""
pass
def clear_result_set(self):
"""Clear unused result set on this connection.
"""
"""Clear unused result set on this connection."""
pass
def __del__(self):
self.close()
if __name__ == "__main__":
conn = TDengineConnection(host='192.168.1.107')
conn = TaosConnection()
conn.close()
print("Hello world")

View File

@ -1,12 +1,11 @@
# encoding:UTF-8
"""Constants in TDengine python
"""
from .dbapi import *
class FieldType(object):
"""TDengine Field Types
"""
"""TDengine Field Types"""
# type_code
C_NULL = 0
C_BOOL = 1
@ -34,9 +33,9 @@ class FieldType(object):
C_INT_UNSIGNED_NULL = 4294967295
C_BIGINT_NULL = -9223372036854775808
C_BIGINT_UNSIGNED_NULL = 18446744073709551615
C_FLOAT_NULL = float('nan')
C_DOUBLE_NULL = float('nan')
C_BINARY_NULL = bytearray([int('0xff', 16)])
C_FLOAT_NULL = float("nan")
C_DOUBLE_NULL = float("nan")
C_BINARY_NULL = bytearray([int("0xff", 16)])
# Timestamp precision definition
C_TIMESTAMP_MILLI = 0
C_TIMESTAMP_MICRO = 1
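These sentinels are the values the bind helpers above substitute for `None`; a minimal sketch, assuming the module is importable as `taos.constants`:

```python
from taos.constants import FieldType  # assumed module path

# -128 doubles as the tinyint NULL marker (see the bind examples above)
assert FieldType.C_TINYINT_NULL == -128
assert FieldType.C_BIGINT_NULL == -9223372036854775808
```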

View File

@ -1,18 +1,18 @@
from .cinterface import CTaosInterface
# encoding:UTF-8
from .cinterface import *
from .error import *
from .constants import FieldType
# querySeqNum = 0
from .result import *
class TDengineCursor(object):
class TaosCursor(object):
"""Database cursor which is used to manage the context of a fetch operation.
Attributes:
.description: Read-only attribute consists of 7-item sequences:
> name (mondatory)
> type_code (mondatory)
> name (mandatory)
> type_code (mandatory)
> display_size
> internal_size
> precision
@ -55,8 +55,7 @@ class TDengineCursor(object):
raise OperationalError("Invalid use of fetch iterator")
if self._block_rows <= self._block_iter:
block, self._block_rows = CTaosInterface.fetchRow(
self._result, self._fields)
block, self._block_rows = taos_fetch_row(self._result, self._fields)
if self._block_rows == 0:
raise StopIteration
self._block = list(map(tuple, zip(*block)))
@ -69,20 +68,17 @@ class TDengineCursor(object):
@property
def description(self):
"""Return the description of the object.
"""
"""Return the description of the object."""
return self._description
@property
def rowcount(self):
"""Return the rowcount of the object
"""
"""Return the rowcount of the object"""
return self._rowcount
@property
def affected_rows(self):
"""Return the rowcount of insertion
"""
"""Return the rowcount of insertion"""
return self._affected_rows
def callproc(self, procname, *args):
@ -96,8 +92,7 @@ class TDengineCursor(object):
self._logfile = logfile
def close(self):
"""Close the cursor.
"""
"""Close the cursor."""
if self._connection is None:
return False
@ -107,8 +102,7 @@ class TDengineCursor(object):
return True
def execute(self, operation, params=None):
"""Prepare and execute a database operation (query or command).
"""
"""Prepare and execute a database operation (query or command)."""
if not operation:
return None
@ -124,104 +118,91 @@ class TDengineCursor(object):
# global querySeqNum
# querySeqNum += 1
# localSeqNum = querySeqNum # avoid raice condition
# localSeqNum = querySeqNum # avoid race condition
# print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
self._result = CTaosInterface.query(self._connection._conn, stmt)
self._result = taos_query(self._connection._conn, stmt)
# print(" << Query ({}) Exec Done".format(localSeqNum))
if (self._logfile):
if self._logfile:
with open(self._logfile, "a") as logfile:
logfile.write("%s;\n" % operation)
errno = CTaosInterface.libtaos.taos_errno(self._result)
if errno == 0:
if CTaosInterface.fieldsCount(self._result) == 0:
self._affected_rows += CTaosInterface.affectedRows(
self._result)
return CTaosInterface.affectedRows(self._result)
else:
self._fields = CTaosInterface.useResult(
self._result)
return self._handle_result()
if taos_field_count(self._result) == 0:
affected_rows = taos_affected_rows(self._result)
self._affected_rows += affected_rows
return affected_rows
else:
raise ProgrammingError(
CTaosInterface.errStr(
self._result), errno)
self._fields = taos_fetch_fields(self._result)
return self._handle_result()
def executemany(self, operation, seq_of_parameters):
"""Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
"""
"""Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters."""
pass
def fetchone(self):
"""Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
"""
"""Fetch the next row of a query result set, returning a single sequence, or None when no more data is available."""
pass
def fetchmany(self):
pass
def istype(self, col, dataType):
if (dataType.upper() == "BOOL"):
if (self._description[col][1] == FieldType.C_BOOL):
if dataType.upper() == "BOOL":
if self._description[col][1] == FieldType.C_BOOL:
return True
if (dataType.upper() == "TINYINT"):
if (self._description[col][1] == FieldType.C_TINYINT):
if dataType.upper() == "TINYINT":
if self._description[col][1] == FieldType.C_TINYINT:
return True
if (dataType.upper() == "TINYINT UNSIGNED"):
if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
if dataType.upper() == "TINYINT UNSIGNED":
if self._description[col][1] == FieldType.C_TINYINT_UNSIGNED:
return True
if (dataType.upper() == "SMALLINT"):
if (self._description[col][1] == FieldType.C_SMALLINT):
if dataType.upper() == "SMALLINT":
if self._description[col][1] == FieldType.C_SMALLINT:
return True
if (dataType.upper() == "SMALLINT UNSIGNED"):
if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
if dataType.upper() == "SMALLINT UNSIGNED":
if self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED:
return True
if (dataType.upper() == "INT"):
if (self._description[col][1] == FieldType.C_INT):
if dataType.upper() == "INT":
if self._description[col][1] == FieldType.C_INT:
return True
if (dataType.upper() == "INT UNSIGNED"):
if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
if dataType.upper() == "INT UNSIGNED":
if self._description[col][1] == FieldType.C_INT_UNSIGNED:
return True
if (dataType.upper() == "BIGINT"):
if (self._description[col][1] == FieldType.C_BIGINT):
if dataType.upper() == "BIGINT":
if self._description[col][1] == FieldType.C_BIGINT:
return True
if (dataType.upper() == "BIGINT UNSIGNED"):
if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
if dataType.upper() == "BIGINT UNSIGNED":
if self._description[col][1] == FieldType.C_BIGINT_UNSIGNED:
return True
if (dataType.upper() == "FLOAT"):
if (self._description[col][1] == FieldType.C_FLOAT):
if dataType.upper() == "FLOAT":
if self._description[col][1] == FieldType.C_FLOAT:
return True
if (dataType.upper() == "DOUBLE"):
if (self._description[col][1] == FieldType.C_DOUBLE):
if dataType.upper() == "DOUBLE":
if self._description[col][1] == FieldType.C_DOUBLE:
return True
if (dataType.upper() == "BINARY"):
if (self._description[col][1] == FieldType.C_BINARY):
if dataType.upper() == "BINARY":
if self._description[col][1] == FieldType.C_BINARY:
return True
if (dataType.upper() == "TIMESTAMP"):
if (self._description[col][1] == FieldType.C_TIMESTAMP):
if dataType.upper() == "TIMESTAMP":
if self._description[col][1] == FieldType.C_TIMESTAMP:
return True
if (dataType.upper() == "NCHAR"):
if (self._description[col][1] == FieldType.C_NCHAR):
if dataType.upper() == "NCHAR":
if self._description[col][1] == FieldType.C_NCHAR:
return True
return False
def fetchall_row(self):
"""Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
"""
"""Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation."""
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall")
buffer = [[] for i in range(len(self._fields))]
self._rowcount = 0
while True:
block, num_of_fields = CTaosInterface.fetchRow(
self._result, self._fields)
errno = CTaosInterface.libtaos.taos_errno(self._result)
block, num_of_fields = taos_fetch_row(self._result, self._fields)
errno = taos_errno(self._result)
if errno != 0:
raise ProgrammingError(
CTaosInterface.errStr(
self._result), errno)
raise ProgrammingError(taos_errstr(self._result), errno)
if num_of_fields == 0:
break
self._rowcount += num_of_fields
@ -230,19 +211,16 @@ class TDengineCursor(object):
return list(map(tuple, zip(*buffer)))
def fetchall(self):
if self._result is None or self._fields is None:
if self._result is None:
raise OperationalError("Invalid use of fetchall")
buffer = [[] for i in range(len(self._fields))]
fields = self._fields if self._fields is not None else taos_fetch_fields(self._result)
buffer = [[] for i in range(len(fields))]
self._rowcount = 0
while True:
block, num_of_fields = CTaosInterface.fetchBlock(
self._result, self._fields)
errno = CTaosInterface.libtaos.taos_errno(self._result)
block, num_of_fields = taos_fetch_block(self._result, self._fields)
errno = taos_errno(self._result)
if errno != 0:
raise ProgrammingError(
CTaosInterface.errStr(
self._result), errno)
raise ProgrammingError(taos_errstr(self._result), errno)
if num_of_fields == 0:
break
self._rowcount += num_of_fields
@ -250,9 +228,12 @@ class TDengineCursor(object):
buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer)))
def stop_query(self):
if self._result is not None:
taos_stop_query(self._result)
def nextset(self):
"""
"""
""" """
pass
def setinputsize(self, sizes):
@ -262,12 +243,11 @@ class TDengineCursor(object):
pass
def _reset_result(self):
"""Reset the result to unused version.
"""
"""Reset the result to unused version."""
self._description = []
self._rowcount = -1
if self._result is not None:
CTaosInterface.freeResult(self._result)
taos_free_result(self._result)
self._result = None
self._fields = None
self._block = None
@ -276,11 +256,12 @@ class TDengineCursor(object):
self._affected_rows = 0
def _handle_result(self):
"""Handle the return result from query.
"""
"""Handle the return result from query."""
self._description = []
for ele in self._fields:
self._description.append(
(ele['name'], ele['type'], None, None, None, None, False))
self._description.append((ele["name"], ele["type"], None, None, None, None, False))
return self._result
def __del__(self):
self.close()
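A short sketch of the cursor attributes documented above (`description`, `rowcount`, `istype`), assuming a reachable server:

```python
import taos

conn = taos.connect()
cursor = conn.cursor()
cursor.execute("show databases")
rows = cursor.fetchall()
# description holds 7-item sequences starting with (name, type_code, ...)
for col in cursor.description:
    print(col[0], col[1])
print("first column is binary:", cursor.istype(0, "BINARY"))
print("fetched:", len(rows), "rows; rowcount:", cursor.rowcount)
cursor.close()
conn.close()
```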

View File

@ -1,44 +0,0 @@
"""Type Objects and Constructors.
"""
import time
import datetime
class DBAPITypeObject(object):
def __init__(self, *values):
self.values = values
def __com__(self, other):
if other in self.values:
return 0
if other < self.values:
return 1
else:
return -1
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DataFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
Binary = bytes
# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
# ROWID = DBAPITypeObject()

View File

@ -1,66 +1,86 @@
# encoding:UTF-8
"""Python exceptions
"""
class Error(Exception):
def __init__(self, msg=None, errno=None):
def __init__(self, msg=None, errno=0xffff):
self.msg = msg
self._full_msg = self.msg
self.errno = errno
self._full_msg = "[0x%04x]: %s" % (self.errno & 0xffff, self.msg)
def __str__(self):
return self._full_msg
class Warning(Exception):
"""Exception raised for important warnings like data truncations while inserting.
"""
"""Exception raised for important warnings like data truncations while inserting."""
pass
class InterfaceError(Error):
"""Exception raised for errors that are related to the database interface rather than the database itself.
"""
"""Exception raised for errors that are related to the database interface rather than the database itself."""
pass
class DatabaseError(Error):
"""Exception raised for errors that are related to the database.
"""
"""Exception raised for errors that are related to the database."""
pass
class ConnectionError(Error):
"""Exceptin raised for connection failed"""
pass
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
"""
"""Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range."""
pass
class OperationalError(DatabaseError):
"""Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
"""
"""Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer"""
pass
class IntegrityError(DatabaseError):
"""Exception raised when the relational integrity of the database is affected.
"""
"""Exception raised when the relational integrity of the database is affected."""
pass
class InternalError(DatabaseError):
"""Exception raised when the database encounters an internal error.
"""
"""Exception raised when the database encounters an internal error."""
pass
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors.
"""
"""Exception raised for programming errors."""
pass
class NotSupportedError(DatabaseError):
"""Exception raised in case a method or database API was used which is not supported by the database,.
"""
"""Exception raised in case a method or database API was used which is not supported by the database,."""
pass
class StatementError(DatabaseError):
"""Exception raised in STMT API."""
pass
class ResultError(DatabaseError):
"""Result related APIs."""
pass
class LinesError(DatabaseError):
"""taos_insert_lines errors."""
pass
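Because every class above derives from `Error`, callers can catch broadly and still read the formatted error code; a minimal sketch:

```python
import taos
from taos.error import DatabaseError

conn = taos.connect()
try:
    conn.query("select * from db_that_does_not_exist.t")
except DatabaseError as err:
    # Error.__str__ renders "[0x....]: message"
    print(err.errno, err)
finally:
    conn.close()
```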

View File

@ -0,0 +1,302 @@
# encoding:UTF-8
import ctypes
import math
import datetime
from ctypes import *
from .constants import FieldType
from .error import *
_datetime_epoch = datetime.datetime.fromtimestamp(0)
def _convert_millisecond_to_datetime(milli):
return _datetime_epoch + datetime.timedelta(seconds=milli / 1000.0)
def _convert_microsecond_to_datetime(micro):
return _datetime_epoch + datetime.timedelta(seconds=micro / 1000000.0)
def _convert_nanosecond_to_datetime(nanosec):
return nanosec
def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C bool row to python row"""
_timestamp_converter = _convert_millisecond_to_datetime
if precision == FieldType.C_TIMESTAMP_MILLI:
_timestamp_converter = _convert_millisecond_to_datetime
elif precision == FieldType.C_TIMESTAMP_MICRO:
_timestamp_converter = _convert_microsecond_to_datetime
elif precision == FieldType.C_TIMESTAMP_NANO:
_timestamp_converter = _convert_nanosecond_to_datetime
else:
raise DatabaseError("Unknown precision returned from database")
return [
None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[: abs(num_of_rows)]
]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C bool row to python row"""
return [
None if ele == FieldType.C_BOOL_NULL else bool(ele)
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[: abs(num_of_rows)]
]
def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C tinyint row to python row"""
return [
None if ele == FieldType.C_TINYINT_NULL else ele
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[: abs(num_of_rows)]
]
def _crow_tinyint_unsigned_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C tinyint row to python row"""
return [
None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_ubyte))[: abs(num_of_rows)]
]
def _crow_smallint_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C smallint row to python row"""
return [
None if ele == FieldType.C_SMALLINT_NULL else ele
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[: abs(num_of_rows)]
]
def _crow_smallint_unsigned_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C smallint row to python row"""
return [
None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_ushort))[: abs(num_of_rows)]
]
def _crow_int_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C int row to python row"""
return [
None if ele == FieldType.C_INT_NULL else ele
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[: abs(num_of_rows)]
]
def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C int row to python row"""
return [
None if ele == FieldType.C_INT_UNSIGNED_NULL else ele
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_uint))[: abs(num_of_rows)]
]
def _crow_bigint_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C bigint row to python row"""
return [
None if ele == FieldType.C_BIGINT_NULL else ele
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[: abs(num_of_rows)]
]
def _crow_bigint_unsigned_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C bigint row to python row"""
return [
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_uint64))[: abs(num_of_rows)]
]
def _crow_float_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C float row to python row"""
return [
None if math.isnan(ele) else ele
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[: abs(num_of_rows)]
]
def _crow_double_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C double row to python row"""
return [
None if math.isnan(ele) else ele
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[: abs(num_of_rows)]
]
def _crow_binary_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C binary row to python row"""
assert nbytes is not None
return [
None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode("utf-8")
for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[: abs(num_of_rows)]
]
def _crow_nchar_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C nchar row to python row"""
assert nbytes is not None
res = []
for i in range(abs(num_of_rows)):
try:
if num_of_rows >= 0:
tmpstr = ctypes.c_char_p(data)
res.append(tmpstr.value.decode())
else:
res.append(
(
ctypes.cast(
data + nbytes * i,
ctypes.POINTER(ctypes.c_wchar * (nbytes // 4)),
)
)[0].value
)
except ValueError:
res.append(None)
return res
def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C binary row to python row"""
assert nbytes is not None
res = []
for i in range(abs(num_of_rows)):
try:
rbyte = ctypes.cast(data + nbytes * i, ctypes.POINTER(ctypes.c_short))[:1].pop()
tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
res.append(tmpstr.value.decode()[0:rbyte])
except ValueError:
res.append(None)
return res
def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
"""Function to convert C nchar row to python row"""
assert nbytes is not None
res = []
for i in range(abs(num_of_rows)):
try:
tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
res.append(tmpstr.value.decode())
except ValueError:
res.append(None)
return res
CONVERT_FUNC = {
FieldType.C_BOOL: _crow_bool_to_python,
FieldType.C_TINYINT: _crow_tinyint_to_python,
FieldType.C_SMALLINT: _crow_smallint_to_python,
FieldType.C_INT: _crow_int_to_python,
FieldType.C_BIGINT: _crow_bigint_to_python,
FieldType.C_FLOAT: _crow_float_to_python,
FieldType.C_DOUBLE: _crow_double_to_python,
FieldType.C_BINARY: _crow_binary_to_python,
FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
FieldType.C_NCHAR: _crow_nchar_to_python,
FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python,
}
CONVERT_FUNC_BLOCK = {
FieldType.C_BOOL: _crow_bool_to_python,
FieldType.C_TINYINT: _crow_tinyint_to_python,
FieldType.C_SMALLINT: _crow_smallint_to_python,
FieldType.C_INT: _crow_int_to_python,
FieldType.C_BIGINT: _crow_bigint_to_python,
FieldType.C_FLOAT: _crow_float_to_python,
FieldType.C_DOUBLE: _crow_double_to_python,
FieldType.C_BINARY: _crow_binary_to_python_block,
FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
FieldType.C_NCHAR: _crow_nchar_to_python_block,
FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python,
}
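# Illustrative sketch (not part of the original module) of how these tables
# dispatch: cast the raw column pointer and map the NULL sentinel to None.
# INT32_MIN (-2147483648) is assumed here as the TDengine int NULL marker,
# matching FieldType.C_INT_NULL.
def _demo_convert_dispatch():
    raw = (ctypes.c_int * 3)(1, -2147483648, 3)
    data = ctypes.cast(raw, ctypes.c_void_p)
    values = CONVERT_FUNC[FieldType.C_INT](data, 3)
    assert values == [1, None, 3]
    return values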
# Corresponding TAOS_FIELD structure in C
class TaosField(ctypes.Structure):
_fields_ = [
("_name", ctypes.c_char * 65),
("_type", ctypes.c_uint8),
("_bytes", ctypes.c_uint16),
]
@property
def name(self):
return self._name.decode("utf-8")
@property
def length(self):
"""alias to self.bytes"""
return self._bytes
@property
def bytes(self):
return self._bytes
@property
def type(self):
return self._type
def __dict__(self):
return {"name": self.name, "type": self.type, "bytes": self.length}
def __str__(self):
return "{name: %s, type: %d, bytes: %d}" % (self.name, self.type, self.length)
def __getitem__(self, item):
return getattr(self, item)
class TaosFields(object):
def __init__(self, fields, count):
if isinstance(fields, c_void_p):
self._fields = cast(fields, POINTER(TaosField))
if isinstance(fields, POINTER(TaosField)):
self._fields = fields
self._count = count
self._iter = 0
def as_ptr(self):
return self._fields
@property
def count(self):
return self._count
@property
def fields(self):
return self._fields
def __next__(self):
return self._next_field()
def next(self):
return self._next_field()
def _next_field(self):
if self._iter < self.count:
field = self._fields[self._iter]
self._iter += 1
return field
else:
raise StopIteration
def __getitem__(self, item):
return self._fields[item]
def __iter__(self):
return self
def __len__(self):
return self.count
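# Quick illustration (not part of the original module) of the ctypes mapping:
# build a field by hand and rely on the __str__ above; type id 9 is the
# TDengine timestamp type.
def _demo_taos_field():
    f = TaosField(b"ts", 9, 8)
    assert f.name == "ts" and f.type == 9 and f.bytes == 8
    print(f)  # {name: ts, type: 9, bytes: 8}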

View File

@ -0,0 +1,12 @@
class PrecisionEnum(object):
"""Precision enums"""
Milliseconds = 0
Microseconds = 1
Nanoseconds = 2
class PrecisionError(Exception):
"""Python datetime does not support nanoseconds error"""
pass
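# Illustrative mapping (not part of the original module) from these enums to
# unit suffixes; the stmt tests later in this change pass the enums straight
# into bind parameters, e.g. params[0].timestamp(1626861392589, PrecisionEnum.Milliseconds).
PRECISION_UNIT = {
    PrecisionEnum.Milliseconds: "ms",
    PrecisionEnum.Microseconds: "us",
    PrecisionEnum.Nanoseconds: "ns",
}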

View File

@ -0,0 +1,245 @@
from .cinterface import *
# from .connection import TaosConnection
from .error import *
class TaosResult(object):
"""TDengine result interface"""
def __init__(self, result, close_after=False, conn=None):
# type: (c_void_p, bool, TaosConnection) -> None
# to make the __del__ order right
self._conn = conn
self._close_after = close_after
self._result = result
self._fields = None
self._field_count = None
self._precision = None
self._block = None
self._block_length = None
self._row_count = 0
def __iter__(self):
return self
def __next__(self):
return self._next_row()
def next(self):
# fetch next row
return self._next_row()
def _next_row(self):
if self._result is None or self.fields is None:
raise OperationalError("Invalid use of fetch iterator")
if self._block is None or self._block_iter >= self._block_length:
self._block, self._block_length = self.fetch_block()
self._block_iter = 0
# self._row_count += self._block_length
raw = self._block[self._block_iter]
self._block_iter += 1
return raw
@property
def fields(self):
"""fields definitions of the current result"""
if self._result is None:
raise ResultError("no result object setted")
if self._fields is None:
self._fields = taos_fetch_fields(self._result)
return self._fields
@property
def field_count(self):
"""Field count of the current result, eq to taos_field_count(result)"""
return self.fields.count
@property
def row_count(self):
"""Return the rowcount of the object"""
return self._row_count
@property
def precision(self):
if self._precision is None:
self._precision = taos_result_precision(self._result)
return self._precision
@property
def affected_rows(self):
return taos_affected_rows(self._result)
# @property
def field_lengths(self):
return taos_fetch_lengths(self._result, self.field_count)
def rows_iter(self, num_of_rows=None):
return TaosRows(self, num_of_rows)
def blocks_iter(self):
return TaosBlocks(self)
def fetch_block(self):
if self._result is None:
raise OperationalError("Invalid use of fetch iterator")
block, length = taos_fetch_block_raw(self._result)
if length == 0:
raise StopIteration
precision = self.precision
field_count = self.field_count
fields = self.fields
blocks = [None] * field_count
lengths = self.field_lengths()
for i in range(field_count):
data = ctypes.cast(block, ctypes.POINTER(ctypes.c_void_p))[i]
if fields[i].type not in CONVERT_FUNC_BLOCK:
raise DatabaseError("Invalid data type returned from database")
blocks[i] = CONVERT_FUNC_BLOCK[fields[i].type](data, length, lengths[i], precision)
return list(map(tuple, zip(*blocks))), length
def fetch_all(self):
if self._result is None:
raise OperationalError("Invalid use of fetchall")
if self._fields is None:
self._fields = taos_fetch_fields(self._result)
buffer = [[] for i in range(len(self._fields))]
self._row_count = 0
while True:
block, num_of_fields = taos_fetch_block(self._result, self._fields)
errno = taos_errno(self._result)
if errno != 0:
raise ProgrammingError(taos_errstr(self._result), errno)
if num_of_fields == 0:
break
self._row_count += num_of_fields
for i in range(len(self._fields)):
buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer)))
def fetch_rows_a(self, callback, param):
taos_fetch_rows_a(self._result, callback, param)
def stop_query(self):
return taos_stop_query(self._result)
def errno(self):
"""**DO NOT** use this directly unless you know what you are doing"""
return taos_errno(self._result)
def errstr(self):
return taos_errstr(self._result)
def check_error(self, errno=None, close=True):
if errno is None:
errno = self.errno()
if errno != 0:
msg = self.errstr()
if close: self.close()  # honor the close flag instead of always closing
raise OperationalError(msg, errno)
def close(self):
"""free result object."""
if self._result is not None and self._close_after:
taos_free_result(self._result)
self._result = None
self._fields = None
self._field_count = None
self._field_lengths = None
def __del__(self):
self.close()
class TaosRows:
"""TDengine result rows iterator"""
def __init__(self, result, num_of_rows=None):
self._result = result
self._num_of_rows = num_of_rows
def __iter__(self):
return self
def __next__(self):
return self._next_row()
def next(self):
return self._next_row()
def _next_row(self):
if self._result is None:
raise OperationalError("Invalid use of fetch iterator")
if self._num_of_rows is not None and self._num_of_rows <= self._result._row_count:
raise StopIteration
row = taos_fetch_row_raw(self._result._result)
if not row:
raise StopIteration
self._result._row_count += 1
return TaosRow(self._result, row)
@property
def row_count(self):
"""Return the rowcount of the object"""
return self._result._row_count
class TaosRow:
def __init__(self, result, row):
self._result = result
self._row = row
def __str__(self):
return taos_print_row(self._row, self._result.fields, self._result.field_count)
def __call__(self):
return self.as_tuple()
def _astuple(self):
return self.as_tuple()
def __iter__(self):
# __iter__ must return an iterator, not the tuple itself
return iter(self.as_tuple())
def as_ptr(self):
return self._row
def as_tuple(self):
precision = self._result.precision
field_count = self._result.field_count
blocks = [None] * field_count
fields = self._result.fields
field_lens = self._result.field_lengths()
for i in range(field_count):
data = ctypes.cast(self._row, ctypes.POINTER(ctypes.c_void_p))[i]
if fields[i].type not in CONVERT_FUNC:
raise DatabaseError("Invalid data type returned from database")
if data is None:
blocks[i] = None
else:
blocks[i] = CONVERT_FUNC[fields[i].type](data, 1, field_lens[i], precision)[0]
return tuple(blocks)
class TaosBlocks:
"""TDengine result blocks iterator"""
def __init__(self, result):
self._result = result
def __iter__(self):
return self
def __next__(self):
return self._result.fetch_block()
def next(self):
return self._result.fetch_block()
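# A hedged usage sketch (requires a reachable TDengine server; connect() and
# the log database are assumptions borrowed from the tests in this change):
if __name__ == "__main__":
    from taos import connect
    conn = connect(database="log")
    result = conn.query("select * from log limit 10")
    for row in result.rows_iter():  # row-by-row via taos_fetch_row_raw
        print(row)
    result.close()
    conn.close()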

View File

@ -0,0 +1,85 @@
from taos.cinterface import *
from taos.error import *
from taos.result import *
class TaosStmt(object):
"""TDengine STMT interface"""
def __init__(self, stmt, conn = None):
self._conn = conn
self._stmt = stmt
def set_tbname(self, name):
"""Set table name if needed.
Note that the set_tbname* method should only used in insert statement
"""
if self._stmt is None:
raise StatementError("Invalid use of set_tbname")
taos_stmt_set_tbname(self._stmt, name)
def prepare(self, sql):
# type: (str) -> None
taos_stmt_prepare(self._stmt, sql)
def set_tbname_tags(self, name, tags):
# type: (str, Array[TaosBind]) -> None
"""Set table name with tags, tags is array of BindParams"""
if self._stmt is None:
raise StatementError("Invalid use of set_tbname")
taos_stmt_set_tbname_tags(self._stmt, name, tags)
def bind_param(self, params, add_batch=True):
# type: (Array[TaosBind], bool) -> None
if self._stmt is None:
raise StatementError("Invalid use of stmt")
taos_stmt_bind_param(self._stmt, params)
if add_batch:
taos_stmt_add_batch(self._stmt)
def bind_param_batch(self, binds, add_batch=True):
# type: (Array[TaosMultiBind], bool) -> None
if self._stmt is None:
raise StatementError("Invalid use of stmt")
taos_stmt_bind_param_batch(self._stmt, binds)
if add_batch:
taos_stmt_add_batch(self._stmt)
def add_batch(self):
if self._stmt is None:
raise StatementError("Invalid use of stmt")
taos_stmt_add_batch(self._stmt)
def execute(self):
if self._stmt is None:
raise StatementError("Invalid use of execute")
taos_stmt_execute(self._stmt)
def use_result(self):
result = taos_stmt_use_result(self._stmt)
return TaosResult(result)
def close(self):
"""Close stmt."""
if self._stmt is None:
return
taos_stmt_close(self._stmt)
self._stmt = None
def __del__(self):
self.close()
if __name__ == "__main__":
from taos.connection import TaosConnection
conn = TaosConnection()
stmt = conn.statement("select * from log.log limit 10")
stmt.execute()
result = stmt.use_result()
for row in result:
print(row)
stmt.close()
conn.close()

View File

@ -0,0 +1,22 @@
from taos.cinterface import *
from taos.error import *
from taos.result import *
class TaosStream(object):
"""TDengine Stream interface"""
def __init__(self, stream):
self._raw = stream
def as_ptr(self):
return self._raw
def close(self):
"""Close stmt."""
if self._raw is not None:
taos_close_stream(self._raw)
self._raw = None
def __del__(self):
self.close()
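# A hedged usage sketch (requires a running server; conn.stream() is assumed
# from TaosConnection, as exercised by the stream test later in this change):
if __name__ == "__main__":
    import time
    from taos import connect
    def callback(p_param, p_result, p_row):
        print("stream window fired")
    conn = connect(database="log")
    stream = conn.stream("select count(*) from log interval(5s)", callback)
    time.sleep(10)  # let a couple of windows fire
    stream.close()
    conn.close()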

View File

@ -1,49 +1,41 @@
from .cinterface import CTaosInterface
from taos.result import TaosResult
from .cinterface import *
from .error import *
class TDengineSubscription(object):
"""TDengine subscription object
"""
class TaosSubscription(object):
"""TDengine subscription object"""
def __init__(self, sub):
def __init__(self, sub, with_callback = False):
self._sub = sub
self._with_callback = with_callback
def consume(self):
"""Consume rows of a subscription
"""
"""Consume rows of a subscription"""
if self._sub is None:
raise OperationalError("Invalid use of consume")
result, fields = CTaosInterface.consume(self._sub)
buffer = [[] for i in range(len(fields))]
while True:
block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
if num_of_fields == 0:
break
for i in range(len(fields)):
buffer[i].extend(block[i])
self.fields = fields
return list(map(tuple, zip(*buffer)))
if self._with_callback:
raise OperationalError("DONOT use consume method in an subscription with callback")
result = taos_consume(self._sub)
return TaosResult(result)
def close(self, keepProgress=True):
"""Close the Subscription.
"""
"""Close the Subscription."""
if self._sub is None:
return False
CTaosInterface.unsubscribe(self._sub, keepProgress)
taos_unsubscribe(self._sub, keepProgress)
self._sub = None
return True
def __del__(self):
self.close()
if __name__ == '__main__':
from .connection import TDengineConnection
conn = TDengineConnection(
host="127.0.0.1",
user="root",
password="taosdata",
database="test")
if __name__ == "__main__":
from .connection import TaosConnection
conn = TaosConnection(host="127.0.0.1", user="root", password="taosdata", database="test")
# Generate a cursor object to run SQL commands
sub = conn.subscribe(True, "test", "select * from meters;", 1000)
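# A hedged continuation of this demo (not in the original): poll one batch of
# rows from the subscription, then release both handles.
result = sub.consume()
for row in result:
    print(row)
sub.close()
conn.close()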

View File

@ -0,0 +1,17 @@
class TimestampType(object):
"""Choose which type that parsing TDengine timestamp data to
- DATETIME: use python datetime.datetime, note that it does not support nanosecond precision,
and python taos will use raw c_int64 as a fallback for nanosecond results.
- NUMPY: use numpy.datetime64 type.
- RAW: use raw c_int64.
- TAOS: use taos' TaosTimestamp.
"""
DATETIME = 0
NUMPY = 1
RAW = 2
TAOS = 3
class TaosTimestamp:
pass
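# Hypothetical workaround (not part of this module) for the DATETIME fallback
# described above: truncate a raw nanosecond timestamp to microseconds so it
# fits in datetime, losing sub-microsecond detail.
def _ns_to_datetime(ns):
    import datetime
    seconds, rem_ns = divmod(ns, 1000000000)
    return datetime.datetime.fromtimestamp(seconds) + datetime.timedelta(microseconds=rem_ns // 1000)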

View File

@ -0,0 +1,162 @@
from taos.cinterface import *
from taos.precision import *
from taos.bind import *
import time
import datetime
import pytest
@pytest.fixture
def conn():
return CTaosInterface().connect()
def test_simple(conn, caplog):
dbname = "pytest_ctaos_simple"
try:
res = taos_query(conn, "create database if not exists %s" % dbname)
taos_free_result(res)
taos_select_db(conn, dbname)
res = taos_query(
conn,
"create table if not exists log(ts timestamp, level tinyint, content binary(100), ipaddr binary(134))",
)
taos_free_result(res)
res = taos_query(conn, "insert into log values(now, 1, 'hello', 'test')")
taos_free_result(res)
res = taos_query(conn, "select level,content,ipaddr from log limit 1")
fields = taos_fetch_fields_raw(res)
field_count = taos_field_count(res)
fields = taos_fetch_fields(res)
for field in fields:
print(field)
# field_lengths = taos_fetch_lengths(res, field_count)
# if not field_lengths:
# raise "fetch lengths error"
row = taos_fetch_row_raw(res)
rowstr = taos_print_row(row, fields, field_count)
assert rowstr == "1 hello test"
row, num = taos_fetch_row(res, fields)
print(row)
taos_free_result(res)
taos_query(conn, "drop database if exists " + dbname)
taos_close(conn)
except Exception as err:
taos_query(conn, "drop database if exists " + dbname)
raise err
def test_stmt(conn, caplog):
dbname = "pytest_ctaos_stmt"
try:
res = taos_query(conn, "drop database if exists %s" % dbname)
taos_free_result(res)
res = taos_query(conn, "create database if not exists %s" % dbname)
taos_free_result(res)
taos_select_db(conn, dbname)
res = taos_query(
conn,
"create table if not exists log(ts timestamp, nil tinyint, ti tinyint, si smallint, ii int,\
bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
ff float, dd double, bb binary(100), nn nchar(100))",
)
taos_free_result(res)
stmt = taos_stmt_init(conn)
taos_stmt_prepare(stmt, "insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_bind_params(14)
params[0].timestamp(1626861392589, PrecisionEnum.Milliseconds)
params[1].null()
params[2].tinyint(2)
params[3].smallint(3)
params[4].int(4)
params[5].bigint(5)
params[6].tinyint_unsigned(6)
params[7].smallint_unsigned(7)
params[8].int_unsigned(8)
params[9].bigint_unsigned(9)
params[10].float(10.1)
params[11].double(10.11)
params[12].binary("hello")
params[13].nchar("stmt")
taos_stmt_bind_param(stmt, params)
taos_stmt_add_batch(stmt)
taos_stmt_execute(stmt)
res = taos_query(conn, "select * from log limit 1")
fields = taos_fetch_fields(res)
field_count = taos_field_count(res)
row = taos_fetch_row_raw(res)
rowstr = taos_print_row(row, fields, field_count, 100)
taos_free_result(res)
taos_query(conn, "drop database if exists " + dbname)
taos_close(conn)
assert rowstr == "1626861392589 NULL 2 3 4 5 6 7 8 9 10.100000 10.110000 hello stmt"
except Exception as err:
taos_query(conn, "drop database if exists " + dbname)
raise err
def stream_callback(param, result, row):
# type: (c_void_p, c_void_p, c_void_p) -> None
try:
if result is None or row is None:
return
result = c_void_p(result)
row = c_void_p(row)
fields = taos_fetch_fields_raw(result)
num_fields = taos_field_count(result)
s = taos_print_row(row, fields, num_fields)
print(s)
taos_stop_query(result)
except Exception as err:
print(err)
def test_stream(conn, caplog):
dbname = "pytest_ctaos_stream"
try:
res = taos_query(conn, "create database if not exists %s" % dbname)
taos_free_result(res)
taos_select_db(conn, dbname)
res = taos_query(
conn,
"create table if not exists log(ts timestamp, n int)",
)
taos_free_result(res)
res = taos_query(conn, "select count(*) from log interval(5s)")
cc = taos_num_fields(res)
assert cc == 2
stream = taos_open_stream(conn, "select count(*) from log interval(5s)", stream_callback, 0, None, None)
print("waiting for data")
time.sleep(1)
for i in range(0, 2):
res = taos_query(conn, "insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
taos_free_result(res)
time.sleep(2)
taos_close_stream(stream)
taos_query(conn, "drop database if exists " + dbname)
taos_close(conn)
except Exception as err:
taos_query(conn, "drop database if exists " + dbname)
raise err

View File

@ -0,0 +1,23 @@
from taos.cinterface import *
from taos import *
import pytest
@pytest.fixture
def conn():
return connect()
def test_client_info():
print(taos_get_client_info())
None
def test_server_info(conn):
# type: (TaosConnection) -> None
print(conn.client_info)
print(conn.server_info)
None
if __name__ == "__main__":
test_client_info()
test_server_info(connect())

View File

@ -0,0 +1,57 @@
from taos.error import OperationalError
from taos import connect, new_bind_params, PrecisionEnum
from taos import *
from ctypes import *
import taos
import pytest
@pytest.fixture
def conn():
# type: () -> taos.TaosConnection
return connect()
def test_insert_lines(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_insert_lines"
try:
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns',
'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns',
'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)
print("inserted")
lines = [
'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)
print("inserted")
result = conn.query("select * from st")
print(*result.fields)
all = result.rows_iter()
for row in all:
print(row)
result.close()
print(result.row_count)
conn.execute("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err
if __name__ == "__main__":
test_insert_lines(connect())

View File

@ -0,0 +1,43 @@
from datetime import datetime
import taos
import pytest
@pytest.fixture
def conn():
return taos.connect()
def test_query(conn):
"""This test will use fetch_block for rows fetching, significantly faster than rows_iter"""
result = conn.query("select * from log.log limit 10000")
fields = result.fields
for field in fields:
print("field: %s" % field)
start = datetime.now()
for row in result:
# print(row)
None
end = datetime.now()
elapsed = end - start
print("elapsed time: ", elapsed)
result.close()
conn.close()
def test_query_row_iter(conn):
"""This test will use fetch_row for each row fetching, this is the only way in async callback"""
result = conn.query("select * from log.log limit 10000")
fields = result.fields
for field in fields:
print("field: %s" % field)
start = datetime.now()
for row in result.rows_iter():
# print(row)
None
end = datetime.now()
elapsed = end - start
print("elapsed time: ", elapsed)
result.close()
conn.close()
if __name__ == "__main__":
test_query(taos.connect(database = "log"))
test_query_row_iter(taos.connect(database = "log"))

View File

@ -0,0 +1,66 @@
from taos import *
from ctypes import *
import taos
import pytest
import time
@pytest.fixture
def conn():
return taos.connect()
def fetch_callback(p_param, p_result, num_of_rows):
print("fetched ", num_of_rows, "rows")
p = cast(p_param, POINTER(Counter))
result = TaosResult(p_result)
if num_of_rows == 0:
print("fetching completed")
p.contents.done = True
result.close()
return
if num_of_rows < 0:
p.contents.done = True
result.check_error(num_of_rows)
result.close()
return None
for row in result.rows_iter(num_of_rows):
# print(row)
None
p.contents.count += result.row_count
result.fetch_rows_a(fetch_callback, p_param)
def query_callback(p_param, p_result, code):
# type: (c_void_p, c_void_p, c_int) -> None
if p_result is None:
return
result = TaosResult(p_result)
if code == 0:
result.fetch_rows_a(fetch_callback, p_param)
result.check_error(code)
class Counter(Structure):
_fields_ = [("count", c_int), ("done", c_bool)]
def __str__(self):
return "{ count: %d, done: %s }" % (self.count, self.done)
def test_query(conn):
# type: (TaosConnection) -> None
counter = Counter(count=0)
conn.query_a("select * from log.log", query_callback, byref(counter))
while not counter.done:
print("wait query callback")
time.sleep(1)
print(counter)
conn.close()
if __name__ == "__main__":
test_query(taos.connect())

View File

@ -0,0 +1,149 @@
from taos import *
from ctypes import *
from datetime import datetime
import taos
import pytest
@pytest.fixture
def conn():
# type: () -> taos.TaosConnection
return connect()
def test_stmt_insert(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_stmt"
try:
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\
bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
conn.load_table_info("log")
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_bind_params(16)
params[0].timestamp(1626861392589, PrecisionEnum.Milliseconds)
params[1].bool(True)
params[2].null()
params[3].tinyint(2)
params[4].smallint(3)
params[5].int(4)
params[6].bigint(5)
params[7].tinyint_unsigned(6)
params[8].smallint_unsigned(7)
params[9].int_unsigned(8)
params[10].bigint_unsigned(9)
params[11].float(10.1)
params[12].double(10.11)
params[13].binary("hello")
params[14].nchar("stmt")
params[15].timestamp(1626861392589, PrecisionEnum.Milliseconds)
stmt.bind_param(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 1
result.close()
stmt.close()
stmt = conn.statement("select * from log")
stmt.execute()
result = stmt.use_result()
row = result.next()
print(row)
assert row[2] is None
for i in range(3, 11):
assert row[i] == i - 1
# float == may not work as expected; the float column is row[11]
# assert row[11] == c_float(10.1)
assert row[12] == 10.11
assert row[13] == "hello"
assert row[14] == "stmt"
conn.execute("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
raise err
def test_stmt_insert_multi(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_stmt_multi"
try:
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\
bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
conn.load_table_info("log")
start = datetime.now()
stmt = conn.statement("insert into log values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
stmt.bind_param_batch(params)
stmt.execute()
end = datetime.now()
print("elapsed time: ", end - start)
result = stmt.use_result()
assert result.affected_rows == 3
result.close()
stmt.close()
stmt = conn.statement("select * from log")
stmt.execute()
result = stmt.use_result()
for row in result:
print(row)
result.close()
stmt.close()
# start = datetime.now()
# conn.query("insert into log values(1626861392660, true, NULL, 0, 3,3,3,3,3,3,3,3.0,3.0, 'abc','涛思数据',NULL)(1626861392661, true, NULL, 0, 3,3,3,3,3,3,3,3.0,3.0, 'abc','涛思数据',NULL)(1626861392662, true, NULL, 0, 3,3,3,3,3,3,3,3.0,3.0, 'abc','涛思数据',NULL)")
# end = datetime.now()
# print("elapsed time: ", end - start)
conn.execute("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
raise err
if __name__ == "__main__":
test_stmt_insert(connect())
test_stmt_insert_multi(connect())

View File

@ -0,0 +1,70 @@
from taos.cinterface import *
from taos.precision import *
from taos.bind import *
from taos import *
from ctypes import *
import time
import pytest
@pytest.fixture
def conn():
return connect()
def stream_callback(p_param, p_result, p_row):
# type: (c_void_p, c_void_p, c_void_p) -> None
if p_result is None or p_row is None:
return
result = TaosResult(p_result)
row = TaosRow(result, p_row)
try:
ts, count = row()
p = cast(p_param, POINTER(Counter))
p.contents.count += count
print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count))
except Exception as err:
print(err)
raise err
class Counter(ctypes.Structure):
_fields_ = [
("count", c_int),
]
def __str__(self):
return "%d" % self.count
def test_stream(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_stream"
try:
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute("create table if not exists log(ts timestamp, n int)")
result = conn.query("select count(*) from log interval(5s)")
assert result.field_count == 2
counter = Counter()
counter.count = 0
stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))
for _ in range(0, 20):
conn.execute("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
time.sleep(2)
stream.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
raise err
if __name__ == "__main__":
test_stream(connect())

View File

@ -0,0 +1,100 @@
from taos.subscription import TaosSubscription
from taos import *
from ctypes import *
import taos
import pytest
import time
from random import random
@pytest.fixture
def conn():
return taos.connect()
def test_subscribe(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_subscribe_callback"
try:
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute("create table if not exists log(ts timestamp, n int)")
for i in range(10):
conn.execute("insert into log values(now, %d)" % i)
sub = conn.subscribe(True, "test", "select * from log", 1000)
print("# consume from begin")
for ts, n in sub.consume():
print(ts, n)
print("# consume new data")
for i in range(5):
conn.execute("insert into log values(now, %d)(now+1s, %d)" % (i, i))
result = sub.consume()
for ts, n in result:
print(ts, n)
print("# consume with a stop condition")
for i in range(10):
conn.execute("insert into log values(now, %d)" % int(random() * 10))
result = sub.consume()
try:
ts, n = next(result)
print(ts, n)
if n > 5:
result.stop_query()
print("## stopped")
break
except StopIteration:
continue
sub.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
raise err
def subscribe_callback(p_sub, p_result, p_param, errno):
# type: (c_void_p, c_void_p, c_void_p, c_int) -> None
print("callback")
result = TaosResult(p_result)
result.check_error(errno)
for row in result.rows_iter():
ts, n = row()
print(ts, n)
def test_subscribe_callback(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_subscribe_callback"
try:
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute("create table if not exists log(ts timestamp, n int)")
print("# subscribe with callback")
sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)
for i in range(10):
conn.execute("insert into log values(now, %d)" % i)
time.sleep(0.7)
sub.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
raise err
if __name__ == "__main__":
test_subscribe(taos.connect())
test_subscribe_callback(taos.connect())

View File

@ -224,6 +224,7 @@ do { \
#define TSDB_IPv4ADDR_LEN 16
#define TSDB_FILENAME_LEN 128
#define TSDB_SHOW_SQL_LEN 512
#define TSDB_SHOW_SUBQUERY_LEN 1000
#define TSDB_SLOW_QUERY_SQL_LEN 512
#define TSDB_STEP_NAME_LEN 32

View File

@ -878,7 +878,9 @@ typedef struct {
uint64_t sqlObjId;
int32_t pid;
char fqdn[TSDB_FQDN_LEN];
bool stableQuery;
int32_t numOfSub;
char subSqlInfo[TSDB_SHOW_SUBQUERY_LEN]; // includes subqueries' index, Obj IDs and states (C-complete/I-incomplete)
} SQueryDesc;
typedef struct {

View File

@ -94,7 +94,7 @@ STsdbRepo *tsdbOpenRepo(STsdbCfg *pCfg, STsdbAppH *pAppH);
int tsdbCloseRepo(STsdbRepo *repo, int toCommit);
int32_t tsdbConfigRepo(STsdbRepo *repo, STsdbCfg *pCfg);
int tsdbGetState(STsdbRepo *repo);
bool tsdbInCompact(STsdbRepo *repo);
int8_t tsdbGetCompactState(STsdbRepo *repo);
// --------- TSDB TABLE DEFINITION
typedef struct {
uint64_t uid; // the unique table ID
@ -240,6 +240,7 @@ typedef struct {
int32_t minRows;
int32_t firstSeekTimeUs;
uint32_t numOfRowsInMemTable;
uint32_t numOfSmallBlocks;
SArray *dataBlockInfos;
} STableBlockDist;

View File

@ -76,7 +76,7 @@ extern char configDir[];
#define COND_BUF_LEN (BUFFER_SIZE - 30)
#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS)
#define MAX_USERNAME_SIZE 64
#define MAX_PASSWORD_SIZE 64
#define MAX_PASSWORD_SIZE 16
#define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html
#define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space
@ -215,7 +215,7 @@ typedef struct SArguments_S {
uint16_t port;
uint16_t iface;
char * user;
char * password;
char password[MAX_PASSWORD_SIZE];
char * database;
int replica;
char * tb_prefix;
@ -710,24 +710,24 @@ static void printHelp() {
printf("%s%s%s%s\n", indent, "-u", indent,
"The TDengine user name to use when connecting to the server. Default is 'root'.");
#ifdef _TD_POWER_
printf("%s%s%s%s\n", indent, "-P", indent,
printf("%s%s%s%s\n", indent, "-p", indent,
"The password to use when connecting to the server. Default is 'powerdb'.");
printf("%s%s%s%s\n", indent, "-c", indent,
"Configuration directory. Default is '/etc/power/'.");
#elif (_TD_TQ_ == true)
printf("%s%s%s%s\n", indent, "-P", indent,
printf("%s%s%s%s\n", indent, "-p", indent,
"The password to use when connecting to the server. Default is 'tqueue'.");
printf("%s%s%s%s\n", indent, "-c", indent,
"Configuration directory. Default is '/etc/tq/'.");
#else
printf("%s%s%s%s\n", indent, "-P", indent,
printf("%s%s%s%s\n", indent, "-p", indent,
"The password to use when connecting to the server. Default is 'taosdata'.");
printf("%s%s%s%s\n", indent, "-c", indent,
"Configuration directory. Default is '/etc/taos/'.");
#endif
printf("%s%s%s%s\n", indent, "-h", indent,
"The host to connect to TDengine. Default is localhost.");
printf("%s%s%s%s\n", indent, "-p", indent,
printf("%s%s%s%s\n", indent, "-P", indent,
"The TCP/IP port number to use for the connection. Default is 0.");
printf("%s%s%s%s\n", indent, "-I", indent,
#if STMT_IFACE_ENABLED == 1
@ -827,11 +827,11 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
exit(EXIT_FAILURE);
}
arguments->host = argv[++i];
} else if (strcmp(argv[i], "-p") == 0) {
} else if (strcmp(argv[i], "-P") == 0) {
if ((argc == i+1) ||
(!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-p need a number following!\n");
errorPrint("%s", "\n\t-P need a number following!\n");
exit(EXIT_FAILURE);
}
arguments->port = atoi(argv[++i]);
@ -861,13 +861,13 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
exit(EXIT_FAILURE);
}
arguments->user = argv[++i];
} else if (strcmp(argv[i], "-P") == 0) {
if (argc == i+1) {
printHelp();
errorPrint("%s", "\n\t-P need a valid string following!\n");
exit(EXIT_FAILURE);
} else if (strncmp(argv[i], "-p", 2) == 0) {
if (strlen(argv[i]) == 2) {
printf("Enter password:");
scanf("%s", arguments->password);
} else {
tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE);
}
arguments->password = argv[++i];
} else if (strcmp(argv[i], "-o") == 0) {
if (argc == i+1) {
printHelp();
@ -1065,7 +1065,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
arguments->debug_print = true;
} else if (strcmp(argv[i], "-gg") == 0) {
arguments->verbose_print = true;
} else if (strcmp(argv[i], "-pp") == 0) {
} else if (strcmp(argv[i], "-PP") == 0) {
arguments->performance_print = true;
} else if (strcmp(argv[i], "-O") == 0) {
if ((argc == i+1) ||
@ -2318,15 +2318,15 @@ static void printfDbInfoForQueryToFile(
}
static void printfQuerySystemInfo(TAOS * taos) {
char filename[BUFFER_SIZE+1] = {0};
char buffer[BUFFER_SIZE+1] = {0};
char filename[MAX_FILE_NAME_LEN] = {0};
char buffer[1024] = {0};
TAOS_RES* res;
time_t t;
struct tm* lt;
time(&t);
lt = localtime(&t);
snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d",
snprintf(filename, MAX_FILE_NAME_LEN, "querySystemInfo-%d-%d-%d %d:%d:%d",
lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min,
lt->tm_sec);
@ -2358,12 +2358,12 @@ static void printfQuerySystemInfo(TAOS * taos) {
printfDbInfoForQueryToFile(filename, dbInfos[i], i);
// show db.vgroups
snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name);
snprintf(buffer, 1024, "show %s.vgroups;", dbInfos[i]->name);
res = taos_query(taos, buffer);
xDumpResultToFile(filename, res);
// show db.stables
snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name);
snprintf(buffer, 1024, "show %s.stables;", dbInfos[i]->name);
res = taos_query(taos, buffer);
xDumpResultToFile(filename, res);
free(dbInfos[i]);
@ -2712,7 +2712,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* dbName, char* sTblName, char** childTblNameOfSuperTbl,
int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) {
char command[BUFFER_SIZE] = "\0";
char command[1024] = "\0";
char limitBuf[100] = "\0";
TAOS_RES * res;
@ -2726,7 +2726,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
}
//get all child table name use cmd: select tbname from superTblName;
snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s",
snprintf(command, 1024, "select tbname from %s.%s %s",
dbName, sTblName, limitBuf);
res = taos_query(taos, command);
@ -2804,13 +2804,13 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
static int getSuperTableFromServer(TAOS * taos, char* dbName,
SSuperTable* superTbls) {
char command[BUFFER_SIZE] = "\0";
char command[1024] = "\0";
TAOS_RES * res;
TAOS_ROW row = NULL;
int count = 0;
//get schema use cmd: describe superTblName;
snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName);
snprintf(command, 1024, "describe %s.%s", dbName, superTbls->sTblName);
res = taos_query(taos, command);
int32_t code = taos_errno(res);
if (code != 0) {
@ -2890,7 +2890,8 @@ static int createSuperTable(
TAOS * taos, char* dbName,
SSuperTable* superTbl) {
char command[BUFFER_SIZE] = "\0";
char *command = calloc(1, BUFFER_SIZE);
assert(command);
char cols[COL_BUFFER_LEN] = "\0";
int colIndex;
@ -2901,6 +2902,7 @@ static int createSuperTable(
if (superTbl->columnCount == 0) {
errorPrint("%s() LN%d, super table column count is %d\n",
__func__, __LINE__, superTbl->columnCount);
free(command);
return -1;
}
@ -2963,6 +2965,7 @@ static int createSuperTable(
taos_close(taos);
errorPrint("%s() LN%d, config error data type : %s\n",
__func__, __LINE__, dataType);
free(command);
exit(-1);
}
}
@ -2975,6 +2978,7 @@ static int createSuperTable(
errorPrint("%s() LN%d, Failed when calloc, size:%d",
__func__, __LINE__, len+1);
taos_close(taos);
free(command);
exit(-1);
}
@ -2985,6 +2989,7 @@ static int createSuperTable(
if (superTbl->tagCount == 0) {
errorPrint("%s() LN%d, super table tag count is %d\n",
__func__, __LINE__, superTbl->tagCount);
free(command);
return -1;
}
@ -3050,6 +3055,7 @@ static int createSuperTable(
taos_close(taos);
errorPrint("%s() LN%d, config error tag type : %s\n",
__func__, __LINE__, dataType);
free(command);
exit(-1);
}
}
@ -3065,13 +3071,16 @@ static int createSuperTable(
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
errorPrint( "create supertable %s failed!\n\n",
superTbl->sTblName);
free(command);
return -1;
}
debugPrint("create supertable %s success!\n\n", superTbl->sTblName);
free(command);
return 0;
}
static int createDatabasesAndStables() {
int createDatabasesAndStables(char *command) {
TAOS * taos = NULL;
int ret = 0;
taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port);
@ -3079,8 +3088,7 @@ static int createDatabasesAndStables() {
errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
return -1;
}
char command[BUFFER_SIZE] = "\0";
for (int i = 0; i < g_Dbs.dbCount; i++) {
if (g_Dbs.db[i].drop) {
sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName);
@ -5093,7 +5101,9 @@ static int getRowDataFromSample(
static int64_t generateStbRowData(
SSuperTable* stbInfo,
char* recBuf, int64_t timestamp)
char* recBuf,
int64_t remainderBufLen,
int64_t timestamp)
{
int64_t dataLen = 0;
char *pstr = recBuf;
@ -5121,6 +5131,7 @@ static int64_t generateStbRowData(
rand_string(buf, stbInfo->columns[i].dataLen);
dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf);
tmfree(buf);
} else {
char *tmp;
@ -5177,6 +5188,9 @@ static int64_t generateStbRowData(
tstrncpy(pstr + dataLen, ",", 2);
dataLen += 1;
}
if (dataLen > (remainderBufLen - (DOUBLE_BUFF_LEN + 1)))
return 0;
}
dataLen -= 1;
@ -5383,7 +5397,7 @@ static int32_t generateDataTailWithoutStb(
int32_t k = 0;
for (k = 0; k < batch;) {
char data[MAX_DATA_SIZE];
char *data = pstr;
memset(data, 0, MAX_DATA_SIZE);
int64_t retLen = 0;
@ -5407,7 +5421,7 @@ static int32_t generateDataTailWithoutStb(
if (len > remainderBufLen)
break;
pstr += sprintf(pstr, "%s", data);
pstr += retLen;
k++;
len += retLen;
remainderBufLen -= retLen;
@ -5463,14 +5477,14 @@ static int32_t generateStbDataTail(
int32_t k;
for (k = 0; k < batch;) {
char data[MAX_DATA_SIZE];
memset(data, 0, MAX_DATA_SIZE);
char *data = pstr;
int64_t lenOfRow = 0;
if (tsRand) {
if (superTblInfo->disorderRatio > 0) {
lenOfRow = generateStbRowData(superTblInfo, data,
remainderBufLen,
startTime + getTSRandTail(
superTblInfo->timeStampStep, k,
superTblInfo->disorderRatio,
@ -5478,6 +5492,7 @@ static int32_t generateStbDataTail(
);
} else {
lenOfRow = generateStbRowData(superTblInfo, data,
remainderBufLen,
startTime + superTblInfo->timeStampStep * k
);
}
@ -5490,11 +5505,15 @@ static int32_t generateStbDataTail(
pSamplePos);
}
if (lenOfRow == 0) {
data[0] = '\0';
break;
}
if ((lenOfRow + 1) > remainderBufLen) {
break;
}
pstr += snprintf(pstr , lenOfRow + 1, "%s", data);
pstr += lenOfRow;
k++;
len += lenOfRow;
remainderBufLen -= lenOfRow;
@ -5970,7 +5989,7 @@ static int32_t prepareStbStmtBind(
int64_t startTime, int32_t recSeq,
bool isColumn)
{
char *bindBuffer = calloc(1, g_args.len_of_binary);
char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary);
if (bindBuffer == NULL) {
errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n",
__func__, __LINE__, g_args.len_of_binary);
@ -6246,7 +6265,7 @@ static int32_t generateStbProgressiveData(
assert(buffer != NULL);
char *pstr = buffer;
memset(buffer, 0, *pRemainderBufLen);
memset(pstr, 0, *pRemainderBufLen);
int64_t headLen = generateStbSQLHead(
superTblInfo,
@ -6305,8 +6324,8 @@ static void printStatPerThread(threadInfo *pThreadInfo)
pThreadInfo->threadID,
pThreadInfo->totalInsertRows,
pThreadInfo->totalAffectedRows,
(pThreadInfo->totalDelay/1000.0)?
(double)(pThreadInfo->totalAffectedRows/(pThreadInfo->totalDelay/1000.0)):
(pThreadInfo->totalDelay)?
(double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0)):
FLT_MAX);
}
@ -6526,7 +6545,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
verbosePrint("[%d] %s() LN%d, buffer=%s\n",
pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer);
startTs = taosGetTimestampMs();
startTs = taosGetTimestampUs();
if (recOfBatch == 0) {
errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n",
@ -6542,10 +6561,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
int64_t affectedRows = execInsert(pThreadInfo, recOfBatch);
endTs = taosGetTimestampMs();
endTs = taosGetTimestampUs();
uint64_t delay = endTs - startTs;
performancePrint("%s() LN%d, insert execution time is %"PRIu64"ms\n",
__func__, __LINE__, delay);
performancePrint("%s() LN%d, insert execution time is %10.2f ms\n",
__func__, __LINE__, delay / 1000.0);
verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
pThreadInfo->threadID,
__func__, __LINE__, affectedRows);
@ -6640,7 +6659,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
return NULL;
}
int64_t remainderBufLen = maxSqlLen;
int64_t remainderBufLen = maxSqlLen - 2000;
char *pstr = pThreadInfo->buffer;
int len = snprintf(pstr,
@ -6702,14 +6721,14 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
start_time += generated * timeStampStep;
pThreadInfo->totalInsertRows += generated;
startTs = taosGetTimestampMs();
startTs = taosGetTimestampUs();
int32_t affectedRows = execInsert(pThreadInfo, generated);
endTs = taosGetTimestampMs();
endTs = taosGetTimestampUs();
uint64_t delay = endTs - startTs;
performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n",
__func__, __LINE__, delay);
performancePrint("%s() LN%d, insert execution time is %10.f ms\n",
__func__, __LINE__, delay/1000.0);
verbosePrint("[%d] %s() LN%d affectedRows=%d\n",
pThreadInfo->threadID,
__func__, __LINE__, affectedRows);
@ -6822,10 +6841,14 @@ static void callBack(void *param, TAOS_RES *res, int code) {
&& rand_num < pThreadInfo->superTblInfo->disorderRatio) {
int64_t d = pThreadInfo->lastTs
- (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1);
generateStbRowData(pThreadInfo->superTblInfo, data, d);
generateStbRowData(pThreadInfo->superTblInfo, data,
MAX_DATA_SIZE,
d);
} else {
generateStbRowData(pThreadInfo->superTblInfo,
data, pThreadInfo->lastTs += 1000);
data,
MAX_DATA_SIZE,
pThreadInfo->lastTs += 1000);
}
pstr += sprintf(pstr, "%s", data);
pThreadInfo->counter++;
@ -7050,6 +7073,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
for (int i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
pThreadInfo->threadID = i;
tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
pThreadInfo->time_precision = timePrec;
pThreadInfo->superTblInfo = superTblInfo;
@ -7095,7 +7119,8 @@ static void startMultiThreadInsertData(int threads, char* db_name,
exit(-1);
}
char buffer[BUFFER_SIZE];
char *buffer = calloc(1, BUFFER_SIZE);
assert(buffer);
char *pstr = buffer;
if ((superTblInfo)
@ -7124,8 +7149,11 @@ static void startMultiThreadInsertData(int threads, char* db_name,
ret, taos_stmt_errstr(pThreadInfo->stmt));
free(pids);
free(infos);
free(buffer);
exit(-1);
}
free(buffer);
}
#endif
} else {
@ -7238,11 +7266,15 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
}
fprintf(stderr, "insert delay, avg: %10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n",
avgDelay, maxDelay, minDelay);
fprintf(stderr, "insert delay, avg: %10.2fms, max: %10.2fms, min: %10.2fms\n\n",
(double)avgDelay/1000.0,
(double)maxDelay/1000.0,
(double)minDelay/1000.0);
if (g_fpOfInsertResult) {
fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n",
avgDelay, maxDelay, minDelay);
fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %10.2fms, min: %10.2fms\n\n",
(double)avgDelay/1000.0,
(double)maxDelay/1000.0,
(double)minDelay/1000.0);
}
//taos_close(taos);
@ -7256,12 +7288,15 @@ static void *readTable(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS *taos = pThreadInfo->taos;
setThreadName("readTable");
char command[BUFFER_SIZE] = "\0";
char *command = calloc(1, BUFFER_SIZE);
assert(command);
uint64_t sTime = pThreadInfo->start_time;
char *tb_prefix = pThreadInfo->tb_prefix;
FILE *fp = fopen(pThreadInfo->filePath, "a");
if (NULL == fp) {
errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
free(command);
return NULL;
}
@ -7300,6 +7335,7 @@ static void *readTable(void *sarg) {
taos_free_result(pSql);
taos_close(taos);
fclose(fp);
free(command);
return NULL;
}
@ -7320,6 +7356,7 @@ static void *readTable(void *sarg) {
}
fprintf(fp, "\n");
fclose(fp);
free(command);
#endif
return NULL;
}
@ -7329,10 +7366,13 @@ static void *readMetric(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS *taos = pThreadInfo->taos;
setThreadName("readMetric");
char command[BUFFER_SIZE] = "\0";
char *command = calloc(1, BUFFER_SIZE);
assert(command);
FILE *fp = fopen(pThreadInfo->filePath, "a");
if (NULL == fp) {
printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
free(command);
return NULL;
}
@ -7377,6 +7417,7 @@ static void *readMetric(void *sarg) {
taos_free_result(pSql);
taos_close(taos);
fclose(fp);
free(command);
return NULL;
}
int count = 0;
@ -7394,6 +7435,7 @@ static void *readMetric(void *sarg) {
fprintf(fp, "\n");
}
fclose(fp);
free(command);
#endif
return NULL;
}
@ -7430,11 +7472,16 @@ static int insertTestProcess() {
init_rand_data();
// create database and super tables
if(createDatabasesAndStables() != 0) {
char *cmdBuffer = calloc(1, BUFFER_SIZE);
assert(cmdBuffer);
if(createDatabasesAndStables(cmdBuffer) != 0) {
if (g_fpOfInsertResult)
fclose(g_fpOfInsertResult);
free(cmdBuffer);
return -1;
}
free(cmdBuffer);
// pretreatement
if (prepareSampleData() != 0) {
@ -7603,7 +7650,9 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {
}
static void *superTableQuery(void *sarg) {
char sqlstr[BUFFER_SIZE];
char *sqlstr = calloc(1, BUFFER_SIZE);
assert(sqlstr);
threadInfo *pThreadInfo = (threadInfo *)sarg;
setThreadName("superTableQuery");
@ -7618,6 +7667,7 @@ static void *superTableQuery(void *sarg) {
if (taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
free(sqlstr);
return NULL;
} else {
pThreadInfo->taos = taos;
@ -7642,7 +7692,7 @@ static void *superTableQuery(void *sarg) {
st = taosGetTimestampMs();
for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) {
for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
memset(sqlstr,0,sizeof(sqlstr));
memset(sqlstr, 0, BUFFER_SIZE);
replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
if (g_queryInfo.superQueryInfo.result[j][0] != '\0') {
sprintf(pThreadInfo->filePath, "%s-%d",
@ -7673,6 +7723,7 @@ static void *superTableQuery(void *sarg) {
(double)(et - st)/1000.0);
}
free(sqlstr);
return NULL;
}
@ -7906,7 +7957,9 @@ static TAOS_SUB* subscribeImpl(
static void *superSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
char subSqlstr[BUFFER_SIZE];
char *subSqlStr = calloc(1, BUFFER_SIZE);
assert(subSqlStr);
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
uint64_t tsubSeq;
@ -7915,6 +7968,7 @@ static void *superSubscribe(void *sarg) {
if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) {
errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n",
pThreadInfo->ntables, MAX_QUERY_SQL_COUNT);
free(subSqlStr);
exit(-1);
}
@ -7927,6 +7981,7 @@ static void *superSubscribe(void *sarg) {
if (pThreadInfo->taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
free(subSqlStr);
return NULL;
}
}
@ -7937,6 +7992,7 @@ static void *superSubscribe(void *sarg) {
taos_close(pThreadInfo->taos);
errorPrint( "use database %s failed!\n\n",
g_queryInfo.dbName);
free(subSqlStr);
return NULL;
}
@ -7951,25 +8007,26 @@ static void *superSubscribe(void *sarg) {
pThreadInfo->end_table_to, i);
sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"",
i, pThreadInfo->querySeq);
memset(subSqlstr, 0, sizeof(subSqlstr));
memset(subSqlStr, 0, BUFFER_SIZE);
replaceChildTblName(
g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq],
subSqlstr, i);
subSqlStr, i);
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
}
verbosePrint("%s() LN%d, [%d] subSqlstr: %s\n",
__func__, __LINE__, pThreadInfo->threadID, subSqlstr);
verbosePrint("%s() LN%d, [%d] subSqlStr: %s\n",
__func__, __LINE__, pThreadInfo->threadID, subSqlStr);
tsub[tsubSeq] = subscribeImpl(
STABLE_CLASS,
pThreadInfo, subSqlstr, topic,
pThreadInfo, subSqlStr, topic,
g_queryInfo.superQueryInfo.subscribeRestart,
g_queryInfo.superQueryInfo.subscribeInterval);
if (NULL == tsub[tsubSeq]) {
taos_close(pThreadInfo->taos);
free(subSqlStr);
return NULL;
}
}
@ -8026,12 +8083,13 @@ static void *superSubscribe(void *sarg) {
consumed[tsubSeq]= 0;
tsub[tsubSeq] = subscribeImpl(
STABLE_CLASS,
pThreadInfo, subSqlstr, topic,
pThreadInfo, subSqlStr, topic,
g_queryInfo.superQueryInfo.subscribeRestart,
g_queryInfo.superQueryInfo.subscribeInterval
);
if (NULL == tsub[tsubSeq]) {
taos_close(pThreadInfo->taos);
free(subSqlStr);
return NULL;
}
}
@ -8051,6 +8109,7 @@ static void *superSubscribe(void *sarg) {
}
taos_close(pThreadInfo->taos);
free(subSqlStr);
return NULL;
}
@ -8353,9 +8412,7 @@ static void setParaFromArg() {
tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE);
}
if (g_args.password) {
tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE);
}
tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE);
if (g_args.port) {
g_Dbs.port = g_args.port;

View File

@ -231,8 +231,8 @@ static struct argp_option options[] = {
{"schemaonly", 's', 0, 0, "Only dump schema.", 2},
{"without-property", 'N', 0, 0, "Dump schema without properties.", 2},
{"avro", 'V', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
#if TSDB_SUPPORT_NANOSECOND == 1
{"precision", 'C', "PRECISION", 0, "Specify precision for converting human-readable time to epoch. Valid value is one of ms, us, and ns. Default is ms.", 6},
#else
@ -307,7 +307,7 @@ static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols,
FILE *fp, char* dbName);
static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric,
int numOfCols, FILE *fp, char* dbName);
static int32_t taosDumpTable(char *table, char *metric,
static int32_t taosDumpTable(char *tbName, char *metric,
FILE *fp, TAOS* taosCon, char* dbName);
static int taosDumpTableData(FILE *fp, char *tbName,
TAOS* taosCon, char* dbName,
@ -340,7 +340,7 @@ struct arguments g_args = {
false, // schemeonly
true, // with_property
false, // avro format
-INT64_MAX, // start_time
-INT64_MAX, // start_time
INT64_MAX, // end_time
"ms", // precision
1, // data_batch
@ -798,11 +798,11 @@ static int taosGetTableRecordInfo(
tstrncpy(pTableRecordInfo->tableRecord.name,
(char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
min(TSDB_TABLE_NAME_LEN,
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1);
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes + 1));
tstrncpy(pTableRecordInfo->tableRecord.metric,
(char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
min(TSDB_TABLE_NAME_LEN,
fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes) + 1);
fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes + 1));
break;
}
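
Reviewer note: the recurring change from min(CAP, bytes) + 1 to min(CAP, bytes + 1) throughout taosdump.c is a genuine bounds fix. With the +1 outside the min, a server-reported field width equal to the destination capacity yields CAP + 1 and lets tstrncpy touch one byte past the buffer; with the +1 inside, the result is clamped to CAP. A worked check, using 193 as an illustrative value for TSDB_TABLE_NAME_LEN:

#include <assert.h>

#define TSDB_TABLE_NAME_LEN 193                   /* illustrative value */
#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
    int bytes = TSDB_TABLE_NAME_LEN;              /* worst case reported by the server */
    assert(min(TSDB_TABLE_NAME_LEN, bytes) + 1 == TSDB_TABLE_NAME_LEN + 1); /* old: one past the buffer */
    assert(min(TSDB_TABLE_NAME_LEN, bytes + 1) == TSDB_TABLE_NAME_LEN);     /* new: clamped to capacity */
    return 0;
}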
@ -945,7 +945,7 @@ static int32_t taosSaveTableOfMetricToTempFile(
int32_t numOfThread = *totalNumOfThread;
int subFd = -1;
for (; numOfThread < maxThreads; numOfThread++) {
for (; numOfThread <= maxThreads; numOfThread++) {
memset(tmpBuf, 0, MAX_FILE_NAME_LEN);
sprintf(tmpBuf, ".tables.tmp.%d", numOfThread);
subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
@ -1084,7 +1084,7 @@ _dump_db_point:
}
tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
min(TSDB_DB_NAME_LEN, fields[TSDB_SHOW_DB_NAME_INDEX].bytes) + 1);
min(TSDB_DB_NAME_LEN, fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1));
if (g_args.with_property) {
g_dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
g_dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
@ -1093,7 +1093,7 @@ _dump_db_point:
g_dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
tstrncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes) + 1);
min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1));
//g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
//g_dbInfos[count]->daysToKeep1;
//g_dbInfos[count]->daysToKeep2;
@ -1107,7 +1107,7 @@ _dump_db_point:
g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
tstrncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
min(8, fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes) + 1);
min(8, fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes + 1));
//g_dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
@ -1237,7 +1237,7 @@ _exit_failure:
static int taosGetTableDes(
char* dbName, char *table,
STableDef *tableDes, TAOS* taosCon, bool isSuperTable) {
STableDef *stableDes, TAOS* taosCon, bool isSuperTable) {
TAOS_ROW row = NULL;
TAOS_RES* res = NULL;
int count = 0;
@ -1256,18 +1256,18 @@ static int taosGetTableDes(
TAOS_FIELD *fields = taos_fetch_fields(res);
tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
tstrncpy(stableDes->name, table, TSDB_TABLE_NAME_LEN);
while ((row = taos_fetch_row(res)) != NULL) {
tstrncpy(tableDes->cols[count].field,
tstrncpy(stableDes->cols[count].field,
(char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
min(TSDB_COL_NAME_LEN + 1,
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1));
tstrncpy(tableDes->cols[count].type,
tstrncpy(stableDes->cols[count].type,
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1));
tableDes->cols[count].length =
stableDes->cols[count].length =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(tableDes->cols[count].note,
tstrncpy(stableDes->cols[count].note,
(char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
min(COL_NOTE_LEN,
fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1));
@ -1284,11 +1284,11 @@ static int taosGetTableDes(
// if chidl-table have tag, using select tagName from table to get tagValue
for (int i = 0 ; i < count; i++) {
if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
if (strcmp(stableDes->cols[i].note, "TAG") != 0) continue;
sprintf(sqlstr, "select %s from %s.%s",
tableDes->cols[i].field, dbName, table);
stableDes->cols[i].field, dbName, table);
res = taos_query(taosCon, sqlstr);
code = taos_errno(res);
@ -1310,7 +1310,7 @@ static int taosGetTableDes(
}
if (row[0] == NULL) {
sprintf(tableDes->cols[i].note, "%s", "NULL");
sprintf(stableDes->cols[i].note, "%s", "NULL");
taos_free_result(res);
res = NULL;
continue;
@ -1321,47 +1321,47 @@ static int taosGetTableDes(
//int32_t* length = taos_fetch_lengths(tmpResult);
switch (fields[0].type) {
case TSDB_DATA_TYPE_BOOL:
sprintf(tableDes->cols[i].note, "%d",
sprintf(stableDes->cols[i].note, "%d",
((((int32_t)(*((char *)row[0]))) == 1) ? 1 : 0));
break;
case TSDB_DATA_TYPE_TINYINT:
sprintf(tableDes->cols[i].note, "%d", *((int8_t *)row[0]));
sprintf(stableDes->cols[i].note, "%d", *((int8_t *)row[0]));
break;
case TSDB_DATA_TYPE_SMALLINT:
sprintf(tableDes->cols[i].note, "%d", *((int16_t *)row[0]));
sprintf(stableDes->cols[i].note, "%d", *((int16_t *)row[0]));
break;
case TSDB_DATA_TYPE_INT:
sprintf(tableDes->cols[i].note, "%d", *((int32_t *)row[0]));
sprintf(stableDes->cols[i].note, "%d", *((int32_t *)row[0]));
break;
case TSDB_DATA_TYPE_BIGINT:
sprintf(tableDes->cols[i].note, "%" PRId64 "", *((int64_t *)row[0]));
sprintf(stableDes->cols[i].note, "%" PRId64 "", *((int64_t *)row[0]));
break;
case TSDB_DATA_TYPE_FLOAT:
sprintf(tableDes->cols[i].note, "%f", GET_FLOAT_VAL(row[0]));
sprintf(stableDes->cols[i].note, "%f", GET_FLOAT_VAL(row[0]));
break;
case TSDB_DATA_TYPE_DOUBLE:
sprintf(tableDes->cols[i].note, "%f", GET_DOUBLE_VAL(row[0]));
sprintf(stableDes->cols[i].note, "%f", GET_DOUBLE_VAL(row[0]));
break;
case TSDB_DATA_TYPE_BINARY:
{
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
tableDes->cols[i].note[0] = '\'';
memset(stableDes->cols[i].note, 0, sizeof(stableDes->cols[i].note));
stableDes->cols[i].note[0] = '\'';
char tbuf[COL_NOTE_LEN];
converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf);
char* pstr = stpcpy(&(stableDes->cols[i].note[1]), tbuf);
*(pstr++) = '\'';
break;
}
case TSDB_DATA_TYPE_NCHAR:
{
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
memset(stableDes->cols[i].note, 0, sizeof(stableDes->cols[i].note));
char tbuf[COL_NOTE_LEN-2]; // reserve 2 bytes for the enclosing quotes
convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
sprintf(tableDes->cols[i].note, "\'%s\'", tbuf);
sprintf(stableDes->cols[i].note, "\'%s\'", tbuf);
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]);
sprintf(stableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]);
#if 0
if (!g_args.mysqlFlag) {
sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]);
@ -1386,7 +1386,7 @@ static int taosGetTableDes(
return count;
}
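
Reviewer note: the tag-value branches above still format into the fixed-size cols[i].note with sprintf; a bounded variant would make the capacity explicit at each call site. An illustrative sketch, assuming COL_NOTE_LEN is the note buffer size:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define COL_NOTE_LEN 128                          /* illustrative value */

static void formatBigintNote(char note[COL_NOTE_LEN], int64_t v) {
    snprintf(note, COL_NOTE_LEN, "%" PRId64, v);  /* cannot overrun the buffer */
}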
static int convertSchemaToAvroSchema(STableDef *tableDes, char **avroSchema)
static int convertSchemaToAvroSchema(STableDef *stableDes, char **avroSchema)
{
errorPrint("%s() LN%d TODO: covert table schema to avro schema\n",
__func__, __LINE__);
@ -1394,7 +1394,7 @@ static int convertSchemaToAvroSchema(STableDef *tableDes, char **avroSchema)
}
static int32_t taosDumpTable(
char *table, char *metric,
char *tbName, char *metric,
FILE *fp, TAOS* taosCon, char* dbName) {
int count = 0;
@ -1415,7 +1415,7 @@ static int32_t taosDumpTable(
memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
*/
count = taosGetTableDes(dbName, table, tableDes, taosCon, false);
count = taosGetTableDes(dbName, tbName, tableDes, taosCon, false);
if (count < 0) {
free(tableDes);
@ -1426,7 +1426,7 @@ static int32_t taosDumpTable(
taosDumpCreateMTableClause(tableDes, metric, count, fp, dbName);
} else { // dump table definition
count = taosGetTableDes(dbName, table, tableDes, taosCon, false);
count = taosGetTableDes(dbName, tbName, tableDes, taosCon, false);
if (count < 0) {
free(tableDes);
@ -1446,7 +1446,7 @@ static int32_t taosDumpTable(
int32_t ret = 0;
if (!g_args.schemaonly) {
ret = taosDumpTableData(fp, table, taosCon, dbName,
ret = taosDumpTableData(fp, tbName, taosCon, dbName,
jsonAvroSchema);
}
@ -1648,26 +1648,27 @@ static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName)
static int32_t taosDumpStable(char *table, FILE *fp,
TAOS* taosCon, char* dbName) {
uint64_t sizeOfTableDes = (uint64_t)(sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
STableDef *tableDes = (STableDef *)calloc(1, sizeOfTableDes);
if (NULL == tableDes) {
uint64_t sizeOfTableDes =
(uint64_t)(sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
STableDef *stableDes = (STableDef *)calloc(1, sizeOfTableDes);
if (NULL == stableDes) {
errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
__func__, __LINE__, sizeOfTableDes);
exit(-1);
}
int count = taosGetTableDes(dbName, table, tableDes, taosCon, true);
int count = taosGetTableDes(dbName, table, stableDes, taosCon, true);
if (count < 0) {
free(tableDes);
free(stableDes);
errorPrint("%s() LN%d, failed to get stable[%s] schema\n",
__func__, __LINE__, table);
exit(-1);
}
taosDumpCreateTableClause(tableDes, count, fp, dbName);
taosDumpCreateTableClause(stableDes, count, fp, dbName);
free(tableDes);
free(stableDes);
return 0;
}
@ -1707,7 +1708,7 @@ static int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
min(TSDB_TABLE_NAME_LEN,
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1);
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes + 1));
taosWrite(fd, &tableRecord, sizeof(STableRecord));
}
@ -1782,10 +1783,10 @@ static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) {
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
min(TSDB_TABLE_NAME_LEN,
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1);
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes + 1));
tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
min(TSDB_TABLE_NAME_LEN,
fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes) + 1);
fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes + 1));
taosWrite(fd, &tableRecord, sizeof(STableRecord));
@ -1865,52 +1866,52 @@ static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) {
static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols,
FILE *fp, char* dbName) {
int counter = 0;
int count_temp = 0;
char sqlstr[COMMAND_SIZE];
int counter = 0;
int count_temp = 0;
char sqlstr[COMMAND_SIZE];
char* pstr = sqlstr;
char* pstr = sqlstr;
pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
dbName, tableDes->name);
pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
dbName, tableDes->name);
for (; counter < numOfCols; counter++) {
if (tableDes->cols[counter].note[0] != '\0') break;
for (; counter < numOfCols; counter++) {
if (tableDes->cols[counter].note[0] != '\0') break;
if (counter == 0) {
pstr += sprintf(pstr, " (%s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
pstr += sprintf(pstr, ", %s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
if (counter == 0) {
pstr += sprintf(pstr, " (%s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
pstr += sprintf(pstr, ", %s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
}
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
}
}
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
}
}
count_temp = counter;
count_temp = counter;
for (; counter < numOfCols; counter++) {
if (counter == count_temp) {
pstr += sprintf(pstr, ") TAGS (%s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
pstr += sprintf(pstr, ", %s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
}
for (; counter < numOfCols; counter++) {
if (counter == count_temp) {
pstr += sprintf(pstr, ") TAGS (%s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
pstr += sprintf(pstr, ", %s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
}
}
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
}
}
pstr += sprintf(pstr, ");");
pstr += sprintf(pstr, ");");
fprintf(fp, "%s\n\n", sqlstr);
fprintf(fp, "%s\n\n", sqlstr);
}
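
Reviewer note: the reindented loops above are behavior-preserving. The first loop emits ordinary columns until it hits the first entry with a non-empty note; the second loop emits the remaining entries as TAGS, with binary/nchar columns getting an explicit length. For a super table with columns (ts TIMESTAMP, speed FLOAT) and one tag (t1 INT), the generated clause has the form: CREATE TABLE IF NOT EXISTS db.st (ts TIMESTAMP, speed FLOAT) TAGS (t1 INT);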
static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric,

View File

@ -32,7 +32,8 @@
#define CONN_KEEP_TIME (tsShellActivityTimer * 3)
#define CONN_CHECK_TIME (tsShellActivityTimer * 2)
#define QUERY_ID_SIZE 20
#define QUERY_OBJ_ID_SIZE 10
#define QUERY_OBJ_ID_SIZE 18
#define SUBQUERY_INFO_SIZE 6
#define QUERY_STREAM_SAVE_SIZE 20
static SCacheObj *tsMnodeConnCache = NULL;
@ -380,12 +381,24 @@ static int32_t mnodeGetQueryMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 1;
pSchema[cols].type = TSDB_DATA_TYPE_BOOL;
strcpy(pSchema[cols].name, "stable_query");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_INT;
strcpy(pSchema[cols].name, "sub_queries");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "sub_query_info");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "sql");
@ -459,12 +472,8 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int64_t *)pWrite = htobe64(pDesc->useconds);
cols++;
/*
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int64_t *)pWrite = htobe64(pDesc->sqlObjId);
cols++;
*/
snprintf(str, tListLen(str), "0x%08" PRIx64, htobe64(pDesc->sqlObjId));
snprintf(str, tListLen(str), "0x%" PRIx64, htobe64(pDesc->sqlObjId));
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, str, pShow->bytes[cols]);
cols++;
@ -479,10 +488,18 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, epBuf, pShow->bytes[cols]);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(bool *)pWrite = pDesc->stableQuery;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = htonl(pDesc->numOfSub);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDesc->subSqlInfo, pShow->bytes[cols]);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDesc->sql, pShow->bytes[cols]);
cols++;
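
Reviewer note: QUERY_OBJ_ID_SIZE grows from 10 to 18 to match the new "0x%" PRIx64 formatting of sqlObjId above: a 64-bit value prints as at most 16 hex digits, and the "0x" prefix adds two more characters. A quick sanity check:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    char str[32];
    snprintf(str, sizeof(str), "0x%" PRIx64, UINT64_MAX);
    assert(strlen(str) == 18);   /* "0x" + 16 hex digits */
    return 0;
}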

View File

@ -97,7 +97,7 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts, int32_t timePrecision);
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision);
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision);
int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);

View File

@ -397,7 +397,7 @@ static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t time
* n - Months (30 days)
* y - Years (365 days)
*/
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration, int32_t timePrecision) {
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration, char* unit, int32_t timePrecision) {
errno = 0;
char* endPtr = NULL;
@ -408,12 +408,12 @@ int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration,
}
/* natural month/year units are not allowed in an absolute duration */
char unit = token[tokenlen - 1];
if (unit == 'n' || unit == 'y') {
*unit = token[tokenlen - 1];
if (*unit == 'n' || *unit == 'y') {
return -1;
}
return getDuration(timestamp, unit, duration, timePrecision);
return getDuration(timestamp, *unit, duration, timePrecision);
}
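
Reviewer note: the signature change threads the parsed unit character back to the caller instead of keeping it local to the function. A hedged usage sketch (the 0 passed for timePrecision assumes millisecond precision is the zero value, as elsewhere in this codebase):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration,
                              char* unit, int32_t timePrecision);

static void usageSketch(void) {
    char    tok[] = "10m";
    int64_t dur   = 0;
    char    unit  = 0;
    if (parseAbsoluteDuration(tok, 3, &dur, &unit, 0) == 0) {
        printf("duration=%" PRId64 " unit=%c\n", dur, unit);   /* unit is 'm' here */
    }
}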
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision) {

View File

@ -80,7 +80,8 @@ typedef struct STableMeta {
typedef struct STableMetaInfo {
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
uint32_t tableMetaSize;
uint32_t tableMetaSize;
size_t tableMetaCapacity;
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray<SVgroupTableInfo>

View File

@ -479,7 +479,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
//////////////////////// The SELECT statement /////////////////////////////////
%type select {SSqlNode*}
%destructor select {destroySqlNode($$);}
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). {
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) sliding_opt(S) session_option(H) windowstate_option(D) fill_opt(F) groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). {
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &D, &S, F, &L, &G, N);
}
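
Reviewer note: the rule change moves sliding_opt to directly after interval_opt, so a statement such as SELECT avg(speed) FROM st INTERVAL(1m) SLIDING(30s) FILL(NULL) now matches the clause order the parser accepts (previously SLIDING had to follow FILL); the argument list handed to tSetQuerySqlNode is unchanged.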

View File

@ -512,6 +512,28 @@ int32_t countRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
return BLK_DATA_NO_NEEDED;
}
#define LIST_ADD_N_DOUBLE_FLOAT(x, ctx, p, t, numOfElem, tsdbType) \
do { \
t *d = (t *)(p); \
for (int32_t i = 0; i < (ctx)->size; ++i) { \
if (((ctx)->hasNull) && isNull((char *)&(d)[i], tsdbType)) { \
continue; \
}; \
SET_DOUBLE_VAL(&(x) , GET_DOUBLE_VAL(&(x)) + GET_FLOAT_VAL(&(d)[i])); \
(numOfElem)++; \
} \
} while(0)
#define LIST_ADD_N_DOUBLE(x, ctx, p, t, numOfElem, tsdbType) \
do { \
t *d = (t *)(p); \
for (int32_t i = 0; i < (ctx)->size; ++i) { \
if (((ctx)->hasNull) && isNull((char *)&(d)[i], tsdbType)) { \
continue; \
}; \
SET_DOUBLE_VAL(&(x) , (x) + (d)[i]); \
(numOfElem)++; \
} \
} while(0)
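
Reviewer note: the new LIST_ADD_N_DOUBLE / LIST_ADD_N_DOUBLE_FLOAT macros, and the many SET_DOUBLE_VAL conversions below, funnel every double store through one accessor. My reading — hedged, since the macro body is not shown in this diff — is that this guards against unaligned floating-point access, which faults on some of the ARM targets this merge adds to CI. The portable way to implement such accessors is memcpy:

#include <string.h>

/* Sketch only; TDengine's actual SET_DOUBLE_VAL/GET_DOUBLE_VAL may differ. */
static inline void set_double_val(void *p, double v) {
    memcpy(p, &v, sizeof v);     /* safe even when p is not 8-byte aligned */
}

static inline double get_double_val(const void *p) {
    double v;
    memcpy(&v, p, sizeof v);
    return v;
}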
#define LIST_ADD_N(x, ctx, p, t, numOfElem, tsdbType) \
do { \
@ -575,7 +597,7 @@ static void do_sum(SQLFunctionCtx *pCtx) {
*retVal += (uint64_t)pCtx->preAggVals.statis.sum;
} else if (IS_FLOAT_TYPE(pCtx->inputType)) {
double *retVal = (double*) pCtx->pOutput;
*retVal += GET_DOUBLE_VAL((const char*)&(pCtx->preAggVals.statis.sum));
SET_DOUBLE_VAL(retVal, *retVal + GET_DOUBLE_VAL((const char*)&(pCtx->preAggVals.statis.sum)));
}
} else { // computing based on the true data block
void *pData = GET_INPUT_DATA_LIST(pCtx);
@ -607,10 +629,10 @@ static void do_sum(SQLFunctionCtx *pCtx) {
}
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
double *retVal = (double *)pCtx->pOutput;
LIST_ADD_N(*retVal, pCtx, pData, double, notNullElems, pCtx->inputType);
LIST_ADD_N_DOUBLE(*retVal, pCtx, pData, double, notNullElems, pCtx->inputType);
} else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
double *retVal = (double *)pCtx->pOutput;
LIST_ADD_N(*retVal, pCtx, pData, float, notNullElems, pCtx->inputType);
LIST_ADD_N_DOUBLE_FLOAT(*retVal, pCtx, pData, float, notNullElems, pCtx->inputType);
}
}
@ -654,7 +676,7 @@ static void sum_func_merge(SQLFunctionCtx *pCtx) {
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
*(uint64_t *) pCtx->pOutput += pInput->usum;
} else {
*(double *)pCtx->pOutput += pInput->dsum;
SET_DOUBLE_VAL((double *)pCtx->pOutput, *(double *)pCtx->pOutput + pInput->dsum);
}
}
@ -778,9 +800,9 @@ static void avg_function(SQLFunctionCtx *pCtx) {
} else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) {
LIST_ADD_N(*pVal, pCtx, pData, int64_t, notNullElems, pCtx->inputType);
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
LIST_ADD_N(*pVal, pCtx, pData, double, notNullElems, pCtx->inputType);
LIST_ADD_N_DOUBLE(*pVal, pCtx, pData, double, notNullElems, pCtx->inputType);
} else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
LIST_ADD_N(*pVal, pCtx, pData, float, notNullElems, pCtx->inputType);
LIST_ADD_N_DOUBLE_FLOAT(*pVal, pCtx, pData, float, notNullElems, pCtx->inputType);
} else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) {
LIST_ADD_N(*pVal, pCtx, pData, uint8_t, notNullElems, pCtx->inputType);
} else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) {
@ -821,7 +843,7 @@ static void avg_func_merge(SQLFunctionCtx *pCtx) {
continue;
}
*sum += pInput->sum;
SET_DOUBLE_VAL(sum, *sum + pInput->sum);
// keep the number of data into the temp buffer
*(int64_t *)GET_ROWCELL_INTERBUF(pResInfo) += pInput->num;
@ -841,8 +863,8 @@ static void avg_finalizer(SQLFunctionCtx *pCtx) {
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
return;
}
*(double *)pCtx->pOutput = (*(double *)pCtx->pOutput) / *(int64_t *)GET_ROWCELL_INTERBUF(pResInfo);
SET_DOUBLE_VAL((double *)pCtx->pOutput,(*(double *)pCtx->pOutput) / *(int64_t *)GET_ROWCELL_INTERBUF(pResInfo));
} else { // this is the secondary merge, only in the secondary merge, the input type is TSDB_DATA_TYPE_BINARY
assert(IS_NUMERIC_TYPE(pCtx->inputType));
SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo);
@ -852,7 +874,7 @@ static void avg_finalizer(SQLFunctionCtx *pCtx) {
return;
}
*(double *)pCtx->pOutput = pAvgInfo->sum / pAvgInfo->num;
SET_DOUBLE_VAL((double *)pCtx->pOutput, pAvgInfo->sum / pAvgInfo->num);
}
// cannot set the numOfIteratedElems again since it is set during previous iteration
@ -1049,7 +1071,7 @@ static bool min_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo
*((float *)pCtx->pOutput) = FLT_MAX;
break;
case TSDB_DATA_TYPE_DOUBLE:
*((double *)pCtx->pOutput) = DBL_MAX;
SET_DOUBLE_VAL(((double *)pCtx->pOutput), DBL_MAX);
break;
default:
qError("illegal data type:%d in min/max query", pCtx->inputType);
@ -1076,7 +1098,7 @@ static bool max_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo
*((float *)pCtx->pOutput) = -FLT_MAX;
break;
case TSDB_DATA_TYPE_DOUBLE:
*((double *)pCtx->pOutput) = -DBL_MAX;
SET_DOUBLE_VAL(((double *)pCtx->pOutput), -DBL_MAX);
break;
case TSDB_DATA_TYPE_BIGINT:
*((int64_t *)pCtx->pOutput) = INT64_MIN;
@ -1322,7 +1344,7 @@ static void stddev_finalizer(SQLFunctionCtx *pCtx) {
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
} else {
double *retValue = (double *)pCtx->pOutput;
*retValue = sqrt(pStd->res / pStd->num);
SET_DOUBLE_VAL(retValue, sqrt(pStd->res / pStd->num));
SET_VAL(pCtx, 1, 1);
}
@ -1455,7 +1477,7 @@ static void stddev_dst_finalizer(SQLFunctionCtx *pCtx) {
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
} else {
double *retValue = (double *)pCtx->pOutput;
*retValue = sqrt(pStd->res / pStd->num);
SET_DOUBLE_VAL(retValue, sqrt(pStd->res / pStd->num));
SET_VAL(pCtx, 1, 1);
}
@ -1947,7 +1969,7 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) {
case TSDB_DATA_TYPE_DOUBLE: {
double *output = (double *)pCtx->pOutput;
for (int32_t i = 0; i < len; ++i, output += step) {
*output = tvp[i]->v.dKey;
SET_DOUBLE_VAL(output, tvp[i]->v.dKey);
}
break;
}
@ -2366,7 +2388,7 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
assert(ppInfo->numOfElems == 0);
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
} else {
*(double *)pCtx->pOutput = getPercentile(pMemBucket, v);
SET_DOUBLE_VAL((double *)pCtx->pOutput, getPercentile(pMemBucket, v));
}
tMemBucketDestroy(pMemBucket);
@ -2782,7 +2804,7 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
if (!pDerivInfo->valueSet) { // initial value is not set yet
pDerivInfo->valueSet = true;
} else {
*pOutput = ((pData[i] - pDerivInfo->prevValue) * pDerivInfo->tsWindow) / (tsList[i] - pDerivInfo->prevTs);
SET_DOUBLE_VAL(pOutput, ((pData[i] - pDerivInfo->prevValue) * pDerivInfo->tsWindow) / (tsList[i] - pDerivInfo->prevTs));
if (pDerivInfo->ignoreNegative && *pOutput < 0) {
} else {
*pTimestamp = tsList[i];
@ -3017,7 +3039,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
}
if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet
*pOutput = pData[i] - pCtx->param[1].dKey; // direct previous may be null
SET_DOUBLE_VAL(pOutput, pData[i] - pCtx->param[1].dKey); // direct previous may be null
*pTimestamp = (tsList != NULL)? tsList[i]:0;
pOutput += 1;
pTimestamp += 1;
@ -3290,7 +3312,7 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) {
return;
}
*(double *)pCtx->pOutput = pCtx->param[3].dKey - pCtx->param[0].dKey;
SET_DOUBLE_VAL((double *)pCtx->pOutput, pCtx->param[3].dKey - pCtx->param[0].dKey);
} else {
assert(IS_NUMERIC_TYPE(pCtx->inputType) || (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP));
@ -3300,7 +3322,7 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) {
return;
}
*(double *)pCtx->pOutput = pInfo->max - pInfo->min;
SET_DOUBLE_VAL((double *)pCtx->pOutput, pInfo->max - pInfo->min);
}
GET_RES_INFO(pCtx)->numOfRes = 1; // todo add test case
@ -3628,9 +3650,9 @@ void twa_function_finalizer(SQLFunctionCtx *pCtx) {
assert(pInfo->win.ekey == pInfo->p.key && pInfo->hasResult == pResInfo->hasResult);
if (pInfo->win.ekey == pInfo->win.skey) {
*(double *)pCtx->pOutput = pInfo->p.val;
SET_DOUBLE_VAL((double *)pCtx->pOutput, pInfo->p.val);
} else {
*(double *)pCtx->pOutput = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey);
SET_DOUBLE_VAL((double *)pCtx->pOutput , pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey));
}
GET_RES_INFO(pCtx)->numOfRes = 1;
@ -3923,7 +3945,7 @@ static void rate_finalizer(SQLFunctionCtx *pCtx) {
return;
}
*(double*) pCtx->pOutput = do_calc_rate(pRateInfo, (double) TSDB_TICK_PER_SECOND(pCtx->param[0].i64));
SET_DOUBLE_VAL((double*) pCtx->pOutput, do_calc_rate(pRateInfo, (double) TSDB_TICK_PER_SECOND(pCtx->param[0].i64)));
// cannot set the numOfIteratedElems again since it is set during previous iteration
pResInfo->numOfRes = 1;
@ -3996,6 +4018,7 @@ static void mergeTableBlockDist(SResultRowCellInfo* pResInfo, const STableBlockD
pDist->numOfTables += pSrc->numOfTables;
pDist->numOfRowsInMemTable += pSrc->numOfRowsInMemTable;
pDist->numOfSmallBlocks += pSrc->numOfSmallBlocks;
pDist->numOfFiles += pSrc->numOfFiles;
pDist->totalSize += pSrc->totalSize;
pDist->totalRows += pSrc->totalRows;
@ -4108,18 +4131,19 @@ void generateBlockDistResult(STableBlockDist *pTableBlockDist, char* result) {
uint64_t totalLen = pTableBlockDist->totalSize;
int32_t rowSize = pTableBlockDist->rowSize;
int32_t smallBlocks = pTableBlockDist->numOfSmallBlocks;
double compRatio = (totalRows>0) ? ((double)(totalLen)/(rowSize*totalRows)) : 1;
int sz = sprintf(result + VARSTR_HEADER_SIZE,
"summary: \n\t "
"5th=[%d], 10th=[%d], 20th=[%d], 30th=[%d], 40th=[%d], 50th=[%d]\n\t "
"60th=[%d], 70th=[%d], 80th=[%d], 90th=[%d], 95th=[%d], 99th=[%d]\n\t "
"Min=[%"PRId64"(Rows)] Max=[%"PRId64"(Rows)] Avg=[%"PRId64"(Rows)] Stddev=[%.2f] \n\t "
"Rows=[%"PRIu64"], Blocks=[%"PRId64"], Size=[%.3f(Kb)] Comp=[%.2f]\n\t "
"Rows=[%"PRIu64"], Blocks=[%"PRId64"], SmallBlocks=[%d], Size=[%.3f(Kb)] Comp=[%.2f]\n\t "
"RowsInMem=[%d] \n\t",
percentiles[0], percentiles[1], percentiles[2], percentiles[3], percentiles[4], percentiles[5],
percentiles[6], percentiles[7], percentiles[8], percentiles[9], percentiles[10], percentiles[11],
min, max, avg, stdDev,
totalRows, totalBlocks, totalLen/1024.0, compRatio,
totalRows, totalBlocks, smallBlocks, totalLen/1024.0, compRatio,
pTableBlockDist->numOfRowsInMemTable);
varDataSetLen(result, sz);
UNUSED(sz);
@ -4132,6 +4156,11 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) {
pDist->rowSize = (uint16_t)pCtx->param[0].i64;
generateBlockDistResult(pDist, pCtx->pOutput);
if (pDist->dataBlockInfos != NULL) {
taosArrayDestroy(pDist->dataBlockInfos);
pDist->dataBlockInfos = NULL;
}
// cannot set the numOfIteratedElems again since it is set during previous iteration
pResInfo->numOfRes = 1;
pResInfo->hasResult = DATA_SET_FLAG;
@ -4157,8 +4186,8 @@ int32_t functionCompatList[] = {
4, -1, -1, 1, 1, 1, 1, 1, 1, -1,
// tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, stddev_dst, interp rate irate
1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1,
// tid_tag, blk_info
6, 7
// tid_tag, derivative, blk_info
6, 8, 7,
};
SAggFunctionInfo aAggs[] = {{

View File

@ -6653,19 +6653,20 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
if (isNull(val, type)) {
continue;
}
char* p = val;
size_t keyLen = 0;
if (IS_VAR_DATA_TYPE(pOperator->pExpr->base.colType)) {
tstr* var = (tstr*)(val);
p = var->data;
keyLen = varDataLen(var);
} else {
keyLen = bytes;
}
int dummy;
void* res = taosHashGet(pInfo->pSet, val, keyLen);
void* res = taosHashGet(pInfo->pSet, p, keyLen);
if (res == NULL) {
taosHashPut(pInfo->pSet, val, keyLen, &dummy, sizeof(dummy));
taosHashPut(pInfo->pSet, p, keyLen, &dummy, sizeof(dummy));
char* start = pResultColInfoData->pData + bytes * pInfo->pRes->info.rows;
memcpy(start, val, bytes);
pRes->info.rows += 1;
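
Reviewer note: the fix above matters for DISTINCT over BINARY/NCHAR columns: the raw column slot is a length-prefixed varstring whose bytes beyond the actual length are padding, so hashing the whole slot can split equal strings into different keys. Hashing var->data with varDataLen(var) uses only the payload. A sketch of that key extraction, assuming the 2-byte-length-plus-payload layout that tstr appears to use:

#include <stddef.h>
#include <stdint.h>

typedef struct {                 /* illustrative stand-in for tstr */
    uint16_t len;
    char     data[];
} tstr_sketch;

static const void *distinctKey(const void *val, size_t bytes, int isVarType,
                               size_t *keyLen) {
    if (isVarType) {
        const tstr_sketch *var = (const tstr_sketch *)val;
        *keyLen = var->len;      /* actual payload length only */
        return var->data;        /* skip the length header */
    }
    *keyLen = bytes;             /* fixed-width types hash the whole slot */
    return val;
}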

View File

@ -32,8 +32,8 @@ typedef struct SJoinCond {
SColumn *colCond[2];
} SJoinCond;
static SQueryNode* createQueryNode(int32_t type, const char* name, SQueryNode** prev,
int32_t numOfPrev, SExprInfo** pExpr, int32_t numOfOutput, SQueryTableInfo* pTableInfo,
static SQueryNode* createQueryNode(int32_t type, const char* name, SQueryNode** prev, int32_t numOfPrev,
SExprInfo** pExpr, int32_t numOfOutput, SQueryTableInfo* pTableInfo,
void* pExtInfo) {
SQueryNode* pNode = calloc(1, sizeof(SQueryNode));
@ -112,8 +112,8 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo*
}
STimeWindow* window = &pQueryInfo->window;
SQueryNode* pNode = createQueryNode(QNODE_TABLESCAN, "TableScan", NULL, 0, NULL, 0,
info, window);
SQueryNode* pNode = createQueryNode(QNODE_TABLESCAN, "TableScan", NULL, 0, NULL, 0, info, window);
if (pQueryInfo->projectionQuery) {
int32_t numOfOutput = (int32_t) taosArrayGetSize(pExprs);
pNode = createQueryNode(QNODE_PROJECT, "Projection", &pNode, 1, pExprs->pData, numOfOutput, info, NULL);
@ -146,39 +146,41 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo*
}
static SQueryNode* doCreateQueryPlanForOneTableImpl(SQueryInfo* pQueryInfo, SQueryNode* pNode, SQueryTableInfo* info,
SArray* pExprs) {
// check for aggregation
if (pQueryInfo->interval.interval > 0) {
int32_t numOfOutput = (int32_t) taosArrayGetSize(pExprs);
SArray* pExprs) {
// check for aggregation
if (pQueryInfo->interval.interval > 0) {
int32_t numOfOutput = (int32_t)taosArrayGetSize(pExprs);
pNode = createQueryNode(QNODE_TIMEWINDOW, "TimeWindowAgg", &pNode, 1, pExprs->pData, numOfOutput, info,
&pQueryInfo->interval);
} else if (pQueryInfo->groupbyColumn) {
int32_t numOfOutput = (int32_t) taosArrayGetSize(pExprs);
pNode = createQueryNode(QNODE_GROUPBY, "Groupby", &pNode, 1, pExprs->pData, numOfOutput, info,
&pQueryInfo->groupbyExpr);
} else if (pQueryInfo->sessionWindow.gap > 0) {
pNode = createQueryNode(QNODE_SESSIONWINDOW, "SessionWindowAgg", &pNode, 1, NULL, 0, info, NULL);
} else if (pQueryInfo->simpleAgg) {
int32_t numOfOutput = (int32_t) taosArrayGetSize(pExprs);
pNode = createQueryNode(QNODE_AGGREGATE, "Aggregate", &pNode, 1, pExprs->pData, numOfOutput, info, NULL);
pNode = createQueryNode(QNODE_TIMEWINDOW, "TimeWindowAgg", &pNode, 1, pExprs->pData, numOfOutput, info,
&pQueryInfo->interval);
if (pQueryInfo->groupbyExpr.numOfGroupCols != 0) {
pNode = createQueryNode(QNODE_GROUPBY, "Groupby", &pNode, 1, pExprs->pData, numOfOutput, info, &pQueryInfo->groupbyExpr);
}
} else if (pQueryInfo->groupbyColumn) {
int32_t numOfOutput = (int32_t)taosArrayGetSize(pExprs);
pNode = createQueryNode(QNODE_GROUPBY, "Groupby", &pNode, 1, pExprs->pData, numOfOutput, info,
&pQueryInfo->groupbyExpr);
} else if (pQueryInfo->sessionWindow.gap > 0) {
pNode = createQueryNode(QNODE_SESSIONWINDOW, "SessionWindowAgg", &pNode, 1, NULL, 0, info, NULL);
} else if (pQueryInfo->simpleAgg) {
int32_t numOfOutput = (int32_t)taosArrayGetSize(pExprs);
pNode = createQueryNode(QNODE_AGGREGATE, "Aggregate", &pNode, 1, pExprs->pData, numOfOutput, info, NULL);
}
if (pQueryInfo->havingFieldNum > 0 || pQueryInfo->arithmeticOnAgg) {
int32_t numOfExpr = (int32_t) taosArrayGetSize(pQueryInfo->exprList1);
pNode =
createQueryNode(QNODE_PROJECT, "Projection", &pNode, 1, pQueryInfo->exprList1->pData, numOfExpr, info, NULL);
}
if (pQueryInfo->havingFieldNum > 0 || pQueryInfo->arithmeticOnAgg) {
int32_t numOfExpr = (int32_t)taosArrayGetSize(pQueryInfo->exprList1);
pNode =
createQueryNode(QNODE_PROJECT, "Projection", &pNode, 1, pQueryInfo->exprList1->pData, numOfExpr, info, NULL);
}
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillEssInfo* pInfo = calloc(1, sizeof(SFillEssInfo));
pInfo->fillType = pQueryInfo->fillType;
pInfo->val = calloc(pNode->numOfOutput, sizeof(int64_t));
memcpy(pInfo->val, pQueryInfo->fillVal, pNode->numOfOutput);
pNode = createQueryNode(QNODE_FILL, "Fill", &pNode, 1, NULL, 0, info, pInfo);
}
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillEssInfo* pInfo = calloc(1, sizeof(SFillEssInfo));
pInfo->fillType = pQueryInfo->fillType;
pInfo->val = calloc(pNode->numOfOutput, sizeof(int64_t));
memcpy(pInfo->val, pQueryInfo->fillVal, pNode->numOfOutput);
pNode = createQueryNode(QNODE_FILL, "Fill", &pNode, 1, NULL, 0, info, pInfo);
}
if (pQueryInfo->limit.limit != -1 || pQueryInfo->limit.offset != 0) {
pNode = createQueryNode(QNODE_LIMIT, "Limit", &pNode, 1, NULL, 0, info, &pQueryInfo->limit);
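
Reviewer note: the substantive change inside the reindented block is the new inner branch: when the query has both an interval and group-by columns, a Groupby node is now chained after TimeWindowAgg. Each createQueryNode call wraps the previous node, so the order of these if blocks is the plan order. A minimal sketch of that chaining pattern, with illustrative names:

#include <stdlib.h>

typedef struct Node {
    const char  *name;
    struct Node *child;
} Node;

static Node *wrap(const char *name, Node *prev) {
    Node *n = calloc(1, sizeof(Node));
    n->name  = name;
    n->child = prev;                  /* new node becomes the plan root */
    return n;
}

static Node *planSketch(int hasInterval, int numOfGroupCols) {
    Node *p = wrap("TableScan", NULL);
    if (hasInterval) {
        p = wrap("TimeWindowAgg", p);
        if (numOfGroupCols != 0) {
            p = wrap("Groupby", p);   /* the branch this hunk adds */
        }
    }
    return p;
}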
@ -330,7 +332,7 @@ static int32_t doPrintPlan(char* buf, SQueryNode* pQueryNode, int32_t level, int
switch(pQueryNode->info.type) {
case QNODE_TABLESCAN: {
STimeWindow* win = (STimeWindow*)pQueryNode->pExtInfo;
len1 = sprintf(buf + len, "%s #0x%" PRIx64 ") time_range: %" PRId64 " - %" PRId64 "\n",
len1 = sprintf(buf + len, "%s #%" PRIu64 ") time_range: %" PRId64 " - %" PRId64 "\n",
pQueryNode->tableInfo.tableName, pQueryNode->tableInfo.id.uid, win->skey, win->ekey);
len += len1;
break;
@ -401,8 +403,8 @@ static int32_t doPrintPlan(char* buf, SQueryNode* pQueryNode, int32_t level, int
len += len1;
SInterval* pInterval = pQueryNode->pExtInfo;
len1 = sprintf(buf + len, "interval:%" PRId64 "(%c), sliding:%" PRId64 "(%c), offset:%" PRId64 "\n",
pInterval->interval, pInterval->intervalUnit, pInterval->sliding, pInterval->slidingUnit,
len1 = sprintf(buf + len, "interval:%" PRId64 "(%s), sliding:%" PRId64 "(%s), offset:%" PRId64 "\n",
pInterval->interval, TSDB_TIME_PRECISION_MILLI_STR, pInterval->sliding, TSDB_TIME_PRECISION_MILLI_STR,
pInterval->offset);
len += len1;

Some files were not shown because too many files have changed in this diff.