Merge branch 'develop' into feature/query
commit e1fd86ac66
|
@ -7,8 +7,9 @@ set -e
|
||||||
cpuType=amd64 # [armv6l | arm64 | amd64 | 386]
|
cpuType=amd64 # [armv6l | arm64 | amd64 | 386]
|
||||||
osType=linux # [linux | darwin | windows]
|
osType=linux # [linux | darwin | windows]
|
||||||
version=""
|
version=""
|
||||||
|
verType=stable # [stable, beta]
|
||||||
declare -A archMap=(["armv6l"]="arm" ["arm64"]="arm64" ["amd64"]="x64" ["386"]="x86")
|
declare -A archMap=(["armv6l"]="arm" ["arm64"]="arm64" ["amd64"]="x64" ["386"]="x86")
|
||||||
while getopts "h:c:o:n:" arg
|
while getopts "h:c:o:n:V:" arg
|
||||||
do
|
do
|
||||||
case $arg in
|
case $arg in
|
||||||
c)
|
c)
|
||||||
|
@ -23,6 +24,10 @@ do
|
||||||
#echo "version=$OPTARG"
|
#echo "version=$OPTARG"
|
||||||
version=$(echo $OPTARG)
|
version=$(echo $OPTARG)
|
||||||
;;
|
;;
|
||||||
|
V)
|
||||||
|
#echo "verType=$OPTARG"
|
||||||
|
verType=$(echo $OPTARG)
|
||||||
|
;;
|
||||||
h)
|
h)
|
||||||
echo "Usage: `basename $0` -c [armv6l | arm64 | amd64 | 386] -o [linux | darwin | windows]"
|
echo "Usage: `basename $0` -c [armv6l | arm64 | amd64 | 386] -o [linux | darwin | windows]"
|
||||||
exit 0
|
exit 0
|
||||||
|
@ -55,6 +60,22 @@ cp alert alert.cfg install_driver.sh ./TDengine-alert/.
|
||||||
cp ../../../debug/build/lib/libtaos.so.${version} ./TDengine-alert/driver/.
|
cp ../../../debug/build/lib/libtaos.so.${version} ./TDengine-alert/driver/.
|
||||||
chmod 777 ./TDengine-alert/install_driver.sh
|
chmod 777 ./TDengine-alert/install_driver.sh
|
||||||
|
|
||||||
tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz TDengine-alert/
|
tar -I 'gzip -9' -cf ${scriptdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz TDengine-alert/
|
||||||
rm -rf ./TDengine-alert
|
rm -rf ./TDengine-alert
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# mv package to comminuty/release/
|
||||||
|
pkg_name=TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}
|
||||||
|
|
||||||
|
if [ "$verType" == "beta" ]; then
|
||||||
|
pkg_name=TDengine-alert-${version}-${verType}-${osType^}-${archMap[${cpuType}]}
|
||||||
|
elif [ "$verType" == "stable" ]; then
|
||||||
|
pkg_name=${pkg_name}
|
||||||
|
else
|
||||||
|
echo "unknow verType, nor stabel or beta"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
cd ${scriptdir}/../release/
|
||||||
|
mv ${scriptdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz ${pkg_name}.tar.gz
|
||||||
|
|
|
@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
|
||||||
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
|
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
|
||||||
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
|
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
|
||||||
IF (TD_MVN_INSTALLED)
|
IF (TD_MVN_INSTALLED)
|
||||||
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.32-dist.jar DESTINATION connector/jdbc)
|
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-*-dist.jar DESTINATION connector/jdbc)
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
ELSEIF (TD_DARWIN)
|
ELSEIF (TD_DARWIN)
|
||||||
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
|
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
|
||||||
|
|
|
@ -4,7 +4,7 @@ PROJECT(TDengine)
|
||||||
IF (DEFINED VERNUMBER)
|
IF (DEFINED VERNUMBER)
|
||||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||||
ELSE ()
|
ELSE ()
|
||||||
SET(TD_VER_NUMBER "2.1.3.0")
|
SET(TD_VER_NUMBER "2.1.4.1")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (DEFINED VERCOMPATIBLE)
|
IF (DEFINED VERCOMPATIBLE)
|
||||||
|
|
|
@ -2,25 +2,25 @@
|
||||||
|
|
||||||
## <a class="anchor" id="install"></a>快捷安装
|
## <a class="anchor" id="install"></a>快捷安装
|
||||||
|
|
||||||
TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、mac OS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。
|
TDengine 软件分为服务器、客户端和报警模块三部分,目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后续会支持 Windows、Mac OS 等系统。客户端可以在 Windows 或 Linux 上安装和运行。任何 OS 的应用也可以选择 RESTful 接口连接服务器 taosd。CPU 支持 X64/ARM64/MIPS64/Alpha64,后续会支持 ARM32、RISC-V 等 CPU 架构。用户可根据需求选择通过 [源码](https://www.taosdata.com/cn/getting-started/#通过源码安装) 或者 [安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装) 来安装。
|
||||||
|
|
||||||
### <a class="anchor" id="source-install"></a>通过源码安装
|
### <a class="anchor" id="source-install"></a>通过源码安装
|
||||||
|
|
||||||
请参考我们的[TDengine github主页](https://github.com/taosdata/TDengine)下载源码并安装.
|
请参考我们的 [TDengine github 主页](https://github.com/taosdata/TDengine) 下载源码并安装.
|
||||||
|
|
||||||
### 通过Docker容器运行
|
### 通过 Docker 容器运行
|
||||||
|
|
||||||
暂时不建议生产环境采用 Docker 来部署 TDengine 的客户端或服务端,但在开发环境下或初次尝试时,使用 Docker 方式部署是十分方便的。特别是,利用 Docker,可以方便地在 Mac OSX 和 Windows 环境下尝试 TDengine。
|
暂时不建议生产环境采用 Docker 来部署 TDengine 的客户端或服务端,但在开发环境下或初次尝试时,使用 Docker 方式部署是十分方便的。特别是,利用 Docker,可以方便地在 Mac OS X 和 Windows 环境下尝试 TDengine。
|
||||||
|
|
||||||
详细操作方法请参照 [通过Docker快速体验TDengine](https://www.taosdata.com/cn/documentation/getting-started/docker)。
|
详细操作方法请参照 [通过 Docker 快速体验 TDengine](https://www.taosdata.com/cn/documentation/getting-started/docker)。
|
||||||
|
|
||||||
### <a class="anchor" id="package-install"></a>通过安装包安装
|
### <a class="anchor" id="package-install"></a>通过安装包安装
|
||||||
|
|
||||||
TDengine的安装非常简单,从下载到安装成功仅仅只要几秒钟。服务端安装包包含客户端和连接器,我们提供三种安装包,您可以根据需要选择:
|
TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。服务端安装包包含客户端和连接器,我们提供三种安装包,您可以根据需要选择:
|
||||||
|
|
||||||
安装包下载在[这里](https://www.taosdata.com/cn/getting-started/#通过安装包安装)。
|
安装包下载在 [这里](https://www.taosdata.com/cn/getting-started/#通过安装包安装)。
|
||||||
|
|
||||||
具体的安装过程,请参见[TDengine多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html)以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。
|
具体的安装过程,请参见 [TDengine 多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html) 以及 [视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。
|
||||||
|
|
||||||
## <a class="anchor" id="start"></a>轻松启动
|
## <a class="anchor" id="start"></a>轻松启动
|
||||||
|
|
||||||
|
@ -53,21 +53,21 @@ $ systemctl status taosd
|
||||||
如果系统中不支持 systemd,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。
|
如果系统中不支持 systemd,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。
|
||||||
|
|
||||||
|
|
||||||
## <a class="anchor" id="console"></a>TDengine命令行程序
|
## <a class="anchor" id="console"></a>TDengine 命令行程序
|
||||||
|
|
||||||
执行TDengine命令行程序,您只要在Linux终端执行`taos`即可。
|
执行 TDengine 命令行程序,您只要在 Linux 终端执行 `taos` 即可。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ taos
|
$ taos
|
||||||
```
|
```
|
||||||
|
|
||||||
如果TDengine终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/documentation/faq/)来解决终端连接服务端失败的问题)。TDengine终端的提示符号如下:
|
如果 TDengine 终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](https://www.taosdata.com/cn/documentation/faq/) 来解决终端连接服务端失败的问题)。TDengine 终端的提示符号如下:
|
||||||
|
|
||||||
```cmd
|
```cmd
|
||||||
taos>
|
taos>
|
||||||
```
|
```
|
||||||
|
|
||||||
在TDengine终端中,用户可以通过SQL命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的SQL语句需要以分号结束来运行。示例:
|
在 TDengine 终端中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例:
|
||||||
|
|
||||||
```mysql
|
```mysql
|
||||||
create database demo;
|
create database demo;
|
||||||
|
@ -76,24 +76,24 @@ create table t (ts timestamp, speed int);
|
||||||
insert into t values ('2019-07-15 00:00:00', 10);
|
insert into t values ('2019-07-15 00:00:00', 10);
|
||||||
insert into t values ('2019-07-15 01:00:00', 20);
|
insert into t values ('2019-07-15 01:00:00', 20);
|
||||||
select * from t;
|
select * from t;
|
||||||
ts | speed |
|
ts | speed |
|
||||||
===================================
|
========================================
|
||||||
19-07-15 00:00:00.000| 10|
|
2019-07-15 00:00:00.000 | 10 |
|
||||||
19-07-15 01:00:00.000| 20|
|
2019-07-15 01:00:00.000 | 20 |
|
||||||
Query OK, 2 row(s) in set (0.001700s)
|
Query OK, 2 row(s) in set (0.003128s)
|
||||||
```
|
```
|
||||||
|
|
||||||
除执行SQL语句外,系统管理员还可以从TDengine终端检查系统运行状态,添加删除用户账号等。
|
除执行 SQL 语句外,系统管理员还可以从 TDengine 终端检查系统运行状态,添加删除用户账号等。
|
||||||
|
|
||||||
### 命令行参数
|
### 命令行参数
|
||||||
|
|
||||||
您可通过配置命令行参数来改变TDengine终端的行为。以下为常用的几个命令行参数:
|
您可通过配置命令行参数来改变 TDengine 终端的行为。以下为常用的几个命令行参数:
|
||||||
|
|
||||||
- -c, --config-dir: 指定配置文件目录,默认为_/etc/taos_
|
- -c, --config-dir: 指定配置文件目录,默认为 _/etc/taos_
|
||||||
- -h, --host: 指定服务的IP地址,默认为本地服务
|
- -h, --host: 指定服务的 FQDN 地址(也可以使用 IP),默认为连接本地服务
|
||||||
- -s, --commands: 在不进入终端的情况下运行TDengine命令
|
- -s, --commands: 在不进入终端的情况下运行 TDengine 命令
|
||||||
- -u, -- user: 连接TDengine服务器的用户名,缺省为root
|
- -u, --user: 连接 TDengine 服务器的用户名,缺省为 root
|
||||||
- -p, --password: 连接TDengine服务器的密码,缺省为taosdata
|
- -p, --password: 连接TDengine服务器的密码,缺省为 taosdata
|
||||||
- -?, --help: 打印出所有命令行参数
|
- -?, --help: 打印出所有命令行参数
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
|
@ -102,7 +102,7 @@ Query OK, 2 row(s) in set (0.001700s)
|
||||||
$ taos -h 192.168.0.1 -s "use db; show tables;"
|
$ taos -h 192.168.0.1 -s "use db; show tables;"
|
||||||
```
|
```
|
||||||
|
|
||||||
### 运行SQL命令脚本
|
### 运行 SQL 命令脚本
|
||||||
|
|
||||||
TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本.
|
TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本.
|
||||||
|
|
||||||
|
@ -110,27 +110,27 @@ TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本.
|
||||||
taos> source <filename>;
|
taos> source <filename>;
|
||||||
```
|
```
|
||||||
|
|
||||||
### Shell小技巧
|
### Shell 小技巧
|
||||||
|
|
||||||
- 可以使用上下光标键查看历史输入的指令
|
- 可以使用上下光标键查看历史输入的指令
|
||||||
- 修改用户密码。在 shell 中使用 alter user 指令
|
- 修改用户密码,在 shell 中使用 alter user 指令
|
||||||
- ctrl+c 中止正在进行中的查询
|
- ctrl+c 中止正在进行中的查询
|
||||||
- 执行 `RESET QUERY CACHE` 清空本地缓存的表 schema
|
- 执行 `RESET QUERY CACHE` 清空本地缓存的表 schema
|
||||||
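
As a sketch of the password tip above, assuming the default root account and a purely illustrative new password:

```mysql
-- Change the password of user 'root' (illustrative value, not from the original text)
ALTER USER root PASS 'taosdata123';
-- Clear the locally cached table schemas, as described in the last tip
RESET QUERY CACHE;
```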
|
|
||||||
|
|
||||||
## <a class="anchor" id="demo"></a>TDengine 极速体验
|
## <a class="anchor" id="demo"></a>TDengine 极速体验
|
||||||
|
|
||||||
启动TDengine的服务,在Linux终端执行taosdemo
|
启动 TDengine 的服务,在 Linux 终端执行 taosdemo
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ taosdemo
|
$ taosdemo
|
||||||
```
|
```
|
||||||
|
|
||||||
该命令将在数据库test下面自动创建一张超级表meters,该超级表下有1万张表,表名为"t0" 到"t9999",每张表有10万条记录,每条记录有 (f1, f2, f3)三个字段,时间戳从"2017-07-14 10:40:00 000" 到"2017-07-14 10:41:39 999",每张表带有标签areaid和loc, areaid被设置为1到10, loc被设置为"beijing"或者“shanghai"。
|
该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "t0" 到 "t9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupdId,groupdId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
|
||||||
|
|
||||||
执行这条命令大概需要10分钟,最后共插入10亿条记录。
|
执行这条命令大概需要几分钟,最后共插入 1 亿条记录。
|
||||||
|
|
||||||
在TDengine客户端输入查询命令,体验查询速度。
|
在 TDengine 客户端输入查询命令,体验查询速度。
|
||||||
|
|
||||||
- 查询超级表下记录总条数:
|
- 查询超级表下记录总条数:
|
||||||
|
|
||||||
|
@ -138,49 +138,43 @@ $ taosdemo
|
||||||
taos> select count(*) from test.meters;
|
taos> select count(*) from test.meters;
|
||||||
```
|
```
|
||||||
|
|
||||||
- 查询10亿条记录的平均值、最大值、最小值等:
|
- 查询 1 亿条记录的平均值、最大值、最小值等:
|
||||||
|
|
||||||
```mysql
|
```mysql
|
||||||
taos> select avg(f1), max(f2), min(f3) from test.meters;
|
taos> select avg(current), max(voltage), min(phase) from test.meters;
|
||||||
```
|
```
|
||||||
|
|
||||||
- 查询loc="beijing"的记录总条数:
|
- 查询 location="beijing" 的记录总条数:
|
||||||
|
|
||||||
```mysql
|
```mysql
|
||||||
taos> select count(*) from test.meters where loc="beijing";
|
taos> select count(*) from test.meters where location="beijing";
|
||||||
```
|
```
|
||||||
|
|
||||||
- 查询areaid=10的所有记录的平均值、最大值、最小值等:
|
- 查询 groupdId=10 的所有记录的平均值、最大值、最小值等:
|
||||||
|
|
||||||
```mysql
|
```mysql
|
||||||
taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10;
|
taos> select avg(current), max(voltage), min(phase) from test.meters where groupdId=10;
|
||||||
```
|
```
|
||||||
|
|
||||||
- 对表t10按10s进行平均值、最大值和最小值聚合统计:
|
- 对表 t10 按 10s 进行平均值、最大值和最小值聚合统计:
|
||||||
|
|
||||||
```mysql
|
```mysql
|
||||||
taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
|
taos> select avg(current), max(voltage), min(phase) from test.t10 interval(10s);
|
||||||
```
|
```
|
||||||
|
|
||||||
**Note:** taosdemo命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help`详细列出。您可以设置不同参数进行体验。
|
**Note:** taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。
|
||||||
|
|
||||||
|
|
||||||
## 客户端和报警模块
|
## 客户端和报警模块
|
||||||
|
|
||||||
如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux和Windows安装包如下:
|
如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux 和 Windows 安装包可以在 [这里](https://www.taosdata.com/cn/getting-started/#客户端) 下载。
|
||||||
|
|
||||||
- TDengine-client-2.0.10.0-Linux-x64.tar.gz(3.0M)
|
报警模块的 Linux 和 Windows 安装包请在 [所有下载链接](https://www.taosdata.com/cn/all-downloads/) 页面搜索“TDengine Alert Linux”章节或“TDengine Alert Windows”章节进行下载。使用方法请参考 [报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)。
|
||||||
- TDengine-client-2.0.10.0-Windows-x64.exe(2.8M)
|
|
||||||
- TDengine-client-2.0.10.0-Windows-x86.exe(2.8M)
|
|
||||||
|
|
||||||
报警模块的Linux安装包如下(请参考[报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)):
|
|
||||||
|
|
||||||
- TDengine-alert-2.0.10.0-Linux-x64.tar.gz (8.1M)
|
|
||||||
|
|
||||||
|
|
||||||
## <a class="anchor" id="platforms"></a>支持平台列表
|
## <a class="anchor" id="platforms"></a>支持平台列表
|
||||||
|
|
||||||
### TDengine服务器支持的平台列表
|
### TDengine 服务器支持的平台列表
|
||||||
|
|
||||||
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** |
|
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** |
|
||||||
| -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- |
|
| -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- |
|
||||||
|
@ -201,9 +195,9 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### TDengine客户端和连接器支持的平台列表
|
### TDengine 客户端和连接器支持的平台列表
|
||||||
|
|
||||||
目前TDengine的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha等硬件平台,以及Linux/Win64/Win32等开发环境。
|
目前 TDengine 的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。
|
||||||
|
|
||||||
对照矩阵如下:
|
对照矩阵如下:
|
||||||
|
|
||||||
|
@ -220,5 +214,5 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
|
||||||
|
|
||||||
注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
|
注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
|
||||||
|
|
||||||
请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector)查看更详细的信息。
|
请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector) 查看更详细的信息。
|
||||||
|
|
||||||
|
|
|
@ -123,8 +123,8 @@ taosd -C
|
||||||
- minRows:文件块中记录的最小条数。单位为条,默认值:100。
|
- minRows:文件块中记录的最小条数。单位为条,默认值:100。
|
||||||
- maxRows:文件块中记录的最大条数。单位为条,默认值:4096。
|
- maxRows:文件块中记录的最大条数。单位为条,默认值:4096。
|
||||||
- comp:文件压缩标志位。0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。(可通过 alter database 修改)
|
- comp:文件压缩标志位。0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。(可通过 alter database 修改)
|
||||||
- wal:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。(在 taos.cfg 中参数名需要写作 walLevel)(可通过 alter database 修改)
|
- wal:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。(在 taos.cfg 中参数名需要写作 walLevel)
|
||||||
- fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。(可通过 alter database 修改)
|
- fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。
|
||||||
- cache:内存块的大小。单位为兆字节(MB),默认值:16。
|
- cache:内存块的大小。单位为兆字节(MB),默认值:16。
|
||||||
- blocks:每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。(可通过 alter database 修改)
|
- blocks:每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。(可通过 alter database 修改)
|
||||||
- replica:副本个数。取值范围:1-3,单位为个,默认值:1。(可通过 alter database 修改)
|
- replica:副本个数。取值范围:1-3,单位为个,默认值:1。(可通过 alter database 修改)
|
||||||
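
Several of the parameters above are marked as changeable through alter database. A minimal sketch, assuming a database named demo and illustrative values:

```mysql
-- Assumed database name 'demo'; values are illustrative only
ALTER DATABASE demo COMP 2;      -- two-stage compression
ALTER DATABASE demo BLOCKS 8;    -- 8 cache-sized memory blocks per vnode
ALTER DATABASE demo REPLICA 3;   -- three replicas
SHOW DATABASES;                  -- confirm that the changes took effect
```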
|
|
|
@ -129,16 +129,6 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
|
||||||
CACHELAST 参数控制是否在内存中缓存子表的最近数据。缺省值为 0,取值范围 [0, 1, 2, 3]。其中 0 表示不缓存,1 表示缓存子表最近一行数据,2 表示缓存子表每一列的最近的非 NULL 值,3 表示同时打开缓存最近行和列功能。(从 2.0.11.0 版本开始支持参数值 [0, 1],从 2.1.2.0 版本开始支持参数值 [0, 1, 2, 3]。)
|
CACHELAST 参数控制是否在内存中缓存子表的最近数据。缺省值为 0,取值范围 [0, 1, 2, 3]。其中 0 表示不缓存,1 表示缓存子表最近一行数据,2 表示缓存子表每一列的最近的非 NULL 值,3 表示同时打开缓存最近行和列功能。(从 2.0.11.0 版本开始支持参数值 [0, 1],从 2.1.2.0 版本开始支持参数值 [0, 1, 2, 3]。)
|
||||||
说明:缓存最近行,将显著改善 LAST_ROW 函数的性能表现;缓存每列的最近非 NULL 值,将显著改善无特殊影响(WHERE、ORDER BY、GROUP BY、INTERVAL)下的 LAST 函数的性能表现。
|
说明:缓存最近行,将显著改善 LAST_ROW 函数的性能表现;缓存每列的最近非 NULL 值,将显著改善无特殊影响(WHERE、ORDER BY、GROUP BY、INTERVAL)下的 LAST 函数的性能表现。
|
||||||
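
A minimal sketch of turning on row caching and of a query that benefits from it; the database name power and the table meters are assumptions for illustration:

```mysql
-- Cache the most recent row of every sub-table in database 'power'
ALTER DATABASE power CACHELAST 1;
-- Queries of this shape can then be served from the in-memory cache
SELECT LAST_ROW(*) FROM power.meters;
```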
|
|
||||||
```mysql
|
|
||||||
ALTER DATABASE db_name WAL 1;
|
|
||||||
```
|
|
||||||
WAL 参数控制 WAL 日志的落盘方式。缺省值为 1,取值范围为 [1, 2]。1 表示写 WAL,但不执行 fsync;2 表示写 WAL,而且执行 fsync。
|
|
||||||
|
|
||||||
```mysql
|
|
||||||
ALTER DATABASE db_name FSYNC 3000;
|
|
||||||
```
|
|
||||||
FSYNC 参数控制执行 fsync 操作的周期。缺省值为 3000,单位是毫秒,取值范围为 [0, 180000]。如果设置为 0,表示每次写入,立即执行 fsync。该设置项主要用于调节 WAL 参数设为 2 时的系统行为。
|
|
||||||
|
|
||||||
**Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。另外,从 2.1.3.0 版本开始,修改这些参数后无需重启服务器即可生效。
|
**Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。另外,从 2.1.3.0 版本开始,修改这些参数后无需重启服务器即可生效。
|
||||||
|
|
||||||
- **显示系统所有数据库**
|
- **显示系统所有数据库**
|
||||||
|
@ -372,77 +362,82 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
|
||||||
|
|
||||||
## <a class="anchor" id="insert"></a>数据写入
|
## <a class="anchor" id="insert"></a>数据写入
|
||||||
|
|
||||||
- **插入一条记录**
|
### 写入语法:
|
||||||
```mysql
|
|
||||||
INSERT INTO tb_name VALUES (field_value, ...);
|
|
||||||
```
|
|
||||||
向表tb_name中插入一条记录。
|
|
||||||
|
|
||||||
- **插入一条记录,数据对应到指定的列**
|
```mysql
|
||||||
```mysql
|
INSERT INTO
|
||||||
INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...);
|
tb_name
|
||||||
```
|
[USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)]
|
||||||
向表tb_name中插入一条记录,数据对应到指定的列。SQL语句中没有出现的列,数据库将自动填充为NULL。主键(时间戳)不能为NULL。
|
[(field1_name, ...)]
|
||||||
|
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
|
||||||
|
[tb2_name
|
||||||
|
[USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)]
|
||||||
|
[(field1_name, ...)]
|
||||||
|
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
|
||||||
|
...];
|
||||||
|
```
|
||||||
|
|
||||||
- **插入多条记录**
|
### 详细描述及示例:
|
||||||
```mysql
|
|
||||||
INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
|
|
||||||
```
|
|
||||||
向表tb_name中插入多条记录。
|
|
||||||
**注意**:在使用“插入多条记录”方式写入数据时,不能把第一列的时间戳取值都设为now,否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。
|
|
||||||
|
|
||||||
- **按指定的列插入多条记录**
|
- **插入一条或多条记录**
|
||||||
|
指定已经创建好的数据子表的表名,并通过 VALUES 关键字提供一行或多行数据,即可向数据库写入这些数据。例如,执行如下语句可以写入一行记录:
|
||||||
```mysql
|
```mysql
|
||||||
INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
|
INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32);
|
||||||
```
|
```
|
||||||
向表tb_name中按指定的列插入多条记录。
|
或者,可以通过如下语句写入两行记录:
|
||||||
|
```mysql
|
||||||
|
INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (1626164208000, 10.15, 217, 0.33);
|
||||||
|
```
|
||||||
|
**注意:**
|
||||||
|
1)在第二个例子中,两行记录的首列时间戳使用了不同格式的写法。其中字符串格式的时间戳写法不受所在 DATABASE 的时间精度设置影响;而长整形格式的时间戳写法会受到所在 DATABASE 的时间精度设置影响——例子中的时间戳在毫秒精度下可以写作 1626164208000,而如果是在微秒精度设置下就需要写为 1626164208000000。
|
||||||
|
2)在使用“插入多条记录”方式写入数据时,不能把第一列的时间戳取值都设为 NOW,否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。其原因在于,NOW 函数在执行中会被解析为所在 SQL 语句的实际执行时间,出现在同一语句中的多个 NOW 标记也就会被替换为完全相同的时间戳取值。
|
||||||
|
3)允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的 keep 值(数据保留的天数);允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的 days 值(数据文件存储数据的时间跨度,单位为天)。keep 和 days 都是可以在创建数据库时指定的,缺省值分别是 3650 天和 10 天。
|
||||||
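
To make notes 1 and 3 concrete, a minimal sketch of two databases that differ only in timestamp precision; the names power_ms/power_us and the KEEP/DAYS values (the documented defaults) are assumptions for illustration:

```mysql
-- Millisecond precision (the default): the example timestamp is written as 1626164208000
CREATE DATABASE power_ms KEEP 3650 DAYS 10 PRECISION 'ms';
-- Microsecond precision: the same instant must be written as 1626164208000000
CREATE DATABASE power_us KEEP 3650 DAYS 10 PRECISION 'us';
```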
|
|
||||||
- **向多个表插入多条记录**
|
- **插入记录,数据对应到指定的列**
|
||||||
|
向数据子表中插入记录时,无论插入一行还是多行,都可以让数据对应到指定的列。对于 SQL 语句中没有出现的列,数据库将自动填充为 NULL。主键(时间戳)不能为 NULL。例如:
|
||||||
```mysql
|
```mysql
|
||||||
INSERT INTO tb1_name VALUES (field1_value1, ...) (field1_value2, ...) ...
|
INSERT INTO d1001 (ts, current, phase) VALUES ('2021-07-13 14:06:33.196', 10.27, 0.31);
|
||||||
tb2_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
|
|
||||||
```
|
```
|
||||||
同时向表tb1_name和tb2_name中分别插入多条记录。
|
**说明:**如果不指定列,也即使用全列模式——那么在 VALUES 部分提供的数据,必须为数据表的每个列都显式地提供数据。全列模式写入速度会远快于指定列,因此建议尽可能采用全列写入方式,此时空列可以填入 NULL。
|
||||||
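
A full-column write where one column is left empty might look like the sketch below, reusing d1001 with illustrative values:

```mysql
-- All four columns of d1001 are supplied; the empty voltage column is written as NULL
INSERT INTO d1001 VALUES ('2021-07-13 14:06:36.000', 10.2, NULL, 0.32);
```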
|
|
||||||
- **同时向多个表按列插入多条记录**
|
- **向多个表插入记录**
|
||||||
|
可以在一条语句中,分别向多个表插入一条或多条记录,并且也可以在插入过程中指定列。例如:
|
||||||
```mysql
|
```mysql
|
||||||
INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...
|
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
||||||
tb2_name (tb2_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
|
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
|
||||||
```
|
```
|
||||||
同时向表tb1_name和tb2_name中按列分别插入多条记录。
|
|
||||||
|
|
||||||
注意:
|
- <a class="anchor" id="auto_create_table"></a>**插入记录时自动建表**
|
||||||
1) 如果时间戳为now,系统将自动使用客户端当前时间作为该记录的时间戳;
|
如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。例如:
|
||||||
2) 允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。
|
```mysql
|
||||||
|
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
|
||||||
|
```
|
||||||
|
也可以在自动建表时,只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。例如:
|
||||||
|
```mysql
|
||||||
|
INSERT INTO d21001 USING meters (groupdId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33);
|
||||||
|
```
|
||||||
|
自动建表语法也支持在一条语句中向多个表插入记录。例如:
|
||||||
|
```mysql
|
||||||
|
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
||||||
|
d21002 USING meters (groupdId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
|
||||||
|
d21003 USING meters (groupdId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
|
||||||
|
```
|
||||||
|
**说明:**在 2.0.20.5 版本之前,在使用自动建表语法并指定列时,子表的列名必须紧跟在子表名称后面,而不能如例子里那样放在 TAGS 和 VALUES 之间。从 2.0.20.5 版本开始,两种写法都可以,但不能在一条 SQL 语句中混用,否则会报语法错误。
|
||||||
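
The two column-list positions mentioned above would look roughly like this; the sketch reuses the meters/groupdId names from the earlier examples and the values are illustrative:

```mysql
-- Since 2.0.20.5: column list placed between TAGS and VALUES
INSERT INTO d21004 USING meters (groupdId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:36.000', 10.2, 0.31);
-- Older form: column list immediately after the sub-table name
INSERT INTO d21004 (ts, current, phase) USING meters (groupdId) TAGS (2) VALUES ('2021-07-13 14:06:37.000', 10.3, 0.32);
```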
|
|
||||||
- <a class="anchor" id="auto_create_table"></a>**插入记录时自动建表**
|
- **插入来自文件的数据记录**
|
||||||
```mysql
|
除了使用 VALUES 关键字插入一行或多行数据外,也可以把要写入的数据放在 CSV 文件中(英文逗号分隔、英文单引号括住每个值)供 SQL 指令读取。其中 CSV 文件无需表头。例如,如果 /tmp/csvfile.csv 文件的内容为:
|
||||||
INSERT INTO tb_name USING stb_name TAGS (tag_value1, ...) VALUES (field_value1, ...);
|
|
||||||
```
|
```
|
||||||
如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。
|
'2021-07-13 14:07:34.630', '10.2', '219', '0.32'
|
||||||
|
'2021-07-13 14:07:35.779', '10.15', '217', '0.33'
|
||||||
- **插入记录时自动建表,并指定具体的 TAGS 列**
|
|
||||||
```mysql
|
|
||||||
INSERT INTO tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...) VALUES (field_value1, ...);
|
|
||||||
```
|
```
|
||||||
在自动建表时,可以只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将取为空值。
|
那么通过如下指令可以把这个文件中的数据写入子表中:
|
||||||
|
|
||||||
- **同时向多个表按列插入多条记录,自动建表**
|
|
||||||
```mysql
|
```mysql
|
||||||
INSERT INTO tb1_name (tb1_field1_name, ...) [USING stb1_name TAGS (tag_value1, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ...
|
INSERT INTO d1001 FILE '/tmp/csvfile.csv';
|
||||||
tb2_name (tb2_field1_name, ...) [USING stb2_name TAGS (tag_value2, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ...;
|
|
||||||
```
|
```
|
||||||
以自动建表的方式,同时向表tb1_name和tb2_name中按列分别插入多条记录。
|
|
||||||
说明:`(tb1_field1_name, ...)`的部分可以省略掉,这样就是使用全列模式写入——也即在 VALUES 部分提供的数据,必须为数据表的每个列都显式地提供数据。全列写入速度会远快于指定列,因此建议尽可能采用全列写入方式,此时空列可以填入NULL。
|
|
||||||
从 2.0.20.5 版本开始,子表的列名可以不跟在子表名称后面,而是可以放在 TAGS 和 VALUES 之间,例如像下面这样写:
|
|
||||||
```mysql
|
|
||||||
INSERT INTO tb1_name [USING stb1_name TAGS (tag_value1, ...)] (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
|
|
||||||
```
|
|
||||||
注意:虽然两种写法都可以,但并不能在一条 SQL 语句中混用,否则会报语法错误。
|
|
||||||
|
|
||||||
**历史记录写入**:可使用IMPORT或者INSERT命令,IMPORT的语法,功能与INSERT完全一样。
|
**历史记录写入**:可使用IMPORT或者INSERT命令,IMPORT的语法,功能与INSERT完全一样。
|
||||||
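
A minimal sketch of the IMPORT form, reusing table d1001 from the earlier examples with an illustrative historical row:

```mysql
-- IMPORT uses the same syntax as INSERT and is intended for back-filling historical data
IMPORT INTO d1001 VALUES ('2020-01-01 00:00:00.000', 9.8, 215, 0.30);
```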
|
|
||||||
说明:针对 insert 类型的 SQL 语句,我们采用的流式解析策略,在发现后面的错误之前,前面正确的部分SQL仍会执行。下面的sql中,insert语句是无效的,但是d1001仍会被创建。
|
**说明:**针对 insert 类型的 SQL 语句,我们采用的流式解析策略,在发现后面的错误之前,前面正确的部分 SQL 仍会执行。下面的 SQL 中,INSERT 语句是无效的,但是 d1001 仍会被创建。
|
||||||
|
|
||||||
```mysql
|
```mysql
|
||||||
taos> CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);
|
taos> CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);
|
||||||
|
@ -1342,6 +1337,7 @@ SELECT function_list FROM stb_name
|
||||||
- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:TOP、BOTTOM、DIFF 以及四则运算)。
|
- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:TOP、BOTTOM、DIFF 以及四则运算)。
|
||||||
- 查询过滤、聚合等操作按照每个切分窗口为独立的单位执行。聚合查询目前支持三种窗口的划分方式:
|
- 查询过滤、聚合等操作按照每个切分窗口为独立的单位执行。聚合查询目前支持三种窗口的划分方式:
|
||||||
1. 时间窗口:聚合时间段的窗口宽度由关键词 INTERVAL 指定,最短时间间隔 10 毫秒(10a);并且支持偏移 offset(偏移必须小于间隔),也即时间窗口划分与“UTC 时刻 0”相比的偏移量。SLIDING 语句用于指定聚合时间段的前向增量,也即每次窗口向前滑动的时长。当 SLIDING 与 INTERVAL 取值相等的时候,滑动窗口即为翻转窗口。
|
1. 时间窗口:聚合时间段的窗口宽度由关键词 INTERVAL 指定,最短时间间隔 10 毫秒(10a);并且支持偏移 offset(偏移必须小于间隔),也即时间窗口划分与“UTC 时刻 0”相比的偏移量。SLIDING 语句用于指定聚合时间段的前向增量,也即每次窗口向前滑动的时长。当 SLIDING 与 INTERVAL 取值相等的时候,滑动窗口即为翻转窗口。
|
||||||
|
* 从 2.1.5.0 版本开始,INTERVAL 语句允许的最短时间间隔调整为 1 微秒(1u),当然如果所查询的 DATABASE 的时间精度设置为毫秒级,那么允许的最短时间间隔为 1 毫秒(1a)。
|
||||||
2. 状态窗口:使用整数(布尔值)或字符串来标识产生记录时设备的状态量,产生的记录如果具有相同的状态量取值则归属于同一个状态窗口,数值改变后该窗口关闭。状态量所对应的列作为 STATE_WINDOW 语句的参数来指定。
|
2. 状态窗口:使用整数(布尔值)或字符串来标识产生记录时设备的状态量,产生的记录如果具有相同的状态量取值则归属于同一个状态窗口,数值改变后该窗口关闭。状态量所对应的列作为 STATE_WINDOW 语句的参数来指定。
|
||||||
3. 会话窗口:时间戳所在的列由 SESSION 语句的 ts_col 参数指定,会话窗口根据相邻两条记录的时间戳差值来确定是否属于同一个会话——如果时间戳差异在 tol_val 以内,则认为记录仍属于同一个窗口;如果时间变化超过 tol_val,则自动开启下一个窗口。
|
3. 会话窗口:时间戳所在的列由 SESSION 语句的 ts_col 参数指定,会话窗口根据相邻两条记录的时间戳差值来确定是否属于同一个会话——如果时间戳差异在 tol_val 以内,则认为记录仍属于同一个窗口;如果时间变化超过 tol_val,则自动开启下一个窗口。
|
||||||
- WHERE 语句可以指定查询的起止时间和其他过滤条件。
|
- WHERE 语句可以指定查询的起止时间和其他过滤条件。
|
||||||
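
The three window types could be exercised with statements along the following lines; this is a sketch that reuses the meters/d1001 examples, and the integer column status used by STATE_WINDOW is hypothetical, not part of the schema shown earlier:

```mysql
-- Time window: 1-minute windows sliding forward every 30 seconds
SELECT AVG(current), MAX(voltage) FROM meters INTERVAL(1m) SLIDING(30s);
-- State window on a sub-table; 'status' is a hypothetical integer column
SELECT COUNT(*), FIRST(ts) FROM d1001 STATE_WINDOW(status);
-- Session window on a sub-table: gaps larger than 30 seconds start a new window
SELECT COUNT(*), FIRST(ts) FROM d1001 SESSION(ts, 30s);
```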
|
|
|
@ -149,6 +149,8 @@ keepColumnName 1
|
||||||
|
|
||||||
# system time zone
|
# system time zone
|
||||||
# timezone Asia/Shanghai (CST, +0800)
|
# timezone Asia/Shanghai (CST, +0800)
|
||||||
|
# system time zone (for windows 10)
|
||||||
|
# timezone UTC-8
|
||||||
|
|
||||||
# system locale
|
# system locale
|
||||||
# locale en_US.UTF-8
|
# locale en_US.UTF-8
|
||||||
|
|
|
@ -123,7 +123,7 @@ else
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "$verType" == "beta" ]; then
|
if [ "$verType" == "beta" ]; then
|
||||||
debname=${debname}-${verType}".deb"
|
debname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".deb"
|
||||||
elif [ "$verType" == "stable" ]; then
|
elif [ "$verType" == "stable" ]; then
|
||||||
debname=${debname}".deb"
|
debname=${debname}".deb"
|
||||||
else
|
else
|
||||||
|
@ -131,6 +131,8 @@ else
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# make deb package
|
# make deb package
|
||||||
dpkg -b ${pkg_dir} $debname
|
dpkg -b ${pkg_dir} $debname
|
||||||
echo "make deb package success!"
|
echo "make deb package success!"
|
||||||
|
|
|
@ -5,22 +5,28 @@ set -e
|
||||||
# dockerbuild.sh
|
# dockerbuild.sh
|
||||||
# -n [version number]
|
# -n [version number]
|
||||||
# -p [xxxx]
|
# -p [xxxx]
|
||||||
|
# -V [stable | beta]
|
||||||
|
|
||||||
# set parameters by default value
|
# set parameters by default value
|
||||||
verNumber=""
|
version=""
|
||||||
passWord=""
|
passWord=""
|
||||||
|
verType=""
|
||||||
|
|
||||||
while getopts "hn:p:" arg
|
while getopts "hn:p:V:" arg
|
||||||
do
|
do
|
||||||
case $arg in
|
case $arg in
|
||||||
n)
|
n)
|
||||||
#echo "verNumber=$OPTARG"
|
#echo "version=$OPTARG"
|
||||||
verNumber=$(echo $OPTARG)
|
version=$(echo $OPTARG)
|
||||||
;;
|
;;
|
||||||
p)
|
p)
|
||||||
#echo "passWord=$OPTARG"
|
#echo "passWord=$OPTARG"
|
||||||
passWord=$(echo $OPTARG)
|
passWord=$(echo $OPTARG)
|
||||||
;;
|
;;
|
||||||
|
V)
|
||||||
|
#echo "verType=$OPTARG"
|
||||||
|
verType=$(echo $OPTARG)
|
||||||
|
;;
|
||||||
h)
|
h)
|
||||||
echo "Usage: `basename $0` -n [version number] "
|
echo "Usage: `basename $0` -n [version number] "
|
||||||
echo " -p [password for docker hub] "
|
echo " -p [password for docker hub] "
|
||||||
|
@ -33,13 +39,34 @@ do
|
||||||
esac
|
esac
|
||||||
done
|
done
|
||||||
|
|
||||||
echo "verNumber=${verNumber}"
|
echo "version=${version}"
|
||||||
|
|
||||||
#docker manifest create -a tdengine/tdengine:${verNumber} tdengine/tdengine-amd64:${verNumber} tdengine/tdengine-aarch64:${verNumber} tdengine/tdengine-aarch32:${verNumber}
|
#docker manifest rm tdengine/tdengine
|
||||||
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
|
#docker manifest rm tdengine/tdengine:${version}
|
||||||
|
if [ "$verType" == "beta" ]; then
|
||||||
|
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
|
||||||
|
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
|
||||||
|
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
|
||||||
|
docker manifest push tdengine/tdengine-beta:latest
|
||||||
|
docker manifest push tdengine/tdengine-beta:${version}
|
||||||
|
|
||||||
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
|
elif [ "$verType" == "stable" ]; then
|
||||||
|
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
|
||||||
|
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
|
||||||
|
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
|
||||||
|
docker manifest push tdengine/tdengine:latest
|
||||||
|
docker manifest push tdengine/tdengine:${version}
|
||||||
|
|
||||||
docker manifest push tdengine/tdengine:latest
|
else
|
||||||
|
echo "unknow verType, nor stabel or beta"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# how set latest version ???
|
# docker manifest create -a tdengine/${dockername}:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
|
||||||
|
# docker manifest create -a tdengine/${dockername}:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
|
||||||
|
|
||||||
|
# docker login -u tdengine -p ${passWord} #replace the docker registry username and password
|
||||||
|
|
||||||
|
# docker manifest push tdengine/tdengine:latest
|
||||||
|
|
||||||
|
# # how set latest version ???
|
||||||
|
|
|
@ -1,20 +1,24 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
#set -x
|
#set -x
|
||||||
|
|
||||||
# dockerbuild.sh
|
# dockerbuild.sh
|
||||||
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
|
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
|
||||||
# -f [pkg file]
|
|
||||||
# -n [version number]
|
# -n [version number]
|
||||||
# -p [password for docker hub]
|
# -p [password for docker hub]
|
||||||
|
# -V [stable | beta]
|
||||||
|
# -f [pkg file]
|
||||||
|
|
||||||
# set parameters by default value
|
# set parameters by default value
|
||||||
cpuType=amd64
|
cpuType=""
|
||||||
verNumber=""
|
version=""
|
||||||
passWord=""
|
passWord=""
|
||||||
pkgFile=""
|
pkgFile=""
|
||||||
|
verType="stable"
|
||||||
|
|
||||||
while getopts "hc:n:p:f:" arg
|
while getopts "hc:n:p:f:V:" arg
|
||||||
do
|
do
|
||||||
case $arg in
|
case $arg in
|
||||||
c)
|
c)
|
||||||
|
@ -22,8 +26,8 @@ do
|
||||||
cpuType=$(echo $OPTARG)
|
cpuType=$(echo $OPTARG)
|
||||||
;;
|
;;
|
||||||
n)
|
n)
|
||||||
#echo "verNumber=$OPTARG"
|
#echo "version=$OPTARG"
|
||||||
verNumber=$(echo $OPTARG)
|
version=$(echo $OPTARG)
|
||||||
;;
|
;;
|
||||||
p)
|
p)
|
||||||
#echo "passWord=$OPTARG"
|
#echo "passWord=$OPTARG"
|
||||||
|
@ -33,11 +37,17 @@ do
|
||||||
#echo "pkgFile=$OPTARG"
|
#echo "pkgFile=$OPTARG"
|
||||||
pkgFile=$(echo $OPTARG)
|
pkgFile=$(echo $OPTARG)
|
||||||
;;
|
;;
|
||||||
|
V)
|
||||||
|
#echo "verType=$OPTARG"
|
||||||
|
verType=$(echo $OPTARG)
|
||||||
|
;;
|
||||||
h)
|
h)
|
||||||
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
|
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
|
||||||
echo " -f [pkg file] "
|
|
||||||
echo " -n [version number] "
|
echo " -n [version number] "
|
||||||
echo " -p [password for docker hub] "
|
echo " -p [password for docker hub] "
|
||||||
|
echo " -V [stable | beta] "
|
||||||
|
echo " -f [pkg file] "
|
||||||
|
|
||||||
exit 0
|
exit 0
|
||||||
;;
|
;;
|
||||||
?) #unknow option
|
?) #unknow option
|
||||||
|
@ -47,17 +57,44 @@ do
|
||||||
esac
|
esac
|
||||||
done
|
done
|
||||||
|
|
||||||
echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
|
# if [ "$verType" == "beta" ]; then
|
||||||
|
# pkgFile=TDengine-server-${version}-Linux-${cpuType}-${verType}.tar.gz
|
||||||
|
# elif [ "$verType" == "stable" ]; then
|
||||||
|
# pkgFile=TDengine-server-${version}-Linux-${cpuType}.tar.gz
|
||||||
|
# else
|
||||||
|
# echo "unknow verType, nor stabel or beta"
|
||||||
|
# exit 1
|
||||||
|
|
||||||
|
if [ "$verType" == "beta" ]; then
|
||||||
|
dockername=${cpuType}-${verType}
|
||||||
|
elif [ "$verType" == "stable" ]; then
|
||||||
|
dockername=${cpuType}
|
||||||
|
else
|
||||||
|
echo "unknow verType, nor stabel or beta"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
echo "cpuType=${cpuType} version=${version} pkgFile=${pkgFile} verType=${verType} "
|
||||||
echo "$(pwd)"
|
echo "$(pwd)"
|
||||||
echo "====NOTES: ${pkgFile} must be in the same directory as dockerbuild.sh===="
|
echo "====NOTES: ${pkgFile} must be in the same directory as dockerbuild.sh===="
|
||||||
|
|
||||||
dirName=${pkgFile%-Linux*}
|
scriptDir=$(dirname $(readlink -f $0))
|
||||||
#echo "dirName=${dirName}"
|
comunityArchiveDir=/nas/TDengine/v$version/community # community version’package directory
|
||||||
|
cd ${scriptDir}
|
||||||
|
cp -f ${comunityArchiveDir}/${pkgFile} .
|
||||||
|
|
||||||
docker build --rm -f "Dockerfile" -t tdengine/tdengine-${cpuType}:${verNumber} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName}
|
dirName=${pkgFile%-Linux*}
|
||||||
|
echo "dirName=${dirName}"
|
||||||
|
|
||||||
|
|
||||||
|
docker build --rm -f "Dockerfile" -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName}
|
||||||
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
|
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
|
||||||
docker push tdengine/tdengine-${cpuType}:${verNumber}
|
docker push tdengine/tdengine-${dockername}:${version}
|
||||||
|
|
||||||
# set this version to latest version
|
# set this version to latest version
|
||||||
docker tag tdengine/tdengine-${cpuType}:${verNumber} tdengine/tdengine-${cpuType}:latest
|
docker tag tdengine/tdengine-${dockername}:${version} tdengine/tdengine-${dockername}:latest
|
||||||
docker push tdengine/tdengine-${cpuType}:latest
|
docker push tdengine/tdengine-${dockername}:latest
|
||||||
|
|
||||||
|
|
||||||
|
rm -f ${pkgFile}
|
|
@ -73,7 +73,7 @@ else
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "$verType" == "beta" ]; then
|
if [ "$verType" == "beta" ]; then
|
||||||
rpmname=${rpmname}-${verType}".rpm"
|
rpmname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".rpm"
|
||||||
elif [ "$verType" == "stable" ]; then
|
elif [ "$verType" == "stable" ]; then
|
||||||
rpmname=${rpmname}".rpm"
|
rpmname=${rpmname}".rpm"
|
||||||
else
|
else
|
||||||
|
|
|
@ -47,24 +47,28 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}
|
||||||
|
|
||||||
cd ${release_dir}
|
cd ${release_dir}
|
||||||
|
|
||||||
if [ "$verMode" == "cluster" ]; then
|
# install_dir has been distinguishes cluster from edege, so comments this code
|
||||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
pkg_name=${install_dir}-${osType}-${cpuType}
|
||||||
elif [ "$verMode" == "edge" ]; then
|
|
||||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
# if [ "$verMode" == "cluster" ]; then
|
||||||
else
|
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||||
echo "unknow verMode, nor cluster or edge"
|
# elif [ "$verMode" == "edge" ]; then
|
||||||
exit 1
|
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||||
fi
|
# else
|
||||||
|
# echo "unknow verMode, nor cluster or edge"
|
||||||
|
# exit 1
|
||||||
|
# fi
|
||||||
|
|
||||||
if [ "$verType" == "beta" ]; then
|
if [ "$verType" == "beta" ]; then
|
||||||
pkg_name=${pkg_name}-${verType}
|
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||||
elif [ "$verType" == "stable" ]; then
|
elif [ "$verType" == "stable" ]; then
|
||||||
pkg_name=${pkg_name}
|
pkg_name=${pkg_name}
|
||||||
else
|
else
|
||||||
echo "unknow verType, nor stabel or beta"
|
echo "unknow verType, nor stabel or beta"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
||||||
exitcode=$?
|
exitcode=$?
|
||||||
if [ "$exitcode" != "0" ]; then
|
if [ "$exitcode" != "0" ]; then
|
||||||
|
|
|
@ -170,12 +170,24 @@ fi
|
||||||
|
|
||||||
cd ${release_dir}
|
cd ${release_dir}
|
||||||
|
|
||||||
if [ "$verMode" == "cluster" ]; then
|
# install_dir has been distinguishes cluster from edege, so comments this code
|
||||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
pkg_name=${install_dir}-${osType}-${cpuType}
|
||||||
elif [ "$verMode" == "edge" ]; then
|
|
||||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
# if [ "$verMode" == "cluster" ]; then
|
||||||
|
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||||
|
# elif [ "$verMode" == "edge" ]; then
|
||||||
|
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||||
|
# else
|
||||||
|
# echo "unknow verMode, nor cluster or edge"
|
||||||
|
# exit 1
|
||||||
|
# fi
|
||||||
|
|
||||||
|
if [ "$verType" == "beta" ]; then
|
||||||
|
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||||
|
elif [ "$verType" == "stable" ]; then
|
||||||
|
pkg_name=${pkg_name}
|
||||||
else
|
else
|
||||||
echo "unknow verMode, nor cluster or edge"
|
echo "unknow verType, nor stabel or beta"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
@ -183,15 +195,6 @@ if [ "$pagMode" == "lite" ]; then
|
||||||
pkg_name=${pkg_name}-Lite
|
pkg_name=${pkg_name}-Lite
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "$verType" == "beta" ]; then
|
|
||||||
pkg_name=${pkg_name}-${verType}
|
|
||||||
elif [ "$verType" == "stable" ]; then
|
|
||||||
pkg_name=${pkg_name}
|
|
||||||
else
|
|
||||||
echo "unknow verType, nor stable or beta"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$osType" != "Darwin" ]; then
|
if [ "$osType" != "Darwin" ]; then
|
||||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
||||||
else
|
else
|
||||||
|
|
|
@ -203,21 +203,20 @@ fi
|
||||||
|
|
||||||
cd ${release_dir}
|
cd ${release_dir}
|
||||||
|
|
||||||
if [ "$verMode" == "cluster" ]; then
|
# install_dir has been distinguishes cluster from edege, so comments this code
|
||||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
pkg_name=${install_dir}-${osType}-${cpuType}
|
||||||
elif [ "$verMode" == "edge" ]; then
|
|
||||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
|
||||||
else
|
|
||||||
echo "unknow verMode, nor cluster or edge"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$pagMode" == "lite" ]; then
|
# if [ "$verMode" == "cluster" ]; then
|
||||||
pkg_name=${pkg_name}-Lite
|
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||||
fi
|
# elif [ "$verMode" == "edge" ]; then
|
||||||
|
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||||
|
# else
|
||||||
|
# echo "unknow verMode, nor cluster or edge"
|
||||||
|
# exit 1
|
||||||
|
# fi
|
||||||
|
|
||||||
if [ "$verType" == "beta" ]; then
|
if [ "$verType" == "beta" ]; then
|
||||||
pkg_name=${pkg_name}-${verType}
|
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||||
elif [ "$verType" == "stable" ]; then
|
elif [ "$verType" == "stable" ]; then
|
||||||
pkg_name=${pkg_name}
|
pkg_name=${pkg_name}
|
||||||
else
|
else
|
||||||
|
@ -225,6 +224,10 @@ else
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ "$pagMode" == "lite" ]; then
|
||||||
|
pkg_name=${pkg_name}-Lite
|
||||||
|
fi
|
||||||
|
|
||||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
||||||
exitcode=$?
|
exitcode=$?
|
||||||
if [ "$exitcode" != "0" ]; then
|
if [ "$exitcode" != "0" ]; then
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
name: tdengine
|
name: tdengine
|
||||||
base: core18
|
base: core18
|
||||||
version: '2.1.3.0'
|
version: '2.1.4.1'
|
||||||
icon: snap/gui/t-dengine.svg
|
icon: snap/gui/t-dengine.svg
|
||||||
summary: an open-source big data platform designed and optimized for IoT.
|
summary: an open-source big data platform designed and optimized for IoT.
|
||||||
description: |
|
description: |
|
||||||
|
@ -72,7 +72,7 @@ parts:
|
||||||
- usr/bin/taosd
|
- usr/bin/taosd
|
||||||
- usr/bin/taos
|
- usr/bin/taos
|
||||||
- usr/bin/taosdemo
|
- usr/bin/taosdemo
|
||||||
- usr/lib/libtaos.so.2.1.3.0
|
- usr/lib/libtaos.so.2.1.4.1
|
||||||
- usr/lib/libtaos.so.1
|
- usr/lib/libtaos.so.1
|
||||||
- usr/lib/libtaos.so
|
- usr/lib/libtaos.so
|
||||||
|
|
||||||
|
|
|
@ -1129,6 +1129,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
||||||
SSqlCmd *pCmd = &pSql->cmd;
|
SSqlCmd *pCmd = &pSql->cmd;
|
||||||
|
|
||||||
SInsertStatementParam* pInsertParam = &pCmd->insertParam;
|
SInsertStatementParam* pInsertParam = &pCmd->insertParam;
|
||||||
|
pInsertParam->objectId = pSql->self;
|
||||||
char* str = pInsertParam->sql;
|
char* str = pInsertParam->sql;
|
||||||
|
|
||||||
int32_t totalNum = 0;
|
int32_t totalNum = 0;
|
||||||
|
|
File diff suppressed because it is too large
|
@ -1200,9 +1200,11 @@ static int insertBatchStmtExecute(STscStmt* pStmt) {
|
||||||
// wait for the callback function to post the semaphore
|
// wait for the callback function to post the semaphore
|
||||||
tsem_wait(&pStmt->pSql->rspSem);
|
tsem_wait(&pStmt->pSql->rspSem);
|
||||||
|
|
||||||
|
code = pStmt->pSql->res.code;
|
||||||
|
|
||||||
insertBatchClean(pStmt);
|
insertBatchClean(pStmt);
|
||||||
|
|
||||||
return pStmt->pSql->res.code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
|
int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
|
||||||
|
@ -1470,6 +1472,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
|
||||||
pSql->fetchFp = waitForQueryRsp;
|
pSql->fetchFp = waitForQueryRsp;
|
||||||
|
|
||||||
pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT;
|
pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT;
|
||||||
|
pCmd->insertParam.objectId = pSql->self;
|
||||||
|
|
||||||
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
|
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
|
||||||
|
|
||||||
|
@ -1646,7 +1649,11 @@ int taos_stmt_close(TAOS_STMT* stmt) {
|
||||||
} else {
|
} else {
|
||||||
if (pStmt->multiTbInsert) {
|
if (pStmt->multiTbInsert) {
|
||||||
taosHashCleanup(pStmt->mtb.pTableHash);
|
taosHashCleanup(pStmt->mtb.pTableHash);
|
||||||
pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, false);
|
bool rmMeta = false;
|
||||||
|
if (pStmt->pSql && pStmt->pSql->res.code != 0) {
|
||||||
|
rmMeta = true;
|
||||||
|
}
|
||||||
|
pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, rmMeta);
|
||||||
taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList);
|
taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList);
|
||||||
pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL;
|
pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL;
|
||||||
taosArrayDestroy(pStmt->mtb.tags);
|
taosArrayDestroy(pStmt->mtb.tags);
|
||||||
|
|
|
@ -3342,6 +3342,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
|
||||||
pnCmd->insertParam.numOfTables = 0;
|
pnCmd->insertParam.numOfTables = 0;
|
||||||
pnCmd->insertParam.pTableNameList = NULL;
|
pnCmd->insertParam.pTableNameList = NULL;
|
||||||
pnCmd->insertParam.pTableBlockHashList = NULL;
|
pnCmd->insertParam.pTableBlockHashList = NULL;
|
||||||
|
pnCmd->insertParam.objectId = pNew->self;
|
||||||
|
|
||||||
memset(&pnCmd->insertParam.tagData, 0, sizeof(STagData));
|
memset(&pnCmd->insertParam.tagData, 0, sizeof(STagData));
|
||||||
|
|
||||||
|
@ -3608,6 +3609,7 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
|
||||||
pNew->signature = pNew;
|
pNew->signature = pNew;
|
||||||
pNew->sqlstr = strdup(pSql->sqlstr); // todo refactor
|
pNew->sqlstr = strdup(pSql->sqlstr); // todo refactor
|
||||||
pNew->fp = tscSubqueryCompleteCallback;
|
pNew->fp = tscSubqueryCompleteCallback;
|
||||||
|
tsem_init(&pNew->rspSem, 0, 0);
|
||||||
|
|
||||||
SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id
|
SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id
|
||||||
ps->pParentSql = pSql;
|
ps->pParentSql = pSql;
|
||||||
|
|
|
@ -8,8 +8,8 @@ IF (TD_MVN_INSTALLED)
|
||||||
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
|
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
|
||||||
POST_BUILD
|
POST_BUILD
|
||||||
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
|
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
|
||||||
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.32-dist.jar ${LIBRARY_OUTPUT_PATH}
|
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-*-dist.jar ${LIBRARY_OUTPUT_PATH}
|
||||||
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
|
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
|
||||||
COMMENT "build jdbc driver")
|
COMMENT "build jdbc driver")
|
||||||
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
|
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
|
@ -3,7 +3,7 @@
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>2.0.32</version>
|
<version>2.0.33</version>
|
||||||
<packaging>jar</packaging>
|
<packaging>jar</packaging>
|
||||||
<name>JDBCDriver</name>
|
<name>JDBCDriver</name>
|
||||||
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
|
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
|
||||||
|
@ -40,7 +40,7 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>junit</groupId>
|
<groupId>junit</groupId>
|
||||||
<artifactId>junit</artifactId>
|
<artifactId>junit</artifactId>
|
||||||
<version>4.13</version>
|
<version>4.13.1</version>
|
||||||
<scope>test</scope>
|
<scope>test</scope>
|
||||||
</dependency>
|
</dependency>
|
||||||
<!-- for restful -->
|
<!-- for restful -->
|
||||||
|
@ -57,7 +57,7 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.google.guava</groupId>
|
<groupId>com.google.guava</groupId>
|
||||||
<artifactId>guava</artifactId>
|
<artifactId>guava</artifactId>
|
||||||
<version>29.0-jre</version>
|
<version>30.0-jre</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
|
||||||
|
|
|
@ -110,7 +110,7 @@ public class Utils {
|
||||||
return rawSql;
|
return rawSql;
|
||||||
// toLowerCase
|
// toLowerCase
|
||||||
String preparedSql = rawSql.trim().toLowerCase();
|
String preparedSql = rawSql.trim().toLowerCase();
|
||||||
String[] clause = new String[]{"values\\s*\\(.*?\\)", "tags\\s*\\(.*?\\)", "where\\s*.*"};
|
String[] clause = new String[]{"values\\s*\\([\\s\\S]*?\\)", "tags\\s*\\([\\s\\S]*?\\)", "where[\\s\\S]*"};
|
||||||
Map<Integer, Integer> placeholderPositions = new HashMap<>();
|
Map<Integer, Integer> placeholderPositions = new HashMap<>();
|
||||||
RangeSet<Integer> clauseRangeSet = TreeRangeSet.create();
|
RangeSet<Integer> clauseRangeSet = TreeRangeSet.create();
|
||||||
findPlaceholderPosition(preparedSql, placeholderPositions);
|
findPlaceholderPosition(preparedSql, placeholderPositions);
|
||||||
|
|
|
@ -169,6 +169,8 @@ DLL_EXPORT void taos_close_stream(TAOS_STREAM *tstr);
|
||||||
|
|
||||||
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char* tableNameList);
|
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char* tableNameList);
|
||||||
|
|
||||||
|
DLL_EXPORT int taos_insert_lines(TAOS* taos, char* lines[], int numLines);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
File diff suppressed because it is too large
|
@ -124,6 +124,9 @@ typedef struct {
|
||||||
|
|
||||||
extern char version[];
|
extern char version[];
|
||||||
|
|
||||||
|
#define DB_PRECISION_LEN 8
|
||||||
|
#define DB_STATUS_LEN 16
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char name[TSDB_DB_NAME_LEN];
|
char name[TSDB_DB_NAME_LEN];
|
||||||
char create_time[32];
|
char create_time[32];
|
||||||
|
@ -144,9 +147,9 @@ typedef struct {
|
||||||
int32_t fsync;
|
int32_t fsync;
|
||||||
int8_t comp;
|
int8_t comp;
|
||||||
int8_t cachelast;
|
int8_t cachelast;
|
||||||
char precision[8]; // time resolution
|
char precision[DB_PRECISION_LEN]; // time resolution
|
||||||
int8_t update;
|
int8_t update;
|
||||||
char status[16];
|
char status[DB_STATUS_LEN];
|
||||||
} SDbInfo;
|
} SDbInfo;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -542,7 +545,8 @@ static void parse_precision_first(
|
||||||
free(tmp);
|
free(tmp);
|
||||||
exit(-1);
|
exit(-1);
|
||||||
}
|
}
|
||||||
strncpy(g_args.precision, tmp, strlen(tmp));
|
strncpy(g_args.precision, tmp,
|
||||||
|
min(DB_PRECISION_LEN - 1, strlen(tmp)));
|
||||||
free(tmp);
|
free(tmp);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1596,6 +1600,7 @@ static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName)
|
||||||
NULL, g_args.port);
|
NULL, g_args.port);
|
||||||
if (pThread->taosCon == NULL) {
|
if (pThread->taosCon == NULL) {
|
||||||
errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
|
errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
|
||||||
|
free(threadObj);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
pthread_attr_init(&thattr);
|
pthread_attr_init(&thattr);
|
||||||
|
@ -2607,6 +2612,7 @@ static void taosStartDumpInWorkThreads()
|
||||||
NULL, g_args.port);
|
NULL, g_args.port);
|
||||||
if (pThread->taosCon == NULL) {
|
if (pThread->taosCon == NULL) {
|
||||||
errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
|
errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
|
||||||
|
free(threadObj);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
pthread_attr_init(&thattr);
|
pthread_attr_init(&thattr);
|
||||||
|
|
|
@ -24,7 +24,18 @@
|
||||||
memcpy((_k) + sizeof(uint64_t), (_ori), (_len)); \
|
memcpy((_k) + sizeof(uint64_t), (_ori), (_len)); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
|
#define SET_RES_EXT_WINDOW_KEY(_k, _ori, _len, _uid, _buf) \
|
||||||
|
do { \
|
||||||
|
assert(sizeof(_uid) == sizeof(uint64_t)); \
|
||||||
|
*(void **)(_k) = (_buf); \
|
||||||
|
*(uint64_t *)((_k) + POINTER_BYTES) = (_uid); \
|
||||||
|
memcpy((_k) + POINTER_BYTES + sizeof(uint64_t), (_ori), (_len)); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
|
||||||
#define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t))
|
#define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t))
|
||||||
|
#define GET_RES_EXT_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t) + POINTER_BYTES)
|
||||||
|
|
||||||
#define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId)
|
#define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId)
|
||||||
|
|
||||||
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
|
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
|
||||||
|
|
|
@ -1759,6 +1759,49 @@ static void valuePairAssign(tValuePair *dst, int16_t type, const char *val, int6
|
||||||
memcpy((dst)->pTags, (src)->pTags, (size_t)(__l)); \
|
memcpy((dst)->pTags, (src)->pTags, (size_t)(__l)); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
|
static int32_t topBotComparFn(const void *p1, const void *p2, const void *param)
|
||||||
|
{
|
||||||
|
uint16_t type = *(uint16_t *) param;
|
||||||
|
tValuePair *val1 = *(tValuePair **) p1;
|
||||||
|
tValuePair *val2 = *(tValuePair **) p2;
|
||||||
|
|
||||||
|
if (IS_SIGNED_NUMERIC_TYPE(type)) {
|
||||||
|
if (val1->v.i64 == val2->v.i64) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (val1->v.i64 > val2->v.i64) ? 1 : -1;
|
||||||
|
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
|
||||||
|
if (val1->v.u64 == val2->v.u64) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (val1->v.u64 > val2->v.u64) ? 1 : -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (val1->v.dKey == val2->v.dKey) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (val1->v.dKey > val2->v.dKey) ? 1 : -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void topBotSwapFn(void *dst, void *src, const void *param)
|
||||||
|
{
|
||||||
|
char tag[32768];
|
||||||
|
tValuePair temp;
|
||||||
|
uint16_t tagLen = *(uint16_t *) param;
|
||||||
|
tValuePair *vdst = *(tValuePair **) dst;
|
||||||
|
tValuePair *vsrc = *(tValuePair **) src;
|
||||||
|
|
||||||
|
memset(tag, 0, sizeof(tag));
|
||||||
|
temp.pTags = tag;
|
||||||
|
|
||||||
|
VALUEPAIRASSIGN(&temp, vdst, tagLen);
|
||||||
|
VALUEPAIRASSIGN(vdst, vsrc, tagLen);
|
||||||
|
VALUEPAIRASSIGN(vsrc, &temp, tagLen);
|
||||||
|
}
|
||||||
|
|
||||||
static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, int64_t ts, uint16_t type,
|
static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, int64_t ts, uint16_t type,
|
||||||
SExtTagsInfo *pTagInfo, char *pTags, int16_t stage) {
|
SExtTagsInfo *pTagInfo, char *pTags, int16_t stage) {
|
||||||
tVariant val = {0};
|
tVariant val = {0};
|
||||||
|
@@ -1766,61 +1809,19 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData,
   tValuePair **pList = pInfo->res;
   assert(pList != NULL);

   if (pInfo->num < maxLen) {
-    if (pInfo->num == 0 ||
-        (IS_SIGNED_NUMERIC_TYPE(type) && val.i64 >= pList[pInfo->num - 1]->v.i64) ||
-        (IS_UNSIGNED_NUMERIC_TYPE(type) && val.u64 >= pList[pInfo->num - 1]->v.u64) ||
-        (IS_FLOAT_TYPE(type) && val.dKey >= pList[pInfo->num - 1]->v.dKey)) {
-      valuePairAssign(pList[pInfo->num], type, (const char*)&val.i64, ts, pTags, pTagInfo, stage);
-    } else {
-      int32_t i = pInfo->num - 1;
-      if (IS_SIGNED_NUMERIC_TYPE(type)) {
-        while (i >= 0 && pList[i]->v.i64 > val.i64) {
-          VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen);
-          i -= 1;
-        }
-      } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
-        while (i >= 0 && pList[i]->v.u64 > val.u64) {
-          VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen);
-          i -= 1;
-        }
-      } else {
-        while (i >= 0 && pList[i]->v.dKey > val.dKey) {
-          VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen);
-          i -= 1;
-        }
-      }
-
-      valuePairAssign(pList[i + 1], type, (const char*) &val.i64, ts, pTags, pTagInfo, stage);
-    }
+    valuePairAssign(pList[pInfo->num], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage);
+
+    taosheapsort((void *) pList, sizeof(tValuePair **), pInfo->num + 1, (const void *) &type, topBotComparFn, (const void *) &pTagInfo->tagsLen, topBotSwapFn, 0);

     pInfo->num++;
   } else {
-    int32_t i = 0;
-
     if ((IS_SIGNED_NUMERIC_TYPE(type) && val.i64 > pList[0]->v.i64) ||
         (IS_UNSIGNED_NUMERIC_TYPE(type) && val.u64 > pList[0]->v.u64) ||
         (IS_FLOAT_TYPE(type) && val.dKey > pList[0]->v.dKey)) {
-      // find the appropriate the slot position
-      if (IS_SIGNED_NUMERIC_TYPE(type)) {
-        while (i + 1 < maxLen && pList[i + 1]->v.i64 < val.i64) {
-          VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen);
-          i += 1;
-        }
-      } if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
-        while (i + 1 < maxLen && pList[i + 1]->v.u64 < val.u64) {
-          VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen);
-          i += 1;
-        }
-      } else {
-        while (i + 1 < maxLen && pList[i + 1]->v.dKey < val.dKey) {
-          VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen);
-          i += 1;
-        }
-      }
-
-      valuePairAssign(pList[i], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage);
+      valuePairAssign(pList[0], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage);
+      taosheapadjust((void *) pList, sizeof(tValuePair **), 0, maxLen - 1, (const void *) &type, topBotComparFn, (const void *) &pTagInfo->tagsLen, topBotSwapFn, 0);
     }
   }
 }
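The hunk above replaces the old insertion loops with a heap: while fewer than maxLen values have been collected, each new value is appended and the array is re-heapified as a min-root heap (last argument 0), so pList[0] always holds the smallest value currently kept; once the buffer is full, an incoming value only displaces that root when it is larger, followed by a single sift-down through taosheapadjust. A minimal standalone sketch of the same top-k pattern on plain int64_t values, using only the taosheapsort/taosheapadjust signatures introduced in this change (the helper names here are made up):

// Keep the k largest values seen so far in a min-root heap (maxroot = false).
static int32_t cmpInt64(const void *p1, const void *p2, const void *param) {
  (void)param;  // no extra comparator state needed for plain integers
  int64_t a = *(const int64_t *)p1, b = *(const int64_t *)p2;
  return (a == b) ? 0 : ((a > b) ? 1 : -1);
}

static void topKAdd(int64_t *heap, int32_t *num, int32_t k, int64_t v) {
  if (*num < k) {            // still filling: append, then re-establish the heap property
    heap[(*num)++] = v;
    taosheapsort(heap, sizeof(int64_t), *num, NULL, cmpInt64, NULL, NULL, false);
  } else if (v > heap[0]) {  // full: only the current minimum can be displaced
    heap[0] = v;
    taosheapadjust(heap, sizeof(int64_t), 0, k - 1, NULL, cmpInt64, NULL, NULL, false);
  }
}

do_bottom_function_add in the next hunk is the mirror image: maxroot = 1 keeps the largest retained value at the root and displaces it when a smaller one arrives.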
@@ -1834,57 +1835,17 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa
   assert(pList != NULL);

   if (pInfo->num < maxLen) {
-    if (pInfo->num == 0) {
-      valuePairAssign(pList[pInfo->num], type, (const char*) &val.i64, ts, pTags, pTagInfo, stage);
-    } else {
-      int32_t i = pInfo->num - 1;
-
-      if (IS_SIGNED_NUMERIC_TYPE(type)) {
-        while (i >= 0 && pList[i]->v.i64 < val.i64) {
-          VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen);
-          i -= 1;
-        }
-      } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
-        while (i >= 0 && pList[i]->v.u64 < val.u64) {
-          VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen);
-          i -= 1;
-        }
-      } else {
-        while (i >= 0 && pList[i]->v.dKey < val.dKey) {
-          VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen);
-          i -= 1;
-        }
-      }
-
-      valuePairAssign(pList[i + 1], type, (const char*)&val.i64, ts, pTags, pTagInfo, stage);
-    }
+    valuePairAssign(pList[pInfo->num], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage);
+
+    taosheapsort((void *) pList, sizeof(tValuePair **), pInfo->num + 1, (const void *) &type, topBotComparFn, (const void *) &pTagInfo->tagsLen, topBotSwapFn, 1);

     pInfo->num++;
   } else {
-    int32_t i = 0;
-
     if ((IS_SIGNED_NUMERIC_TYPE(type) && val.i64 < pList[0]->v.i64) ||
         (IS_UNSIGNED_NUMERIC_TYPE(type) && val.u64 < pList[0]->v.u64) ||
         (IS_FLOAT_TYPE(type) && val.dKey < pList[0]->v.dKey)) {
-      // find the appropriate the slot position
-      if (IS_SIGNED_NUMERIC_TYPE(type)) {
-        while (i + 1 < maxLen && pList[i + 1]->v.i64 > val.i64) {
-          VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen);
-          i += 1;
-        }
-      } if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
-        while (i + 1 < maxLen && pList[i + 1]->v.u64 > val.u64) {
-          VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen);
-          i += 1;
-        }
-      } else {
-        while (i + 1 < maxLen && pList[i + 1]->v.dKey > val.dKey) {
-          VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen);
-          i += 1;
-        }
-      }
-
-      valuePairAssign(pList[i], type, (const char*)&val.i64, ts, pTags, pTagInfo, stage);
+      valuePairAssign(pList[0], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage);
+      taosheapadjust((void *) pList, sizeof(tValuePair **), 0, maxLen - 1, (const void *) &type, topBotComparFn, (const void *) &pTagInfo->tagsLen, topBotSwapFn, 1);
     }
   }
 }
@@ -433,8 +433,8 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult
       existed = (pResultRowInfo->pResult[0] == (*p1));
       pResultRowInfo->curPos = 0;
     } else {  // check if current pResultRowInfo contains the existed pResultRow
-      SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid);
-      int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+      SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo);
+      int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes));
       if (index != NULL) {
         pResultRowInfo->curPos = (int32_t) *index;
         existed = true;

@@ -471,8 +471,8 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult
     pResultRowInfo->pResult[pResultRowInfo->size++] = pResult;

     int64_t index = pResultRowInfo->curPos;
-    SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid);
-    taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &index, POINTER_BYTES);
+    SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo);
+    taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes), &index, POINTER_BYTES);
   }

   // too many time window in query

@@ -1790,7 +1790,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf

   pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
   pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
-  pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t));
+  pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t) + POINTER_BYTES);
   pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv));

   pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize);
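The switch to SET_RES_EXT_WINDOW_KEY/GET_RES_EXT_WINDOW_KEY_LEN is also why keyBuf gains POINTER_BYTES above: the list-set key now carries the owning SResultRowInfo pointer alongside the table id and the group value, so identical values coming from different result-row infos no longer collide in the hash. The SET_RES_EXT_WINDOW_KEY macro itself is not part of this excerpt; the layout implied by the length macro would look roughly like the sketch below (the field order is an assumption).

/* Sketch only: what an extended result-window key plausibly packs,
 * given GET_RES_EXT_WINDOW_KEY_LEN(_l) == (_l) + sizeof(uint64_t) + POINTER_BYTES. */
#include <stdint.h>
#include <string.h>
static void setResExtWindowKey(char *keyBuf, uint64_t tid, const void *pData,
                               int16_t bytes, void *pResultRowInfo) {
  memcpy(keyBuf, &tid, sizeof(uint64_t));                      // table/query id
  memcpy(keyBuf + sizeof(uint64_t), pData, bytes);             // group-by value
  memcpy(keyBuf + sizeof(uint64_t) + bytes, &pResultRowInfo,   // owning SResultRowInfo
         sizeof(void *));
}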
@@ -34,6 +34,7 @@ typedef int (*__compar_fn_t) (const void *, const void *);
 #define elePtrAt(base, size, idx) (void *)((char *)(base) + (size) * (idx))

 typedef int32_t (*__ext_compar_fn_t)(const void *p1, const void *p2, const void *param);
+typedef void (*__ext_swap_fn_t)(void *p1, void *p2, const void *param);

 /**
  * quick sort, with the compare function requiring additional parameters support

@@ -59,6 +60,38 @@ void taosqsort(void *src, size_t numOfElem, size_t size, const void* param, __ex
  */
 void *taosbsearch(const void *key, const void *base, size_t nmemb, size_t size, __compar_fn_t fn, int flags);

+/**
+ * adjust heap
+ *
+ * @param base: the start address of array
+ * @param size: size of every item in array
+ * @param start: the first index
+ * @param end: the last index
+ * @param parcompar: parameters for compare function
+ * @param compar: user defined compare function
+ * @param parswap: parameters for swap function
+ * @param swap: user defined swap function, the default swap function doswap will be used if swap is NULL
+ * @param maxroot: if heap is max root heap
+ * @return
+ */
+void taosheapadjust(void *base, int32_t size, int32_t start, int32_t end, const void *parcompar, __ext_compar_fn_t compar, const void *parswap, __ext_swap_fn_t swap, bool maxroot);
+
+/**
+ * sort heap to make sure it is a max/min root heap
+ *
+ * @param base: the start address of array
+ * @param size: size of every item in array
+ * @param len: the length of array
+ * @param parcompar: parameters for compare function
+ * @param compar: user defined compare function
+ * @param parswap: parameters for swap function
+ * @param swap: user defined swap function, the default swap function doswap will be used if swap is NULL
+ * @param maxroot: if heap is max root heap
+ * @return
+ */
+void taosheapsort(void *base, int32_t size, int32_t len, const void *parcompar, __ext_compar_fn_t compar, const void *parswap, __ext_swap_fn_t swap, bool maxroot);
+
 #ifdef __cplusplus
 }
 #endif
@@ -225,3 +225,89 @@ void * taosbsearch(const void *key, const void *base, size_t nmemb, size_t size,

   return NULL;
 }
+
+void taosheapadjust(void *base, int32_t size, int32_t start, int32_t end, const void *parcompar, __ext_compar_fn_t compar, const void *parswap, __ext_swap_fn_t swap, bool maxroot)
+{
+  int32_t parent;
+  int32_t child;
+  char *buf;
+
+  if (base && size > 0 && compar) {
+    parent = start;
+    child = 2 * parent + 1;
+
+    if (swap == NULL) {
+      buf = calloc(1, size);
+      if (buf == NULL) {
+        return;
+      }
+    }
+
+    if (maxroot) {
+      while (child <= end) {
+        if (child + 1 <= end && (*compar)(elePtrAt(base, size, child), elePtrAt(base, size, child + 1), parcompar) < 0) {
+          child++;
+        }
+
+        if ((*compar)(elePtrAt(base, size, parent), elePtrAt(base, size, child), parcompar) > 0) {
+          break;
+        }
+
+        if (swap == NULL) {
+          doswap(elePtrAt(base, size, parent), elePtrAt(base, size, child), size, buf);
+        } else {
+          (*swap)(elePtrAt(base, size, parent), elePtrAt(base, size, child), parswap);
+        }
+
+        parent = child;
+        child = 2 * parent + 1;
+      }
+    } else {
+      while (child <= end) {
+        if (child + 1 <= end && (*compar)(elePtrAt(base, size, child), elePtrAt(base, size, child + 1), parcompar) > 0) {
+          child++;
+        }
+
+        if ((*compar)(elePtrAt(base, size, parent), elePtrAt(base, size, child), parcompar) < 0) {
+          break;
+        }
+
+        if (swap == NULL) {
+          doswap(elePtrAt(base, size, parent), elePtrAt(base, size, child), size, buf);
+        } else {
+          (*swap)(elePtrAt(base, size, parent), elePtrAt(base, size, child), parswap);
+        }
+
+        parent = child;
+        child = 2 * parent + 1;
+      }
+    }
+
+    if (swap == NULL) {
+      tfree(buf);
+    }
+  }
+}
+
+void taosheapsort(void *base, int32_t size, int32_t len, const void *parcompar, __ext_compar_fn_t compar, const void *parswap, __ext_swap_fn_t swap, bool maxroot)
+{
+  int32_t i;
+
+  if (base && size > 0) {
+    for (i = len / 2 - 1; i >= 0; i--) {
+      taosheapadjust(base, size, i, len - 1, parcompar, compar, parswap, swap, maxroot);
+    }
+  }
+
+  /*
+  char *buf = calloc(1, size);
+
+  for (i = len - 1; i > 0; i--) {
+    doswap(elePtrAt(base, size, 0), elePtrAt(base, size, i));
+    taosheapadjust(base, size, 0, i - 1, parcompar, compar, parswap, swap, maxroot);
+  }
+
+  tfree(buf);
+  */
+}
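Note that, with the tail of the routine commented out, taosheapsort only heapifies: after it returns, element 0 is the minimum (or the maximum when maxroot is true) and the rest of the array satisfies the heap property but is not fully sorted. A small self-contained usage sketch (the header name and test values are my own; the functions are the ones added above):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include "talgo.h"   // assumed location of the taosheapsort/taosheapadjust declarations patched above

static int32_t cmpInt(const void *p1, const void *p2, const void *param) {
  (void)param;
  int a = *(const int *)p1, b = *(const int *)p2;
  return (a > b) - (a < b);
}

int main(void) {
  int v[] = {7, 3, 9, 1, 5};
  // maxroot = false builds a min-root heap: v[0] becomes 1. The default doswap()
  // byte-swap is used because the swap callback is NULL.
  taosheapsort(v, sizeof(int), 5, NULL, cmpInt, NULL, NULL, false);
  printf("root = %d\n", v[0]);
  return 0;
}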
@@ -47,9 +47,6 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
     return terrno;
   }

-  char rootDir[TSDB_FILENAME_LEN] = {0};
-  sprintf(rootDir, "%s/vnode%d", tsVnodeDir, pVnodeCfg->cfg.vgId);
-
   char vnodeDir[TSDB_FILENAME_LEN] = "\0";
   snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d", pVnodeCfg->cfg.vgId);
   if (tfsMkdir(vnodeDir) < 0) {

@@ -63,23 +60,6 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
     return code;
   }

-  // STsdbCfg tsdbCfg = {0};
-  // tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId;
-  // tsdbCfg.cacheBlockSize = pVnodeCfg->cfg.cacheBlockSize;
-  // tsdbCfg.totalBlocks = pVnodeCfg->cfg.totalBlocks;
-  // tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile;
-  // tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep;
-  // tsdbCfg.keep1 = pVnodeCfg->cfg.daysToKeep1;
-  // tsdbCfg.keep2 = pVnodeCfg->cfg.daysToKeep2;
-  // tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock;
-  // tsdbCfg.maxRowsPerFileBlock = pVnodeCfg->cfg.maxRowsPerFileBlock;
-  // tsdbCfg.precision = pVnodeCfg->cfg.precision;
-  // tsdbCfg.compression = pVnodeCfg->cfg.compression;
-  // tsdbCfg.update = pVnodeCfg->cfg.update;
-  // tsdbCfg.cacheLastRow = pVnodeCfg->cfg.cacheLastRow;
-
-  // char tsdbDir[TSDB_FILENAME_LEN] = {0};
-  // sprintf(tsdbDir, "vnode/vnode%d/tsdb", pVnodeCfg->cfg.vgId);
   if (tsdbCreateRepo(pVnodeCfg->cfg.vgId) < 0) {
     vError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno));
     return TSDB_CODE_VND_INIT_FAILED;
@@ -75,7 +75,7 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <version>4.11</version>
+      <version>4.13.1</version>
       <scope>test</scope>
     </dependency>

@@ -87,14 +87,14 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <version>4.11</version>
+      <version>4.13.1</version>
       <scope>test</scope>
     </dependency>

     <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
-      <version>29.0-jre</version>
+      <version>30.0-jre</version>
     </dependency>

     <dependency>

@@ -40,7 +40,7 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <version>4.13</version>
+      <version>4.13.1</version>
       <scope>test</scope>
     </dependency>
@@ -4,6 +4,11 @@
   xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>

+  <properties>
+    <maven.compiler.source>1.8</maven.compiler.source>
+    <maven.compiler.target>1.8</maven.compiler.target>
+  </properties>
+
   <groupId>com.taosdata.demo</groupId>
   <artifactId>connectionPools</artifactId>
   <version>1.0-SNAPSHOT</version>

@@ -46,9 +51,9 @@
     </dependency>
     <!-- log4j -->
     <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <version>1.2.17</version>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-core</artifactId>
+      <version>2.14.1</version>
     </dependency>
     <!-- proxool -->
     <dependency>

@@ -108,4 +113,4 @@
     </plugins>
   </build>

 </project>
@@ -5,7 +5,8 @@ import com.taosdata.example.pool.C3p0Builder;
 import com.taosdata.example.pool.DbcpBuilder;
 import com.taosdata.example.pool.DruidPoolBuilder;
 import com.taosdata.example.pool.HikariCpBuilder;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;

 import javax.sql.DataSource;
 import java.sql.Connection;

@@ -17,7 +18,7 @@ import java.util.concurrent.TimeUnit;

 public class ConnectionPoolDemo {

-    private static Logger logger = Logger.getLogger(DruidPoolBuilder.class);
+    private static Logger logger = LogManager.getLogger(DruidPoolBuilder.class);
     private static final String dbName = "pool_test";

     private static String poolType = "hikari";

@@ -1,6 +1,7 @@
 package com.taosdata.example.common;

-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;

 import javax.sql.DataSource;
 import java.sql.Connection;

@@ -10,7 +11,7 @@ import java.util.Random;

 public class InsertTask implements Runnable {
     private final Random random = new Random(System.currentTimeMillis());
-    private static final Logger logger = Logger.getLogger(InsertTask.class);
+    private static final Logger logger = LogManager.getLogger(InsertTask.class);

     private final DataSource ds;
     private final String dbName;
@@ -68,7 +68,7 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <version>4.12</version>
+      <version>4.13.1</version>
       <scope>test</scope>
     </dependency>
   </dependencies>
@@ -4,7 +4,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.taosdata</groupId>
   <artifactId>taosdemo</artifactId>
-  <version>2.0</version>
+  <version>2.0.1</version>
   <name>taosdemo</name>
   <packaging>jar</packaging>
   <description>Demo project for TDengine</description>

@@ -81,20 +81,20 @@
     <dependency>
       <groupId>mysql</groupId>
       <artifactId>mysql-connector-java</artifactId>
-      <version>5.1.47</version>
+      <version>8.0.16</version>
       <scope>test</scope>
     </dependency>
     <!-- log4j -->
     <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <version>1.2.17</version>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-core</artifactId>
+      <version>2.14.1</version>
     </dependency>
     <!-- junit -->
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <version>4.12</version>
+      <version>4.13.1</version>
       <scope>test</scope>
     </dependency>
     <!-- lombok -->
@@ -8,7 +8,8 @@ import com.taosdata.taosdemo.service.SqlExecuteTask;
 import com.taosdata.taosdemo.service.SubTableService;
 import com.taosdata.taosdemo.service.SuperTableService;
 import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;

 import javax.sql.DataSource;
 import java.io.IOException;

@@ -20,7 +21,7 @@ import java.util.Map;

 public class TaosDemoApplication {

-    private static final Logger logger = Logger.getLogger(TaosDemoApplication.class);
+    private static final Logger logger = LogManager.getLogger(TaosDemoApplication.class);

     public static void main(String[] args) throws IOException {
         // read the configuration parameters
@@ -1,14 +1,15 @@
 package com.taosdata.taosdemo.dao;

 import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
 import org.springframework.jdbc.core.JdbcTemplate;

 import javax.sql.DataSource;
 import java.util.Map;

 public class DatabaseMapperImpl implements DatabaseMapper {
-    private static final Logger logger = Logger.getLogger(DatabaseMapperImpl.class);
+    private static final Logger logger = LogManager.getLogger(DatabaseMapperImpl.class);

     private final JdbcTemplate jdbcTemplate;

@@ -3,7 +3,8 @@ package com.taosdata.taosdemo.dao;
 import com.taosdata.taosdemo.domain.SubTableMeta;
 import com.taosdata.taosdemo.domain.SubTableValue;
 import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
 import org.springframework.jdbc.core.JdbcTemplate;

 import javax.sql.DataSource;

@@ -11,7 +12,7 @@ import java.util.List;

 public class SubTableMapperImpl implements SubTableMapper {

-    private static final Logger logger = Logger.getLogger(SubTableMapperImpl.class);
+    private static final Logger logger = LogManager.getLogger(SubTableMapperImpl.class);
     private final JdbcTemplate jdbcTemplate;

     public SubTableMapperImpl(DataSource dataSource) {

@@ -2,13 +2,14 @@ package com.taosdata.taosdemo.dao;

 import com.taosdata.taosdemo.domain.SuperTableMeta;
 import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
 import org.springframework.jdbc.core.JdbcTemplate;

 import javax.sql.DataSource;

 public class SuperTableMapperImpl implements SuperTableMapper {
-    private static final Logger logger = Logger.getLogger(SuperTableMapperImpl.class);
+    private static final Logger logger = LogManager.getLogger(SuperTableMapperImpl.class);
     private JdbcTemplate jdbcTemplate;

     public SuperTableMapperImpl(DataSource dataSource) {

@@ -3,13 +3,14 @@ package com.taosdata.taosdemo.dao;
 import com.taosdata.taosdemo.domain.TableMeta;
 import com.taosdata.taosdemo.domain.TableValue;
 import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
 import org.springframework.jdbc.core.JdbcTemplate;

 import java.util.List;

 public class TableMapperImpl implements TableMapper {
-    private static final Logger logger = Logger.getLogger(TableMapperImpl.class);
+    private static final Logger logger = LogManager.getLogger(TableMapperImpl.class);
     private JdbcTemplate template;

     @Override

@@ -8,7 +8,8 @@ import com.taosdata.taosdemo.domain.SubTableValue;
 import com.taosdata.taosdemo.domain.SuperTableMeta;
 import com.taosdata.taosdemo.service.data.SubTableMetaGenerator;
 import com.taosdata.taosdemo.service.data.SubTableValueGenerator;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;

 import javax.sql.DataSource;
 import java.util.ArrayList;

@@ -20,7 +21,7 @@ import java.util.stream.IntStream;
 public class SubTableService extends AbstractService {

     private SubTableMapper mapper;
-    private static final Logger logger = Logger.getLogger(SubTableService.class);
+    private static final Logger logger = LogManager.getLogger(SubTableService.class);

     public SubTableService(DataSource datasource) {
         this.mapper = new SubTableMapperImpl(datasource);
@@ -12,7 +12,7 @@ static void prepare_data(TAOS* taos) {
   result = taos_query(taos, "drop database if exists test;");
   taos_free_result(result);
   usleep(100000);
-  result = taos_query(taos, "create database test;");
+  result = taos_query(taos, "create database test precision 'us';");
   taos_free_result(result);
   usleep(100000);
   taos_select_db(taos, "test");

@@ -949,13 +949,45 @@ void verify_stream(TAOS* taos) {
   taos_close_stream(strm);
 }

+int32_t verify_schema_less(TAOS* taos) {
+  TAOS_RES *result;
+  result = taos_query(taos, "drop database if exists test;");
+  taos_free_result(result);
+  usleep(100000);
+  result = taos_query(taos, "create database test precision 'us';");
+  taos_free_result(result);
+  usleep(100000);
+
+  taos_select_db(taos, "test");
+  result = taos_query(taos, "create stable ste(ts timestamp, f int) tags(t1 bigint)");
+  taos_free_result(result);
+  usleep(100000);
+
+  char* lines[] = {
+    "st,t1=3i,t2=4,t3=\"t3\" c1=3i,c3=L\"passit\",c2=false,c4=4 1626006833639000000",
+    "st,t1=4i,t3=\"t4\",t2=5,t4=5 c1=3i,c3=L\"passitagin\",c2=true,c4=5,c5=5 1626006833640000000",
+    "ste,t2=5,t3=L\"ste\" c1=true,c2=4,c3=\"iam\" 1626056811823316532",
+    "st,t1=4i,t2=5,t3=\"t4\" c1=3i,c3=L\"passitagain\",c2=true,c4=5 1626006833642000000",
+    "ste,t2=5,t3=L\"ste2\" c3=\"iamszhou\",c4=false 1626056811843316532",
+    "ste,t2=5,t3=L\"ste2\" c3=\"iamszhou\",c4=false,c5=32b,c6=64s,c7=32w,c8=88.88f 1626056812843316532",
+    "st,t1=4i,t3=\"t4\",t2=5,t4=5 c1=3i,c3=L\"passitagin\",c2=true,c4=5,c5=5,c6=7u 1626006933640000000",
+    "stf,t1=4i,t3=\"t4\",t2=5,t4=5 c1=3i,c3=L\"passitagin\",c2=true,c4=5,c5=5,c6=7u 1626006933640000000",
+    "stf,t1=4i,t3=\"t4\",t2=5,t4=5 c1=3i,c3=L\"passitagin_stf\",c2=false,c5=5,c6=7u 1626006933641a"
+  };
+
+  // int code = taos_insert_lines(taos, lines , sizeof(lines)/sizeof(char*));
+  int code = taos_insert_lines(taos, &lines[0], 1);
+  code = taos_insert_lines(taos, &lines[1], 1);
+
+  return code;
+}
+
 int main(int argc, char *argv[]) {
   const char* host = "127.0.0.1";
   const char* user = "root";
   const char* passwd = "taosdata";

   taos_options(TSDB_OPTION_TIMEZONE, "GMT-8");

   TAOS* taos = taos_connect(host, user, passwd, "", 0);
   if (taos == NULL) {
     printf("\033[31mfailed to connect to db, reason:%s\033[0m\n", taos_errstr(taos));

@@ -967,6 +999,12 @@ int main(int argc, char *argv[]) {
   info = taos_get_client_info(taos);
   printf("client info: %s\n", info);

+  printf("************ verify schemaless *************\n");
+  int code = verify_schema_less(taos);
+  if (code == 0) {
+    return code;
+  }
+
   printf("************ verify query *************\n");
   verify_query(taos);
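For quick reference, the smallest driver of the schemaless API exercised above is a single call; each string follows the line-protocol layout (measurement,tag=value,... field=value,... timestamp). A sketch, assuming taos already points at a connected handle with a database selected:

// Minimal schemaless insert using taos_insert_lines(), as called in verify_schema_less() above.
char *one_line[] = {
  "st,t1=3i,t2=4 c1=3i,c2=false 1626006833639000000"
};
int code = taos_insert_lines(taos, one_line, 1);
if (code != 0) {
  printf("schemaless insert failed, code: %d\n", code);
}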
@@ -236,9 +236,10 @@ python3 ./test.py -f query/queryTscomputWithNow.py
 python3 ./test.py -f query/computeErrorinWhere.py
 python3 ./test.py -f query/queryTsisNull.py
 python3 ./test.py -f query/subqueryFilter.py
-# python3 ./test.py -f query/nestedQuery/queryInterval.py
+python3 ./test.py -f query/nestedQuery/queryInterval.py
 python3 ./test.py -f query/queryStateWindow.py
 python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py
+python3 ./test.py -f query/nestquery_last_row.py

 #stream
@@ -27,6 +27,7 @@ class TDTestCase:
         self.rowNum = 100
         self.ts = 1537146000000
         self.ts1 = 1537146000000000
+        self.ts2 = 1597146000000

     def run(self):

@@ -35,6 +36,8 @@ class TDTestCase:
         tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
                     col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20), tag1 int)''')
         tdSql.execute("create table test1 using test tags('beijing', 10)")
+        tdSql.execute("create table test2 using test tags('tianjing', 20)")
+        tdSql.execute("create table test3 using test tags('shanghai', 20)")
         tdSql.execute("create table gtest1 (ts timestamp, col1 float)")
         tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)")
         tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)")

@@ -48,6 +51,10 @@ class TDTestCase:
         for i in range(self.rowNum):
             tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                         % (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+            tdSql.execute("insert into test2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts2 + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+            tdSql.execute("insert into test3 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts2 + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

         tdSql.execute("insert into gtest1 values(1537146000000,0);")
         tdSql.execute("insert into gtest1 values(1537146001100,1.2);")

@@ -69,7 +76,7 @@ class TDTestCase:
         tdSql.execute("insert into gtest8 values(1537146000002,4);")
         tdSql.execute("insert into gtest8 values(1537146002202,4);")

-        # irate verifacation
+        # irate verification -- child table query
         tdSql.query("select irate(col1) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col1) from test1 interval(10s);")

@@ -99,6 +106,32 @@ class TDTestCase:
         tdSql.query("select irate(col2) from test1;")
         tdSql.checkData(0, 0, 1)

+        # irate verification -- super table query
+        tdSql.query("select irate(col1) from test group by tbname,loc,tag1;")
+        tdSql.checkData(0, 0, 1)
+        tdSql.checkData(1, 1, "test2")
+        tdSql.checkData(2, 2, "shanghai")
+
+        # add function testcase of twa: query from super table
+        tdSql.query("select twa(col1) from test group by tbname,loc,tag1;")
+        tdSql.checkData(0, 0, 50.5)
+        tdSql.checkData(1, 1, "test2")
+        tdSql.checkData(2, 2, "shanghai")
+
+        # error: function of irate and twa has invalid operation
+        tdSql.error("select irate(col7) from test group by tbname,loc,tag1;")
+        tdSql.error("select irate(col7) from test group by tbname;")
+        tdSql.error("select irate(col1) from test group by loc,tbname,tag1;")
+        # tdSql.error("select irate(col1) from test group by tbname,col7;")
+        tdSql.error("select irate(col1) from test group by col7,tbname;")
+        tdSql.error("select twa(col7) from test group by tbname,loc,tag1;")
+        tdSql.error("select twa(col7) from test group by tbname;")
+        tdSql.error("select twa(col1) from test group by loc,tbname,tag1;")
+        # tdSql.error("select twa(col1) from test group by tbname,col7;")
+        tdSql.error("select twa(col1) from test group by col7,tbname;")
+
+        # general table query
         tdSql.query("select irate(col1) from gtest1;")
         tdSql.checkData(0, 0, 1.2/1.1)
         tdSql.query("select irate(col1) from gtest2;")
@@ -0,0 +1,263 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
import random


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

        self.ts = 1600000000000
        self.num = 10

    def run(self):
        tdSql.prepare()
        # test case for https://jira.taosdata.com:18080/browse/TD-4735

        tdSql.execute('''create stable stable_1
                    (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
                    q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
                    q_float float , q_double double , q_ts timestamp)
                    tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
                    t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,
                    t_float float , t_double double , t_ts timestamp);''')
        tdSql.execute('''create table table_0 using stable_1
                    tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''')
        tdSql.execute('''create table table_1 using stable_1
                    tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 ,
                    'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''')
        tdSql.execute('''create table table_2 using stable_1
                    tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false ,
                    'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''')
        tdSql.execute('''create table table_3 using stable_1
                    tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''')
        tdSql.execute('''create table table_4 using stable_1
                    tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''')
        tdSql.execute('''create table table_5 using stable_1
                    tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''')
        # regular table
        tdSql.execute('''create table regular_table_1
                    (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
                    q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
                    q_float float , q_double double , q_ts timestamp) ;''')

        for i in range(self.num):
            tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
                          % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
            tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
                          % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
                             i, i, random.random(), random.random(), 1262304000001 + i))
            tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
                          % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
                             i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
            tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
                          % (self.ts + i, random.randint(-2147483647, 2147483647),
                             random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
                             random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
                             random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
            tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
                          % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
            tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
                          % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))

            tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
                          % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
            tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
                          % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
                             i, i, random.random(), random.random(), 1262304000001 + i))
            tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
                          % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
                             i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
            tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
                          % (self.ts + 300 + i, random.randint(-2147483647, 2147483647),
                             random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
                             random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
                             random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
            tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
                          % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i))
            tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
                          % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i))

        sql = '''select * from stable_1'''
        tdSql.query(sql)
        tdSql.checkRows(6*self.num)
        sql = '''select * from regular_table_1'''
        tdSql.query(sql)
        tdSql.checkRows(6*self.num)

        tdLog.info("=======last_row(*)========")
        sql = '''select last_row(*) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkData(0,1,self.num-1)
        sql = '''select last_row(*) from regular_table_1;'''
        tdSql.query(sql)
        tdSql.checkData(0,1,self.num-1)

        sql = '''select * from stable_1
                where loc = 'table_0';'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)
        sql = '''select last_row(*) from
                (select * from stable_1
                where loc = 'table_0');'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last_row(*) from
                (select * from stable_1);'''
        tdSql.query(sql)
        tdSql.checkData(0,1,self.num-1)
        tdSql.checkData(0,2,self.num-1)
        tdSql.checkData(0,3,self.num-1)
        tdSql.checkData(0,4,self.num-1)
        tdSql.checkData(0,5,'False')
        tdSql.checkData(0,6,'binary5.9')
        tdSql.checkData(0,7,'nchar5.9')
        tdSql.checkData(0,8,9.00000)
        tdSql.checkData(0,9,9.000000000)
        tdSql.checkData(0,10,'2020-09-13 20:26:40.009')
        tdSql.checkData(0,11,'table_5')
        tdSql.checkData(0,12,5)
        tdSql.checkData(0,13,5)
        tdSql.checkData(0,14,5)
        tdSql.checkData(0,15,5)
        tdSql.checkData(0,16,'True')
        tdSql.checkData(0,17,'binary5')
        tdSql.checkData(0,18,'nchar5')
        tdSql.checkData(0,21,'1970-01-01 08:00:00.000')

        sql = '''select * from regular_table_1 ;'''
        tdSql.query(sql)
        tdSql.checkRows(6*self.num)
        sql = '''select last_row(*) from
                (select * from regular_table_1);'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        tdSql.checkData(0,1,self.num-1)
        tdSql.checkData(0,2,self.num-1)
        tdSql.checkData(0,3,self.num-1)
        tdSql.checkData(0,4,self.num-1)
        tdSql.checkData(0,5,'False')
        tdSql.checkData(0,6,'binary5.9')
        tdSql.checkData(0,7,'nchar5.9')
        tdSql.checkData(0,8,9.00000)
        tdSql.checkData(0,9,9.000000000)
        tdSql.checkData(0,10,'2020-09-13 20:26:40.009')

        sql = '''select last_row(*) from
                ((select * from table_0) union all
                 (select * from table_1) union all
                 (select * from table_2));'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        tdSql.checkData(0,1,self.num-1)
        tdSql.checkData(0,2,self.num-1)
        tdSql.checkData(0,3,self.num-1)
        tdSql.checkData(0,4,self.num-1)
        tdSql.checkData(0,5,'False')
        tdSql.checkData(0,6,'binary.9')
        tdSql.checkData(0,7,'nchar.9')
        tdSql.checkData(0,8,9.00000)
        tdSql.checkData(0,9,9.000000000)
        tdSql.checkData(0,10,'2020-09-13 20:26:40.009')

        # bug 5055
        # sql = '''select last_row(*) from
        #         ((select * from stable_1) union all
        #          (select * from table_1) union all
        #          (select * from regular_table_1));'''
        # tdSql.query(sql)
        # tdSql.checkData(0,1,self.num-1)

        sql = '''select last_row(*) from
                ((select last_row(*) from table_0) union all
                 (select last_row(*) from table_1) union all
                 (select last_row(*) from table_2));'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        tdSql.checkData(0,1,self.num-1)
        tdSql.checkData(0,2,self.num-1)
        tdSql.checkData(0,3,self.num-1)
        tdSql.checkData(0,4,self.num-1)
        tdSql.checkData(0,5,'False')
        tdSql.checkData(0,6,'binary.9')
        tdSql.checkData(0,7,'nchar.9')
        tdSql.checkData(0,8,9.00000)
        tdSql.checkData(0,9,9.000000000)
        tdSql.checkData(0,10,'2020-09-13 20:26:40.009')

        # bug 5055
        # sql = '''select last_row(*) from
        #         ((select last_row(*) from stable_1) union all
        #          (select last_row(*) from table_1) union all
        #          (select last_row(*) from regular_table_1));'''
        # tdSql.query(sql)
        # tdSql.checkData(0,1,self.num-1)

        sql = '''select last_row(*) from
                ((select * from table_0 limit 5 offset 5) union all
                 (select * from table_1 limit 5 offset 5) union all
                 (select * from regular_table_1 limit 5 offset 5));'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        tdSql.checkData(0,1,self.num-1)
        tdSql.checkData(0,2,self.num-1)
        tdSql.checkData(0,3,self.num-1)
        tdSql.checkData(0,4,self.num-1)
        tdSql.checkData(0,5,'False')
        tdSql.checkData(0,6,'binary.9')
        tdSql.checkData(0,7,'nchar.9')
        tdSql.checkData(0,8,9.00000)
        tdSql.checkData(0,9,9.000000000)
        tdSql.checkData(0,10,'2020-09-13 20:26:40.009')

        sql = '''select last_row(*) from
                (select * from stable_1)
                having q_int>5;'''
        tdLog.info(sql)
        tdSql.error(sql)
        try:
            tdSql.execute(sql)
            tdLog.exit(" having only works with group by")
        except Exception as e:
            tdLog.info(repr(e))
            tdLog.info("invalid operation: having only works with group by")

        # bug 5057
        # sql = '''select last_row(*) from
        #         (select * from (select * from stable_1))'''
        # tdLog.info(sql)
        # tdSql.error(sql)
        # try:
        #     tdSql.execute(sql)
        #     tdLog.exit(" core dumped")
        # except Exception as e:
        #     tdLog.info(repr(e))
        #     tdLog.info("core dumped")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -51,7 +51,7 @@ class TDTestCase:
         else:
             tdLog.info("taosdemo found in %s" % buildPath)
             binPath = buildPath + "/build/bin/"
-            os.system("%staosdemo -y -t %d -n %d" %
+            os.system("%staosdemo -y -t %d -n %d -b INT,INT,INT,INT" %
                       (binPath, self.numberOfTables, self.numberOfRecords))

             tdSql.execute("use test")
@@ -54,7 +54,7 @@ class TDTestCase:
             binPath = buildPath + "/build/bin/"

             if(threadID == 0):
-                os.system("%staosdemo -y -t %d -n %d" %
+                os.system("%staosdemo -y -t %d -n %d -b INT,INT,INT,INT -m t" %
                          (binPath, self.numberOfTables, self.numberOfRecords))
             if(threadID == 1):
                 time.sleep(2)
@@ -60,7 +60,7 @@ class TDTestCase:

         tdSql.execute("use test")
         tdSql.query(
-            "select count(*) from test.t%d" % (self.numberOfTables -1))
+            "select count(*) from test.d%d" % (self.numberOfTables -1))
         tdSql.checkData(0, 0, self.numberOfRecords)

     def stop(self):
@@ -28,6 +28,7 @@ class TDTestCase:
         tdSql.init(conn.cursor(), logSql)

     def getBuildPath(self):
+        global selfPath
         selfPath = os.path.dirname(os.path.realpath(__file__))

         if ("community" in selfPath):

@@ -53,7 +54,7 @@ class TDTestCase:
             tdLog.info("taosd found in %s" % buildPath)

         binPath = buildPath+ "/build/bin/"
-        testPath = buildPath[:buildPath.find("debug")]
+        testPath = selfPath+ "/../../../"
         walFilePath = testPath + "/sim/dnode1/data/mnode_bak/wal/"

         #new db and insert data
@@ -86,6 +86,18 @@ class TwoClients:
        tdSql.execute("alter table stb2_0 add column col2 binary(4)")
        tdSql.execute("alter table stb2_0 drop column col1")
        tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')")
+        tdSql.execute("drop dnode 10")
+        sleep(10)
+        os.system("rm -rf /var/lib/taos/*")
+        print("clear dnode chenhaoran02's data files")
+        os.system("nohup /usr/bin/taosd > /dev/null 2>&1 &")
+        print("start taosd")
+        sleep(10)
+        tdSql.execute("reset query cache ;")
+        tdSql.execute("create dnode chenhaoran02 ;")

        # stop taosd and compact wal file
@@ -0,0 +1,54 @@
system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect

print =============== step1
$db = testlp
$mte = ste
$mt = st
sql drop database $db -x step1
step1:
sql create database $db precision 'us'
sql use $db
sql create stable $mte (ts timestamp, f int) TAGS(t1 bigint)

line_insert st,t1=3i,t2=4,t3="t3" c1=3i,c3=L"passit",c2=false,c4=4 1626006833639000000
line_insert st,t1=4i,t3="t41",t2=5 c1=3i,c3=L"passiT",c2=true,c4=5 1626006833640000000
line_insert stf,t1=4i,t2=5,t3="t4" c1=3i,c3=L"passitagain",c2=true,c4=5 1626006833642000000
line_insert ste,t2=5,t3=L"ste" c1=true,c2=4,c3="iam" 1626056811823316532

sql select * from st
if $rows != 2 then
  return -1
endi

if $data00 != @21-07-11 20:33:53.639000@ then
  return -1
endi

if $data03 != @passit@ then
  return -1
endi

sql select * from stf
if $rows != 1 then
  return -1
endi

sql select * from ste
if $rows != 1 then
  return -1
endi

#print =============== clear
sql drop database $db
sql show databases
if $rows != 0 then
  return -1
endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT
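Each `line_insert` line in the new script above is passed verbatim to the C client's schemaless insert entry point, `taos_insert_lines`, which the sim executor added later in this commit calls with a one-element array. A minimal standalone sketch of that call path, assuming a locally running taosd and reusing the first record from the script (host, user, password and the surrounding program are illustrative, not part of this commit):

/* Sketch: write one line-protocol record through the C client.
 * "localhost"/"root"/"taosdata" are placeholder connection parameters. */
#include <stdio.h>
#include <taos.h>

int main(void) {
  // Assumes the target database (created as 'testlp' in the script above) already exists.
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "testlp", 0);
  if (taos == NULL) {
    printf("failed to connect to taosd\n");
    return 1;
  }

  // Same record the script writes into super table st.
  char *lines[] = {
      "st,t1=3i,t2=4,t3=\"t3\" c1=3i,c3=L\"passit\",c2=false,c4=4 1626006833639000000"};

  // 0 means success; any other value is a TSDB error code.
  int ret = taos_insert_lines(taos, lines, 1);
  printf("taos_insert_lines returned %d\n", ret);

  taos_close(taos);
  return ret == 0 ? 0 : 1;
}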
@@ -190,32 +190,32 @@ if $rows != 12800 then
return -1
endi

-sql select top(c1, 100), tbname, t1, t2 from select_tags_mt0;
+sql select top(c1, 80), tbname, t1, t2 from select_tags_mt0;
-if $rows != 100 then
+if $rows != 80 then
return -1
endi

-if $data00 != @70-01-01 08:03:30.100@ then
+if $data00 != @70-01-01 08:03:40.100@ then
return -1
endi

-if $data10 != @70-01-01 08:03:30.200@ then
+if $data10 != @70-01-01 08:03:40.200@ then
return -1
endi

-if $data01 != 110 then
+if $data01 != 111 then
return -1
endi

-if $data02 != @select_tags_tb11@ then
+if $data02 != @select_tags_tb12@ then
return -1
endi

-if $data03 != 11 then
+if $data03 != 12 then
return -1
endi

-if $data04 != @abc11@ then
+if $data04 != @abc12@ then
return -1
endi
@@ -248,8 +248,8 @@ if $data04 != @abc12@ then
return -1
endi

-sql select bottom(c1, 100), tbname, t1, t2 from select_tags_mt0;
+sql select bottom(c1, 72), tbname, t1, t2 from select_tags_mt0;
-if $rows != 100 then
+if $rows != 72 then
return -1
endi
@@ -87,6 +87,8 @@ enum {
  SIM_CMD_RESTFUL,
  SIM_CMD_TEST,
  SIM_CMD_RETURN,
+  SIM_CMD_LINE_INSERT,
+  SIM_CMD_LINE_INSERT_ERROR,
  SIM_CMD_END
};

@@ -172,6 +174,8 @@ bool simExecuteSqlCmd(SScript *script, char *option);
bool simExecuteSqlErrorCmd(SScript *script, char *rest);
bool simExecuteSqlSlowCmd(SScript *script, char *option);
bool simExecuteRestfulCmd(SScript *script, char *rest);
+bool simExecuteLineInsertCmd(SScript *script, char *option);
+bool simExecuteLineInsertErrorCmd(SScript *script, char *option);
void simVisuallizeOption(SScript *script, char *src, char *dst);

#endif
@@ -1067,3 +1067,49 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) {

  return false;
}
+
+bool simExecuteLineInsertCmd(SScript *script, char *rest) {
+  char buf[TSDB_MAX_BINARY_LEN];
+
+  simVisuallizeOption(script, rest, buf);
+  rest = buf;
+
+  SCmdLine *line = &script->lines[script->linePos];
+
+  simInfo("script:%s, %s", script->fileName, rest);
+  simLogSql(buf, true);
+  char *  lines[] = {rest};
+  int32_t ret = taos_insert_lines(script->taos, lines, 1);
+  if (ret == TSDB_CODE_SUCCESS) {
+    simDebug("script:%s, taos:%p, %s executed. success.", script->fileName, script->taos, rest);
+    script->linePos++;
+    return true;
+  } else {
+    sprintf(script->error, "lineNum: %d. line: %s failed, ret:%d:%s", line->lineNum, rest,
+            ret & 0XFFFF, tstrerror(ret));
+    return false;
+  }
+}
+
+bool simExecuteLineInsertErrorCmd(SScript *script, char *rest) {
+  char buf[TSDB_MAX_BINARY_LEN];
+
+  simVisuallizeOption(script, rest, buf);
+  rest = buf;
+
+  SCmdLine *line = &script->lines[script->linePos];
+
+  simInfo("script:%s, %s", script->fileName, rest);
+  simLogSql(buf, true);
+  char *  lines[] = {rest};
+  int32_t ret = taos_insert_lines(script->taos, lines, 1);
+  if (ret == TSDB_CODE_SUCCESS) {
+    sprintf(script->error, "script:%s, taos:%p, %s executed. expect failed, but success.", script->fileName, script->taos, rest);
+    script->linePos++;
+    return false;
+  } else {
+    simDebug("lineNum: %d. line: %s failed, ret:%d:%s. Expect failed, so success", line->lineNum, rest,
+             ret & 0XFFFF, tstrerror(ret));
+    return true;
+  }
+}
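The two executors above are mirror images: `simExecuteLineInsertCmd` treats a zero return from `taos_insert_lines` as success, while `simExecuteLineInsertErrorCmd` reports an error when the insert unexpectedly succeeds and passes when it is rejected. A hedged sketch of the same inverted check outside the sim framework (the helper name and the deliberately malformed record are illustrative assumptions; connection setup is omitted):

/* Negative-test pattern mirroring simExecuteLineInsertErrorCmd:
 * the insert is expected to be rejected, so a nonzero code counts as a pass. */
#include <stdio.h>
#include <taos.h>

/* Returns 1 when the (assumed invalid) record is rejected, 0 otherwise. */
static int expect_line_insert_failure(TAOS *taos) {
  // Field set and timestamp are missing -- assumed to violate the line protocol.
  char *bad_lines[] = {"st,t1=3i"};

  int ret = taos_insert_lines(taos, bad_lines, 1);
  if (ret == 0) {
    printf("expected failure, but the insert succeeded\n");
    return 0;
  }
  printf("insert rejected as expected, code %d\n", ret & 0xFFFF);
  return 1;
}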
@@ -838,6 +838,38 @@ bool simParseRunBackCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
  return true;
}
+
+bool simParseLineInsertCmd(char* rest, SCommand* pCmd, int32_t lineNum) {
+  int32_t expLen;
+
+  rest++;
+  cmdLine[numOfLines].cmdno = SIM_CMD_LINE_INSERT;
+  cmdLine[numOfLines].lineNum = lineNum;
+  cmdLine[numOfLines].optionOffset = optionOffset;
+  expLen = (int32_t)strlen(rest);
+  memcpy(optionBuffer + optionOffset, rest, expLen);
+  optionOffset += expLen + 1;
+  *(optionBuffer + optionOffset - 1) = 0;
+
+  numOfLines++;
+  return true;
+}
+
+bool simParseLineInsertErrorCmd(char* rest, SCommand* pCmd, int32_t lineNum) {
+  int32_t expLen;
+
+  rest++;
+  cmdLine[numOfLines].cmdno = SIM_CMD_LINE_INSERT_ERROR;
+  cmdLine[numOfLines].lineNum = lineNum;
+  cmdLine[numOfLines].optionOffset = optionOffset;
+  expLen = (int32_t)strlen(rest);
+  memcpy(optionBuffer + optionOffset, rest, expLen);
+  optionOffset += expLen + 1;
+  *(optionBuffer + optionOffset - 1) = 0;
+
+  numOfLines++;
+  return true;
+}

void simInitsimCmdList() {
  int32_t cmdno;
  memset(simCmdList, 0, SIM_CMD_END * sizeof(SCommand));

@@ -1049,4 +1081,20 @@ void simInitsimCmdList() {
  simCmdList[cmdno].parseCmd = simParseReturnCmd;
  simCmdList[cmdno].executeCmd = simExecuteReturnCmd;
  simAddCmdIntoHash(&(simCmdList[cmdno]));
+
+  cmdno = SIM_CMD_LINE_INSERT;
+  simCmdList[cmdno].cmdno = cmdno;
+  strcpy(simCmdList[cmdno].name, "line_insert");
+  simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name);
+  simCmdList[cmdno].parseCmd = simParseLineInsertCmd;
+  simCmdList[cmdno].executeCmd = simExecuteLineInsertCmd;
+  simAddCmdIntoHash(&(simCmdList[cmdno]));
+
+  cmdno = SIM_CMD_LINE_INSERT_ERROR;
+  simCmdList[cmdno].cmdno = cmdno;
+  strcpy(simCmdList[cmdno].name, "line_insert_error");
+  simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name);
+  simCmdList[cmdno].parseCmd = simParseLineInsertErrorCmd;
+  simCmdList[cmdno].executeCmd = simExecuteLineInsertErrorCmd;
+  simAddCmdIntoHash(&(simCmdList[cmdno]));
}