diff --git a/Jenkinsfile b/Jenkinsfile
index d007adf736..976812bd0a 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -174,14 +174,13 @@ pipeline {
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
- catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
- sh '''
- cd ${WKC}/tests/pytest
- rm -rf /var/lib/taos/*
- rm -rf /var/log/taos/*
- ./handle_crash_gen_val_log.sh
- '''
- }
+
+ sh '''
+ cd ${WKC}/tests/pytest
+ rm -rf /var/lib/taos/*
+ rm -rf /var/log/taos/*
+ ./handle_crash_gen_val_log.sh
+ '''
timeout(time: 45, unit: 'MINUTES'){
sh '''
date
@@ -226,6 +225,8 @@ pipeline {
./test-all.sh b4fq
cd ${WKC}/tests
./test-all.sh p4
+ cd ${WKC}/tests
+ ./test-all.sh full jdbc
date'''
}
}
diff --git a/deps/libcurl/lib/win64/libcurl_a.lib b/deps/libcurl/lib/win64/libcurl_a.lib
new file mode 100644
index 0000000000..69e9fe0a57
Binary files /dev/null and b/deps/libcurl/lib/win64/libcurl_a.lib differ
diff --git a/documentation20/cn/03.architecture/02.replica/docs.md b/documentation20/cn/03.architecture/02.replica/docs.md
index 87666a7212..8e1b1e3ab1 100644
--- a/documentation20/cn/03.architecture/02.replica/docs.md
+++ b/documentation20/cn/03.architecture/02.replica/docs.md
@@ -16,7 +16,7 @@ TDengine面向的是物联网场景,需要支持数据的实时复制,来最
## 基本概念和定义
-TDengine里存在vnode, mnode, vnode用来存储时序数据,mnode用来存储元数据。但从同步数据复制的模块来看,两者没有本质的区别,因此本文里的虚拟节点不仅包括vnode, 也包括mnode, vgoup也指mnode group, 除非特别注明。
+TDengine里存在vnode, mnode, vnode用来存储时序数据,mnode用来存储元数据。但从同步数据复制的模块来看,两者没有本质的区别,因此本文里的虚拟节点不仅包括vnode, 也包括mnode, vgroup也指mnode group, 除非特别注明。
**版本(version)**:
diff --git a/documentation20/cn/03.architecture/docs.md b/documentation20/cn/03.architecture/docs.md
index 89ecf5fce4..6f9f1699d4 100644
--- a/documentation20/cn/03.architecture/docs.md
+++ b/documentation20/cn/03.architecture/docs.md
@@ -218,9 +218,9 @@ TDengine 分布式架构的逻辑结构图如下:
TDengine存储的数据包括采集的时序数据以及库、表相关的元数据、标签数据等,这些数据具体分为三部分:
-- 时序数据:存放于vnode里,由data、head和last三个文件组成,数据量大,查询量取决于应用场景。容许乱序写入,但暂时不支持删除和更新操作。通过采用一个采集点一张表的模型,一个时间段的数据是连续存储,对单张表的写入是简单的追加操作,一次读,可以读到多条记录,这样保证对单个采集点的插入和查询操作,性能达到最优。
+- 时序数据:存放于vnode里,由data、head和last三个文件组成,数据量大,查询量取决于应用场景。容许乱序写入,但暂时不支持删除操作,并且仅在update参数设置为1时允许更新操作。通过采用一个采集点一张表的模型,一个时间段的数据是连续存储,对单张表的写入是简单的追加操作,一次读,可以读到多条记录,这样保证对单个采集点的插入和查询操作,性能达到最优。
- 标签数据:存放于vnode里的meta文件,支持增删改查四个标准操作。数据量不大,有N张表,就有N条记录,因此可以全内存存储。如果标签过滤操作很多,查询将十分频繁,因此TDengine支持多核多线程并发查询。只要计算资源足够,即使有数千万张表,过滤结果能毫秒级返回。
-- 其他元数据:存放于mnode里,包含系统节点、用户、DB、Table Schema等等,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。
+- 元数据:存放于mnode里,包含系统节点、用户、DB、Table Schema等信息,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。
与典型的NoSQL存储模型相比,TDengine将标签数据与时序数据完全分离存储,它具有两大优势:
@@ -315,7 +315,7 @@ Vnode会保持一个数据版本号(Version),对内存数据进行持久化存
3. 在线的虚拟节点数过半,而且有虚拟节点是slave的话,该虚拟节点自动成为master
4. 对于2和3,如果多个虚拟节点满足成为master的要求,那么虚拟节点组的节点列表里,最前面的选为master
-更多的关于数据复制的流程,请见[TDengine 2.0数据复制模块设计](https://www.taosdata.com/cn/documentation/replica/)。
+更多的关于数据复制的流程,请见[TDengine 2.0数据复制模块设计](https://www.taosdata.com/cn/documentation/architecture/replica/)。
### 同步复制
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index e473588782..f3c2363d6c 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -723,9 +723,9 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间
### 重要配置项
-下面仅列出一些与RESTFul接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效
+下面仅列出一些与RESTful接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效
-- httpPort: 对外提供RESTFul服务的端口号,默认绑定到6041
+- httpPort: 对外提供RESTful服务的端口号,默认绑定到6041
- httpMaxThreads: 启动的线程数量,默认为2
- restfulRowLimit: 返回结果集(JSON格式)的最大条数,默认值为10240
- httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式
diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md
index bb28bd63b9..6a74ee382f 100644
--- a/documentation20/cn/09.connections/docs.md
+++ b/documentation20/cn/09.connections/docs.md
@@ -152,3 +152,10 @@ TDengine客户端暂不支持如下函数:
- dbListTables(conn):显示连接中的所有表
+## DataX
+
+[DataX](https://github.com/alibaba/DataX) 是阿里巴巴集团开源的一款通用离线数据采集/同步工具,能够简单、高效地接入 TDengine 进行数据写入和读取。
+
+* 数据读取集成的方法请参见 [TSDBReader 插件文档](https://github.com/alibaba/DataX/blob/master/tsdbreader/doc/tsdbreader.md)
+* 数据写入集成的方法请参见 [TSDBWriter 插件文档](https://github.com/alibaba/DataX/blob/master/tsdbwriter/doc/tsdbhttpwriter.md)
+
diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md
index 8d341ebecf..15ac449c1a 100644
--- a/documentation20/cn/10.cluster/docs.md
+++ b/documentation20/cn/10.cluster/docs.md
@@ -59,7 +59,7 @@ arbitrator ha.taosdata.com:6042
| 8 | charset | 字符集编码 |
| 9 | balance | 是否启动负载均衡 |
| 10 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 |
-| 11 | maxVgroupsPerDb | 每个DB中 能够使用的最大vnode个数 |
+| 11 | maxVgroupsPerDb | 每个DB中能够使用的最大vgroup个数 |
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index 2543cd58ef..86ad8e5bb9 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -6,7 +6,7 @@
### 内存需求
-每个DB可以创建固定数目的vnode,默认与CPU核数相同,可通过maxVgroupsPerDb配置;每个vnode会占用固定大小的内存(大小与数据库的配置参数blocks和cache有关);每个Table会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个DB需要的系统内存可通过如下公式计算:
+每个DB可以创建固定数目的vgroup,默认与CPU核数相同,可通过maxVgroupsPerDb配置;vgroup中的每个副本会是一个vnode;每个vnode会占用固定大小的内存(大小与数据库的配置参数blocks和cache有关);每个Table会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个DB需要的系统内存可通过如下公式计算:
```
Memory Size = maxVgroupsPerDb * (blocks * cache + 10Mb) + numOfTables * (tagSizePerTable + 0.5Kb)
@@ -111,15 +111,16 @@ taosd -C
- days:一个数据文件存储数据的时间跨度,单位为天,默认值:10。
- keep:数据库中数据保留的天数,单位为天,默认值:3650。
-- minRows: 文件块中记录的最小条数,单位为条,默认值:100。
-- maxRows: 文件块中记录的最大条数,单位为条,默认值:4096。
-- comp: 文件压缩标志位,0:关闭,1:一阶段压缩,2:两阶段压缩。默认值:2。
-- walLevel:WAL级别。1:写wal, 但不执行fsync; 2:写wal, 而且执行fsync。默认值:1。
+- minRows:文件块中记录的最小条数,单位为条,默认值:100。
+- maxRows:文件块中记录的最大条数,单位为条,默认值:4096。
+- comp:文件压缩标志位,0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。
+- walLevel:WAL级别。1:写wal,但不执行fsync;2:写wal,而且执行fsync。默认值:1。
- fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。
-- cache: 内存块的大小,单位为兆字节(MB),默认值:16。
-- blocks: 每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。
+- cache:内存块的大小,单位为兆字节(MB),默认值:16。
+- blocks:每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。
- replica:副本个数,取值范围:1-3。单位为个,默认值:1
- precision:时间戳精度标识,ms表示毫秒,us表示微秒。默认值:ms
+- cacheLast:是否在内存中缓存子表 last_row,0:关闭;1:开启。默认值:0。(从 2.0.11 版本开始支持此参数)
对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL:
@@ -137,7 +138,7 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数
- offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*10(即10天)。
- statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。
- maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。
-- maxVgroupsPerDb: 每个数据库中能够使用的最大vnode个数。
+- maxVgroupsPerDb: 每个数据库中能够使用的最大vgroup个数。
- arbitrator: 系统中裁决器的end point,缺省为空。
- timezone、locale、charset 的配置见客户端配置。
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 3aee7da7db..33321348bb 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -42,17 +42,19 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
| | 类型 | Bytes | 说明 |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 |
-| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31用作Null |
-| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63用于NULL |
-| 4 | FLOAT | 4 | 浮点型,有效位数6-7,范围 [-3.4E38, 3.4E38] |
-| 5 | DOUBLE | 8 | 双精度浮点型,有效位数15-16,范围 [-1.7E308, 1.7E308] |
-| 6 | BINARY | 自定义 | 用于记录字符串,理论上,最长可以有16374字节,但由于每行数据最多16K字节,实际上限一般小于理论值。 binary仅支持字符串输入,字符串两端使用单引号引用,否则英文全部自动转化为小写。使用时须指定大小,如binary(20)定义了最长为20个字符的字符串,每个字符占1byte的存储空间。如果用户字符串超出20字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示, 即 **\’**。 |
-| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768用于NULL |
-| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128用于NULL |
-| 9 | BOOL | 1 | 布尔型,{true, false} |
-| 10 | NCHAR | 自定义 | 用于记录非ASCII字符串,如中文字符。每个nchar字符占用4bytes的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 **\’**。nchar使用时须指定字符串大小,类型为nchar(10)的列表示此列的字符串最多存储10个nchar字符,会固定占用40bytes的空间。如用户字符串长度超出声明长度,则将会报错。 |
+| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31 用作 NULL |
+| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL |
+| 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
+| 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
+| 6 | BINARY | 自定义 | 用于记录 ASCII 型字符串。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。 binary 仅支持字符串输入,字符串两端使用单引号引用,否则英文全部自动转化为小写。使用时须指定大小,如 binary(20) 定义了最长为 20 个字符的字符串,每个字符占 1 byte 的存储空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 |
+| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用于 NULL |
+| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL |
+| 9 | BOOL | 1 | 布尔型,{true, false} |
+| 10 | NCHAR | 自定义 | 用于记录非 ASCII 型字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
-**Tips**: TDengine对SQL语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
+**Tips**:
+1. TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
+2. 应避免使用 BINARY 类型来保存非 ASCII 型的字符串,否则容易导致数据乱码等错误。正确的做法是使用 NCHAR 类型来保存中文字符。
## 数据库管理
@@ -96,27 +98,32 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
```mysql
ALTER DATABASE db_name COMP 2;
```
- COMP参数是指修改数据库文件压缩标志位,取值范围为[0, 2]. 0表示不压缩,1表示一阶段压缩,2表示两阶段压缩。
+ COMP 参数是指修改数据库文件压缩标志位,缺省值为 2,取值范围为 [0, 2]。0 表示不压缩,1 表示一阶段压缩,2 表示两阶段压缩。
```mysql
ALTER DATABASE db_name REPLICA 2;
```
- REPLICA参数是指修改数据库副本数,取值范围[1, 3]。在集群中使用,副本数必须小于或等于dnode的数目。
+ REPLICA 参数是指修改数据库副本数,取值范围 [1, 3]。在集群中使用,副本数必须小于或等于 DNODE 的数目。
```mysql
ALTER DATABASE db_name KEEP 365;
```
- KEEP参数是指修改数据文件保存的天数,缺省值为3650,取值范围[days, 365000],必须大于或等于days参数值。
+ KEEP 参数是指修改数据文件保存的天数,缺省值为 3650,取值范围 [days, 365000],必须大于或等于 days 参数值。
```mysql
ALTER DATABASE db_name QUORUM 2;
```
- QUORUM参数是指数据写入成功所需要的确认数。取值范围[1, 3]。对于异步复制,quorum设为1,具有master角色的虚拟节点自己确认即可。对于同步复制,需要至少大于等于2。原则上,Quorum >=1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。
+ QUORUM 参数是指数据写入成功所需要的确认数,取值范围 [1, 3]。对于异步复制,quorum 设为 1,具有 master 角色的虚拟节点自己确认即可。对于同步复制,需要至少大于等于 2。原则上,Quorum >= 1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。
```mysql
ALTER DATABASE db_name BLOCKS 100;
```
- BLOCKS参数是每个VNODE (TSDB) 中有多少cache大小的内存块,因此一个VNODE的用的内存大小粗略为(cache * blocks)。取值范围[3, 1000]。
+ BLOCKS 参数是每个 VNODE (TSDB) 中有多少 cache 大小的内存块,因此一个 VNODE 的用的内存大小粗略为(cache * blocks)。取值范围 [3, 1000]。
+
+ ```mysql
+ ALTER DATABASE db_name CACHELAST 0;
+ ```
+ CACHELAST 参数控制是否在内存中缓存数据子表的 last_row。缺省值为 0,取值范围 [0, 1]。其中 0 表示不启用、1 表示启用。(从 2.0.11 版本开始支持)
**Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。
@@ -344,9 +351,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
```
同时向表tb1_name和tb2_name中按列分别插入多条记录
- 注意:
- 1) 如果时间戳为0,系统将自动使用服务器当前时间作为该记录的时间戳;
- 2) 允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。
+ 注意:允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。
- **插入记录时自动建表**
```mysql
diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md
index 96ef8a4ec0..4e72cbb21a 100644
--- a/documentation20/cn/13.faq/docs.md
+++ b/documentation20/cn/13.faq/docs.md
@@ -109,11 +109,8 @@ Properties properties = new Properties();
properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
Connection = DriverManager.getConnection(url, properties);
```
-## 12.TDengine GO windows驱动的如何编译?
-请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2020/01/06/tdengine-go-windows驱动的编译/)
-
-## 13.JDBC报错: the excuted SQL is not a DML or a DDL?
+## 12.JDBC报错: the excuted SQL is not a DML or a DDL?
请更新至最新的JDBC驱动
```JAVA
@@ -124,15 +121,15 @@ Connection = DriverManager.getConnection(url, properties);
```
-## 14. taos connect failed, reason: invalid timestamp
+## 13. taos connect failed, reason: invalid timestamp
常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。
-## 15. 表名显示不全
+## 14. 表名显示不全
由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。
-## 16. 如何进行数据迁移?
+## 15. 如何进行数据迁移?
TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A移动机器B时,注意如下两件事:
@@ -140,7 +137,7 @@ TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A
- 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下,修复dnodeEps.json的dnodeId对应的FQDN,重启。确保机器内所有机器的此文件是完全相同的。
- 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。
-## 17. 如何在命令行程序 taos 中临时调整日志级别
+## 16. 如何在命令行程序 taos 中临时调整日志级别
为了调试方便,从 2.0.16 版本开始,命令行程序 taos 新增了与日志记录相关的两条指令:
diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm
index 3d57ece2ad..d24502a1cb 100644
--- a/packaging/deb/DEBIAN/prerm
+++ b/packaging/deb/DEBIAN/prerm
@@ -26,7 +26,6 @@ else
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || :
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index 850c636940..36870b2ebe 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -51,7 +51,6 @@ cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin
-cp ${compile_dir}/build/bin/taosdemox ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec
index a080876c60..92c917cb3d 100644
--- a/packaging/rpm/tdengine.spec
+++ b/packaging/rpm/tdengine.spec
@@ -62,7 +62,6 @@ cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepat
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin
-cp %{_compiledir}/build/bin/taosdemox %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
@@ -140,7 +139,6 @@ if [ $1 -eq 0 ];then
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || :
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 0fed7b531f..dca3dd2ff6 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -179,7 +179,6 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
@@ -191,7 +190,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
[ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
- [ -x ${install_main_dir}/bin/taosdemox ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemox ${bin_link_dir}/taosdemox || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh
index d52428dc83..0a0a6633e3 100755
--- a/packaging/tools/install_client.sh
+++ b/packaging/tools/install_client.sh
@@ -86,7 +86,6 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
fi
${csudo} rm -f ${bin_link_dir}/rmtaos || :
@@ -98,7 +97,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
- [ -x ${install_main_dir}/bin/taosdemox ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemox ${bin_link_dir}/taosdemox || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
fi
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
diff --git a/packaging/tools/install_client_power.sh b/packaging/tools/install_client_power.sh
index 04fd23d5ab..8d7463366f 100755
--- a/packaging/tools/install_client_power.sh
+++ b/packaging/tools/install_client_power.sh
@@ -86,7 +86,6 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/power || :
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/powerdemo || :
- ${csudo} rm -f ${bin_link_dir}/powerdemox || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
fi
${csudo} rm -f ${bin_link_dir}/rmpower || :
@@ -98,7 +97,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
- [ -x ${install_main_dir}/bin/powerdemox ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemox ${bin_link_dir}/powerdemox || :
[ -x ${install_main_dir}/bin/powerdump ] && ${csudo} ln -s ${install_main_dir}/bin/powerdump ${bin_link_dir}/powerdump || :
fi
[ -x ${install_main_dir}/bin/remove_client_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_power.sh ${bin_link_dir}/rmpower || :
diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh
index 58e19b1399..ba6ace4009 100755
--- a/packaging/tools/install_power.sh
+++ b/packaging/tools/install_power.sh
@@ -174,7 +174,6 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerd || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
- ${csudo} rm -f ${bin_link_dir}/powerdemox || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@@ -185,7 +184,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
[ -x ${install_main_dir}/bin/powerd ] && ${csudo} ln -s ${install_main_dir}/bin/powerd ${bin_link_dir}/powerd || :
[ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
- [ -x ${install_main_dir}/bin/powerdemox ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemox ${bin_link_dir}/powerdemox || :
[ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh
index 52a4e05906..30e9fa51a7 100755
--- a/packaging/tools/makeclient.sh
+++ b/packaging/tools/makeclient.sh
@@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
- bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdemox\
+ bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo \
${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
@@ -55,7 +55,11 @@ else
fi
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
-cfg_dir="${top_dir}/packaging/cfg"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
install_files="${script_dir}/install_client.sh"
diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh
index 15f8994e94..181536b7f1 100755
--- a/packaging/tools/makeclient_power.sh
+++ b/packaging/tools/makeclient_power.sh
@@ -54,7 +54,11 @@ else
fi
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
-cfg_dir="${top_dir}/packaging/cfg"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
install_files="${script_dir}/install_client_power.sh"
@@ -77,7 +81,6 @@ if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
- cp ${build_dir}/bin/taosdemox ${install_dir}/bin/powerdemox
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index 267338ed06..36b1fe5bd8 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -36,13 +36,18 @@ if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
else
- bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdemox ${build_dir}/bin/tarbitrator\
+ bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
-cfg_dir="${top_dir}/packaging/cfg"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
+
install_files="${script_dir}/install.sh"
nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh
index 7227a08b7a..554e7884b1 100755
--- a/packaging/tools/makepkg_power.sh
+++ b/packaging/tools/makepkg_power.sh
@@ -42,7 +42,11 @@ fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
-cfg_dir="${top_dir}/packaging/cfg"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
install_files="${script_dir}/install_power.sh"
nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
@@ -78,7 +82,6 @@ else
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
- cp ${build_dir}/bin/taosdemox ${install_dir}/bin/powerdemox
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin
diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh
index c6ef73932d..8665b3fec3 100755
--- a/packaging/tools/post.sh
+++ b/packaging/tools/post.sh
@@ -96,7 +96,6 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@@ -107,7 +106,6 @@ function install_bin() {
[ -x ${bin_dir}/taos ] && ${csudo} ln -s ${bin_dir}/taos ${bin_link_dir}/taos || :
[ -x ${bin_dir}/taosd ] && ${csudo} ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || :
[ -x ${bin_dir}/taosdemo ] && ${csudo} ln -s ${bin_dir}/taosdemo ${bin_link_dir}/taosdemo || :
- [ -x ${bin_dir}/taosdemox ] && ${csudo} ln -s ${bin_dir}/taosdemox ${bin_link_dir}/taosdemox || :
[ -x ${bin_dir}/taosdump ] && ${csudo} ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || :
[ -x ${bin_dir}/set_core.sh ] && ${csudo} ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || :
}
diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index 8d96ef851c..2f2660d446 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -72,7 +72,6 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh
index e84cdd2620..7579162dc6 100755
--- a/packaging/tools/remove_client.sh
+++ b/packaging/tools/remove_client.sh
@@ -38,7 +38,6 @@ function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :
diff --git a/packaging/tools/remove_client_power.sh b/packaging/tools/remove_client_power.sh
index 1842e86a5b..580c46e207 100755
--- a/packaging/tools/remove_client_power.sh
+++ b/packaging/tools/remove_client_power.sh
@@ -38,7 +38,6 @@ function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
- ${csudo} rm -f ${bin_link_dir}/powerdemox || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/set_core || :
diff --git a/packaging/tools/remove_power.sh b/packaging/tools/remove_power.sh
index 59073105de..816869cf44 100755
--- a/packaging/tools/remove_power.sh
+++ b/packaging/tools/remove_power.sh
@@ -72,7 +72,6 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerd || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
- ${csudo} rm -f ${bin_link_dir}/powerdemox || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 73fb6fb64c..8c72eefc15 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -6404,10 +6404,12 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
nameSize = taosArrayGetSize(pNameList);
if (valSize != nameSize) {
+ tdDestroyKVRowBuilder(&kvRowBuilder);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
if (schemaSize < valSize) {
+ tdDestroyKVRowBuilder(&kvRowBuilder);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
@@ -6460,11 +6462,13 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
}
if (!findColumnIndex) {
+ tdDestroyKVRowBuilder(&kvRowBuilder);
return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken->z);
}
}
} else {
if (schemaSize != valSize) {
+ tdDestroyKVRowBuilder(&kvRowBuilder);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 6328c67940..cdf9aaea25 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -330,7 +330,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
pSql->cmd.submitSchema = 1;
}
- if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_FETCH || cmd == TSDB_SQL_UPDATE_TAGS_VAL) &&
+ if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) &&
(rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID ||
rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID ||
rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL ||
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 41cfb81442..380c438255 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -1848,7 +1848,7 @@ void doAppendData(SInterResult* pInterResult, TAOS_ROW row, int32_t numOfCols, S
TSKEY key = INT64_MIN;
for(int32_t i = 0; i < numOfCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) {
+ if (TSDB_COL_IS_TAG(pExpr->colInfo.flag) || pExpr->functionId == TSDB_FUNC_PRJ) {
continue;
}
@@ -1981,7 +1981,8 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
// set the parameters for the second round query process
SSqlCmd *pPCmd = &pParent->cmd;
SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(pPCmd, 0);
-
+ int32_t resRows = pSup->numOfRows;
+
if (pSup->numOfRows > 0) {
SBufferWriter bw = tbufInitWriter(NULL, false);
interResToBinary(&bw, pSup->pResult, pSup->tagLen);
@@ -1999,6 +2000,12 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
taos_free_result(pSql);
+ if (resRows == 0) {
+ pParent->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
+ (*pParent->fp)(pParent->param, pParent, 0);
+ return;
+ }
+
pQueryInfo1->round = 1;
tscDoQuery(pParent);
}
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 95cf28ec49..727ca9ad7f 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -2057,6 +2057,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pnCmd->parseFinished = 1;
pnCmd->pTableNameList = NULL;
pnCmd->pTableBlockHashList = NULL;
+ pnCmd->tagData.data = NULL;
+ pnCmd->tagData.dataLen = 0;
if (tscAddSubqueryInfo(pnCmd) != TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index 5bb4a285f4..349ccb35ac 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -1555,6 +1555,8 @@ int32_t taosCheckGlobalCfg() {
tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL;
}
+ uInfo(" check global cfg completed");
+ uInfo("==================================");
taosPrintGlobalCfg();
return 0;
diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml
index c2a086347a..25ed3d22f2 100755
--- a/src/connector/jdbc/pom.xml
+++ b/src/connector/jdbc/pom.xml
@@ -107,6 +107,7 @@
**/TaosInfoMonitorTest.java
**/FailOverTest.java
**/InvalidResultSetPointerTest.java
+ **/RestfulConnectionTest.java
true
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java
index d1dc4e26b6..4e005d1291 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java
@@ -12,7 +12,8 @@ import java.util.Properties;
public class RestfulConnectionTest {
private static final String host = "127.0.0.1";
- // private static final String host = "master";
+ // private static final String host = "master";
+
private static Connection conn;
@Test
diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h
index 07be521db4..8bb9cde935 100644
--- a/src/inc/ttokendef.h
+++ b/src/inc/ttokendef.h
@@ -229,6 +229,10 @@
+
+
+
+
#define TK_SPACE 300
#define TK_COMMENT 301
#define TK_ILLEGAL 302
diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt
index bf52784300..66e8cf7398 100644
--- a/src/kit/CMakeLists.txt
+++ b/src/kit/CMakeLists.txt
@@ -3,5 +3,4 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(shell)
ADD_SUBDIRECTORY(taosdemo)
-ADD_SUBDIRECTORY(taosdemox)
ADD_SUBDIRECTORY(taosdump)
diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c
index 31db1e7971..1f3eb7927c 100644
--- a/src/kit/shell/src/shellEngine.c
+++ b/src/kit/shell/src/shellEngine.c
@@ -341,7 +341,7 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
} else {
int num_rows_affacted = taos_affected_rows(pSql);
et = taosGetTimestampUs();
- printf("Query OK, %d row(s) affected (%.6fs)\n", num_rows_affacted, (et - st) / 1E6);
+ printf("Query OK, %d of %d row(s) in database (%.6fs)\n", num_rows_affacted, num_rows_affacted, (et - st) / 1E6);
}
printf("\n");
diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt
index 390e10cc26..7e85ec6dac 100644
--- a/src/kit/taosdemo/CMakeLists.txt
+++ b/src/kit/taosdemo/CMakeLists.txt
@@ -2,28 +2,54 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
-INCLUDE_DIRECTORIES(inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/include)
IF (TD_LINUX)
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdemo ${SRC})
+ #find_program(HAVE_CURL NAMES curl)
+ IF ((NOT TD_ARM_64) AND (NOT TD_ARM_32))
+ ADD_DEFINITIONS(-DTD_LOWA_CURL)
+ LINK_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/lib)
+ ADD_LIBRARY(curl STATIC IMPORTED)
+ SET_PROPERTY(TARGET curl PROPERTY IMPORTED_LOCATION ${TD_COMMUNITY_DIR}/deps/libcurl/lib/libcurl.a)
+ TARGET_LINK_LIBRARIES(taosdemo curl)
+ ENDIF ()
+
IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(taosdemo taos_static)
+ TARGET_LINK_LIBRARIES(taosdemo taos_static cJson)
ELSE ()
- TARGET_LINK_LIBRARIES(taosdemo taos)
+ TARGET_LINK_LIBRARIES(taosdemo taos cJson)
ENDIF ()
ELSEIF (TD_WINDOWS)
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdemo ${SRC})
- TARGET_LINK_LIBRARIES(taosdemo taos_static)
-ELSEIF (TD_DARWIN)
- AUX_SOURCE_DIRECTORY(. SRC)
- ADD_EXECUTABLE(taosdemo ${SRC})
-
+ SET_SOURCE_FILES_PROPERTIES(./taosdemo.c PROPERTIES COMPILE_FLAGS -w)
+ find_library(LIBCURL_A libcurl_a HINTS ${TD_COMMUNITY_DIR}/deps/libcurl/lib/win64)
IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(taosdemo taos_static)
+ TARGET_LINK_LIBRARIES(taosdemo taos_static cJson ${LIBCURL_A})
ELSE ()
- TARGET_LINK_LIBRARIES(taosdemo taos)
+ TARGET_LINK_LIBRARIES(taosdemo taos cJson ${LIBCURL_A})
ENDIF ()
+ELSEIF (TD_DARWIN)
+ # Darwin build is disabled for now: it is missing a few dependencies (e.g. the static libcurl). The original rules are kept below for reference:
+ # AUX_SOURCE_DIRECTORY(. SRC)
+ # ADD_EXECUTABLE(taosdemo ${SRC})
+ #
+ # #find_program(HAVE_CURL NAMES curl)
+ # IF ((NOT TD_ARM_64) AND (NOT TD_ARM_32))
+ # ADD_DEFINITIONS(-DTD_LOWA_CURL)
+ # LINK_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/lib)
+ # ADD_LIBRARY(curl STATIC IMPORTED)
+ # SET_PROPERTY(TARGET curl PROPERTY IMPORTED_LOCATION ${TD_COMMUNITY_DIR}/deps/libcurl/lib/libcurl.a)
+ # TARGET_LINK_LIBRARIES(taosdemo curl)
+ # ENDIF ()
+ #
+ # IF (TD_SOMODE_STATIC)
+ # TARGET_LINK_LIBRARIES(taosdemo taos_static cJson)
+ # ELSE ()
+ # TARGET_LINK_LIBRARIES(taosdemo taos cJson)
+ # ENDIF ()
ENDIF ()
+
diff --git a/src/kit/taosdemox/insert.json b/src/kit/taosdemo/insert.json
similarity index 100%
rename from src/kit/taosdemox/insert.json
rename to src/kit/taosdemo/insert.json
diff --git a/src/kit/taosdemox/query.json b/src/kit/taosdemo/query.json
similarity index 100%
rename from src/kit/taosdemox/query.json
rename to src/kit/taosdemo/query.json
diff --git a/src/kit/taosdemox/subscribe.json b/src/kit/taosdemo/subscribe.json
similarity index 100%
rename from src/kit/taosdemox/subscribe.json
rename to src/kit/taosdemo/subscribe.json
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 8544c8a5ea..f5f2a02fb3 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -13,7 +13,17 @@
* along with this program. If not, see .
*/
+
+/*
+ When a query returns an error in one thread, that thread must return instead of exiting; otherwise other threads may core dump.
+*/
+
#define _GNU_SOURCE
+#define CURL_STATICLIB
+
+#ifdef TD_LOWA_CURL
+#include "curl/curl.h"
+#endif
#ifdef LINUX
#include "os.h"
@@ -39,24 +49,178 @@
#include
#include
#include "os.h"
+
+ #pragma comment ( lib, "libcurl_a.lib" )
+ #pragma comment ( lib, "ws2_32.lib" )
+ #pragma comment ( lib, "winmm.lib" )
+ #pragma comment ( lib, "wldap32.lib" )
#endif
+#include "cJSON.h"
+
#include "taos.h"
#include "tutil.h"
+#ifdef WINDOWS
+#include
+// Some old MinGW/CYGWIN distributions don't define this:
+#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
+#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004
+#endif
+static HANDLE g_stdoutHandle;
+static DWORD g_consoleMode;
+
+void setupForAnsiEscape(void) {
+ DWORD mode = 0;
+ g_stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE);
+
+ if(g_stdoutHandle == INVALID_HANDLE_VALUE) {
+ exit(GetLastError());
+ }
+
+ if(!GetConsoleMode(g_stdoutHandle, &mode)) {
+ exit(GetLastError());
+ }
+
+ g_consoleMode = mode;
+
+ // Enable ANSI escape codes
+ mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING;
+
+ if(!SetConsoleMode(g_stdoutHandle, mode)) {
+ exit(GetLastError());
+ }
+}
+
+void resetAfterAnsiEscape(void) {
+ // Reset colors
+ printf("\x1b[0m");
+
+ // Reset console mode
+ if(!SetConsoleMode(g_stdoutHandle, g_consoleMode)) {
+ exit(GetLastError());
+ }
+}
+#else
+void setupForAnsiEscape(void) {}
+
+void resetAfterAnsiEscape(void) {
+ // Reset colors
+ printf("\x1b[0m");
+}
+#endif
+
extern char configDir[];
-#define BUFFER_SIZE 65536
-#define MAX_DB_NAME_SIZE 64
-#define MAX_TB_NAME_SIZE 64
-#define MAX_DATA_SIZE 16000
-#define MAX_NUM_DATATYPE 10
-#define OPT_ABORT 1 /* –abort */
-#define STRING_LEN 60000
-#define MAX_PREPARED_RAND 1000000
+#define INSERT_JSON_NAME "insert.json"
+#define QUERY_JSON_NAME "query.json"
+#define SUBSCRIBE_JSON_NAME "subscribe.json"
+
+#define INSERT_MODE 0
+#define QUERY_MODE 1
+#define SUBSCRIBE_MODE 2
+
+#define MAX_SQL_SIZE 65536
+#define BUFFER_SIZE (65536*2)
+#define MAX_DB_NAME_SIZE 64
+#define MAX_TB_NAME_SIZE 64
+#define MAX_DATA_SIZE 16000
+#define MAX_NUM_DATATYPE 10
+#define OPT_ABORT 1 /* –abort */
+#define STRING_LEN 60000
+#define MAX_PREPARED_RAND 1000000
+//#define MAX_SQL_SIZE 65536
+#define MAX_FILE_NAME_LEN 256
+
+#define MAX_SAMPLES_ONCE_FROM_FILE 10000
+#define MAX_NUM_DATATYPE 10
+
+#define MAX_DB_COUNT 8
+#define MAX_SUPER_TABLE_COUNT 8
+#define MAX_COLUMN_COUNT 1024
+#define MAX_TAG_COUNT 128
+
+#define MAX_QUERY_SQL_COUNT 10
+#define MAX_QUERY_SQL_LENGTH 256
+
+#define MAX_DATABASE_COUNT 256
+
+typedef enum CREATE_SUB_TALBE_MOD_EN {
+ PRE_CREATE_SUBTBL,
+ AUTO_CREATE_SUBTBL,
+ NO_CREATE_SUBTBL
+} CREATE_SUB_TALBE_MOD_EN;
+
+typedef enum TALBE_EXISTS_EN {
+ TBL_ALREADY_EXISTS,
+ TBL_NO_EXISTS,
+ TBL_EXISTS_BUTT
+} TALBE_EXISTS_EN;
+
+enum MODE {
+ SYNC,
+ ASYNC,
+ MODE_BUT
+};
+
+enum QUERY_TYPE {
+ NO_INSERT_TYPE,
+ INSERT_TYPE,
+ QUERY_TYPE_BUT
+} ;
+
+enum _show_db_index {
+ TSDB_SHOW_DB_NAME_INDEX,
+ TSDB_SHOW_DB_CREATED_TIME_INDEX,
+ TSDB_SHOW_DB_NTABLES_INDEX,
+ TSDB_SHOW_DB_VGROUPS_INDEX,
+ TSDB_SHOW_DB_REPLICA_INDEX,
+ TSDB_SHOW_DB_QUORUM_INDEX,
+ TSDB_SHOW_DB_DAYS_INDEX,
+ TSDB_SHOW_DB_KEEP_INDEX,
+ TSDB_SHOW_DB_CACHE_INDEX,
+ TSDB_SHOW_DB_BLOCKS_INDEX,
+ TSDB_SHOW_DB_MINROWS_INDEX,
+ TSDB_SHOW_DB_MAXROWS_INDEX,
+ TSDB_SHOW_DB_WALLEVEL_INDEX,
+ TSDB_SHOW_DB_FSYNC_INDEX,
+ TSDB_SHOW_DB_COMP_INDEX,
+ TSDB_SHOW_DB_CACHELAST_INDEX,
+ TSDB_SHOW_DB_PRECISION_INDEX,
+ TSDB_SHOW_DB_UPDATE_INDEX,
+ TSDB_SHOW_DB_STATUS_INDEX,
+ TSDB_MAX_SHOW_DB
+};
+
+// -----------------------------------------SHOW TABLES CONFIGURE -------------------------------------
+enum _show_stables_index {
+ TSDB_SHOW_STABLES_NAME_INDEX,
+ TSDB_SHOW_STABLES_CREATED_TIME_INDEX,
+ TSDB_SHOW_STABLES_COLUMNS_INDEX,
+ TSDB_SHOW_STABLES_METRIC_INDEX,
+ TSDB_SHOW_STABLES_UID_INDEX,
+ TSDB_SHOW_STABLES_TID_INDEX,
+ TSDB_SHOW_STABLES_VGID_INDEX,
+ TSDB_MAX_SHOW_STABLES
+};
+enum _describe_table_index {
+ TSDB_DESCRIBE_METRIC_FIELD_INDEX,
+ TSDB_DESCRIBE_METRIC_TYPE_INDEX,
+ TSDB_DESCRIBE_METRIC_LENGTH_INDEX,
+ TSDB_DESCRIBE_METRIC_NOTE_INDEX,
+ TSDB_MAX_DESCRIBE_METRIC
+};
+
+typedef struct {
+ char field[TSDB_COL_NAME_LEN + 1];
+ char type[16];
+ int length;
+ char note[128];
+} SColDes;
/* Used by main to communicate with parse_opt. */
-typedef struct DemoArguments {
+typedef struct SArguments_S {
+ char * metaFile;
char * host;
uint16_t port;
char * user;
@@ -78,370 +242,428 @@ typedef struct DemoArguments {
int num_of_tables;
int num_of_DPT;
int abort;
- int order;
- int rate;
+ int disorderRatio;
+ int disorderRange;
int method_of_delete;
char ** arg_list;
-} SDemoArguments;
+} SArguments;
+
+typedef struct SColumn_S {
+ char field[TSDB_COL_NAME_LEN + 1];
+ char dataType[MAX_TB_NAME_SIZE];
+ int dataLen;
+ char note[128];
+} StrColumn;
+
+typedef struct SSuperTable_S {
+ char sTblName[MAX_TB_NAME_SIZE];
+ int childTblCount;
+ bool superTblExists; // 0: no, 1: yes
+ bool childTblExists; // 0: no, 1: yes
+ int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
+ int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
+ char childTblPrefix[MAX_TB_NAME_SIZE];
+ char dataSource[MAX_TB_NAME_SIZE]; // rand_gen or sample
+ char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful
+ int insertRate; // 0: unlimit > 0 rows/s
+
+ int multiThreadWriteOneTbl; // 0: no, 1: yes
+ int numberOfTblInOneSql; // 0/1: one table, > 1: number of tbl
+ int rowsPerTbl; //
+ int disorderRatio; // 0: no disorder, >0: x%
+ int disorderRange; // ms or us by database precision
+ int maxSqlLen; //
+
+ int64_t insertRows; // 0: no limit
+ int timeStampStep;
+ char startTimestamp[MAX_TB_NAME_SIZE]; //
+ char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json
+ char sampleFile[MAX_FILE_NAME_LEN];
+ char tagsFile[MAX_FILE_NAME_LEN];
+
+ int columnCount;
+ StrColumn columns[MAX_COLUMN_COUNT];
+ int tagCount;
+ StrColumn tags[MAX_TAG_COUNT];
+
+ char* childTblName;
+ char* colsOfCreatChildTable;
+ int lenOfOneRow;
+ int lenOfTagOfOneRow;
+
+ char* sampleDataBuf;
+ int sampleDataBufSize;
+ //int sampleRowCount;
+ //int sampleUsePos;
+
+ int tagSource; // 0: rand, 1: tag sample
+ char* tagDataBuf;
+ int tagSampleCount;
+ int tagUsePos;
+
+ // statistics
+ int64_t totalRowsInserted;
+ int64_t totalAffectedRows;
+} SSuperTable;
+
+typedef struct {
+ char name[TSDB_DB_NAME_LEN + 1];
+ char create_time[32];
+ int32_t ntables;
+ int32_t vgroups;
+ int16_t replica;
+ int16_t quorum;
+ int16_t days;
+ char keeplist[32];
+ int32_t cache; //MB
+ int32_t blocks;
+ int32_t minrows;
+ int32_t maxrows;
+ int8_t wallevel;
+ int32_t fsync;
+ int8_t comp;
+ int8_t cachelast;
+ char precision[8]; // time resolution
+ int8_t update;
+ char status[16];
+} SDbInfo;
+
+typedef struct SDbCfg_S {
+// int maxtablesPerVnode;
+ int minRows;
+ int maxRows;
+ int comp;
+ int walLevel;
+ int fsync;
+ int replica;
+ int update;
+ int keep;
+ int days;
+ int cache;
+ int blocks;
+ int quorum;
+ char precision[MAX_TB_NAME_SIZE];
+} SDbCfg;
+
+typedef struct SDataBase_S {
+ char dbName[MAX_DB_NAME_SIZE];
+ int drop; // 0: use exists, 1: if exists, drop then new create
+ SDbCfg dbCfg;
+ int superTblCount;
+ SSuperTable superTbls[MAX_SUPER_TABLE_COUNT];
+} SDataBase;
+
+typedef struct SDbs_S {
+ char cfgDir[MAX_FILE_NAME_LEN];
+ char host[MAX_DB_NAME_SIZE];
+ uint16_t port;
+ char user[MAX_DB_NAME_SIZE];
+ char password[MAX_DB_NAME_SIZE];
+ char resultFile[MAX_FILE_NAME_LEN];
+ bool use_metric;
+ bool insert_only;
+ bool do_aggreFunc;
+ bool queryMode;
+
+ int threadCount;
+ int threadCountByCreateTbl;
+ int dbCount;
+ SDataBase db[MAX_DB_COUNT];
+
+ // statistics
+ int64_t totalRowsInserted;
+ int64_t totalAffectedRows;
+
+} SDbs;
+
+typedef struct SuperQueryInfo_S {
+ int rate; // 0: unlimit > 0 loop/s
+ int concurrent;
+ int sqlCount;
+ int subscribeMode; // 0: sync, 1: async
+ int subscribeInterval; // ms
+ int subscribeRestart;
+ int subscribeKeepProgress;
+ char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH];
+ char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
+ TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
+} SuperQueryInfo;
+
+typedef struct SubQueryInfo_S {
+ char sTblName[MAX_TB_NAME_SIZE];
+ int rate; // 0: unlimit > 0 loop/s
+ int threadCnt;
+ int subscribeMode; // 0: sync, 1: async
+ int subscribeInterval; // ms
+ int subscribeRestart;
+ int subscribeKeepProgress;
+ int childTblCount;
+ char childTblPrefix[MAX_TB_NAME_SIZE];
+ int sqlCount;
+ char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH];
+ char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
+ TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
+
+ char* childTblName;
+} SubQueryInfo;
+
+typedef struct SQueryMetaInfo_S {
+ char cfgDir[MAX_FILE_NAME_LEN];
+ char host[MAX_DB_NAME_SIZE];
+ uint16_t port;
+ char user[MAX_DB_NAME_SIZE];
+ char password[MAX_DB_NAME_SIZE];
+ char dbName[MAX_DB_NAME_SIZE];
+ char queryMode[MAX_TB_NAME_SIZE]; // taosc, restful
+
+ SuperQueryInfo superQueryInfo;
+ SubQueryInfo subQueryInfo;
+} SQueryMetaInfo;
+
+typedef struct SThreadInfo_S {
+ TAOS *taos;
+ #ifdef TD_LOWA_CURL
+ CURL *curl_handle;
+ #endif
+ int threadID;
+ char db_name[MAX_DB_NAME_SIZE];
+ char fp[4096];
+ char tb_prefix[MAX_TB_NAME_SIZE];
+ int start_table_id;
+ int end_table_id;
+ int data_of_rate;
+ int64_t start_time;
+ char* cols;
+ bool use_metric;
+ SSuperTable* superTblInfo;
+
+ // for async insert
+ tsem_t lock_sem;
+ int64_t counter;
+ int64_t st;
+ int64_t et;
+ int64_t lastTs;
+ int nrecords_per_request;
+
+ // statistics
+ int64_t totalRowsInserted;
+ int64_t totalAffectedRows;
+
+ // insert delay statistics
+ int64_t cntDelay;
+ int64_t totalDelay;
+ int64_t avgDelay;
+ int64_t maxDelay;
+ int64_t minDelay;
+
+} threadInfo;
+
+typedef struct curlMemInfo_S {
+ char *buf;
+ size_t sizeleft;
+ } curlMemInfo;
+
+
#ifdef LINUX
/* The options we understand. */
static struct argp_option options[] = {
- {0, 'h', "host", 0, "The host to connect to TDengine. Default is localhost.", 0},
- {0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 1},
- {0, 'u', "user", 0, "The TDengine user name to use when connecting to the server. Default is 'root'.", 2},
- #ifdef _TD_POWER_
- {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'powerdb'.", 3},
- #else
- {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
- #endif
- {0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
- {0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 3},
- {0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3},
- {0, 's', "sql file", 0, "The select sql file.", 3},
- {0, 'M', 0, 0, "Use metric flag.", 13},
- {0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 14},
- {0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC.", 6},
- {0, 'b', "type_of_cols", 0, "The data_type of columns: 'INT', 'TINYINT', 'SMALLINT', 'BIGINT', 'FLOAT', 'DOUBLE', 'BINARY'. Default is 'INT'.", 7},
- {0, 'w', "length_of_binary", 0, "The length of data_type 'BINARY'. Only applicable when type of cols is 'BINARY'. Default is 8", 8},
- {0, 'l', "num_of_cols_per_record", 0, "The number of columns per record. Default is 3.", 8},
- {0, 'T', "num_of_threads", 0, "The number of threads. Default is 10.", 9},
- {0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 1000.", 10},
- {0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 11},
- {0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 100000.", 12},
+ {0, 'f', "meta file", 0, "The meta data to the execution procedure, if use -f, all others options invalid. Default is NULL.", 0},
#ifdef _TD_POWER_
- {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/power/'.", 14},
+ {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/power/'.", 1},
+ {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'powerdb'.", 2},
#else
- {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 14},
- #endif
- {0, 'x', 0, 0, "Insert only flag.", 13},
- {0, 'y', 0, 0, "Default input yes for prompt", 13},
- {0, 'O', "order", 0, "Insert mode--0: In order, 1: Out of order. Default is in order.", 14},
- {0, 'R', "rate", 0, "Out of order data's rate--if order=1 Default 10, min: 0, max: 50.", 14},
- {0, 'D', "delete table", 0, "Delete data methods——0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database", 14},
+ {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 1},
+ {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 2},
+ #endif
+ {0, 'h', "host", 0, "The host to connect to TDengine. Default is localhost.", 2},
+ {0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 2},
+ {0, 'u', "user", 0, "The TDengine user name to use when connecting to the server. Default is 'root'.", 2},
+ {0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
+ {0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 4},
+ {0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 4},
+ {0, 's', "sql file", 0, "The select sql file.", 6},
+ {0, 'M', 0, 0, "Use metric flag.", 4},
+ {0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 6},
+ {0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC.", 4},
+ {0, 'b', "type_of_cols", 0, "The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP.", 4},
+ {0, 'w', "length_of_chartype", 0, "The length of data_type 'BINARY' or 'NCHAR'. Default is 16", 4},
+ {0, 'l', "num_of_cols_per_record", 0, "The number of columns per record. Default is 10.", 4},
+ {0, 'T', "num_of_threads", 0, "The number of threads. Default is 10.", 4},
+ // {0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 100.", 4},
+ {0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 4},
+ {0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 10000.", 4},
+ {0, 'x', 0, 0, "Not insert only flag.", 4},
+ {0, 'y', 0, 0, "Default input yes for prompt.", 4},
+ {0, 'O', "disorderRatio", 0, "Insert mode--0: In order, > 0: disorder ratio. Default is in order.", 4},
+ {0, 'R', "disorderRange", 0, "Out of order data's range, ms, default is 1000.", 4},
+ //{0, 'D', "delete database", 0, "delete database if exists. 0: no, 1: yes, default is 1", 5},
{0}};
- /* Parse a single option. */
- static error_t parse_opt(int key, char *arg, struct argp_state *state) {
- /* Get the input argument from argp_parse, which we
- know is a pointer to our arguments structure. */
- SDemoArguments *arguments = state->input;
- wordexp_t full_path;
- char **sptr;
- switch (key) {
- case 'h':
- arguments->host = arg;
- break;
- case 'p':
- arguments->port = atoi(arg);
- break;
- case 'u':
- arguments->user = arg;
- break;
- case 'P':
- arguments->password = arg;
- break;
- case 'o':
- arguments->output_file = arg;
- break;
- case 's':
- arguments->sqlFile = arg;
- break;
- case 'q':
- arguments->mode = atoi(arg);
- break;
- case 'T':
- arguments->num_of_threads = atoi(arg);
- break;
- case 'r':
- arguments->num_of_RPR = atoi(arg);
- break;
- case 't':
- arguments->num_of_tables = atoi(arg);
- break;
- case 'n':
- arguments->num_of_DPT = atoi(arg);
- break;
- case 'd':
- arguments->database = arg;
- break;
- case 'l':
- arguments->num_of_CPR = atoi(arg);
- break;
- case 'b':
- sptr = arguments->datatype;
- if (strstr(arg, ",") == NULL) {
- if (strcasecmp(arg, "INT") != 0 && strcasecmp(arg, "FLOAT") != 0 &&
- strcasecmp(arg, "TINYINT") != 0 && strcasecmp(arg, "BOOL") != 0 &&
- strcasecmp(arg, "SMALLINT") != 0 &&
- strcasecmp(arg, "BIGINT") != 0 && strcasecmp(arg, "DOUBLE") != 0 &&
- strcasecmp(arg, "BINARY") && strcasecmp(arg, "NCHAR")) {
+/* Parse a single option. */
+static error_t parse_opt(int key, char *arg, struct argp_state *state) {
+ // Get the input argument from argp_parse, which we know is a pointer to our arguments structure.
+ SArguments *arguments = state->input;
+ wordexp_t full_path;
+ char **sptr;
+ switch (key) {
+ case 'f':
+ arguments->metaFile = arg;
+ break;
+ case 'h':
+ arguments->host = arg;
+ break;
+ case 'p':
+ arguments->port = atoi(arg);
+ break;
+ case 'u':
+ arguments->user = arg;
+ break;
+ case 'P':
+ arguments->password = arg;
+ break;
+ case 'o':
+ arguments->output_file = arg;
+ break;
+ case 's':
+ arguments->sqlFile = arg;
+ break;
+ case 'q':
+ arguments->mode = atoi(arg);
+ break;
+ case 'T':
+ arguments->num_of_threads = atoi(arg);
+ break;
+ //case 'r':
+ // arguments->num_of_RPR = atoi(arg);
+ // break;
+ case 't':
+ arguments->num_of_tables = atoi(arg);
+ break;
+ case 'n':
+ arguments->num_of_DPT = atoi(arg);
+ break;
+ case 'd':
+ arguments->database = arg;
+ break;
+ case 'l':
+ arguments->num_of_CPR = atoi(arg);
+ break;
+ case 'b':
+ sptr = arguments->datatype;
+ if (strstr(arg, ",") == NULL) {
+ if (strcasecmp(arg, "INT") != 0 && strcasecmp(arg, "FLOAT") != 0 &&
+ strcasecmp(arg, "TINYINT") != 0 && strcasecmp(arg, "BOOL") != 0 &&
+ strcasecmp(arg, "SMALLINT") != 0 && strcasecmp(arg, "TIMESTAMP") != 0 &&
+ strcasecmp(arg, "BIGINT") != 0 && strcasecmp(arg, "DOUBLE") != 0 &&
+ strcasecmp(arg, "BINARY") != 0 && strcasecmp(arg, "NCHAR") != 0) {
+ argp_error(state, "Invalid data_type!");
+ }
+ sptr[0] = arg;
+ } else {
+ int index = 0;
+ char *dupstr = strdup(arg);
+ char *running = dupstr;
+ char *token = strsep(&running, ",");
+ while (token != NULL) {
+ if (strcasecmp(token, "INT") != 0 && strcasecmp(token, "FLOAT") != 0 &&
+ strcasecmp(token, "TINYINT") != 0 && strcasecmp(token, "BOOL") != 0 &&
+ strcasecmp(token, "SMALLINT") != 0 && strcasecmp(token, "TIMESTAMP") != 0 &&
+ strcasecmp(token, "BIGINT") != 0 && strcasecmp(token, "DOUBLE") != 0 &&
+ strcasecmp(token, "BINARY") != 0 && strcasecmp(token, "NCHAR") != 0) {
argp_error(state, "Invalid data_type!");
}
- sptr[0] = arg;
- } else {
- int index = 0;
- char *dupstr = strdup(arg);
- char *running = dupstr;
- char *token = strsep(&running, ",");
- while (token != NULL) {
- if (strcasecmp(token, "INT") != 0 &&
- strcasecmp(token, "FLOAT") != 0 &&
- strcasecmp(token, "TINYINT") != 0 &&
- strcasecmp(token, "BOOL") != 0 &&
- strcasecmp(token, "SMALLINT") != 0 &&
- strcasecmp(token, "BIGINT") != 0 &&
- strcasecmp(token, "DOUBLE") != 0 && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) {
- argp_error(state, "Invalid data_type!");
- }
- sptr[index++] = token;
- token = strsep(&running, ",");
- if (index >= MAX_NUM_DATATYPE) break;
- }
+ sptr[index++] = token;
+ token = strsep(&running, ",");
+ if (index >= MAX_NUM_DATATYPE) break;
}
- break;
- case 'w':
- arguments->len_of_binary = atoi(arg);
- break;
- case 'm':
- arguments->tb_prefix = arg;
- break;
- case 'M':
- arguments->use_metric = true;
- break;
- case 'x':
- arguments->insert_only = true;
- break;
- case 'y':
- arguments->answer_yes = true;
- break;
- case 'c':
- if (wordexp(arg, &full_path, 0) != 0) {
- fprintf(stderr, "Invalid path %s\n", arg);
- return -1;
- }
- taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
- wordfree(&full_path);
- break;
- case 'O':
- arguments->order = atoi(arg);
- if (arguments->order > 1 || arguments->order < 0)
- {
- arguments->order = 0;
- } else if (arguments->order == 1)
- {
- arguments->rate = 10;
- }
- break;
- case 'R':
- arguments->rate = atoi(arg);
- if (arguments->order == 1 && (arguments->rate > 50 || arguments->rate <= 0))
- {
- arguments->rate = 10;
- }
- break;
- case 'a':
- arguments->replica = atoi(arg);
- if (arguments->replica > 3 || arguments->replica < 1)
- {
- arguments->replica = 1;
- }
- break;
- case 'D':
- arguments->method_of_delete = atoi(arg);
- if (arguments->method_of_delete < 0 || arguments->method_of_delete > 3)
- {
- arguments->method_of_delete = 0;
- }
- break;
- case OPT_ABORT:
- arguments->abort = 1;
- break;
- case ARGP_KEY_ARG:
- /*arguments->arg_list = &state->argv[state->next-1];
- state->next = state->argc;*/
- argp_usage(state);
- break;
+ }
+ break;
+ case 'w':
+ arguments->len_of_binary = atoi(arg);
+ break;
+ case 'm':
+ arguments->tb_prefix = arg;
+ break;
+ case 'M':
+ arguments->use_metric = true;
+ break;
+ case 'x':
+ arguments->insert_only = true; break; /* 'break' restored: fall-through into 'y' (answer_yes) was unintended; the pre-patch code had it */
+ case 'y':
+ arguments->answer_yes = true;
+ break;
+ case 'c':
+ if (wordexp(arg, &full_path, 0) != 0) {
+ fprintf(stderr, "Invalid path %s\n", arg);
+ return -1;
+ }
+ taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
+ wordfree(&full_path);
+ break;
+ case 'O':
+ arguments->disorderRatio = atoi(arg);
+ if (arguments->disorderRatio < 0 || arguments->disorderRatio > 100)
+ {
+ argp_error(state, "Invalid disorder ratio, should be 0 ~ 100!");
+ }
+ break;
+ case 'R':
+ arguments->disorderRange = atoi(arg);
+ break;
+ case 'a':
+ arguments->replica = atoi(arg);
+ if (arguments->replica > 3 || arguments->replica < 1)
+ {
+ arguments->replica = 1;
+ }
+ break;
+ //case 'D':
+ // arguments->method_of_delete = atoi(arg);
+ // break;
+ case OPT_ABORT:
+ arguments->abort = 1;
+ break;
+ case ARGP_KEY_ARG:
+ /*arguments->arg_list = &state->argv[state->next-1];
+ state->next = state->argc;*/
+ argp_usage(state);
+ break;
- default:
- return ARGP_ERR_UNKNOWN;
- }
- return 0;
+ default:
+ return ARGP_ERR_UNKNOWN;
}
+ return 0;
+}
- static struct argp argp = {options, parse_opt, 0, 0};
+static struct argp argp = {options, parse_opt, 0, 0};
- void parse_args(int argc, char *argv[], SDemoArguments *arguments) {
- argp_parse(&argp, argc, argv, 0, 0, arguments);
- if (arguments->abort) {
- #ifndef _ALPINE
- error(10, 0, "ABORTED");
- #else
- abort();
- #endif
- }
+void parse_args(int argc, char *argv[], SArguments *arguments) {
+ argp_parse(&argp, argc, argv, 0, 0, arguments);
+ if (arguments->abort) {
+ #ifndef _ALPINE
+ error(10, 0, "ABORTED");
+ #else
+ abort();
+ #endif
}
+}
#else
void printHelp() {
char indent[10] = " ";
- printf("%s%s\n", indent, "-h");
- printf("%s%s%s\n", indent, indent, "host, The host to connect to TDengine. Default is localhost.");
- printf("%s%s\n", indent, "-p");
- printf("%s%s%s\n", indent, indent, "port, The TCP/IP port number to use for the connection. Default is 0.");
- printf("%s%s\n", indent, "-u");
- printf("%s%s%s\n", indent, indent, "user, The user name to use when connecting to the server. Default is 'root'.");
- printf("%s%s\n", indent, "-p");
- #ifdef _TD_POWER_
- printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'powerdb'.");
- #else
- printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'taosdata'.");
- #endif
- printf("%s%s\n", indent, "-d");
- printf("%s%s%s\n", indent, indent, "database, Destination database. Default is 'test'.");
- printf("%s%s\n", indent, "-a");
- printf("%s%s%s\n", indent, indent, "replica, Set the replica parameters of the database, Default 1, min: 1, max: 3.");
- printf("%s%s\n", indent, "-m");
- printf("%s%s%s\n", indent, indent, "table_prefix, Table prefix name. Default is 't'.");
- printf("%s%s\n", indent, "-s");
- printf("%s%s%s\n", indent, indent, "sql file, The select sql file.");
- printf("%s%s\n", indent, "-M");
- printf("%s%s%s\n", indent, indent, "meteric, Use metric flag.");
- printf("%s%s\n", indent, "-o");
- printf("%s%s%s\n", indent, indent, "outputfile, Direct output to the named file. Default is './output.txt'.");
- printf("%s%s\n", indent, "-q");
- printf("%s%s%s\n", indent, indent, "query_mode, Query mode--0: SYNC, 1: ASYNC. Default is SYNC.");
- printf("%s%s\n", indent, "-b");
- printf("%s%s%s\n", indent, indent, "type_of_cols, data_type of columns: 'INT', 'TINYINT', 'SMALLINT', 'BIGINT', 'FLOAT', 'DOUBLE', 'BINARY'. Default is 'INT'.");
- printf("%s%s\n", indent, "-w");
- printf("%s%s%s\n", indent, indent, "length_of_binary, The length of data_type 'BINARY'. Only applicable when type of cols is 'BINARY'. Default is 8");
- printf("%s%s\n", indent, "-l");
- printf("%s%s%s\n", indent, indent, "num_of_cols_per_record, The number of columns per record. Default is 3.");
- printf("%s%s\n", indent, "-T");
- printf("%s%s%s\n", indent, indent, "num_of_threads, The number of threads. Default is 10.");
- printf("%s%s\n", indent, "-r");
- printf("%s%s%s\n", indent, indent, "num_of_records_per_req, The number of records per request. Default is 1000.");
- printf("%s%s\n", indent, "-t");
- printf("%s%s%s\n", indent, indent, "num_of_tables, The number of tables. Default is 10000.");
- printf("%s%s\n", indent, "-n");
- printf("%s%s%s\n", indent, indent, "num_of_records_per_table, The number of records per table. Default is 100000.");
+ printf("%s%s\n", indent, "-f");
+ printf("%s%s%s\n", indent, indent, "The meta file to the execution procedure. Default is './meta.json'.");
printf("%s%s\n", indent, "-c");
- #ifdef _TD_POWER_
- printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/power/'.");
- #else
printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/taos/'.");
- #endif
- printf("%s%s\n", indent, "-x");
- printf("%s%s%s\n", indent, indent, "flag, Insert only flag.");
- printf("%s%s\n", indent, "-y");
- printf("%s%s%s\n", indent, indent, "flag, Anser Yes for prompt.");
- printf("%s%s\n", indent, "-O");
- printf("%s%s%s\n", indent, indent, "order, Insert mode--0: In order, 1: Out of order. Default is in order.");
- printf("%s%s\n", indent, "-R");
- printf("%s%s%s\n", indent, indent, "rate, Out of order data's rate--if order=1 Default 10, min: 0, max: 50.");
- printf("%s%s\n", indent, "-D");
- printf("%s%s%s\n", indent, indent, "Delete data methods 0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database.");
}
- void parse_args(int argc, char *argv[], SDemoArguments *arguments) {
- char **sptr;
+ void parse_args(int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
- if (strcmp(argv[i], "-h") == 0) {
- arguments->host = argv[++i];
- } else if (strcmp(argv[i], "-p") == 0) {
- arguments->port = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-u") == 0) {
- arguments->user = argv[++i];
- } else if (strcmp(argv[i], "-P") == 0) {
- arguments->password = argv[++i];
- } else if (strcmp(argv[i], "-o") == 0) {
- arguments->output_file = argv[++i];
- } else if (strcmp(argv[i], "-s") == 0) {
- arguments->sqlFile = argv[++i];
- } else if (strcmp(argv[i], "-q") == 0) {
- arguments->mode = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-T") == 0) {
- arguments->num_of_threads = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-r") == 0) {
- arguments->num_of_RPR = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-t") == 0) {
- arguments->num_of_tables = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-n") == 0) {
- arguments->num_of_DPT = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-d") == 0) {
- arguments->database = argv[++i];
- } else if (strcmp(argv[i], "-l") == 0) {
- arguments->num_of_CPR = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-b") == 0) {
- sptr = arguments->datatype;
- ++i;
- if (strstr(argv[i], ",") == NULL) {
- if (strcasecmp(argv[i], "INT") != 0 && strcasecmp(argv[i], "FLOAT") != 0 &&
- strcasecmp(argv[i], "TINYINT") != 0 && strcasecmp(argv[i], "BOOL") != 0 &&
- strcasecmp(argv[i], "SMALLINT") != 0 &&
- strcasecmp(argv[i], "BIGINT") != 0 && strcasecmp(argv[i], "DOUBLE") != 0 &&
- strcasecmp(argv[i], "BINARY") && strcasecmp(argv[i], "NCHAR")) {
- fprintf(stderr, "Invalid data_type!\n");
- printHelp();
- exit(EXIT_FAILURE);
- }
- sptr[0] = argv[i];
- } else {
- int index = 0;
- char *dupstr = strdup(argv[i]);
- char *running = dupstr;
- char *token = strsep(&running, ",");
- while (token != NULL) {
- if (strcasecmp(token, "INT") != 0 &&
- strcasecmp(token, "FLOAT") != 0 &&
- strcasecmp(token, "TINYINT") != 0 &&
- strcasecmp(token, "BOOL") != 0 &&
- strcasecmp(token, "SMALLINT") != 0 &&
- strcasecmp(token, "BIGINT") != 0 &&
- strcasecmp(token, "DOUBLE") != 0 && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) {
- fprintf(stderr, "Invalid data_type!\n");
- printHelp();
- exit(EXIT_FAILURE);
- }
- sptr[index++] = token;
- token = strsep(&running, ",");
- if (index >= MAX_NUM_DATATYPE) break;
- }
- }
- } else if (strcmp(argv[i], "-w") == 0) {
- arguments->len_of_binary = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-m") == 0) {
- arguments->tb_prefix = argv[++i];
- } else if (strcmp(argv[i], "-M") == 0) {
- arguments->use_metric = true;
- } else if (strcmp(argv[i], "-x") == 0) {
- arguments->insert_only = true;
- } else if (strcmp(argv[i], "-y") == 0) {
- arguments->answer_yes = true;
+ if (strcmp(argv[i], "-f") == 0) {
+ arguments->metaFile = argv[++i];
} else if (strcmp(argv[i], "-c") == 0) {
strcpy(configDir, argv[++i]);
- } else if (strcmp(argv[i], "-O") == 0) {
- arguments->order = atoi(argv[++i]);
- if (arguments->order > 1 || arguments->order < 0) {
- arguments->order = 0;
- } else if (arguments->order == 1) {
- arguments->rate = 10;
- }
- } else if (strcmp(argv[i], "-R") == 0) {
- arguments->rate = atoi(argv[++i]);
- if (arguments->order == 1 && (arguments->rate > 50 || arguments->rate <= 0)) {
- arguments->rate = 10;
- }
- } else if (strcmp(argv[i], "-a") == 0) {
- arguments->replica = atoi(argv[++i]);
- if (arguments->rate > 3 || arguments->rate < 1) {
- arguments->rate = 1;
- }
- } else if (strcmp(argv[i], "-D") == 0) {
- arguments->method_of_delete = atoi(argv[++i]);
- if (arguments->method_of_delete < 0 || arguments->method_of_delete > 3) {
- arguments->method_of_delete = 0;
- }
} else if (strcmp(argv[i], "--help") == 0) {
printHelp();
exit(EXIT_FAILURE);
@@ -452,323 +674,1528 @@ typedef struct DemoArguments {
}
}
}
-
#endif
-/* ******************************* Structure
- * definition******************************* */
-enum MODE {
- SYNC, ASYNC
+static bool getInfoFromJsonFile(char* file);
+//static int generateOneRowDataForStb(SSuperTable* stbInfo);
+//static int getDataIntoMemForStb(SSuperTable* stbInfo);
+static void init_rand_data();
+static int createDatabases();
+static void createChildTables();
+static int queryDbExec(TAOS *taos, char *command, int type);
+
+/* ************ Global variables ************ */
+
+int32_t randint[MAX_PREPARED_RAND];
+int64_t randbigint[MAX_PREPARED_RAND];
+float randfloat[MAX_PREPARED_RAND];
+double randdouble[MAX_PREPARED_RAND];
+char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", "max(col0)", "min(col0)", "first(col0)", "last(col0)"};
+
+SArguments g_args = {NULL,
+ "127.0.0.1", // host
+ 6030, // port
+ "root", // user
+ #ifdef _TD_POWER_
+ "powerdb", // password
+ #else
+ "taosdata", // password
+ #endif
+ "test", // database
+ 1, // replica
+ "t", // tb_prefix
+ NULL, // sqlFile
+ false, // use_metric
+ false, // insert_only
+ false, // answer_yes;
+ "./output.txt", // output_file
+ 0, // mode : sync or async
+ {
+ "TINYINT", // datatype
+ "SMALLINT",
+ "INT",
+ "BIGINT",
+ "FLOAT",
+ "DOUBLE",
+ "BINARY",
+ "NCHAR",
+ "BOOL",
+ "TIMESTAMP"
+ },
+ 16, // len_of_binary
+ 10, // num_of_CPR
+ 10, // num_of_connections/thread
+ 100, // num_of_RPR
+ 10000, // num_of_tables
+ 10000, // num_of_DPT
+ 0, // abort
+ 0, // disorderRatio
+ 1000, // disorderRange
+ 1, // method_of_delete
+ NULL // arg_list
};
-typedef struct {
- TAOS *taos;
- int threadID;
- char db_name[MAX_DB_NAME_SIZE];
- char fp[4096];
- char **datatype;
- int len_of_binary;
- char tb_prefix[MAX_TB_NAME_SIZE];
- int start_table_id;
- int end_table_id;
- int ncols_per_record;
- int nrecords_per_table;
- int nrecords_per_request;
- int data_of_order;
- int data_of_rate;
- int64_t start_time;
- bool do_aggreFunc;
-
- char* cols;
- bool use_metric;
-
- tsem_t mutex_sem;
- int notFinished;
- tsem_t lock_sem;
- int counter;
-
- // insert delay statitics
- int64_t cntDelay;
- int64_t totalDelay;
- int64_t avgDelay;
- int64_t maxDelay;
- int64_t minDelay;
-
-} info;
-
-typedef struct {
- TAOS *taos;
-
- char tb_name[MAX_TB_NAME_SIZE];
- int64_t timestamp;
- int target;
- int counter;
- int nrecords_per_request;
- int ncols_per_record;
- char **data_type;
- int len_of_binary;
- int data_of_order;
- int data_of_rate;
-
- tsem_t *mutex_sem;
- int *notFinished;
- tsem_t *lock_sem;
-} sTable;
-
-/* ******************************* Global
- * variables******************************* */
-char *aggreFunc[] = {"*", "count(*)", "avg(f1)", "sum(f1)", "max(f1)", "min(f1)", "first(f1)", "last(f1)"};
-void queryDB(TAOS *taos, char *command);
+static int g_jsonType = 0;
+static SDbs g_Dbs;
+static int g_totalChildTables = 0;
+static SQueryMetaInfo g_queryInfo;
+static FILE * g_fpOfInsertResult = NULL;
-void *readTable(void *sarg);
-void *readMetric(void *sarg);
-
-void *syncWrite(void *sarg);
-
-void *deleteTable();
-
-void *asyncWrite(void *sarg);
-
-int generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary);
-
-void rand_string(char *str, int size);
-
-void init_rand_data();
-
-double getCurrentTime();
-
-void callBack(void *param, TAOS_RES *res, int code);
-void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass);
-void querySqlFile(TAOS* taos, char* sqlFile);
-
-int main(int argc, char *argv[]) {
- SDemoArguments arguments = { NULL, // host
- 0, // port
- "root", // user
- #ifdef _TD_POWER_
- "powerdb", // password
- #else
- "taosdata", // password
- #endif
- "test", // database
- 1, // replica
- "t", // tb_prefix
- NULL,
- false, // use_metric
- false, // insert_only
- false, // answer_yes
- "./output.txt", // output_file
- 0, // mode
- {
- "int", // datatype
- "int",
- "int",
- "int",
- "int",
- "int",
- "int",
- "float"
- },
- 8, // len_of_binary
- 1, // num_of_CPR
- 1, // num_of_connections/thread
- 1, // num_of_RPR
- 1, // num_of_tables
- 50000, // num_of_DPT
- 0, // abort
- 0, // order
- 0, // rate
- 0, // method_of_delete
- NULL // arg_list
- };
-
- /* Parse our arguments; every option seen by parse_opt will be
- reflected in arguments. */
- // For demo use, change default values for some parameters;
- arguments.num_of_tables = 10000;
- arguments.num_of_CPR = 3;
- arguments.num_of_threads = 10;
- arguments.num_of_DPT = 100000;
- arguments.num_of_RPR = 1000;
- arguments.use_metric = true;
- arguments.insert_only = false;
- arguments.answer_yes = false;
- // end change
-
- parse_args(argc, argv, &arguments);
-
- enum MODE query_mode = arguments.mode;
- char *ip_addr = arguments.host;
- uint16_t port = arguments.port;
- char *user = arguments.user;
- char *pass = arguments.password;
- char *db_name = arguments.database;
- char *tb_prefix = arguments.tb_prefix;
- int len_of_binary = arguments.len_of_binary;
- int ncols_per_record = arguments.num_of_CPR;
- int order = arguments.order;
- int rate = arguments.rate;
- int method_of_delete = arguments.method_of_delete;
- int ntables = arguments.num_of_tables;
- int threads = arguments.num_of_threads;
- int nrecords_per_table = arguments.num_of_DPT;
- int nrecords_per_request = arguments.num_of_RPR;
- bool use_metric = arguments.use_metric;
- bool insert_only = arguments.insert_only;
- bool answer_yes = arguments.answer_yes;
- char **data_type = arguments.datatype;
- int count_data_type = 0;
- char dataString[STRING_LEN];
- bool do_aggreFunc = true;
- int replica = arguments.replica;
-
- if (NULL != arguments.sqlFile) {
- TAOS* qtaos = taos_connect(ip_addr, user, pass, db_name, port);
- querySqlFile(qtaos, arguments.sqlFile);
- taos_close(qtaos);
- return 0;
+void tmfclose(FILE *fp) {
+ if (NULL != fp) {
+ fclose(fp);
}
- init_rand_data();
+}
- memset(dataString, 0, STRING_LEN);
- int len = 0;
-
- if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0 || strcasecmp(data_type[0], "NCHAR") == 0 ) {
- do_aggreFunc = false;
+void tmfree(char *buf) {
+ if (NULL != buf) {
+ free(buf);
}
- for (; count_data_type <= MAX_NUM_DATATYPE; count_data_type++) {
- if (data_type[count_data_type] == NULL) {
+}
+
+static int queryDbExec(TAOS *taos, char *command, int type) {
+ int i;
+ TAOS_RES *res = NULL;
+ int32_t code = -1;
+
+ for (i = 0; i < 5; i++) {
+ if (NULL != res) {
+ taos_free_result(res);
+ res = NULL;
+ }
+
+ res = taos_query(taos, command);
+ code = taos_errno(res);
+ if (0 == code) {
break;
+ }
+ }
+
+ if (code != 0) {
+ fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(res));
+ taos_free_result(res);
+ //taos_close(taos);
+ return -1;
+ }
+
+ if (INSERT_TYPE == type) {
+ int affectedRows = taos_affected_rows(res);
+ taos_free_result(res);
+ return affectedRows;
+ }
+
+ taos_free_result(res);
+ return 0;
+}
+
+static void getResult(TAOS_RES *res, char* resultFileName) {
+ TAOS_ROW row = NULL;
+ int num_rows = 0;
+ int num_fields = taos_field_count(res);
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+
+ FILE *fp = NULL;
+ if (resultFileName[0] != 0) {
+ fp = fopen(resultFileName, "at");
+ if (fp == NULL) {
+ fprintf(stderr, "failed to open result file: %s, result will not save to file\n", resultFileName);
+ }
+ }
+
+ char* databuf = (char*) calloc(1, 100*1024*1024);
+ if (databuf == NULL) {
+ fprintf(stderr, "failed to malloc, warning: save result to file slowly!\n");
+ return ;
+ }
+
+ int totalLen = 0;
+ char temp[16000];
+
+ // fetch the records row by row
+ while ((row = taos_fetch_row(res))) {
+ if (totalLen >= 100*1024*1024 - 32000) {
+ if (fp) fprintf(fp, "%s", databuf);
+ totalLen = 0;
+ memset(databuf, 0, 100*1024*1024);
+ }
+ num_rows++;
+ int len = taos_print_row(temp, row, fields, num_fields);
+ len += sprintf(temp + len, "\n");
+ //printf("query result:%s\n", temp);
+ memcpy(databuf + totalLen, temp, len);
+ totalLen += len;
+ }
+
+ if (fp) fprintf(fp, "%s", databuf);
+ tmfclose(fp);
+ free(databuf);
+}
+
+static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) {
+ TAOS_RES *res = taos_query(taos, command);
+ if (res == NULL || taos_errno(res) != 0) {
+ printf("failed to sql:%s, reason:%s\n", command, taos_errstr(res));
+ taos_free_result(res);
+ return;
+ }
+
+ getResult(res, resultFileName);
+ taos_free_result(res);
+}
+
+double getCurrentTime() {
+ struct timeval tv;
+ if (gettimeofday(&tv, NULL) != 0) {
+ perror("Failed to get current time in ms");
+ return 0.0;
+ }
+
+ return tv.tv_sec + tv.tv_usec / 1E6;
+}
+
+static int32_t rand_bool(){
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randint[cursor] % 2;
+}
+
+static int32_t rand_tinyint(){
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randint[cursor] % 128;
+}
+
+static int32_t rand_smallint(){
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randint[cursor] % 32767;
+}
+
+static int32_t rand_int(){
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randint[cursor];
+}
+
+static int64_t rand_bigint(){
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randbigint[cursor];
+
+}
+
+static float rand_float(){
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randfloat[cursor];
+}
+
+static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";
+void rand_string(char *str, int size) {
+ str[0] = 0;
+ if (size > 0) {
+ //--size;
+ int n;
+ for (n = 0; n < size; n++) {
+ int key = rand_tinyint() % (int)(sizeof(charset) - 1);
+ str[n] = charset[key];
+ }
+ str[n] = 0;
+ }
+}
+
+static double rand_double() {
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randdouble[cursor];
+
+}
+
+static void init_rand_data() {
+ for (int i = 0; i < MAX_PREPARED_RAND; i++){
+ randint[i] = (int)(rand() % 65535);
+ randbigint[i] = (int64_t)(rand() % 2147483648);
+ randfloat[i] = (float)(rand() / 1000.0);
+ randdouble[i] = (double)(rand() / 1000000.0);
+ }
+}
+
+static int printfInsertMeta() {
+ printf("\033[1m\033[40;32m================ insert.json parse result START ================\033[0m\n");
+ printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port);
+ printf("user: \033[33m%s\033[0m\n", g_Dbs.user);
+ printf("password: \033[33m%s\033[0m\n", g_Dbs.password);
+ printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
+ printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
+ printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl);
+
+ printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ printf("database[\033[33m%d\033[0m]:\n", i);
+ printf(" database name: \033[33m%s\033[0m\n", g_Dbs.db[i].dbName);
+ if (0 == g_Dbs.db[i].drop) {
+ printf(" drop: \033[33mno\033[0m\n");
+ }else {
+ printf(" drop: \033[33myes\033[0m\n");
}
- len += snprintf(dataString + len, STRING_LEN - len, "%s ", data_type[count_data_type]);
+ if (g_Dbs.db[i].dbCfg.blocks > 0) {
+ printf(" blocks: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.blocks);
+ }
+ if (g_Dbs.db[i].dbCfg.cache > 0) {
+ printf(" cache: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.cache);
+ }
+ if (g_Dbs.db[i].dbCfg.days > 0) {
+ printf(" days: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.days);
+ }
+ if (g_Dbs.db[i].dbCfg.keep > 0) {
+ printf(" keep: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.keep);
+ }
+ if (g_Dbs.db[i].dbCfg.replica > 0) {
+ printf(" replica: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.replica);
+ }
+ if (g_Dbs.db[i].dbCfg.update > 0) {
+ printf(" update: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.update);
+ }
+ if (g_Dbs.db[i].dbCfg.minRows > 0) {
+ printf(" minRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.minRows);
+ }
+ if (g_Dbs.db[i].dbCfg.maxRows > 0) {
+ printf(" maxRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.maxRows);
+ }
+ if (g_Dbs.db[i].dbCfg.comp > 0) {
+ printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp);
+ }
+ if (g_Dbs.db[i].dbCfg.walLevel > 0) {
+ printf(" walLevel: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.walLevel);
+ }
+ if (g_Dbs.db[i].dbCfg.fsync > 0) {
+ printf(" fsync: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.fsync);
+ }
+ if (g_Dbs.db[i].dbCfg.quorum > 0) {
+ printf(" quorum: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.quorum);
+ }
+ if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
+ if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
+ printf(" precision: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision);
+ } else {
+ printf(" precision error: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision);
+ return -1;
+ }
+ }
+
+ printf(" super table count: \033[33m%d\033[0m\n", g_Dbs.db[i].superTblCount);
+ for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ printf(" super table[\033[33m%d\033[0m]:\n", j);
+
+ printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName);
+
+ if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
+ printf(" autoCreateTable: \033[33m%s\033[0m\n", "no");
+ } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
+ printf(" autoCreateTable: \033[33m%s\033[0m\n", "yes");
+ } else {
+ printf(" autoCreateTable: \033[33m%s\033[0m\n", "error");
+ }
+
+ if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
+ printf(" childTblExists: \033[33m%s\033[0m\n", "no");
+ } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
+ printf(" childTblExists: \033[33m%s\033[0m\n", "yes");
+ } else {
+ printf(" childTblExists: \033[33m%s\033[0m\n", "error");
+ }
+
+ printf(" childTblCount: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].childTblCount);
+ printf(" childTblPrefix: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].childTblPrefix);
+ printf(" dataSource: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].dataSource);
+ printf(" insertMode: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].insertMode);
+ printf(" insertRate: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].insertRate);
+ printf(" insertRows: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertRows);
+
+ if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
+ printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n");
+ }else {
+ printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
+ }
+ printf(" numberOfTblInOneSql: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].numberOfTblInOneSql);
+ printf(" rowsPerTbl: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].rowsPerTbl);
+ printf(" disorderRange: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRange);
+ printf(" disorderRatio: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRatio);
+ printf(" maxSqlLen: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].maxSqlLen);
+
+ printf(" timeStampStep: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].timeStampStep);
+ printf(" startTimestamp: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].startTimestamp);
+ printf(" sampleFormat: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sampleFormat);
+ printf(" sampleFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sampleFile);
+ printf(" tagsFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].tagsFile);
+
+ printf(" columnCount: \033[33m%d\033[0m\n ", g_Dbs.db[i].superTbls[j].columnCount);
+ for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
+ //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "nchar", 5))) {
+ printf("column[\033[33m%d\033[0m]:\033[33m%s(%d)\033[0m ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ } else {
+ printf("column[%d]:\033[33m%s\033[0m ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType);
+ }
+ }
+ printf("\n");
+
+ printf(" tagCount: \033[33m%d\033[0m\n ", g_Dbs.db[i].superTbls[j].tagCount);
+ for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
+ //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", 5))) {
+ printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ } else {
+ printf("tag[%d]:\033[33m%s\033[0m ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType);
+ }
+ }
+ printf("\n");
+ }
+ printf("\n");
+ }
+ printf("\033[1m\033[40;32m================ insert.json parse result END================\033[0m\n");
+
+ return 0;
+}
+
+static void printfInsertMetaToFile(FILE* fp) {
+ fprintf(fp, "================ insert.json parse result START================\n");
+ fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port);
+ fprintf(fp, "user: %s\n", g_Dbs.user);
+ fprintf(fp, "password: %s\n", g_Dbs.password);
+ fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
+ fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
+ fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
+
+ fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ fprintf(fp, "database[%d]:\n", i);
+ fprintf(fp, " database name: %s\n", g_Dbs.db[i].dbName);
+ if (0 == g_Dbs.db[i].drop) {
+ fprintf(fp, " drop: no\n");
+ }else {
+ fprintf(fp, " drop: yes\n");
+ }
+
+ if (g_Dbs.db[i].dbCfg.blocks > 0) {
+ fprintf(fp, " blocks: %d\n", g_Dbs.db[i].dbCfg.blocks);
+ }
+ if (g_Dbs.db[i].dbCfg.cache > 0) {
+ fprintf(fp, " cache: %d\n", g_Dbs.db[i].dbCfg.cache);
+ }
+ if (g_Dbs.db[i].dbCfg.days > 0) {
+ fprintf(fp, " days: %d\n", g_Dbs.db[i].dbCfg.days);
+ }
+ if (g_Dbs.db[i].dbCfg.keep > 0) {
+ fprintf(fp, " keep: %d\n", g_Dbs.db[i].dbCfg.keep);
+ }
+ if (g_Dbs.db[i].dbCfg.replica > 0) {
+ fprintf(fp, " replica: %d\n", g_Dbs.db[i].dbCfg.replica);
+ }
+ if (g_Dbs.db[i].dbCfg.update > 0) {
+ fprintf(fp, " update: %d\n", g_Dbs.db[i].dbCfg.update);
+ }
+ if (g_Dbs.db[i].dbCfg.minRows > 0) {
+ fprintf(fp, " minRows: %d\n", g_Dbs.db[i].dbCfg.minRows);
+ }
+ if (g_Dbs.db[i].dbCfg.maxRows > 0) {
+ fprintf(fp, " maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows);
+ }
+ if (g_Dbs.db[i].dbCfg.comp > 0) {
+ fprintf(fp, " comp: %d\n", g_Dbs.db[i].dbCfg.comp);
+ }
+ if (g_Dbs.db[i].dbCfg.walLevel > 0) {
+ fprintf(fp, " walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel);
+ }
+ if (g_Dbs.db[i].dbCfg.fsync > 0) {
+ fprintf(fp, " fsync: %d\n", g_Dbs.db[i].dbCfg.fsync);
+ }
+ if (g_Dbs.db[i].dbCfg.quorum > 0) {
+ fprintf(fp, " quorum: %d\n", g_Dbs.db[i].dbCfg.quorum);
+ }
+ if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
+ if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
+ fprintf(fp, " precision: %s\n", g_Dbs.db[i].dbCfg.precision);
+ } else {
+ fprintf(fp, " precision error: %s\n", g_Dbs.db[i].dbCfg.precision);
+ }
+ }
+
+ fprintf(fp, " super table count: %d\n", g_Dbs.db[i].superTblCount);
+ for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ fprintf(fp, " super table[%d]:\n", j);
+
+ fprintf(fp, " stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName);
+
+ if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
+ fprintf(fp, " autoCreateTable: %s\n", "no");
+ } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
+ fprintf(fp, " autoCreateTable: %s\n", "yes");
+ } else {
+ fprintf(fp, " autoCreateTable: %s\n", "error");
+ }
+
+ if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
+ fprintf(fp, " childTblExists: %s\n", "no");
+ } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
+ fprintf(fp, " childTblExists: %s\n", "yes");
+ } else {
+ fprintf(fp, " childTblExists: %s\n", "error");
+ }
+
+ fprintf(fp, " childTblCount: %d\n", g_Dbs.db[i].superTbls[j].childTblCount);
+ fprintf(fp, " childTblPrefix: %s\n", g_Dbs.db[i].superTbls[j].childTblPrefix);
+ fprintf(fp, " dataSource: %s\n", g_Dbs.db[i].superTbls[j].dataSource);
+ fprintf(fp, " insertMode: %s\n", g_Dbs.db[i].superTbls[j].insertMode);
+ fprintf(fp, " insertRate: %d\n", g_Dbs.db[i].superTbls[j].insertRate);
+ fprintf(fp, " insertRows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].insertRows);
+
+ if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
+ fprintf(fp, " multiThreadWriteOneTbl: no\n");
+ }else {
+ fprintf(fp, " multiThreadWriteOneTbl: yes\n");
+ }
+ fprintf(fp, " numberOfTblInOneSql: %d\n", g_Dbs.db[i].superTbls[j].numberOfTblInOneSql);
+ fprintf(fp, " rowsPerTbl: %d\n", g_Dbs.db[i].superTbls[j].rowsPerTbl);
+ fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange);
+ fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio);
+ fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen);
+
+ fprintf(fp, " timeStampStep: %d\n", g_Dbs.db[i].superTbls[j].timeStampStep);
+ fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp);
+ fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat);
+ fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile);
+ fprintf(fp, " tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile);
+
+ fprintf(fp, " columnCount: %d\n ", g_Dbs.db[i].superTbls[j].columnCount);
+ for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
+ //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "nchar", 5))) {
+ fprintf(fp, "column[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ } else {
+ fprintf(fp, "column[%d]:%s ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType);
+ }
+ }
+ fprintf(fp, "\n");
+
+ fprintf(fp, " tagCount: %d\n ", g_Dbs.db[i].superTbls[j].tagCount);
+ for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
+ //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", 5))) {
+ fprintf(fp, "tag[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ } else {
+ fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType);
+ }
+ }
+ fprintf(fp, "\n");
+ }
+ fprintf(fp, "\n");
+ }
+ fprintf(fp, "================ insert.json parse result END ================\n\n");
+}
+
+static void printfQueryMeta() {
+ printf("\033[1m\033[40;32m================ query.json parse result ================\033[0m\n");
+ printf("host: \033[33m%s:%u\033[0m\n", g_queryInfo.host, g_queryInfo.port);
+ printf("user: \033[33m%s\033[0m\n", g_queryInfo.user);
+ printf("password: \033[33m%s\033[0m\n", g_queryInfo.password);
+ printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName);
+
+ printf("\n");
+ printf("specified table query info: \n");
+ printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate);
+ printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.concurrent);
+ printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount);
+
+ if (SUBSCRIBE_MODE == g_jsonType) {
+ printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeMode);
+ printf("interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval);
+ printf("restart: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeRestart);
+ printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeKeepProgress);
}
- FILE *fp = fopen(arguments.output_file, "a");
- if (NULL == fp) {
- fprintf(stderr, "Failed to open %s for writing\n", arguments.output_file);
- return 1;
- };
- time_t tTime = time(NULL);
- struct tm tm = *localtime(&tTime);
- printf("###################################################################\n");
- printf("# Server IP: %s:%hu\n", ip_addr == NULL ? "localhost" : ip_addr, port);
- printf("# User: %s\n", user);
- printf("# Password: %s\n", pass);
- printf("# Use metric: %s\n", use_metric ? "true" : "false");
- printf("# Datatype of Columns: %s\n", dataString);
- printf("# Binary Length(If applicable): %d\n",
- (strncasecmp(dataString, "BINARY", 6) == 0 || strncasecmp(dataString, "NCHAR", 5) == 0) ? len_of_binary : -1);
- printf("# Number of Columns per record: %d\n", ncols_per_record);
- printf("# Number of Threads: %d\n", threads);
- printf("# Number of Tables: %d\n", ntables);
- printf("# Number of Data per Table: %d\n", nrecords_per_table);
- printf("# Records/Request: %d\n", nrecords_per_request);
- printf("# Database name: %s\n", db_name);
- printf("# Table prefix: %s\n", tb_prefix);
- if (order == 1)
- {
- printf("# Data order: %d\n", order);
- printf("# Data out of order rate: %d\n", rate);
-
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]);
}
- printf("# Delete method: %d\n", method_of_delete);
- printf("# Test time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
- tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
+ printf("\n");
+ printf("super table query info: \n");
+ printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.rate);
+ printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.threadCnt);
+ printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.childTblCount);
+ printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.subQueryInfo.sTblName);
- if (!answer_yes) {
- printf("###################################################################\n\n");
- printf("Press enter key to continue");
- (void)getchar();
- }
-
- fprintf(fp, "###################################################################\n");
- fprintf(fp, "# Server IP: %s:%hu\n", ip_addr == NULL ? "localhost" : ip_addr, port);
- fprintf(fp, "# User: %s\n", user);
- fprintf(fp, "# Password: %s\n", pass);
- fprintf(fp, "# Use metric: %s\n", use_metric ? "true" : "false");
- fprintf(fp, "# Datatype of Columns: %s\n", dataString);
- fprintf(fp, "# Binary Length(If applicable): %d\n",
- (strncasecmp(dataString, "BINARY", 6) == 0 || strncasecmp(dataString, "NCHAR", 5) == 0) ? len_of_binary : -1);
- fprintf(fp, "# Number of Columns per record: %d\n", ncols_per_record);
- fprintf(fp, "# Number of Threads: %d\n", threads);
- fprintf(fp, "# Number of Tables: %d\n", ntables);
- fprintf(fp, "# Number of Data per Table: %d\n", nrecords_per_table);
- fprintf(fp, "# Records/Request: %d\n", nrecords_per_request);
- fprintf(fp, "# Database name: %s\n", db_name);
- fprintf(fp, "# Table prefix: %s\n", tb_prefix);
- if (order == 1)
- {
- printf("# Data order: %d\n", order);
- printf("# Data out of order rate: %d\n", rate);
-
- }
- fprintf(fp, "# Test time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
- tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
- fprintf(fp, "###################################################################\n\n");
- fprintf(fp, "| WRecords | Records/Second | Requests/Second | WLatency(ms) |\n");
-
- if (taos_init()) {
- fprintf(stderr, "Failed to init taos\n");
- return 1;
+ if (SUBSCRIBE_MODE == g_jsonType) {
+ printf("mod: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeMode);
+ printf("interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeInterval);
+ printf("restart: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeRestart);
+ printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeKeepProgress);
}
- TAOS *taos = taos_connect(ip_addr, user, pass, NULL, port);
+ printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.sqlCount);
+ for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) {
+ printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.subQueryInfo.sql[i]);
+ }
+ printf("\n");
+ printf("\033[1m\033[40;32m================ query.json parse result ================\033[0m\n");
+}
+
+
+static char* xFormatTimestamp(char* buf, int64_t val, int precision) {
+ time_t tt;
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ tt = (time_t)(val / 1000000);
+ } else {
+ tt = (time_t)(val / 1000);
+ }
+
+/* comment out as it make testcases like select_with_tags.sim fail.
+ but in windows, this may cause the call to localtime crash if tt < 0,
+ need to find a better solution.
+ if (tt < 0) {
+ tt = 0;
+ }
+ */
+
+#ifdef WINDOWS
+ if (tt < 0) tt = 0;
+#endif
+
+ struct tm* ptm = localtime(&tt);
+ size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);
+
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ sprintf(buf + pos, ".%06d", (int)(val % 1000000));
+ } else {
+ sprintf(buf + pos, ".%03d", (int)(val % 1000));
+ }
+
+ return buf;
+}
+
+static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) {
+ if (val == NULL) {
+ fprintf(fp, "%s", TSDB_DATA_NULL_STR);
+ return;
+ }
+
+ char buf[TSDB_MAX_BYTES_PER_ROW];
+ switch (field->type) {
+ case TSDB_DATA_TYPE_BOOL:
+ fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0));
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ fprintf(fp, "%d", *((int8_t *)val));
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ fprintf(fp, "%d", *((int16_t *)val));
+ break;
+ case TSDB_DATA_TYPE_INT:
+ fprintf(fp, "%d", *((int32_t *)val));
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ fprintf(fp, "%" PRId64, *((int64_t *)val));
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ fprintf(fp, "%.5f", GET_FLOAT_VAL(val));
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ fprintf(fp, "%.9f", GET_DOUBLE_VAL(val));
+ break;
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ memcpy(buf, val, length);
+ buf[length] = 0;
+ fprintf(fp, "\'%s\'", buf);
+ break;
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ xFormatTimestamp(buf, *(int64_t*)val, precision);
+ fprintf(fp, "'%s'", buf);
+ break;
+ default:
+ break;
+ }
+}
+
+static int xDumpResultToFile(const char* fname, TAOS_RES* tres) {
+ TAOS_ROW row = taos_fetch_row(tres);
+ if (row == NULL) {
+ return 0;
+ }
+
+ FILE* fp = fopen(fname, "at");
+ if (fp == NULL) {
+ fprintf(stderr, "ERROR: failed to open file: %s\n", fname);
+ return -1;
+ }
+
+ int num_fields = taos_num_fields(tres);
+ TAOS_FIELD *fields = taos_fetch_fields(tres);
+ int precision = taos_result_precision(tres);
+
+ for (int col = 0; col < num_fields; col++) {
+ if (col > 0) {
+ fprintf(fp, ",");
+ }
+ fprintf(fp, "%s", fields[col].name);
+ }
+ fputc('\n', fp);
+
+ int numOfRows = 0;
+ do {
+ int32_t* length = taos_fetch_lengths(tres);
+ for (int i = 0; i < num_fields; i++) {
+ if (i > 0) {
+ fputc(',', fp);
+ }
+ xDumpFieldToFile(fp, (const char*)row[i], fields +i, length[i], precision);
+ }
+ fputc('\n', fp);
+
+ numOfRows++;
+ row = taos_fetch_row(tres);
+ } while( row != NULL);
+
+ fclose(fp);
+
+ return numOfRows;
+}
+
+static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
+ TAOS_RES * res;
+ TAOS_ROW row = NULL;
+ int count = 0;
+
+ res = taos_query(taos, "show databases;");
+ int32_t code = taos_errno(res);
+
+ if (code != 0) {
+ fprintf(stderr, "failed to run , reason: %s\n", taos_errstr(res));
+ return -1;
+ }
+
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+
+ while ((row = taos_fetch_row(res)) != NULL) {
+ // sys database name : 'log'
+ if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) continue;
+
+ dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
+ if (dbInfos[count] == NULL) {
+ fprintf(stderr, "failed to allocate memory for some dbInfo[%d]\n", count);
+ return -1;
+ }
+
+ strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
+ xFormatTimestamp(dbInfos[count]->create_time, *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], TSDB_TIME_PRECISION_MILLI);
+ dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
+ dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
+ dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
+ dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
+ dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
+
+ strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
+ dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
+ dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
+ dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
+ dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
+ dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
+ dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
+ dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+ dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
+
+ strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+ dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
+ strncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);
+
+ count++;
+ if (count > MAX_DATABASE_COUNT) {
+ fprintf(stderr, "The database count overflow than %d\n", MAX_DATABASE_COUNT);
+ break;
+ }
+ }
+
+ return count;
+}
+
+static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int index) {
+ FILE *fp = NULL;
+ if (filename[0] != 0) {
+ fp = fopen(filename, "at");
+ if (fp == NULL) {
+ fprintf(stderr, "failed to open file: %s\n", filename);
+ return;
+ }
+ }
+
+ fprintf(fp, "================ database[%d] ================\n", index);
+ fprintf(fp, "name: %s\n", dbInfos->name);
+ fprintf(fp, "created_time: %s\n", dbInfos->create_time);
+ fprintf(fp, "ntables: %d\n", dbInfos->ntables);
+ fprintf(fp, "vgroups: %d\n", dbInfos->vgroups);
+ fprintf(fp, "replica: %d\n", dbInfos->replica);
+ fprintf(fp, "quorum: %d\n", dbInfos->quorum);
+ fprintf(fp, "days: %d\n", dbInfos->days);
+ fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist);
+ fprintf(fp, "cache(MB): %d\n", dbInfos->cache);
+ fprintf(fp, "blocks: %d\n", dbInfos->blocks);
+ fprintf(fp, "minrows: %d\n", dbInfos->minrows);
+ fprintf(fp, "maxrows: %d\n", dbInfos->maxrows);
+ fprintf(fp, "wallevel: %d\n", dbInfos->wallevel);
+ fprintf(fp, "fsync: %d\n", dbInfos->fsync);
+ fprintf(fp, "comp: %d\n", dbInfos->comp);
+ fprintf(fp, "cachelast: %d\n", dbInfos->cachelast);
+ fprintf(fp, "precision: %s\n", dbInfos->precision);
+ fprintf(fp, "update: %d\n", dbInfos->update);
+ fprintf(fp, "status: %s\n", dbInfos->status);
+ fprintf(fp, "\n");
+
+ fclose(fp);
+}
+
+static void printfQuerySystemInfo(TAOS * taos) {
+ char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
+ char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
+ TAOS_RES* res;
+
+ time_t t;
+ struct tm* lt;
+ time(&t);
+ lt = localtime(&t);
+ snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec);
+
+ // show variables
+ res = taos_query(taos, "show variables;");
+ //getResult(res, filename);
+ xDumpResultToFile(filename, res);
+
+ // show dnodes
+ res = taos_query(taos, "show dnodes;");
+ xDumpResultToFile(filename, res);
+ //getResult(res, filename);
+
+ // show databases
+ res = taos_query(taos, "show databases;");
+ SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *));
+ if (dbInfos == NULL) {
+ fprintf(stderr, "failed to allocate memory\n");
+ return;
+ }
+ int dbCount = getDbFromServer(taos, dbInfos);
+ if (dbCount <= 0) return;
+
+ for (int i = 0; i < dbCount; i++) {
+ // printf database info
+ printfDbInfoForQueryToFile(filename, dbInfos[i], i);
+
+ // show db.vgroups
+ snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
+ res = taos_query(taos, buffer);
+ xDumpResultToFile(filename, res);
+
+ // show db.stables
+ snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
+ res = taos_query(taos, buffer);
+ xDumpResultToFile(filename, res);
+
+ free(dbInfos[i]);
+ }
+
+ free(dbInfos);
+
+}
+
+
+#ifdef TD_LOWA_CURL
+static size_t responseCallback(void *contents, size_t size, size_t nmemb, void *userp)
+{
+ size_t realsize = size * nmemb;
+ curlMemInfo* mem = (curlMemInfo*)userp;
+
+ char *ptr = realloc(mem->buf, mem->sizeleft + realsize + 1);
+ if(ptr == NULL) {
+ /* out of memory! */
+ printf("not enough memory (realloc returned NULL)\n");
+ return 0;
+ }
+
+ mem->buf = ptr;
+ memcpy(&(mem->buf[mem->sizeleft]), contents, realsize);
+ mem->sizeleft += realsize;
+ mem->buf[mem->sizeleft] = 0;
+
+ //printf("result:%s\n\n", mem->buf);
+
+ return realsize;
+}
+
/*
 * Log in to the TDengine REST service at g_Dbs.host:6041 with the
 * default root/taosdata credentials via an empty POST.  The HTTP
 * response is accumulated in a heap buffer through responseCallback
 * and then discarded; the call only primes the REST session.
 *
 * NOTE(review): `chunk.buf = malloc(1)` is not checked for NULL, and a
 * NULL return from curl_easy_init() would crash the setopt calls —
 * confirm whether that is acceptable for this benchmarking tool.
 */
void curlProceLogin(void)
{
  CURL *curl_handle;
  CURLcode res;

  curlMemInfo chunk;

  chunk.buf = malloc(1);  /* will be grown as needed by the realloc above */
  chunk.sizeleft = 0;    /* no data at this point */

  //curl_global_init(CURL_GLOBAL_ALL);

  /* init the curl session */
  curl_handle = curl_easy_init();

  /* empty body, but force the request to be a POST */
  curl_easy_setopt(curl_handle,CURLOPT_POSTFIELDS,"");
  curl_easy_setopt(curl_handle, CURLOPT_POST, 1);

  /* 6041 is the fixed REST port used for the login endpoint */
  char dstUrl[128] = {0};
  snprintf(dstUrl, 128, "http://%s:6041/rest/login/root/taosdata", g_Dbs.host);

  /* specify URL to get */
  curl_easy_setopt(curl_handle, CURLOPT_URL, dstUrl);

  /* send all data to this function */
  curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, responseCallback);

  /* we pass our 'chunk' struct to the callback function */
  curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *)&chunk);

  /* do it! */
  res = curl_easy_perform(curl_handle);

  /* check for errors */
  if(res != CURLE_OK) {
    fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
  }
  else {
    //printf("response len:%lu, content: %s \n", (unsigned long)chunk.sizeleft, chunk.buf);
    ;
  }

  /* cleanup curl stuff */
  curl_easy_cleanup(curl_handle);

  free(chunk.buf);

  /* we're done with libcurl, so clean it up */
  //curl_global_cleanup();

  return;
}
+
+int curlProceSql(char* host, uint16_t port, char* sqlstr, CURL *curl_handle)
+{
+ //curlProceLogin();
+
+ //CURL *curl_handle;
+ CURLcode res;
+
+ curlMemInfo chunk;
+
+ chunk.buf = malloc(1); /* will be grown as needed by the realloc above */
+ chunk.sizeleft = 0; /* no data at this point */
+
+
+ char dstUrl[128] = {0};
+ snprintf(dstUrl, 128, "http://%s:%u/rest/sql", host, port+TSDB_PORT_HTTP);
+
+ //curl_global_init(CURL_GLOBAL_ALL);
+
+ /* init the curl session */
+ //curl_handle = curl_easy_init();
+
+ //curl_easy_setopt(curl_handle,CURLOPT_POSTFIELDS,"");
+ curl_easy_setopt(curl_handle, CURLOPT_POST, 1L);
+
+ /* specify URL to get */
+ curl_easy_setopt(curl_handle, CURLOPT_URL, dstUrl);
+
+ /* enable TCP keep-alive for this transfer */
+ curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPALIVE, 1L);
+ /* keep-alive idle time to 120 seconds */
+ curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPIDLE, 120L);
+ /* interval time between keep-alive probes: 60 seconds */
+ curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPINTVL, 60L);
+
+ /* send all data to this function */
+ curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, responseCallback);
+
+ /* we pass our 'chunk' struct to the callback function */
+ curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *)&chunk);
+
+ struct curl_slist *list = NULL;
+ list = curl_slist_append(list, "Authorization: Basic cm9vdDp0YW9zZGF0YQ==");
+ curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, list);
+ curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, list);
+
+ /* Set the expected upload size. */
+ curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)strlen(sqlstr));
+ curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, sqlstr);
+
+ /* get it! */
+ res = curl_easy_perform(curl_handle);
+
+ /* check for errors */
+ if(res != CURLE_OK) {
+ fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
+ return -1;
+ }
+ else {
+ /* curl_easy_perform() block end and return result */
+ //printf("[%32.32s] sql response len:%lu, content: %s \n\n", sqlstr, (unsigned long)chunk.sizeleft, chunk.buf);
+ ;
+ }
+
+ curl_slist_free_all(list); /* free the list again */
+
+ /* cleanup curl stuff */
+ //curl_easy_cleanup(curl_handle);
+
+ free(chunk.buf);
+
+ /* we're done with libcurl, so clean it up */
+ //curl_global_cleanup();
+
+ return 0;
+}
+#endif
+
+char* getTagValueFromTagSample( SSuperTable* stbInfo, int tagUsePos) {
+ char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
+ if (NULL == dataBuf) {
+ printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1);
+ return NULL;
+ }
+
+ int dataLen = 0;
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos);
+
+ return dataBuf;
+}
+
+char* generateTagVaulesForStb(SSuperTable* stbInfo) {
+ char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
+ if (NULL == dataBuf) {
+ printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1);
+ return NULL;
+ }
+
+ int dataLen = 0;
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "(");
+ for (int i = 0; i < stbInfo->tagCount; i++) {
+ if ((0 == strncasecmp(stbInfo->tags[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", 5))) {
+ if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) {
+ printf("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN);
+ tmfree(dataBuf);
+ return NULL;
+ }
+
+ char* buf = (char*)calloc(stbInfo->tags[i].dataLen+1, 1);
+ if (NULL == buf) {
+ printf("calloc failed! size:%d\n", stbInfo->tags[i].dataLen);
+ tmfree(dataBuf);
+ return NULL;
+ }
+ rand_string(buf, stbInfo->tags[i].dataLen);
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "\'%s\', ", buf);
+ tmfree(buf);
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "int", 3)) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_int());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bigint", 6)) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64", ", rand_bigint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "float", 5)) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%f, ", rand_float());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "double", 6)) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%f, ", rand_double());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "smallint", 8)) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_smallint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "tinyint", 7)) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_tinyint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bool", 4)) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_bool());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "timestamp", 4)) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64", ", rand_bigint());
+ } else {
+ printf("No support data type: %s\n", stbInfo->tags[i].dataType);
+ tmfree(dataBuf);
+ return NULL;
+ }
+ }
+ dataLen -= 2;
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")");
+ return dataBuf;
+}
+
+static int calcRowLen(SSuperTable* superTbls) {
+ int colIndex;
+ int lenOfOneRow = 0;
+
+ for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) {
+ char* dataType = superTbls->columns[colIndex].dataType;
+
+ if (strcasecmp(dataType, "BINARY") == 0) {
+ lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "NCHAR") == 0) {
+ lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "INT") == 0) {
+ lenOfOneRow += 11;
+ } else if (strcasecmp(dataType, "BIGINT") == 0) {
+ lenOfOneRow += 21;
+ } else if (strcasecmp(dataType, "SMALLINT") == 0) {
+ lenOfOneRow += 6;
+ } else if (strcasecmp(dataType, "TINYINT") == 0) {
+ lenOfOneRow += 4;
+ } else if (strcasecmp(dataType, "BOOL") == 0) {
+ lenOfOneRow += 6;
+ } else if (strcasecmp(dataType, "FLOAT") == 0) {
+ lenOfOneRow += 22;
+ } else if (strcasecmp(dataType, "DOUBLE") == 0) {
+ lenOfOneRow += 42;
+ } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
+ lenOfOneRow += 21;
+ } else {
+ printf("get error data type : %s\n", dataType);
+ exit(-1);
+ }
+ }
+
+ superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp
+
+ int tagIndex;
+ int lenOfTagOfOneRow = 0;
+ for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) {
+ char* dataType = superTbls->tags[tagIndex].dataType;
+
+ if (strcasecmp(dataType, "BINARY") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "NCHAR") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "INT") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11;
+ } else if (strcasecmp(dataType, "BIGINT") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21;
+ } else if (strcasecmp(dataType, "SMALLINT") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
+ } else if (strcasecmp(dataType, "TINYINT") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4;
+ } else if (strcasecmp(dataType, "BOOL") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
+ } else if (strcasecmp(dataType, "FLOAT") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22;
+ } else if (strcasecmp(dataType, "DOUBLE") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42;
+ } else {
+ printf("get error tag type : %s\n", dataType);
+ exit(-1);
+ }
+ }
+
+ superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow;
+
+ return 0;
+}
+
+
/*
 * Collect the names of all child tables of super table dbName.sTblName
 * (via "select tbname from db.stbl") into a single heap buffer of
 * fixed-width TSDB_TABLE_NAME_LEN slots.  On return
 * *childTblNameOfSuperTbl owns the buffer (caller frees) and
 * *childTblCountOfSuperTbl holds the number of names.  Exits the
 * process on query or allocation failure.
 *
 * NOTE(review): strncpy does not NUL-terminate when len[0] equals the
 * slot width — relies on the calloc/memset zeroing of the following
 * slot; confirm TSDB_TABLE_NAME_LEN always exceeds the max name length.
 * NOTE(review): the incoming *childTblNameOfSuperTbl value is
 * overwritten without being freed — verify callers always pass NULL.
 */
static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, int* childTblCountOfSuperTbl) {
  char command[BUFFER_SIZE] = "\0";
  TAOS_RES * res;
  TAOS_ROW row = NULL;

  char* childTblName = *childTblNameOfSuperTbl;

  //get all child table name use cmd: select tbname from superTblName;
  snprintf(command, BUFFER_SIZE, "select tbname from %s.%s", dbName, sTblName);
  res = taos_query(taos, command);
  int32_t code = taos_errno(res);
  if (code != 0) {
    printf("failed to run command %s\n", command);
    taos_free_result(res);
    taos_close(taos);
    exit(-1);
  }

  // start with room for 10000 names; grown by 1.5x below when exceeded
  int childTblCount = 10000;
  int count = 0;
  childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
  char* pTblName = childTblName;
  while ((row = taos_fetch_row(res)) != NULL) {
    int32_t* len = taos_fetch_lengths(res);
    strncpy(pTblName, (char *)row[0], len[0]);
    //printf("==== sub table name: %s\n", pTblName);
    count++;
    if (count >= childTblCount - 1) {
      // grow by 1.5x and zero the newly added tail so names stay NUL-padded
      char *tmp = realloc(childTblName, (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1);
      if (tmp != NULL) {
        childTblName = tmp;
        childTblCount = (int)(childTblCount*1.5);
        memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN));
      } else {
        // exit, if allocate more memory failed
        printf("realloc fail for save child table name of %s.%s\n", dbName, sTblName);
        tmfree(childTblName);
        taos_free_result(res);
        taos_close(taos);
        exit(-1);
      }
    }
    // advance to the next fixed-width slot (pointer recomputed in case of realloc)
    pTblName = childTblName + count * TSDB_TABLE_NAME_LEN;
  }

  *childTblCountOfSuperTbl = count;
  *childTblNameOfSuperTbl = childTblName;

  taos_free_result(res);
  return 0;
}
+
/*
 * Populate `superTbls` from the server by running
 * "describe dbName.sTblName": fills the columns[] and tags[] schemas
 * (a row whose note column is "TAG" describes a tag, anything else a
 * column), recomputes the row/tag lengths via calcRowLen(), and — when
 * the child tables are flagged as already existing — loads their names.
 * Returns 0 on success, -1 if the describe query fails.
 *
 * NOTE(review): the first result row is skipped via the count==0 guard;
 * presumably it is the timestamp column, which is not stored in
 * columns[] — confirm against the describe output format.
 */
static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* superTbls) {
  char command[BUFFER_SIZE] = "\0";
  TAOS_RES * res;
  TAOS_ROW row = NULL;
  int count = 0;

  //get schema use cmd: describe superTblName;
  snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName);
  res = taos_query(taos, command);
  int32_t code = taos_errno(res);
  if (code != 0) {
    printf("failed to run command %s\n", command);
    taos_free_result(res);
    return -1;
  }

  int tagIndex = 0;
  int columnIndex = 0;
  TAOS_FIELD *fields = taos_fetch_fields(res);
  while ((row = taos_fetch_row(res)) != NULL) {
    // skip the first describe row (not copied into columns[])
    if (0 == count) {
      count++;
      continue;
    }

    if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) {
      strncpy(superTbls->tags[tagIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
      strncpy(superTbls->tags[tagIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
      superTbls->tags[tagIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
      strncpy(superTbls->tags[tagIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
      tagIndex++;
    } else {
      strncpy(superTbls->columns[columnIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
      strncpy(superTbls->columns[columnIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
      superTbls->columns[columnIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
      strncpy(superTbls->columns[columnIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
      columnIndex++;
    }
    count++;
  }

  superTbls->columnCount = columnIndex;
  superTbls->tagCount = tagIndex;
  taos_free_result(res);

  calcRowLen(superTbls);

  if (TBL_ALREADY_EXISTS == superTbls->childTblExists) {
    //get all child table name use cmd: select tbname from superTblName;
    getAllChildNameOfSuperTable(taos, dbName, superTbls->sTblName, &superTbls->childTblName, &superTbls->childTblCount);
  }
  return 0;
}
+
/*
 * Build the column list for the super table described by `superTbls`,
 * cache it in colsOfCreatChildTable for later child-table creation, and
 * — when use_metric is set — also build the tag list and issue
 * "create table if not exists db.stbl (ts timestamp, cols...) tags (...)".
 * Also recomputes lenOfOneRow / lenOfTagOfOneRow as a side effect.
 * Returns 0 on success, -1 if the create statement fails; exits the
 * process on an unknown column/tag type.
 */
static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls, bool use_metric) {
  char command[BUFFER_SIZE] = "\0";

  char cols[STRING_LEN] = "\0";
  int colIndex;
  int len = 0;

  // build ", colN TYPE[(len)]" fragments and tally the worst-case text
  // length of one data row at the same time
  int lenOfOneRow = 0;
  for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) {
    char* dataType = superTbls->columns[colIndex].dataType;

    if (strcasecmp(dataType, "BINARY") == 0) {
      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "BINARY", superTbls->columns[colIndex].dataLen);
      lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
    } else if (strcasecmp(dataType, "NCHAR") == 0) {
      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "NCHAR", superTbls->columns[colIndex].dataLen);
      lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
    } else if (strcasecmp(dataType, "INT") == 0) {
      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT");
      lenOfOneRow += 11;
    } else if (strcasecmp(dataType, "BIGINT") == 0) {
      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BIGINT");
      lenOfOneRow += 21;
    } else if (strcasecmp(dataType, "SMALLINT") == 0) {
      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "SMALLINT");
      lenOfOneRow += 6;
    } else if (strcasecmp(dataType, "TINYINT") == 0) {
      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TINYINT");
      lenOfOneRow += 4;
    } else if (strcasecmp(dataType, "BOOL") == 0) {
      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BOOL");
      lenOfOneRow += 6;
    } else if (strcasecmp(dataType, "FLOAT") == 0) {
      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "FLOAT");
      lenOfOneRow += 22;
    } else if (strcasecmp(dataType, "DOUBLE") == 0) {
      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "DOUBLE");
      lenOfOneRow += 42;
    } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TIMESTAMP");
      lenOfOneRow += 21;
    } else {
      taos_close(taos);
      printf("config error data type : %s\n", dataType);
      exit(-1);
    }
  }

  superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp
  //printf("%s.%s column count:%d, column length:%d\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName, g_Dbs.db[i].superTbls[j].columnCount, lenOfOneRow);

  // save for creating child table
  // NOTE(review): error message reports size len+1 but the calloc is
  // len+20 — the message is stale; the +20 covers "(ts timestamp" + ")"
  superTbls->colsOfCreatChildTable = (char*)calloc(len+20, 1);
  if (NULL == superTbls->colsOfCreatChildTable) {
    printf("Failed when calloc, size:%d", len+1);
    taos_close(taos);
    exit(-1);
  }
  snprintf(superTbls->colsOfCreatChildTable, len+20, "(ts timestamp%s)", cols);

  if (use_metric) {
    char tags[STRING_LEN] = "\0";
    int tagIndex;
    len = 0;

    // build "(t0 TYPE, t1 TYPE, ...)" and tally the tag-tuple text length
    int lenOfTagOfOneRow = 0;
    len += snprintf(tags + len, STRING_LEN - len, "(");
    for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) {
      char* dataType = superTbls->tags[tagIndex].dataType;

      if (strcasecmp(dataType, "BINARY") == 0) {
        len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, "BINARY", superTbls->tags[tagIndex].dataLen);
        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
      } else if (strcasecmp(dataType, "NCHAR") == 0) {
        len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, "NCHAR", superTbls->tags[tagIndex].dataLen);
        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
      } else if (strcasecmp(dataType, "INT") == 0) {
        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "INT");
        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11;
      } else if (strcasecmp(dataType, "BIGINT") == 0) {
        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BIGINT");
        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21;
      } else if (strcasecmp(dataType, "SMALLINT") == 0) {
        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "SMALLINT");
        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
      } else if (strcasecmp(dataType, "TINYINT") == 0) {
        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "TINYINT");
        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4;
      } else if (strcasecmp(dataType, "BOOL") == 0) {
        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BOOL");
        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
      } else if (strcasecmp(dataType, "FLOAT") == 0) {
        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "FLOAT");
        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22;
      } else if (strcasecmp(dataType, "DOUBLE") == 0) {
        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "DOUBLE");
        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42;
      } else {
        taos_close(taos);
        printf("config error tag type : %s\n", dataType);
        exit(-1);
      }
    }
    // drop the trailing ", " before closing the parenthesis
    len -= 2;
    len += snprintf(tags + len, STRING_LEN - len, ")");

    superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow;

    snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s (ts timestamp%s) tags %s", dbName, superTbls->sTblName, cols, tags);
    if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
      return -1;
    }
    printf("\ncreate supertable %s success!\n\n", superTbls->sTblName);
  }
  return 0;
}
+
+
+static int createDatabases() {
+ TAOS * taos = NULL;
+ int ret = 0;
+ taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port);
if (taos == NULL) {
fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
- return 1;
+ exit(-1);
}
char command[BUFFER_SIZE] = "\0";
- sprintf(command, "drop database %s;", db_name);
- TAOS_RES* res = taos_query(taos, command);
- taos_free_result(res);
- sprintf(command, "create database %s replica %d;", db_name, replica);
- res = taos_query(taos, command);
- taos_free_result(res);
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ if (g_Dbs.db[i].drop) {
+ sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName);
+ if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
+ taos_close(taos);
+ return -1;
+ }
+ }
+
+ int dataLen = 0;
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "create database if not exists %s ", g_Dbs.db[i].dbName);
- char cols[STRING_LEN] = "\0";
- int colIndex = 0;
- len = 0;
+ if (g_Dbs.db[i].dbCfg.blocks > 0) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "blocks %d ", g_Dbs.db[i].dbCfg.blocks);
+ }
+ if (g_Dbs.db[i].dbCfg.cache > 0) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "cache %d ", g_Dbs.db[i].dbCfg.cache);
+ }
+ if (g_Dbs.db[i].dbCfg.days > 0) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "days %d ", g_Dbs.db[i].dbCfg.days);
+ }
+ if (g_Dbs.db[i].dbCfg.keep > 0) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "keep %d ", g_Dbs.db[i].dbCfg.keep);
+ }
+ if (g_Dbs.db[i].dbCfg.replica > 0) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "replica %d ", g_Dbs.db[i].dbCfg.replica);
+ }
+ if (g_Dbs.db[i].dbCfg.update > 0) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "update %d ", g_Dbs.db[i].dbCfg.update);
+ }
+ //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) {
+ // dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode);
+ //}
+ if (g_Dbs.db[i].dbCfg.minRows > 0) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "minrows %d ", g_Dbs.db[i].dbCfg.minRows);
+ }
+ if (g_Dbs.db[i].dbCfg.maxRows > 0) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "maxrows %d ", g_Dbs.db[i].dbCfg.maxRows);
+ }
+ if (g_Dbs.db[i].dbCfg.comp > 0) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "comp %d ", g_Dbs.db[i].dbCfg.comp);
+ }
+ if (g_Dbs.db[i].dbCfg.walLevel > 0) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "wal %d ", g_Dbs.db[i].dbCfg.walLevel);
+ }
+ if (g_Dbs.db[i].dbCfg.fsync > 0) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "fsync %d ", g_Dbs.db[i].dbCfg.fsync);
+ }
+ if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "precision \'%s\';", g_Dbs.db[i].dbCfg.precision);
+ }
+
+ if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
+ taos_close(taos);
+ return -1;
+ }
+ printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName);
- for (; colIndex < ncols_per_record - 1; colIndex++) {
- if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0 && strcasecmp(data_type[colIndex % count_data_type], "NCHAR") != 0) {
- len += snprintf(cols + len, STRING_LEN - len, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]);
+ for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ // describe super table, if exists
+ sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName);
+ if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
+ g_Dbs.db[i].superTbls[j].superTblExists = TBL_NO_EXISTS;
+ ret = createSuperTable(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j], g_Dbs.use_metric);
+ } else {
+ g_Dbs.db[i].superTbls[j].superTblExists = TBL_ALREADY_EXISTS;
+ ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j]);
+ }
+
+ if (0 != ret) {
+ taos_close(taos);
+ return -1;
+ }
+ }
+ }
+
+ taos_close(taos);
+ return 0;
+}
+
+
+void * createTable(void *sarg)
+{
+ threadInfo *winfo = (threadInfo *)sarg;
+ SSuperTable* superTblInfo = winfo->superTblInfo;
+
+ int64_t lastPrintTime = taosGetTimestampMs();
+
+ char* buffer = calloc(superTblInfo->maxSqlLen, 1);
+
+ int len = 0;
+ int batchNum = 0;
+ //printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
+ for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
+ if (0 == g_Dbs.use_metric) {
+ snprintf(buffer, BUFFER_SIZE, "create table if not exists %s.%s%d %s;", winfo->db_name, superTblInfo->childTblPrefix, i, superTblInfo->colsOfCreatChildTable);
} else {
- len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
+ if (0 == len) {
+ batchNum = 0;
+ memset(buffer, 0, superTblInfo->maxSqlLen);
+ len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "create table ");
+ }
+
+ char* tagsValBuf = NULL;
+ if (0 == superTblInfo->tagSource) {
+ tagsValBuf = generateTagVaulesForStb(superTblInfo);
+ } else {
+ tagsValBuf = getTagValueFromTagSample(superTblInfo, i % superTblInfo->tagSampleCount);
+ }
+ if (NULL == tagsValBuf) {
+ free(buffer);
+ return NULL;
+ }
+
+ len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "if not exists %s.%s%d using %s.%s tags %s ", winfo->db_name, superTblInfo->childTblPrefix, i, winfo->db_name, superTblInfo->sTblName, tagsValBuf);
+ free(tagsValBuf);
+ batchNum++;
+
+ if ((batchNum < superTblInfo->batchCreateTableNum) && ((superTblInfo->maxSqlLen - len) >= (superTblInfo->lenOfTagOfOneRow + 256))) {
+ continue;
+ }
+ }
+
+ len = 0;
+ if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)){
+ free(buffer);
+ return NULL;
+ }
+
+ int64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] already create %d - %d tables\n", winfo->threadID, winfo->start_table_id, i);
+ lastPrintTime = currentPrintTime;
}
}
-
- if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0 && strcasecmp(data_type[colIndex % count_data_type], "NCHAR") != 0){
- len += snprintf(cols + len, STRING_LEN - len, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]);
- } else {
- len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
- }
-
- if (use_metric) {
- /* Create metric table */
- printf("Creating meters super table...\n");
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s) tags (areaid int, loc binary(10))", db_name, cols);
- queryDB(taos, command);
- printf("meters created!\n");
- }
- taos_close(taos);
- /* Wait for table to create */
- multiThreadCreateTable(cols, use_metric, threads, ntables, db_name, tb_prefix, ip_addr, port, user, pass);
+ if (0 != len) {
+ (void)queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE);
+ }
- /* Insert data */
- double ts = getCurrentTime();
- printf("Inserting data......\n");
+ free(buffer);
+ return NULL;
+}
+
+void startMultiThreadCreateChildTable(char* cols, int threads, int ntables, char* db_name, SSuperTable* superTblInfo) {
pthread_t *pids = malloc(threads * sizeof(pthread_t));
- info *infos = malloc(threads * sizeof(info));
-
- memset(pids, 0, threads * sizeof(pthread_t));
- memset(infos, 0, threads * sizeof(info));
+ threadInfo *infos = malloc(threads * sizeof(threadInfo));
+
+ if ((NULL == pids) || (NULL == infos)) {
+ printf("malloc failed\n");
+ exit(-1);
+ }
+
+ if (threads < 1) {
+ threads = 1;
+ }
int a = ntables / threads;
if (a < 1) {
@@ -777,184 +2204,2891 @@ int main(int argc, char *argv[]) {
}
int b = 0;
- if (threads != 0)
- b = ntables % threads;
+ b = ntables % threads;
+
int last = 0;
for (int i = 0; i < threads; i++) {
- info *t_info = infos + i;
+ threadInfo *t_info = infos + i;
t_info->threadID = i;
tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
- tstrncpy(t_info->tb_prefix, tb_prefix, MAX_TB_NAME_SIZE);
- t_info->datatype = data_type;
- t_info->ncols_per_record = ncols_per_record;
- t_info->nrecords_per_table = nrecords_per_table;
- t_info->start_time = 1500000000000;
- t_info->taos = taos_connect(ip_addr, user, pass, db_name, port);
- t_info->len_of_binary = len_of_binary;
- t_info->nrecords_per_request = nrecords_per_request;
+ t_info->superTblInfo = superTblInfo;
+ t_info->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port);
t_info->start_table_id = last;
- t_info->data_of_order = order;
- t_info->data_of_rate = rate;
t_info->end_table_id = i < b ? last + a : last + a - 1;
last = t_info->end_table_id + 1;
- t_info->counter = 0;
+ t_info->use_metric = 1;
+ t_info->cols = cols;
t_info->minDelay = INT16_MAX;
-
- tsem_init(&(t_info->mutex_sem), 0, 1);
- t_info->notFinished = t_info->end_table_id - t_info->start_table_id + 1;
- tsem_init(&(t_info->lock_sem), 0, 0);
-
- if (query_mode == SYNC) {
- pthread_create(pids + i, NULL, syncWrite, t_info);
- } else {
- pthread_create(pids + i, NULL, asyncWrite, t_info);
- }
+ pthread_create(pids + i, NULL, createTable, t_info);
}
+
for (int i = 0; i < threads; i++) {
pthread_join(pids[i], NULL);
}
- double t = getCurrentTime() - ts;
- if (query_mode == SYNC) {
- printf("SYNC Insert with %d connections:\n", threads);
- } else {
- printf("ASYNC Insert with %d connections:\n", threads);
+ for (int i = 0; i < threads; i++) {
+ threadInfo *t_info = infos + i;
+ taos_close(t_info->taos);
}
- fprintf(fp, "|%"PRIu64" | %10.2f | %10.2f | %10.4f |\n\n",
- (int64_t)ntables * nrecords_per_table, ntables * nrecords_per_table / t,
- ((int64_t)ntables * nrecords_per_table) / (t * nrecords_per_request),
- t * 1000);
+ free(pids);
+ free(infos);
+}
- printf("Spent %.4f seconds to insert %"PRIu64" records with %d record(s) per request: %.2f records/second\n",
- t, (int64_t)ntables * nrecords_per_table, nrecords_per_request,
- (int64_t)ntables * nrecords_per_table / t);
+
+static void createChildTables() {
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) {
+ continue;
+ }
+ startMultiThreadCreateChildTable(g_Dbs.db[i].superTbls[j].colsOfCreatChildTable, g_Dbs.threadCountByCreateTbl, g_Dbs.db[i].superTbls[j].childTblCount, g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j]));
+ g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
+ }
+ }
+}
+
+/*
+static int taosGetLineNum(const char *fileName)
+{
+ int lineNum = 0;
+ char cmd[1024] = { 0 };
+ char buf[1024] = { 0 };
+ sprintf(cmd, "wc -l %s", fileName);
+
+ FILE *fp = popen(cmd, "r");
+ if (fp == NULL) {
+ fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno));
+ return lineNum;
+ }
+
+ if (fgets(buf, sizeof(buf), fp)) {
+ int index = strchr((const char*)buf, ' ') - buf;
+ buf[index] = '\0';
+ lineNum = atoi(buf);
+ }
+ pclose(fp);
+ return lineNum;
+}
+*/
+
+/*
+ Read 10000 lines at most. If more than 10000 lines, continue to read after using
+*/
+int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
+ size_t n = 0;
+ ssize_t readLen = 0;
+ char * line = NULL;
+
+ FILE *fp = fopen(superTblInfo->tagsFile, "r");
+ if (fp == NULL) {
+ printf("Failed to open tags file: %s, reason:%s\n", superTblInfo->tagsFile, strerror(errno));
+ return -1;
+ }
+
+ if (superTblInfo->tagDataBuf) {
+ free(superTblInfo->tagDataBuf);
+ superTblInfo->tagDataBuf = NULL;
+ }
+
+ int tagCount = 10000;
+ int count = 0;
+ char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount);
+ if (tagDataBuf == NULL) {
+ printf("Failed to calloc, reason:%s\n", strerror(errno));
+ fclose(fp);
+ return -1;
+ }
+
+ while ((readLen = tgetline(&line, &n, fp)) != -1) {
+ if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
+ line[--readLen] = 0;
+ }
+
+ if (readLen == 0) {
+ continue;
+ }
+
+ memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen);
+ count++;
+
+ if (count >= tagCount - 1) {
+ char *tmp = realloc(tagDataBuf, (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow);
+ if (tmp != NULL) {
+ tagDataBuf = tmp;
+ tagCount = (int)(tagCount*1.5);
+ memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow));
+ } else {
+ // exit, if allocate more memory failed
+ printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile);
+ tmfree(tagDataBuf);
+ free(line);
+ fclose(fp);
+ return -1;
+ }
+ }
+ }
+
+ superTblInfo->tagDataBuf = tagDataBuf;
+ superTblInfo->tagSampleCount = count;
+
+ free(line);
+ fclose(fp);
+ return 0;
+}
+
+int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) {
+ // TODO
+ return 0;
+}
+
+
+/*
+ Read 10000 lines at most. If more than 10000 lines, continue to read after using
+*/
+int readSampleFromCsvFileToMem(FILE *fp, SSuperTable* superTblInfo, char* sampleBuf) {
+ size_t n = 0;
+ ssize_t readLen = 0;
+ char * line = NULL;
+ int getRows = 0;
+
+ memset(sampleBuf, 0, MAX_SAMPLES_ONCE_FROM_FILE* superTblInfo->lenOfOneRow);
+ while (1) {
+ readLen = tgetline(&line, &n, fp);
+ if (-1 == readLen) {
+ if(0 != fseek(fp, 0, SEEK_SET)) {
+ printf("Failed to fseek file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno));
+ return -1;
+ }
+ continue;
+ }
+
+ if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
+ line[--readLen] = 0;
+ }
+
+ if (readLen == 0) {
+ continue;
+ }
+
+ if (readLen > superTblInfo->lenOfOneRow) {
+ printf("sample row len[%d] overflow define schema len[%d], so discard this row\n", (int32_t)readLen, superTblInfo->lenOfOneRow);
+ continue;
+ }
+
+ memcpy(sampleBuf + getRows * superTblInfo->lenOfOneRow, line, readLen);
+ getRows++;
+
+ if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) {
+ break;
+ }
+ }
+
+ tmfree(line);
+ return 0;
+}
+
+/*
+void readSampleFromFileToMem(SSuperTable * supterTblInfo) {
+ int ret;
+ if (0 == strncasecmp(supterTblInfo->sampleFormat, "csv", 3)) {
+ ret = readSampleFromCsvFileToMem(supterTblInfo);
+ } else if (0 == strncasecmp(supterTblInfo->sampleFormat, "json", 4)) {
+ ret = readSampleFromJsonFileToMem(supterTblInfo);
+ }
+
+ if (0 != ret) {
+ exit(-1);
+ }
+}
+*/
+static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* superTbls) {
+ bool ret = false;
+
+ // columns
+ cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns");
+ if (columns && columns->type != cJSON_Array) {
+ printf("failed to read json, columns not found\n");
+ goto PARSE_OVER;
+ } else if (NULL == columns) {
+ superTbls->columnCount = 0;
+ superTbls->tagCount = 0;
+ return true;
+ }
+
+ int columnSize = cJSON_GetArraySize(columns);
+ if (columnSize > MAX_COLUMN_COUNT) {
+ printf("failed to read json, column size overflow, max column size is %d\n", MAX_COLUMN_COUNT);
+ goto PARSE_OVER;
+ }
+
+ int count = 1;
+ int index = 0;
+ StrColumn columnCase;
+
+ //superTbls->columnCount = columnSize;
+ for (int k = 0; k < columnSize; ++k) {
+ cJSON* column = cJSON_GetArrayItem(columns, k);
+ if (column == NULL) continue;
+
+ count = 1;
+ cJSON* countObj = cJSON_GetObjectItem(column, "count");
+ if (countObj && countObj->type == cJSON_Number) {
+ count = countObj->valueint;
+ } else if (countObj && countObj->type != cJSON_Number) {
+ printf("failed to read json, column count not found");
+ goto PARSE_OVER;
+ } else {
+ count = 1;
+ }
+
+ // column info
+ memset(&columnCase, 0, sizeof(StrColumn));
+ cJSON *dataType = cJSON_GetObjectItem(column, "type");
+ if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) {
+ printf("failed to read json, column type not found");
+ goto PARSE_OVER;
+ }
+ //strncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
+ strncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
+
+ cJSON* dataLen = cJSON_GetObjectItem(column, "len");
+ if (dataLen && dataLen->type == cJSON_Number) {
+ columnCase.dataLen = dataLen->valueint;
+ } else if (dataLen && dataLen->type != cJSON_Number) {
+ printf("failed to read json, column len not found");
+ goto PARSE_OVER;
+ } else {
+ columnCase.dataLen = 8;
+ }
+
+ for (int n = 0; n < count; ++n) {
+ strncpy(superTbls->columns[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
+ superTbls->columns[index].dataLen = columnCase.dataLen;
+ index++;
+ }
+ }
+ superTbls->columnCount = index;
+
+ count = 1;
+ index = 0;
+ // tags
+ cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags");
+ if (!tags || tags->type != cJSON_Array) {
+ printf("failed to read json, tags not found");
+ goto PARSE_OVER;
+ }
+
+ int tagSize = cJSON_GetArraySize(tags);
+ if (tagSize > MAX_TAG_COUNT) {
+ printf("failed to read json, tags size overflow, max tag size is %d\n", MAX_TAG_COUNT);
+ goto PARSE_OVER;
+ }
+
+ //superTbls->tagCount = tagSize;
+ for (int k = 0; k < tagSize; ++k) {
+ cJSON* tag = cJSON_GetArrayItem(tags, k);
+ if (tag == NULL) continue;
+
+ count = 1;
+ cJSON* countObj = cJSON_GetObjectItem(tag, "count");
+ if (countObj && countObj->type == cJSON_Number) {
+ count = countObj->valueint;
+ } else if (countObj && countObj->type != cJSON_Number) {
+ printf("failed to read json, column count not found");
+ goto PARSE_OVER;
+ } else {
+ count = 1;
+ }
+
+ // column info
+ memset(&columnCase, 0, sizeof(StrColumn));
+ cJSON *dataType = cJSON_GetObjectItem(tag, "type");
+ if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) {
+ printf("failed to read json, tag type not found");
+ goto PARSE_OVER;
+ }
+ strncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
+
+ cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
+ if (dataLen && dataLen->type == cJSON_Number) {
+ columnCase.dataLen = dataLen->valueint;
+ } else if (dataLen && dataLen->type != cJSON_Number) {
+ printf("failed to read json, column len not found");
+ goto PARSE_OVER;
+ } else {
+ columnCase.dataLen = 0;
+ }
+
+ for (int n = 0; n < count; ++n) {
+ strncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
+ superTbls->tags[index].dataLen = columnCase.dataLen;
+ index++;
+ }
+ }
+ superTbls->tagCount = index;
+
+ ret = true;
+
+PARSE_OVER:
+ //free(content);
+ //cJSON_Delete(root);
+ //fclose(fp);
+ return ret;
+}
+
+static bool getMetaFromInsertJsonFile(cJSON* root) {
+ bool ret = false;
+
+ cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir");
+ if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) {
+ strncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
+ }
+
+ cJSON* host = cJSON_GetObjectItem(root, "host");
+ if (host && host->type == cJSON_String && host->valuestring != NULL) {
+ strncpy(g_Dbs.host, host->valuestring, MAX_DB_NAME_SIZE);
+ } else if (!host) {
+ strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
+ } else {
+ printf("failed to read json, host not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* port = cJSON_GetObjectItem(root, "port");
+ if (port && port->type == cJSON_Number) {
+ g_Dbs.port = port->valueint;
+ } else if (!port) {
+ g_Dbs.port = 6030;
+ }
+
+ cJSON* user = cJSON_GetObjectItem(root, "user");
+ if (user && user->type == cJSON_String && user->valuestring != NULL) {
+ strncpy(g_Dbs.user, user->valuestring, MAX_DB_NAME_SIZE);
+ } else if (!user) {
+ strncpy(g_Dbs.user, "root", MAX_DB_NAME_SIZE);
+ }
+
+ cJSON* password = cJSON_GetObjectItem(root, "password");
+ if (password && password->type == cJSON_String && password->valuestring != NULL) {
+ strncpy(g_Dbs.password, password->valuestring, MAX_DB_NAME_SIZE);
+ } else if (!password) {
+ strncpy(g_Dbs.password, "taosdata", MAX_DB_NAME_SIZE);
+ }
+
+ cJSON* resultfile = cJSON_GetObjectItem(root, "result_file");
+ if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) {
+ strncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN);
+ } else if (!resultfile) {
+ strncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN);
+ }
+
+ cJSON* threads = cJSON_GetObjectItem(root, "thread_count");
+ if (threads && threads->type == cJSON_Number) {
+ g_Dbs.threadCount = threads->valueint;
+ } else if (!threads) {
+ g_Dbs.threadCount = 1;
+ } else {
+ printf("failed to read json, threads not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl");
+ if (threads2 && threads2->type == cJSON_Number) {
+ g_Dbs.threadCountByCreateTbl = threads2->valueint;
+ } else if (!threads2) {
+ g_Dbs.threadCountByCreateTbl = 1;
+ } else {
+ printf("failed to read json, threads2 not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
+ if (answerPrompt && answerPrompt->type == cJSON_String && answerPrompt->valuestring != NULL) {
+ if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) {
+ g_args.answer_yes = false;
+ } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) {
+ g_args.answer_yes = true;
+ } else {
+ g_args.answer_yes = false;
+ }
+ } else if (!answerPrompt) {
+ g_args.answer_yes = false;
+ } else {
+ printf("failed to read json, confirm_parameter_prompt not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* dbs = cJSON_GetObjectItem(root, "databases");
+ if (!dbs || dbs->type != cJSON_Array) {
+ printf("failed to read json, databases not found\n");
+ goto PARSE_OVER;
+ }
+
+ int dbSize = cJSON_GetArraySize(dbs);
+ if (dbSize > MAX_DB_COUNT) {
+ printf("failed to read json, databases size overflow, max database is %d\n", MAX_DB_COUNT);
+ goto PARSE_OVER;
+ }
+
+ g_Dbs.dbCount = dbSize;
+ for (int i = 0; i < dbSize; ++i) {
+ cJSON* dbinfos = cJSON_GetArrayItem(dbs, i);
+ if (dbinfos == NULL) continue;
+
+ // dbinfo
+ cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo");
+ if (!dbinfo || dbinfo->type != cJSON_Object) {
+ printf("failed to read json, dbinfo not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name");
+ if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) {
+ printf("failed to read json, db name not found");
+ goto PARSE_OVER;
+ }
+ strncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE);
+
+ cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop");
+ if (drop && drop->type == cJSON_String && drop->valuestring != NULL) {
+ if (0 == strncasecmp(drop->valuestring, "yes", 3)) {
+ g_Dbs.db[i].drop = 1;
+ } else {
+ g_Dbs.db[i].drop = 0;
+ }
+ } else if (!drop) {
+ g_Dbs.db[i].drop = 0;
+ } else {
+ printf("failed to read json, drop not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision");
+ if (precision && precision->type == cJSON_String && precision->valuestring != NULL) {
+ strncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, MAX_DB_NAME_SIZE);
+ } else if (!precision) {
+ //strncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
+ memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE);
+ } else {
+ printf("failed to read json, precision not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* update = cJSON_GetObjectItem(dbinfo, "update");
+ if (update && update->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.update = update->valueint;
+ } else if (!update) {
+ g_Dbs.db[i].dbCfg.update = -1;
+ } else {
+ printf("failed to read json, update not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* replica = cJSON_GetObjectItem(dbinfo, "replica");
+ if (replica && replica->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.replica = replica->valueint;
+ } else if (!replica) {
+ g_Dbs.db[i].dbCfg.replica = -1;
+ } else {
+ printf("failed to read json, replica not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* keep = cJSON_GetObjectItem(dbinfo, "keep");
+ if (keep && keep->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.keep = keep->valueint;
+ } else if (!keep) {
+ g_Dbs.db[i].dbCfg.keep = -1;
+ } else {
+ printf("failed to read json, keep not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* days = cJSON_GetObjectItem(dbinfo, "days");
+ if (days && days->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.days = days->valueint;
+ } else if (!days) {
+ g_Dbs.db[i].dbCfg.days = -1;
+ } else {
+ printf("failed to read json, days not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache");
+ if (cache && cache->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.cache = cache->valueint;
+ } else if (!cache) {
+ g_Dbs.db[i].dbCfg.cache = -1;
+ } else {
+ printf("failed to read json, cache not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks");
+ if (blocks && blocks->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.blocks = blocks->valueint;
+ } else if (!blocks) {
+ g_Dbs.db[i].dbCfg.blocks = -1;
+ } else {
+ printf("failed to read json, block not found");
+ goto PARSE_OVER;
+ }
+
+ //cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode");
+ //if (maxtablesPerVnode && maxtablesPerVnode->type == cJSON_Number) {
+ // g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint;
+ //} else if (!maxtablesPerVnode) {
+ // g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES;
+ //} else {
+ // printf("failed to read json, maxtablesPerVnode not found");
+ // goto PARSE_OVER;
+ //}
+
+ cJSON* minRows= cJSON_GetObjectItem(dbinfo, "minRows");
+ if (minRows && minRows->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.minRows = minRows->valueint;
+ } else if (!minRows) {
+ g_Dbs.db[i].dbCfg.minRows = -1;
+ } else {
+ printf("failed to read json, minRows not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows");
+ if (maxRows && maxRows->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint;
+ } else if (!maxRows) {
+ g_Dbs.db[i].dbCfg.maxRows = -1;
+ } else {
+ printf("failed to read json, maxRows not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp");
+ if (comp && comp->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.comp = comp->valueint;
+ } else if (!comp) {
+ g_Dbs.db[i].dbCfg.comp = -1;
+ } else {
+ printf("failed to read json, comp not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel");
+ if (walLevel && walLevel->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.walLevel = walLevel->valueint;
+ } else if (!walLevel) {
+ g_Dbs.db[i].dbCfg.walLevel = -1;
+ } else {
+ printf("failed to read json, walLevel not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum");
+ if (quorum && quorum->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.quorum = quorum->valueint;
+ } else if (!quorum) {
+ g_Dbs.db[i].dbCfg.quorum = -1;
+ } else {
+ printf("failed to read json, walLevel not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync");
+ if (fsync && fsync->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.fsync = fsync->valueint;
+ } else if (!fsync) {
+ g_Dbs.db[i].dbCfg.fsync = -1;
+ } else {
+ printf("failed to read json, fsync not found");
+ goto PARSE_OVER;
+ }
+
+ // super_talbes
+ cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables");
+ if (!stables || stables->type != cJSON_Array) {
+ printf("failed to read json, super_tables not found");
+ goto PARSE_OVER;
+ }
+
+ int stbSize = cJSON_GetArraySize(stables);
+ if (stbSize > MAX_SUPER_TABLE_COUNT) {
+ printf("failed to read json, databases size overflow, max database is %d\n", MAX_SUPER_TABLE_COUNT);
+ goto PARSE_OVER;
+ }
+
+ g_Dbs.db[i].superTblCount = stbSize;
+ for (int j = 0; j < stbSize; ++j) {
+ cJSON* stbInfo = cJSON_GetArrayItem(stables, j);
+ if (stbInfo == NULL) continue;
+
+ // dbinfo
+ cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name");
+ if (!stbName || stbName->type != cJSON_String || stbName->valuestring == NULL) {
+ printf("failed to read json, stb name not found");
+ goto PARSE_OVER;
+ }
+ strncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, MAX_TB_NAME_SIZE);
+
+ cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
+ if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
+ printf("failed to read json, childtable_prefix not found");
+ goto PARSE_OVER;
+ }
+ strncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, MAX_DB_NAME_SIZE);
+
+ cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); // yes, no, null
+ if (autoCreateTbl && autoCreateTbl->type == cJSON_String && autoCreateTbl->valuestring != NULL) {
+ if (0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) {
+ g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL;
+ } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) {
+ g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
+ } else {
+ g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
+ }
+ } else if (!autoCreateTbl) {
+ g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
+ } else {
+ printf("failed to read json, auto_create_table not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num");
+ if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;
+ } else if (!batchCreateTbl) {
+ g_Dbs.db[i].superTbls[j].batchCreateTableNum = 2000;
+ } else {
+ printf("failed to read json, batch_create_tbl_num not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no
+ if (childTblExists && childTblExists->type == cJSON_String && childTblExists->valuestring != NULL) {
+ if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) {
+ g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS;
+ } else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) {
+ g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
+ } else {
+ g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
+ }
+ } else if (!childTblExists) {
+ g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
+ } else {
+ printf("failed to read json, child_table_exists not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
+ if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
+ printf("failed to read json, childtable_count not found");
+ goto PARSE_OVER;
+ }
+ g_Dbs.db[i].superTbls[j].childTblCount = count->valueint;
+
+ cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source");
+ if (dataSource && dataSource->type == cJSON_String && dataSource->valuestring != NULL) {
+ strncpy(g_Dbs.db[i].superTbls[j].dataSource, dataSource->valuestring, MAX_DB_NAME_SIZE);
+ } else if (!dataSource) {
+ strncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE);
+ } else {
+ printf("failed to read json, data_source not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , restful
+ if (insertMode && insertMode->type == cJSON_String && insertMode->valuestring != NULL) {
+ strncpy(g_Dbs.db[i].superTbls[j].insertMode, insertMode->valuestring, MAX_DB_NAME_SIZE);
+ #ifndef TD_LOWA_CURL
+ if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 7)) {
+ printf("There no libcurl, so no support resetful test! please use taosc mode.\n");
+ goto PARSE_OVER;
+ }
+ #endif
+ } else if (!insertMode) {
+ strncpy(g_Dbs.db[i].superTbls[j].insertMode, "taosc", MAX_DB_NAME_SIZE);
+ } else {
+ printf("failed to read json, insert_mode not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp");
+ if (ts && ts->type == cJSON_String && ts->valuestring != NULL) {
+ strncpy(g_Dbs.db[i].superTbls[j].startTimestamp, ts->valuestring, MAX_DB_NAME_SIZE);
+ } else if (!ts) {
+ strncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", MAX_DB_NAME_SIZE);
+ } else {
+ printf("failed to read json, start_timestamp not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* timestampStep = cJSON_GetObjectItem(stbInfo, "timestamp_step");
+ if (timestampStep && timestampStep->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint;
+ } else if (!timestampStep) {
+ g_Dbs.db[i].superTbls[j].timeStampStep = 1000;
+ } else {
+ printf("failed to read json, timestamp_step not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* sampleDataBufSize = cJSON_GetObjectItem(stbInfo, "sample_buf_size");
+ if (sampleDataBufSize && sampleDataBufSize->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].sampleDataBufSize = sampleDataBufSize->valueint;
+ if (g_Dbs.db[i].superTbls[j].sampleDataBufSize < 1024*1024) {
+ g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024;
+ }
+ } else if (!sampleDataBufSize) {
+ g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024;
+ } else {
+ printf("failed to read json, sample_buf_size not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format");
+ if (sampleFormat && sampleFormat->type == cJSON_String && sampleFormat->valuestring != NULL) {
+ strncpy(g_Dbs.db[i].superTbls[j].sampleFormat, sampleFormat->valuestring, MAX_DB_NAME_SIZE);
+ } else if (!sampleFormat) {
+ strncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE);
+ } else {
+ printf("failed to read json, sample_format not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file");
+ if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) {
+ strncpy(g_Dbs.db[i].superTbls[j].sampleFile, sampleFile->valuestring, MAX_FILE_NAME_LEN);
+ } else if (!sampleFile) {
+ memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN);
+ } else {
+ printf("failed to read json, sample_file not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file");
+ if (tagsFile && tagsFile->type == cJSON_String && tagsFile->valuestring != NULL) {
+ strncpy(g_Dbs.db[i].superTbls[j].tagsFile, tagsFile->valuestring, MAX_FILE_NAME_LEN);
+ if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) {
+ g_Dbs.db[i].superTbls[j].tagSource = 0;
+ } else {
+ g_Dbs.db[i].superTbls[j].tagSource = 1;
+ }
+ } else if (!tagsFile) {
+ memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN);
+ g_Dbs.db[i].superTbls[j].tagSource = 0;
+ } else {
+ printf("failed to read json, tags_file not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* maxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len");
+ if (maxSqlLen && maxSqlLen->type == cJSON_Number) {
+ int32_t len = maxSqlLen->valueint;
+ if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
+ len = TSDB_MAX_ALLOWED_SQL_LEN;
+ } else if (len < TSDB_MAX_SQL_LEN) {
+ len = TSDB_MAX_SQL_LEN;
+ }
+ g_Dbs.db[i].superTbls[j].maxSqlLen = len;
+ } else if (!maxSqlLen) {
+ g_Dbs.db[i].superTbls[j].maxSqlLen = TSDB_MAX_SQL_LEN;
+ } else {
+ printf("failed to read json, maxSqlLen not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON *multiThreadWriteOneTbl = cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes
+ if (multiThreadWriteOneTbl && multiThreadWriteOneTbl->type == cJSON_String && multiThreadWriteOneTbl->valuestring != NULL) {
+ if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", 3)) {
+ g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1;
+ } else {
+ g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
+ }
+ } else if (!multiThreadWriteOneTbl) {
+ g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
+ } else {
+ printf("failed to read json, multiThreadWriteOneTbl not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* numberOfTblInOneSql = cJSON_GetObjectItem(stbInfo, "number_of_tbl_in_one_sql");
+ if (numberOfTblInOneSql && numberOfTblInOneSql->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].numberOfTblInOneSql = numberOfTblInOneSql->valueint;
+ } else if (!numberOfTblInOneSql) {
+ g_Dbs.db[i].superTbls[j].numberOfTblInOneSql = 0;
+ } else {
+ printf("failed to read json, numberOfTblInOneSql not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* rowsPerTbl = cJSON_GetObjectItem(stbInfo, "rows_per_tbl");
+ if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].rowsPerTbl = rowsPerTbl->valueint;
+ } else if (!rowsPerTbl) {
+ g_Dbs.db[i].superTbls[j].rowsPerTbl = 1;
+ } else {
+ printf("failed to read json, rowsPerTbl not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio");
+ if (disorderRatio && disorderRatio->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint;
+ } else if (!disorderRatio) {
+ g_Dbs.db[i].superTbls[j].disorderRatio = 0;
+ } else {
+ printf("failed to read json, disorderRatio not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* disorderRange = cJSON_GetObjectItem(stbInfo, "disorder_range");
+ if (disorderRange && disorderRange->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].disorderRange = disorderRange->valueint;
+ } else if (!disorderRange) {
+ g_Dbs.db[i].superTbls[j].disorderRange = 1000;
+ } else {
+ printf("failed to read json, disorderRange not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* insertRate = cJSON_GetObjectItem(stbInfo, "insert_rate");
+ if (insertRate && insertRate->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].insertRate = insertRate->valueint;
+ } else if (!insertRate) {
+ g_Dbs.db[i].superTbls[j].insertRate = 0;
+ } else {
+ printf("failed to read json, insert_rate not found");
+ goto PARSE_OVER;
+ }
+
+ cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
+ if (insertRows && insertRows->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
+ if (0 == g_Dbs.db[i].superTbls[j].insertRows) {
+ g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
+ }
+ } else if (!insertRows) {
+ g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
+ } else {
+ printf("failed to read json, insert_rows not found");
+ goto PARSE_OVER;
+ }
+
+ if (NO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) {
+ continue;
+ }
+
+ int retVal = getColumnAndTagTypeFromInsertJsonFile(stbInfo, &g_Dbs.db[i].superTbls[j]);
+ if (false == retVal) {
+ goto PARSE_OVER;
+ }
+ }
+ }
+
+ ret = true;
+
+PARSE_OVER:
+ //free(content);
+ //cJSON_Delete(root);
+ //fclose(fp);
+ return ret;
+}
+
+/*
+ * Parse the query/subscribe-mode configuration out of an already-parsed
+ * JSON document and populate the global g_queryInfo (and g_args.answer_yes).
+ * Returns true on success; on any malformed mandatory field it prints a
+ * diagnostic and returns false.  The caller owns `root` and frees it.
+ */
+static bool getMetaFromQueryJsonFile(cJSON* root) {
+  bool ret = false;
+
+  // --- connection settings; each optional field has a hard-coded default ---
+  cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir");
+  if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) {
+    strncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
+  }
+
+  cJSON* host = cJSON_GetObjectItem(root, "host");
+  if (host && host->type == cJSON_String && host->valuestring != NULL) {
+    strncpy(g_queryInfo.host, host->valuestring, MAX_DB_NAME_SIZE);
+  } else if (!host) {
+    strncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE);
+  } else {
+    printf("failed to read json, host not found\n");
+    goto PARSE_OVER;
+  }
+
+  // NOTE(review): a "port" that exists but is not a number is silently
+  // ignored (no else branch) and g_queryInfo.port keeps its prior value.
+  cJSON* port = cJSON_GetObjectItem(root, "port");
+  if (port && port->type == cJSON_Number) {
+    g_queryInfo.port = port->valueint;
+  } else if (!port) {
+    g_queryInfo.port = 6030;
+  }
+
+  cJSON* user = cJSON_GetObjectItem(root, "user");
+  if (user && user->type == cJSON_String && user->valuestring != NULL) {
+    strncpy(g_queryInfo.user, user->valuestring, MAX_DB_NAME_SIZE);
+  } else if (!user) {
+    strncpy(g_queryInfo.user, "root", MAX_DB_NAME_SIZE); ;
+  }
+
+  cJSON* password = cJSON_GetObjectItem(root, "password");
+  if (password && password->type == cJSON_String && password->valuestring != NULL) {
+    strncpy(g_queryInfo.password, password->valuestring, MAX_DB_NAME_SIZE);
+  } else if (!password) {
+    strncpy(g_queryInfo.password, "taosdata", MAX_DB_NAME_SIZE);;
+  }
+
+  // NOTE(review): the mapping looks inverted — "yes" sets answer_yes=false
+  // and "no" sets it true.  It may be intentional ("yes, prompt me"), but
+  // worth confirming against the insert-mode parser.
+  cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
+  if (answerPrompt && answerPrompt->type == cJSON_String && answerPrompt->valuestring != NULL) {
+    if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) {
+      g_args.answer_yes = false;
+    } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) {
+      g_args.answer_yes = true;
+    } else {
+      g_args.answer_yes = false;
+    }
+  } else if (!answerPrompt) {
+    g_args.answer_yes = false;
+  } else {
+    printf("failed to read json, confirm_parameter_prompt not found");
+    goto PARSE_OVER;
+  }
+
+  // "databases" is mandatory: the single database name the queries run on.
+  cJSON* dbs = cJSON_GetObjectItem(root, "databases");
+  if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
+    strncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE);
+  } else if (!dbs) {
+    printf("failed to read json, databases not found\n");
+    goto PARSE_OVER;
+  }
+
+  cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode");
+  if (queryMode && queryMode->type == cJSON_String && queryMode->valuestring != NULL) {
+    strncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE);
+  } else if (!queryMode) {
+    strncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE);
+  } else {
+    printf("failed to read json, query_mode not found\n");
+    goto PARSE_OVER;
+  }
+
+  // --- "specified_table_query": queries issued verbatim, no table expansion ---
+  // super_table_query
+  cJSON *superQuery = cJSON_GetObjectItem(root, "specified_table_query");
+  if (!superQuery) {
+    g_queryInfo.superQueryInfo.concurrent = 0;
+    g_queryInfo.superQueryInfo.sqlCount = 0;
+  } else if (superQuery->type != cJSON_Object) {
+    printf("failed to read json, super_table_query not found");
+    goto PARSE_OVER;
+  } else {
+    cJSON* rate = cJSON_GetObjectItem(superQuery, "query_interval");
+    if (rate && rate->type == cJSON_Number) {
+      g_queryInfo.superQueryInfo.rate = rate->valueint;
+    } else if (!rate) {
+      g_queryInfo.superQueryInfo.rate = 0;
+    }
+
+    cJSON* concurrent = cJSON_GetObjectItem(superQuery, "concurrent");
+    if (concurrent && concurrent->type == cJSON_Number) {
+      g_queryInfo.superQueryInfo.concurrent = concurrent->valueint;
+    } else if (!concurrent) {
+      g_queryInfo.superQueryInfo.concurrent = 1;
+    }
+
+    // subscription mode: 0 = sync, 1 = async
+    cJSON* mode = cJSON_GetObjectItem(superQuery, "mode");
+    if (mode && mode->type == cJSON_String && mode->valuestring != NULL) {
+      if (0 == strcmp("sync", mode->valuestring)) {
+        g_queryInfo.superQueryInfo.subscribeMode = 0;
+      } else if (0 == strcmp("async", mode->valuestring)) {
+        g_queryInfo.superQueryInfo.subscribeMode = 1;
+      } else {
+        printf("failed to read json, subscribe mod error\n");
+        goto PARSE_OVER;
+      }
+    } else {
+      g_queryInfo.superQueryInfo.subscribeMode = 0;
+    }
+
+    cJSON* interval = cJSON_GetObjectItem(superQuery, "interval");
+    if (interval && interval->type == cJSON_Number) {
+      g_queryInfo.superQueryInfo.subscribeInterval = interval->valueint;
+    } else if (!interval) {
+      //printf("failed to read json, subscribe interval no found\n");
+      //goto PARSE_OVER;
+      g_queryInfo.superQueryInfo.subscribeInterval = 10000;
+    }
+
+    cJSON* restart = cJSON_GetObjectItem(superQuery, "restart");
+    if (restart && restart->type == cJSON_String && restart->valuestring != NULL) {
+      if (0 == strcmp("yes", restart->valuestring)) {
+        g_queryInfo.superQueryInfo.subscribeRestart = 1;
+      } else if (0 == strcmp("no", restart->valuestring)) {
+        g_queryInfo.superQueryInfo.subscribeRestart = 0;
+      } else {
+        printf("failed to read json, subscribe restart error\n");
+        goto PARSE_OVER;
+      }
+    } else {
+      g_queryInfo.superQueryInfo.subscribeRestart = 1;
+    }
+
+    cJSON* keepProgress = cJSON_GetObjectItem(superQuery, "keepProgress");
+    if (keepProgress && keepProgress->type == cJSON_String && keepProgress->valuestring != NULL) {
+      if (0 == strcmp("yes", keepProgress->valuestring)) {
+        g_queryInfo.superQueryInfo.subscribeKeepProgress = 1;
+      } else if (0 == strcmp("no", keepProgress->valuestring)) {
+        g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
+      } else {
+        printf("failed to read json, subscribe keepProgress error\n");
+        goto PARSE_OVER;
+      }
+    } else {
+      g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
+    }
+
+    // sqls: array of {sql, result} pairs, capped at MAX_QUERY_SQL_COUNT
+    cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls");
+    if (!superSqls) {
+      g_queryInfo.superQueryInfo.sqlCount = 0;
+    } else if (superSqls->type != cJSON_Array) {
+      printf("failed to read json, super sqls not found\n");
+      goto PARSE_OVER;
+    } else {
+      int superSqlSize = cJSON_GetArraySize(superSqls);
+      if (superSqlSize > MAX_QUERY_SQL_COUNT) {
+        printf("failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT);
+        goto PARSE_OVER;
+      }
+
+      g_queryInfo.superQueryInfo.sqlCount = superSqlSize;
+      for (int j = 0; j < superSqlSize; ++j) {
+        cJSON* sql = cJSON_GetArrayItem(superSqls, j);
+        if (sql == NULL) continue;
+
+        cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
+        if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) {
+          printf("failed to read json, sql not found\n");
+          goto PARSE_OVER;
+        }
+        strncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
+
+        // "result": optional output file for this query's result set
+        cJSON *result = cJSON_GetObjectItem(sql, "result");
+        if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) {
+          strncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
+        } else if (NULL == result) {
+          memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
+        } else {
+          printf("failed to read json, super query result file not found\n");
+          goto PARSE_OVER;
+        }
+      }
+    }
+  }
+
+  // --- "super_table_query": queries fanned out across a super table's children ---
+  // sub_table_query
+  cJSON *subQuery = cJSON_GetObjectItem(root, "super_table_query");
+  if (!subQuery) {
+    g_queryInfo.subQueryInfo.threadCnt = 0;
+    g_queryInfo.subQueryInfo.sqlCount = 0;
+  } else if (subQuery->type != cJSON_Object) {
+    // NOTE(review): unlike the section above, a malformed object here is
+    // treated as success (ret = true before the goto) — confirm intended.
+    printf("failed to read json, sub_table_query not found");
+    ret = true;
+    goto PARSE_OVER;
+  } else {
+    cJSON* subrate = cJSON_GetObjectItem(subQuery, "query_interval");
+    if (subrate && subrate->type == cJSON_Number) {
+      g_queryInfo.subQueryInfo.rate = subrate->valueint;
+    } else if (!subrate) {
+      g_queryInfo.subQueryInfo.rate = 0;
+    }
+
+    cJSON* threads = cJSON_GetObjectItem(subQuery, "threads");
+    if (threads && threads->type == cJSON_Number) {
+      g_queryInfo.subQueryInfo.threadCnt = threads->valueint;
+    } else if (!threads) {
+      g_queryInfo.subQueryInfo.threadCnt = 1;
+    }
+
+    //cJSON* subTblCnt = cJSON_GetObjectItem(subQuery, "childtable_count");
+    //if (subTblCnt && subTblCnt->type == cJSON_Number) {
+    //  g_queryInfo.subQueryInfo.childTblCount = subTblCnt->valueint;
+    //} else if (!subTblCnt) {
+    //  g_queryInfo.subQueryInfo.childTblCount = 0;
+    //}
+
+    // the super table whose child tables the queries will be expanded over
+    cJSON* stblname = cJSON_GetObjectItem(subQuery, "stblname");
+    if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) {
+      strncpy(g_queryInfo.subQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE);
+    } else {
+      printf("failed to read json, super table name not found\n");
+      goto PARSE_OVER;
+    }
+
+    cJSON* submode = cJSON_GetObjectItem(subQuery, "mode");
+    if (submode && submode->type == cJSON_String && submode->valuestring != NULL) {
+      if (0 == strcmp("sync", submode->valuestring)) {
+        g_queryInfo.subQueryInfo.subscribeMode = 0;
+      } else if (0 == strcmp("async", submode->valuestring)) {
+        g_queryInfo.subQueryInfo.subscribeMode = 1;
+      } else {
+        printf("failed to read json, subscribe mod error\n");
+        goto PARSE_OVER;
+      }
+    } else {
+      g_queryInfo.subQueryInfo.subscribeMode = 0;
+    }
+
+    cJSON* subinterval = cJSON_GetObjectItem(subQuery, "interval");
+    if (subinterval && subinterval->type == cJSON_Number) {
+      g_queryInfo.subQueryInfo.subscribeInterval = subinterval->valueint;
+    } else if (!subinterval) {
+      //printf("failed to read json, subscribe interval no found\n");
+      //goto PARSE_OVER;
+      g_queryInfo.subQueryInfo.subscribeInterval = 10000;
+    }
+
+    cJSON* subrestart = cJSON_GetObjectItem(subQuery, "restart");
+    if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) {
+      if (0 == strcmp("yes", subrestart->valuestring)) {
+        g_queryInfo.subQueryInfo.subscribeRestart = 1;
+      } else if (0 == strcmp("no", subrestart->valuestring)) {
+        g_queryInfo.subQueryInfo.subscribeRestart = 0;
+      } else {
+        printf("failed to read json, subscribe restart error\n");
+        goto PARSE_OVER;
+      }
+    } else {
+      g_queryInfo.subQueryInfo.subscribeRestart = 1;
+    }
+
+    cJSON* subkeepProgress = cJSON_GetObjectItem(subQuery, "keepProgress");
+    if (subkeepProgress && subkeepProgress->type == cJSON_String && subkeepProgress->valuestring != NULL) {
+      if (0 == strcmp("yes", subkeepProgress->valuestring)) {
+        g_queryInfo.subQueryInfo.subscribeKeepProgress = 1;
+      } else if (0 == strcmp("no", subkeepProgress->valuestring)) {
+        g_queryInfo.subQueryInfo.subscribeKeepProgress = 0;
+      } else {
+        printf("failed to read json, subscribe keepProgress error\n");
+        goto PARSE_OVER;
+      }
+    } else {
+      g_queryInfo.subQueryInfo.subscribeKeepProgress = 0;
+    }
+
+    // sqls
+    cJSON* subsqls = cJSON_GetObjectItem(subQuery, "sqls");
+    if (!subsqls) {
+      g_queryInfo.subQueryInfo.sqlCount = 0;
+    } else if (subsqls->type != cJSON_Array) {
+      printf("failed to read json, super sqls not found\n");
+      goto PARSE_OVER;
+    } else {
+      int superSqlSize = cJSON_GetArraySize(subsqls);
+      if (superSqlSize > MAX_QUERY_SQL_COUNT) {
+        printf("failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT);
+        goto PARSE_OVER;
+      }
+
+      g_queryInfo.subQueryInfo.sqlCount = superSqlSize;
+      for (int j = 0; j < superSqlSize; ++j) {
+        cJSON* sql = cJSON_GetArrayItem(subsqls, j);
+        if (sql == NULL) continue;
+
+        cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
+        if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) {
+          printf("failed to read json, sql not found\n");
+          goto PARSE_OVER;
+        }
+        strncpy(g_queryInfo.subQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
+
+        cJSON *result = cJSON_GetObjectItem(sql, "result");
+        if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){
+          strncpy(g_queryInfo.subQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
+        } else if (NULL == result) {
+          memset(g_queryInfo.subQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
+        } else {
+          printf("failed to read json, sub query result file not found\n");
+          goto PARSE_OVER;
+        }
+      }
+    }
+  }
+
+  ret = true;
+
+PARSE_OVER:
+  // content/root/fp are owned and released by the caller (getInfoFromJsonFile)
+  //free(content);
+  //cJSON_Delete(root);
+  //fclose(fp);
+  return ret;
+}
+
+/*
+ * Read the whole configuration file into memory, parse it as JSON and
+ * dispatch to the insert- or query-meta parser according to the optional
+ * "filetype" field ("insert" is the default when the field is absent).
+ * Returns true on success, false on any read/parse/dispatch failure.
+ */
+static bool getInfoFromJsonFile(char* file) {
+  FILE *fp = fopen(file, "r");
+  if (!fp) {
+    printf("failed to read %s, reason:%s\n", file, strerror(errno));
+    return false;
+  }
+
+  bool  ret     = false;
+  int   maxLen  = 64000;
+  char *content = calloc(1, maxLen + 1);
+  int   len     = fread(content, 1, maxLen, fp);
+  if (len <= 0) {
+    free(content);
+    fclose(fp);
+    printf("failed to read %s, content is null", file);
+    return false;
+  }
+  content[len] = 0;   // fread does not terminate the buffer
+
+  cJSON* root = cJSON_Parse(content);
+  if (NULL == root) {
+    printf("failed to cjson parse %s, invalid json format", file);
+    goto PARSE_OVER;
+  }
+
+  // Decide the run mode from "filetype"; absent means insert mode.
+  cJSON* filetype = cJSON_GetObjectItem(root, "filetype");
+  if (!filetype) {
+    g_jsonType = INSERT_MODE;
+  } else if (filetype->type == cJSON_String && filetype->valuestring != NULL) {
+    if (0 == strcasecmp("insert", filetype->valuestring)) {
+      g_jsonType = INSERT_MODE;
+    } else if (0 == strcasecmp("query", filetype->valuestring)) {
+      g_jsonType = QUERY_MODE;
+    } else if (0 == strcasecmp("subscribe", filetype->valuestring)) {
+      g_jsonType = SUBSCRIBE_MODE;
+    } else {
+      printf("failed to read json, filetype not support\n");
+      goto PARSE_OVER;
+    }
+  } else {
+    printf("failed to read json, filetype not found\n");
+    goto PARSE_OVER;
+  }
+
+  if (INSERT_MODE == g_jsonType) {
+    ret = getMetaFromInsertJsonFile(root);
+  } else if ((QUERY_MODE == g_jsonType) || (SUBSCRIBE_MODE == g_jsonType)) {
+    // query and subscribe modes share one meta layout
+    ret = getMetaFromQueryJsonFile(root);
+  } else {
+    printf("input json file type error! please input correct file type: insert or query or subscribe\n");
+    goto PARSE_OVER;
+  }
+
+PARSE_OVER:
+  free(content);
+  cJSON_Delete(root);
+  fclose(fp);
+  return ret;
+}
+
+
+/*
+ * One-time preparation before the insert threads start: for every
+ * configured super table, load tag values from the csv file when one is
+ * given, and (when built with curl) initialize libcurl for tables that
+ * insert through the restful interface.
+ */
+void prePareSampleData() {
+  for (int i = 0; i < g_Dbs.dbCount; i++) {
+    for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+      SSuperTable* stb = &g_Dbs.db[i].superTbls[j];
+
+      // a non-empty tagsFile means tags come from csv instead of random data
+      if (stb->tagsFile[0] != 0) {
+        (void)readTagFromCsvFileToMem(stb);
+      }
+
+      #ifdef TD_LOWA_CURL
+      if (0 == strncasecmp(stb->insertMode, "restful", 8)) {
+        curl_global_init(CURL_GLOBAL_ALL);
+      }
+      #endif
+    }
+  }
+}
+
+/*
+ * Release everything the insert run allocated: the insert-result file
+ * handle and all per-super-table buffers; each pointer is nulled after
+ * freeing.  With curl builds, libcurl is also torn down for every super
+ * table that used the restful insert mode.
+ */
+void postFreeResource() {
+  tmfclose(g_fpOfInsertResult);
+  for (int i = 0; i < g_Dbs.dbCount; i++) {
+    for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+      SSuperTable* stb = &g_Dbs.db[i].superTbls[j];
+
+      if (stb->colsOfCreatChildTable != NULL) {
+        free(stb->colsOfCreatChildTable);
+        stb->colsOfCreatChildTable = NULL;
+      }
+      if (stb->sampleDataBuf != NULL) {
+        free(stb->sampleDataBuf);
+        stb->sampleDataBuf = NULL;
+      }
+      if (stb->tagDataBuf != NULL) {
+        free(stb->tagDataBuf);
+        stb->tagDataBuf = NULL;
+      }
+      if (stb->childTblName != NULL) {
+        free(stb->childTblName);
+        stb->childTblName = NULL;
+      }
+
+      #ifdef TD_LOWA_CURL
+      if (0 == strncasecmp(stb->insertMode, "restful", 8)) {
+        curl_global_cleanup();
+      }
+      #endif
+    }
+  }
+}
+
+/*
+ * Append one "(timestamp, <sample row>)" tuple to dataBuf, taking the row
+ * body from the in-memory sample cache.  When every cached row has been
+ * consumed the cache is refilled from the csv file first.  Returns the
+ * number of bytes appended, or -1 if the refill fails.
+ */
+int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* superTblInfo, int* sampleUsePos, FILE *fp, char* sampleBuf) {
+  // refill the cache once all MAX_SAMPLES_ONCE_FROM_FILE rows are used up
+  if (MAX_SAMPLES_ONCE_FROM_FILE == (*sampleUsePos)) {
+    if (0 != readSampleFromCsvFileToMem(fp, superTblInfo, sampleBuf)) {
+      return -1;
+    }
+    *sampleUsePos = 0;
+  }
+
+  // rows are stored back to back, lenOfOneRow bytes apart
+  char* row = sampleBuf + superTblInfo->lenOfOneRow * (*sampleUsePos);
+
+  int n = 0;
+  n += snprintf(dataBuf + n, maxLen - n, "(%" PRId64 ", ", timestamp);
+  n += snprintf(dataBuf + n, maxLen - n, "%s", row);
+  n += snprintf(dataBuf + n, maxLen - n, ")");
+
+  (*sampleUsePos)++;
+  return n;
+}
+
+/*
+ * Generate one random "(timestamp, col1, col2, ...)" tuple for the given
+ * super table into dataBuf, choosing a random value per column according to
+ * its declared data type.  Returns the number of bytes written, or -1 for
+ * an oversized binary/nchar column, a failed allocation, or an unsupported
+ * data type.
+ */
+int generateRowData(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* stbInfo) {
+  int len = 0;
+  len += snprintf(dataBuf + len, maxLen - len, "(%" PRId64 ", ", timestamp);
+
+  for (int i = 0; i < stbInfo->columnCount; i++) {
+    char* type = stbInfo->columns[i].dataType;
+
+    if ((0 == strncasecmp(type, "binary", 6)) || (0 == strncasecmp(type, "nchar", 5))) {
+      // string columns: build a random string of the configured length
+      if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
+        printf("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN);
+        return (-1);
+      }
+
+      char* buf = (char*)calloc(stbInfo->columns[i].dataLen + 1, 1);
+      if (NULL == buf) {
+        printf("calloc failed! size:%d\n", stbInfo->columns[i].dataLen);
+        return (-1);
+      }
+      rand_string(buf, stbInfo->columns[i].dataLen);
+      len += snprintf(dataBuf + len, maxLen - len, "\'%s\', ", buf);
+      tmfree(buf);
+    } else if (0 == strncasecmp(type, "int", 3)) {
+      len += snprintf(dataBuf + len, maxLen - len, "%d, ", rand_int());
+    } else if (0 == strncasecmp(type, "bigint", 6)) {
+      len += snprintf(dataBuf + len, maxLen - len, "%"PRId64", ", rand_bigint());
+    } else if (0 == strncasecmp(type, "float", 5)) {
+      len += snprintf(dataBuf + len, maxLen - len, "%f, ", rand_float());
+    } else if (0 == strncasecmp(type, "double", 6)) {
+      len += snprintf(dataBuf + len, maxLen - len, "%f, ", rand_double());
+    } else if (0 == strncasecmp(type, "smallint", 8)) {
+      len += snprintf(dataBuf + len, maxLen - len, "%d, ", rand_smallint());
+    } else if (0 == strncasecmp(type, "tinyint", 7)) {
+      len += snprintf(dataBuf + len, maxLen - len, "%d, ", rand_tinyint());
+    } else if (0 == strncasecmp(type, "bool", 4)) {
+      len += snprintf(dataBuf + len, maxLen - len, "%d, ", rand_bool());
+    } else if (0 == strncasecmp(type, "timestamp", 9)) {
+      len += snprintf(dataBuf + len, maxLen - len, "%"PRId64", ", rand_bigint());
+    } else {
+      printf("No support data type: %s\n", type);
+      return (-1);
+    }
+  }
+
+  // drop the trailing ", " after the last column, then close the tuple
+  len -= 2;
+  len += snprintf(dataBuf + len, maxLen - len, ")");
+
+  return len;
+}
+
+/*
+ * Synchronous insert loop for the "several tables per INSERT statement"
+ * mode: each generated SQL covers up to numberOfTblInOneSql child tables,
+ * each with rowsPerTbl rows.  Runs until insertRows rows have been written
+ * per table (or an error triggers the free_and_statistics cleanup path).
+ * Per-thread totals and latency stats are accumulated into *winfo.
+ */
+void syncWriteForNumberOfTblInOneSql(threadInfo *winfo, FILE *fp, char* sampleDataBuf) {
+  SSuperTable* superTblInfo = winfo->superTblInfo;
+
+  int samplePos = 0;   // committed read position in the sample cache
+
+  //printf("========threadID[%d], table rang: %d - %d \n", winfo->threadID, winfo->start_table_id, winfo->end_table_id);
+  int64_t totalRowsInserted = 0;
+  int64_t totalAffectedRows = 0;
+  int64_t lastPrintTime = taosGetTimestampMs();
+
+  char* buffer = calloc(superTblInfo->maxSqlLen+1, 1);
+  if (NULL == buffer) {
+    printf("========calloc size[ %d ] fail!\n", superTblInfo->maxSqlLen);
+    return;
+  }
+
+  // never batch more tables than this thread actually owns
+  int32_t numberOfTblInOneSql = superTblInfo->numberOfTblInOneSql;
+  int32_t tbls = winfo->end_table_id - winfo->start_table_id + 1;
+  if (numberOfTblInOneSql > tbls) {
+    numberOfTblInOneSql = tbls;
+  }
+
+  int64_t time_counter = winfo->start_time;
+  int64_t tmp_time;
+  int sampleUsePos;
+
+  int64_t st = 0;
+  int64_t et = 0;
+  for (int i = 0; i < superTblInfo->insertRows;) {
+    // crude rate limiting: pad each pass out to at least one second
+    if (superTblInfo->insertRate && (et - st) < 1000) {
+      taosMsleep(1000 - (et - st)); // ms
+      //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
+    }
+
+    if (superTblInfo->insertRate) {
+      st = taosGetTimestampMs();
+    }
+
+    int32_t tbl_id = 0;
+    for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; ) {
+      int inserted = i;
+
+      int k = 0;
+      int batchRowsSql = 0;
+      while (1)
+      {
+        int len = 0;
+        memset(buffer, 0, superTblInfo->maxSqlLen);
+        char *pstr = buffer;
+
+        int32_t end_tbl_id = tID + numberOfTblInOneSql;
+        if (end_tbl_id > winfo->end_table_id) {
+          end_tbl_id = winfo->end_table_id+1;
+        }
+        // build one multi-table INSERT for tables [tID, end_tbl_id)
+        for (tbl_id = tID; tbl_id < end_tbl_id; tbl_id++) {
+          sampleUsePos = samplePos;
+          if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
+            // auto-create: each table clause carries "using ... tags (...)"
+            char* tagsValBuf = NULL;
+            if (0 == superTblInfo->tagSource) {
+              tagsValBuf = generateTagVaulesForStb(superTblInfo);
+            } else {
+              tagsValBuf = getTagValueFromTagSample(superTblInfo, tbl_id % superTblInfo->tagSampleCount);
+            }
+            if (NULL == tagsValBuf) {
+              goto free_and_statistics;
+            }
+
+            // only the first table clause starts with "insert into"
+            if (0 == len) {
+              len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d using %s.%s tags %s values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id, winfo->db_name, superTblInfo->sTblName, tagsValBuf);
+            } else {
+              len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s%d using %s.%s tags %s values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id, winfo->db_name, superTblInfo->sTblName, tagsValBuf);
+            }
+            tmfree(tagsValBuf);
+          } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) {
+            // existing tables: names come from the childTblName flat array
+            if (0 == len) {
+              len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s values ", winfo->db_name, superTblInfo->childTblName + tbl_id * TSDB_TABLE_NAME_LEN);
+            } else {
+              len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s values ", winfo->db_name, superTblInfo->childTblName + tbl_id * TSDB_TABLE_NAME_LEN);
+            }
+          } else { // pre-create child table
+            if (0 == len) {
+              len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id);
+            } else {
+              len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s%d values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id);
+            }
+          }
+
+          tmp_time = time_counter;
+          for (k = 0; k < superTblInfo->rowsPerTbl;) {
+            int retLen = 0;
+            if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) {
+              retLen = getRowDataFromSample(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo, &sampleUsePos, fp, sampleDataBuf);
+              if (retLen < 0) {
+                goto free_and_statistics;
+              }
+            } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", 8)) {
+              // NOTE(review): comparing 8 chars against the 4-char literal
+              // "rand" — works because strncasecmp stops at the NUL.
+              int rand_num = rand_tinyint() % 100;
+              if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) {
+                // deliberately out-of-order timestamp within disorderRange
+                int64_t d = tmp_time - rand() % superTblInfo->disorderRange;
+                retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, d, superTblInfo);
+              } else {
+                retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo);
+              }
+              if (retLen < 0) {
+                goto free_and_statistics;
+              }
+            }
+            len += retLen;
+            //inserted++;
+            k++;
+            totalRowsInserted++;
+            batchRowsSql++;
+
+            // flush early when the row quota, the SQL buffer, or the
+            // per-statement row limit is about to be exceeded
+            if (inserted >= superTblInfo->insertRows || (superTblInfo->maxSqlLen - len) < (superTblInfo->lenOfOneRow + 128) || batchRowsSql >= INT16_MAX - 1) {
+              tID = tbl_id + 1;
+              printf("config rowsPerTbl and numberOfTblInOneSql not match with max_sql_lenth, please reconfig![lenOfOneRow:%d]\n", superTblInfo->lenOfOneRow);
+              goto send_to_server;
+            }
+          }
+
+        }
+
+        tID = tbl_id;
+        inserted += superTblInfo->rowsPerTbl;
+
+        send_to_server:
+        batchRowsSql = 0;
+        if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
+          //printf("multi table===== sql: %s \n\n", buffer);
+          //int64_t t1 = taosGetTimestampMs();
+          int64_t startTs;
+          int64_t endTs;
+          startTs = taosGetTimestampUs();
+
+          int affectedRows = queryDbExec(winfo->taos, buffer, INSERT_TYPE);
+          if (0 > affectedRows) {
+            goto free_and_statistics;
+          } else {
+            // track per-request latency extremes and totals (microseconds)
+            endTs = taosGetTimestampUs();
+            int64_t delay = endTs - startTs;
+            if (delay > winfo->maxDelay) winfo->maxDelay = delay;
+            if (delay < winfo->minDelay) winfo->minDelay = delay;
+            winfo->cntDelay++;
+            winfo->totalDelay += delay;
+            //winfo->avgDelay = (double)winfo->totalDelay / winfo->cntDelay;
+          }
+          totalAffectedRows += affectedRows;
+
+          // progress line at most every 30 seconds
+          int64_t currentPrintTime = taosGetTimestampMs();
+          if (currentPrintTime - lastPrintTime > 30*1000) {
+            printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", winfo->threadID, totalRowsInserted, totalAffectedRows);
+            lastPrintTime = currentPrintTime;
+          }
+          //int64_t t2 = taosGetTimestampMs();
+          //printf("taosc insert sql return, Spent %.4f seconds \n", (double)(t2 - t1)/1000.0);
+        } else {
+          #ifdef TD_LOWA_CURL
+          //int64_t t1 = taosGetTimestampMs();
+          int retCode = curlProceSql(g_Dbs.host, g_Dbs.port, buffer, winfo->curl_handle);
+          //int64_t t2 = taosGetTimestampMs();
+          //printf("http insert sql return, Spent %ld ms \n", t2 - t1);
+
+          if (0 != retCode) {
+            printf("========curl return fail, threadID[%d]\n", winfo->threadID);
+            goto free_and_statistics;
+          }
+          #else
+          printf("========no use http mode for no curl lib!\n");
+          goto free_and_statistics;
+          #endif
+        }
+
+        //printf("========tID:%d, k:%d, loop_cnt:%d\n", tID, k, loop_cnt);
+        break;
+      }
+
+      // commit the sample position / row count once the last table is done
+      if (tID > winfo->end_table_id) {
+        if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) {
+          samplePos = sampleUsePos;
+        }
+        i = inserted;
+        time_counter = tmp_time;
+      }
+    }
+
+    if (superTblInfo->insertRate) {
+      et = taosGetTimestampMs();
+    }
+    //printf("========loop %d childTables duration:%"PRId64 "========inserted rows:%d\n", winfo->end_table_id - winfo->start_table_id, et - st, i);
+  }
+
+  free_and_statistics:
+  tmfree(buffer);
+  winfo->totalRowsInserted = totalRowsInserted;
+  winfo->totalAffectedRows = totalAffectedRows;
+  printf("====thread[%d] completed total inserted rows: %"PRId64 ", affected rows: %"PRId64 "====\n", winfo->threadID, totalRowsInserted, totalAffectedRows);
+  return;
+}
+
+// sync insertion — one INSERT statement per child table.
+/*
+   Reference throughput figures:
+   1 thread: 100 tables * 2000 rows/s
+   1 thread: 10 tables * 20000 rows/s
+   6 thread: 300 tables * 2000 rows/s
+
+   2 taosinsertdata , 1 thread: 10 tables * 20000 rows/s
+*/
+/*
+ * Thread entry point for synchronous insertion.  Each pass walks the
+ * thread's table range [start_table_id, end_table_id], building batched
+ * INSERT statements sized to fit maxSqlLen, optionally rate-limited to
+ * insertRate rows per second.  Row data comes either from a csv sample
+ * file ("sample" dataSource) or from random generators ("rand").
+ * Totals and latency stats are written back into *winfo before returning.
+ */
+void *syncWrite(void *sarg) {
+  int64_t totalRowsInserted = 0;
+  int64_t totalAffectedRows = 0;
+  int64_t lastPrintTime = taosGetTimestampMs();
+
+  threadInfo *winfo = (threadInfo *)sarg;
+  SSuperTable* superTblInfo = winfo->superTblInfo;
+
+  FILE *fp = NULL;
+  char* sampleDataBuf = NULL;
+  int samplePos = 0;   // committed position in the sample cache
+
+  // each thread read sample data from csv file
+  if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) {
+    sampleDataBuf = calloc(superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1);
+    if (sampleDataBuf == NULL) {
+      printf("Failed to calloc %d Bytes, reason:%s\n", superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno));
+      return NULL;
+    }
+
+    fp = fopen(superTblInfo->sampleFile, "r");
+    if (fp == NULL) {
+      printf("Failed to open sample file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno));
+      tmfree(sampleDataBuf);
+      return NULL;
+    }
+    int ret = readSampleFromCsvFileToMem(fp, superTblInfo, sampleDataBuf);
+    if (0 != ret) {
+      tmfree(sampleDataBuf);
+      tmfclose(fp);
+      return NULL;
+    }
+  }
+
+  // multi-table-per-sql mode is handled by a dedicated routine
+  if (superTblInfo->numberOfTblInOneSql > 0) {
+    syncWriteForNumberOfTblInOneSql(winfo, fp, sampleDataBuf);
+    tmfree(sampleDataBuf);
+    tmfclose(fp);
+    return NULL;
+  }
+
+  //printf("========threadID[%d], table rang: %d - %d \n", winfo->threadID, winfo->start_table_id, winfo->end_table_id);
+
+  char* buffer = calloc(superTblInfo->maxSqlLen, 1);
+
+  // rows that fit in one statement, leaving 1280 bytes of headroom for the
+  // statement prefix (plus the tag clause when tables are auto-created)
+  int nrecords_per_request = 0;
+  if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
+    nrecords_per_request = (superTblInfo->maxSqlLen - 1280 - superTblInfo->lenOfTagOfOneRow) / superTblInfo->lenOfOneRow;
+  } else {
+    nrecords_per_request = (superTblInfo->maxSqlLen - 1280) / superTblInfo->lenOfOneRow;
+  }
+
+  // split the per-second quota (insertRate) into full requests plus one
+  // final partial request; loop_cnt counts requests per quota window
+  int nrecords_no_last_req = nrecords_per_request;
+  int nrecords_last_req = 0;
+  int loop_cnt = 0;
+  if (0 != superTblInfo->insertRate) {
+    if (nrecords_no_last_req >= superTblInfo->insertRate) {
+      nrecords_no_last_req = superTblInfo->insertRate;
+    } else {
+      nrecords_last_req = superTblInfo->insertRate % nrecords_per_request;
+      loop_cnt = (superTblInfo->insertRate / nrecords_per_request) + (superTblInfo->insertRate % nrecords_per_request ? 1 : 0) ;
+    }
+  }
+
+  // clamp the batch size to [1, INT16_MAX - 1]
+  if (nrecords_no_last_req <= 0) {
+    nrecords_no_last_req = 1;
+  }
+
+  if (nrecords_no_last_req >= INT16_MAX) {
+    nrecords_no_last_req = INT16_MAX - 1;
+  }
+
+  if (nrecords_last_req >= INT16_MAX) {
+    nrecords_last_req = INT16_MAX - 1;
+  }
+
+  int nrecords_cur_req = nrecords_no_last_req;
+  int loop_cnt_orig = loop_cnt;
+
+  //printf("========nrecords_per_request:%d, nrecords_no_last_req:%d, nrecords_last_req:%d, loop_cnt:%d\n", nrecords_per_request, nrecords_no_last_req, nrecords_last_req, loop_cnt);
+
+  int64_t time_counter = winfo->start_time;
+
+  int64_t st = 0;
+  int64_t et = 0;
+  for (int i = 0; i < superTblInfo->insertRows;) {
+    // rate limiting: pad each pass out to at least one second
+    if (superTblInfo->insertRate && (et - st) < 1000) {
+      taosMsleep(1000 - (et - st)); // ms
+      //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
+    }
+
+    if (superTblInfo->insertRate) {
+      st = taosGetTimestampMs();
+    }
+
+    for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) {
+      int inserted = i;
+      int64_t tmp_time = time_counter;
+
+      int sampleUsePos = samplePos;
+      int k = 0;
+      while (1)
+      {
+        int len = 0;
+        memset(buffer, 0, superTblInfo->maxSqlLen);
+        char *pstr = buffer;
+
+        // statement prefix depends on the child-table creation strategy
+        if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
+          char* tagsValBuf = NULL;
+          if (0 == superTblInfo->tagSource) {
+            tagsValBuf = generateTagVaulesForStb(superTblInfo);
+          } else {
+            tagsValBuf = getTagValueFromTagSample(superTblInfo, tID % superTblInfo->tagSampleCount);
+          }
+          if (NULL == tagsValBuf) {
+            goto free_and_statistics_2;
+          }
+
+          len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d using %s.%s tags %s values", winfo->db_name, superTblInfo->childTblPrefix, tID, winfo->db_name, superTblInfo->sTblName, tagsValBuf);
+          tmfree(tagsValBuf);
+        } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) {
+          len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s values", winfo->db_name, superTblInfo->childTblName + tID * TSDB_TABLE_NAME_LEN);
+        } else {
+          len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d values", winfo->db_name, superTblInfo->childTblPrefix, tID);
+        }
+
+        // append up to nrecords_cur_req rows for this table
+        for (k = 0; k < nrecords_cur_req;) {
+          int retLen = 0;
+          if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) {
+            retLen = getRowDataFromSample(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo, &sampleUsePos, fp, sampleDataBuf);
+            if (retLen < 0) {
+              goto free_and_statistics_2;
+            }
+          } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", 8)) {
+            int rand_num = rand_tinyint() % 100;
+            if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) {
+              // deliberately out-of-order timestamp within disorderRange
+              int64_t d = tmp_time - rand() % superTblInfo->disorderRange;
+              retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, d, superTblInfo);
+              //printf("disorder rows, rand_num:%d, last ts:%"PRId64" current ts:%"PRId64"\n", rand_num, tmp_time, d);
+            } else {
+              retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo);
+            }
+            if (retLen < 0) {
+              goto free_and_statistics_2;
+            }
+          }
+          len += retLen;
+          inserted++;
+          k++;
+          totalRowsInserted++;
+
+          // stop when the row quota is met or the buffer is nearly full
+          if (inserted >= superTblInfo->insertRows || (superTblInfo->maxSqlLen - len) < (superTblInfo->lenOfOneRow + 128)) break;
+        }
+
+        if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
+          //printf("===== sql: %s \n\n", buffer);
+          //int64_t t1 = taosGetTimestampMs();
+          int64_t startTs;
+          int64_t endTs;
+          startTs = taosGetTimestampUs();
+
+          int affectedRows = queryDbExec(winfo->taos, buffer, INSERT_TYPE);
+          if (0 > affectedRows){
+            goto free_and_statistics_2;
+          } else {
+            // track per-request latency extremes and totals (microseconds)
+            endTs = taosGetTimestampUs();
+            int64_t delay = endTs - startTs;
+            if (delay > winfo->maxDelay) winfo->maxDelay = delay;
+            if (delay < winfo->minDelay) winfo->minDelay = delay;
+            winfo->cntDelay++;
+            winfo->totalDelay += delay;
+            //winfo->avgDelay = (double)winfo->totalDelay / winfo->cntDelay;
+          }
+          totalAffectedRows += affectedRows;
+
+          // progress line at most every 30 seconds
+          int64_t currentPrintTime = taosGetTimestampMs();
+          if (currentPrintTime - lastPrintTime > 30*1000) {
+            printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", winfo->threadID, totalRowsInserted, totalAffectedRows);
+            lastPrintTime = currentPrintTime;
+          }
+          //int64_t t2 = taosGetTimestampMs();
+          //printf("taosc insert sql return, Spent %.4f seconds \n", (double)(t2 - t1)/1000.0);
+        } else {
+          #ifdef TD_LOWA_CURL
+          //int64_t t1 = taosGetTimestampMs();
+          int retCode = curlProceSql(g_Dbs.host, g_Dbs.port, buffer, winfo->curl_handle);
+          //int64_t t2 = taosGetTimestampMs();
+          //printf("http insert sql return, Spent %ld ms \n", t2 - t1);
+
+          if (0 != retCode) {
+            printf("========curl return fail, threadID[%d]\n", winfo->threadID);
+            goto free_and_statistics_2;
+          }
+          #else
+          printf("========no use http mode for no curl lib!\n");
+          goto free_and_statistics_2;
+          #endif
+        }
+
+        //printf("========tID:%d, k:%d, loop_cnt:%d\n", tID, k, loop_cnt);
+
+        // rate-quota bookkeeping: the last request of a window uses
+        // nrecords_last_req, then the counters reset for the next window
+        if (loop_cnt) {
+          loop_cnt--;
+          if ((1 == loop_cnt) && (0 != nrecords_last_req)) {
+            nrecords_cur_req = nrecords_last_req;
+          } else if (0 == loop_cnt){
+            nrecords_cur_req = nrecords_no_last_req;
+            loop_cnt = loop_cnt_orig;
+            break;
+          }
+        } else {
+          break;
+        }
+      }
+
+      // commit progress once the last table in the range is done
+      if (tID == winfo->end_table_id) {
+        if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) {
+          samplePos = sampleUsePos;
+        }
+        i = inserted;
+        time_counter = tmp_time;
+      }
+    }
+
+    if (superTblInfo->insertRate) {
+      et = taosGetTimestampMs();
+    }
+    //printf("========loop %d childTables duration:%"PRId64 "========inserted rows:%d\n", winfo->end_table_id - winfo->start_table_id, et - st, i);
+  }
+
+  free_and_statistics_2:
+  tmfree(buffer);
+  tmfree(sampleDataBuf);
+  tmfclose(fp);
+
+  winfo->totalRowsInserted = totalRowsInserted;
+  winfo->totalAffectedRows = totalAffectedRows;
+
+  printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", winfo->threadID, totalRowsInserted, totalAffectedRows);
+  return NULL;
+}
+
+void callBack(void *param, TAOS_RES *res, int code) {
+ threadInfo* winfo = (threadInfo*)param;
+
+ if (winfo->superTblInfo->insertRate) {
+ winfo->et = taosGetTimestampMs();
+ if (winfo->et - winfo->st < 1000) {
+ taosMsleep(1000 - (winfo->et - winfo->st)); // ms
+ }
+ }
+
+ char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen);
+ char *data = calloc(1, MAX_DATA_SIZE);
+ char *pstr = buffer;
+ pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, winfo->start_table_id);
+ if (winfo->counter >= winfo->superTblInfo->insertRows) {
+ winfo->start_table_id++;
+ winfo->counter = 0;
+ }
+ if (winfo->start_table_id > winfo->end_table_id) {
+ tsem_post(&winfo->lock_sem);
+ free(buffer);
+ free(data);
+ taos_free_result(res);
+ return;
+ }
+
+ for (int i = 0; i < winfo->nrecords_per_request; i++) {
+ int rand_num = rand() % 100;
+ if (0 != winfo->superTblInfo->disorderRatio && rand_num < winfo->superTblInfo->disorderRatio)
+ {
+ int64_t d = winfo->lastTs - rand() % 1000000 + rand_num;
+ //generateData(data, datatype, ncols_per_record, d, len_of_binary);
+ (void)generateRowData(data, MAX_DATA_SIZE, d, winfo->superTblInfo);
+ } else {
+ //generateData(data, datatype, ncols_per_record, tmp_time += 1000, len_of_binary);
+ (void)generateRowData(data, MAX_DATA_SIZE, winfo->lastTs += 1000, winfo->superTblInfo);
+ }
+ pstr += sprintf(pstr, "%s", data);
+ winfo->counter++;
+
+ if (winfo->counter >= winfo->superTblInfo->insertRows) {
+ break;
+ }
+ }
+
+ if (winfo->superTblInfo->insertRate) {
+ winfo->st = taosGetTimestampMs();
+ }
+ taos_query_a(winfo->taos, buffer, callBack, winfo);
+ free(buffer);
+ free(data);
+
+ taos_free_result(res);
+}
+
+void *asyncWrite(void *sarg) {
+ threadInfo *winfo = (threadInfo *)sarg;
+
+ winfo->nrecords_per_request = 0;
+ //if (AUTO_CREATE_SUBTBL == winfo->superTblInfo->autoCreateTable) {
+ winfo->nrecords_per_request = (winfo->superTblInfo->maxSqlLen - 1280 - winfo->superTblInfo->lenOfTagOfOneRow) / winfo->superTblInfo->lenOfOneRow;
+ //} else {
+ // winfo->nrecords_per_request = (winfo->superTblInfo->maxSqlLen - 1280) / winfo->superTblInfo->lenOfOneRow;
+ //}
+
+ if (0 != winfo->superTblInfo->insertRate) {
+ if (winfo->nrecords_per_request >= winfo->superTblInfo->insertRate) {
+ winfo->nrecords_per_request = winfo->superTblInfo->insertRate;
+ }
+ }
+
+ if (winfo->nrecords_per_request <= 0) {
+ winfo->nrecords_per_request = 1;
+ }
+
+ if (winfo->nrecords_per_request >= INT16_MAX) {
+ winfo->nrecords_per_request = INT16_MAX - 1;
+ }
+
+ if (winfo->nrecords_per_request >= INT16_MAX) {
+ winfo->nrecords_per_request = INT16_MAX - 1;
+ }
+
+ winfo->st = 0;
+ winfo->et = 0;
+ winfo->lastTs = winfo->start_time;
+
+ if (winfo->superTblInfo->insertRate) {
+ winfo->st = taosGetTimestampMs();
+ }
+ taos_query_a(winfo->taos, "show databases", callBack, winfo);
+
+ tsem_wait(&(winfo->lock_sem));
+
+ return NULL;
+}
+
+void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSuperTable* superTblInfo) {
+ pthread_t *pids = malloc(threads * sizeof(pthread_t));
+ threadInfo *infos = malloc(threads * sizeof(threadInfo));
+ memset(pids, 0, threads * sizeof(pthread_t));
+ memset(infos, 0, threads * sizeof(threadInfo));
+ int ntables = superTblInfo->childTblCount;
+
+ int a = ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
+
+ int b = 0;
+ if (threads != 0) {
+ b = ntables % threads;
+ }
+
+ //TAOS* taos;
+ //if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
+ // taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port);
+ // if (NULL == taos) {
+ // printf("connect to server fail, reason: %s\n", taos_errstr(NULL));
+ // exit(-1);
+ // }
+ //}
+
+ int32_t timePrec = TSDB_TIME_PRECISION_MILLI;
+ if (0 != precision[0]) {
+ if (0 == strncasecmp(precision, "ms", 2)) {
+ timePrec = TSDB_TIME_PRECISION_MILLI;
+ } else if (0 == strncasecmp(precision, "us", 2)) {
+ timePrec = TSDB_TIME_PRECISION_MICRO;
+ } else {
+ printf("No support precision: %s\n", precision);
+ exit(-1);
+ }
+ }
+
+ int64_t start_time;
+ if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
+ start_time = taosGetTimestamp(timePrec);
+ } else {
+ (void)taosParseTime(superTblInfo->startTimestamp, &start_time, strlen(superTblInfo->startTimestamp), timePrec, 0);
+ }
+
+ double start = getCurrentTime();
+
+ int last = 0;
+ for (int i = 0; i < threads; i++) {
+ threadInfo *t_info = infos + i;
+ t_info->threadID = i;
+ tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
+ t_info->superTblInfo = superTblInfo;
+
+ t_info->start_time = start_time;
+ t_info->minDelay = INT16_MAX;
+
+ if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
+ //t_info->taos = taos;
+ t_info->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port);
+ if (NULL == t_info->taos) {
+ printf("connect to server fail from insert sub thread, reason: %s\n", taos_errstr(NULL));
+ exit(-1);
+ }
+ } else {
+ t_info->taos = NULL;
+ #ifdef TD_LOWA_CURL
+ t_info->curl_handle = curl_easy_init();
+ #endif
+ }
+
+ if (0 == superTblInfo->multiThreadWriteOneTbl) {
+ t_info->start_table_id = last;
+ t_info->end_table_id = i < b ? last + a : last + a - 1;
+ last = t_info->end_table_id + 1;
+ } else {
+ t_info->start_table_id = 0;
+ t_info->end_table_id = superTblInfo->childTblCount - 1;
+ t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint();
+ }
+
+ tsem_init(&(t_info->lock_sem), 0, 0);
+
+ if (SYNC == g_Dbs.queryMode) {
+ pthread_create(pids + i, NULL, syncWrite, t_info);
+ } else {
+ pthread_create(pids + i, NULL, asyncWrite, t_info);
+ }
+ }
+
+ for (int i = 0; i < threads; i++) {
+ pthread_join(pids[i], NULL);
+ }
int64_t totalDelay = 0;
int64_t maxDelay = 0;
int64_t minDelay = INT16_MAX;
int64_t cntDelay = 0;
double avgDelay = 0;
+
for (int i = 0; i < threads; i++) {
- info *t_info = infos + i;
- taos_close(t_info->taos);
- tsem_destroy(&(t_info->mutex_sem));
+ threadInfo *t_info = infos + i;
+
tsem_destroy(&(t_info->lock_sem));
+ taos_close(t_info->taos);
+
+ superTblInfo->totalAffectedRows += t_info->totalAffectedRows;
+ superTblInfo->totalRowsInserted += t_info->totalRowsInserted;
totalDelay += t_info->totalDelay;
cntDelay += t_info->cntDelay;
if (t_info->maxDelay > maxDelay) maxDelay = t_info->maxDelay;
if (t_info->minDelay < minDelay) minDelay = t_info->minDelay;
+ #ifdef TD_LOWA_CURL
+ if (t_info->curl_handle) {
+ curl_easy_cleanup(t_info->curl_handle);
+ }
+ #endif
}
+
avgDelay = (double)totalDelay / cntDelay;
- fprintf(fp, "insert delay, avg:%10.6fms, max: %10.6fms, min: %10.6fms\n\n",
- avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0);
+ double end = getCurrentTime();
+ printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s\n\n",
+ end - start, superTblInfo->totalRowsInserted, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName);
+ fprintf(g_fpOfInsertResult, "Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s\n\n",
+ end - start, superTblInfo->totalRowsInserted, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName);
+
printf("insert delay, avg: %10.6fms, max: %10.6fms, min: %10.6fms\n\n",
avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0);
+ fprintf(g_fpOfInsertResult, "insert delay, avg:%10.6fms, max: %10.6fms, min: %10.6fms\n\n",
+ avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0);
+
+
+ //taos_close(taos);
free(pids);
- free(infos);
- fclose(fp);
+ free(infos);
- if (method_of_delete != 0)
- {
- TAOS *dtaos = taos_connect(ip_addr, user, pass, db_name, port);
- double dts = getCurrentTime();
- printf("Deleteing %d table(s)......\n", ntables);
+}
- switch (method_of_delete)
- {
- case 1:
- // delete by table
- /* Create all the tables; */
- for (int i = 0; i < ntables; i++) {
- sprintf(command, "drop table %s.%s%d;", db_name, tb_prefix, i);
- queryDB(dtaos, command);
+
+void *readTable(void *sarg) {
+#if 1
+ threadInfo *rinfo = (threadInfo *)sarg;
+ TAOS *taos = rinfo->taos;
+ char command[BUFFER_SIZE] = "\0";
+ int64_t sTime = rinfo->start_time;
+ char *tb_prefix = rinfo->tb_prefix;
+ FILE *fp = fopen(rinfo->fp, "a");
+ if (NULL == fp) {
+ printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
+ return NULL;
+ }
+
+ int num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table;
+ int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1;
+ int totalData = num_of_DPT * num_of_tables;
+ bool do_aggreFunc = g_Dbs.do_aggreFunc;
+
+ int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2;
+ if (!do_aggreFunc) {
+ printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
+ }
+ printf("%d records:\n", totalData);
+ fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n");
+
+ for (int j = 0; j < n; j++) {
+ double totalT = 0;
+ int count = 0;
+ for (int i = 0; i < num_of_tables; i++) {
+ sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime);
+
+ double t = getCurrentTime();
+ TAOS_RES *pSql = taos_query(taos, command);
+ int32_t code = taos_errno(pSql);
+
+ if (code != 0) {
+ fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
+ taos_free_result(pSql);
+ taos_close(taos);
+ return NULL;
}
- break;
- case 2:
- // delete by stable
- if (!use_metric) {
- break;
+
+ while (taos_fetch_row(pSql) != NULL) {
+ count++;
}
- else
- {
- sprintf(command, "drop table %s.meters;", db_name);
- queryDB(dtaos, command);
- }
- break;
- case 3:
- // delete by database
- sprintf(command, "drop database %s;", db_name);
- queryDB(dtaos, command);
- break;
- default:
- break;
+
+ t = getCurrentTime() - t;
+ totalT += t;
+
+ taos_free_result(pSql);
}
- printf("Table(s) droped!\n");
- taos_close(dtaos);
+ fprintf(fp, "|%10s | %10d | %12.2f | %10.2f |\n",
+ aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData,
+ (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000);
+ printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT);
+ }
+ fprintf(fp, "\n");
+ fclose(fp);
+#endif
+ return NULL;
+}
- double dt = getCurrentTime() - dts;
- printf("Spent %.4f seconds to drop %d tables\n", dt, ntables);
+void *readMetric(void *sarg) {
+#if 1
+ threadInfo *rinfo = (threadInfo *)sarg;
+ TAOS *taos = rinfo->taos;
+ char command[BUFFER_SIZE] = "\0";
+ FILE *fp = fopen(rinfo->fp, "a");
+ if (NULL == fp) {
+ printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
+ return NULL;
+ }
- FILE *fp = fopen(arguments.output_file, "a");
- fprintf(fp, "Spent %.4f seconds to drop %d tables\n", dt, ntables);
- fclose(fp);
+ int num_of_DPT = rinfo->superTblInfo->insertRows;
+ int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1;
+ int totalData = num_of_DPT * num_of_tables;
+ bool do_aggreFunc = g_Dbs.do_aggreFunc;
+ int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2;
+ if (!do_aggreFunc) {
+ printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
+ }
+ printf("%d records:\n", totalData);
+ fprintf(fp, "Querying On %d records:\n", totalData);
+
+ for (int j = 0; j < n; j++) {
+ char condition[BUFFER_SIZE - 30] = "\0";
+ char tempS[64] = "\0";
+
+ int m = 10 < num_of_tables ? 10 : num_of_tables;
+
+ for (int i = 1; i <= m; i++) {
+ if (i == 1) {
+ sprintf(tempS, "t1 = %d", i);
+ } else {
+ sprintf(tempS, " or t1 = %d ", i);
+ }
+ strcat(condition, tempS);
+
+ sprintf(command, "select %s from meters where %s", aggreFunc[j], condition);
+
+ printf("Where condition: %s\n", condition);
+ fprintf(fp, "%s\n", command);
+
+ double t = getCurrentTime();
+
+ TAOS_RES *pSql = taos_query(taos, command);
+ int32_t code = taos_errno(pSql);
+
+ if (code != 0) {
+ fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
+ taos_free_result(pSql);
+ taos_close(taos);
+ return NULL;
+ }
+ int count = 0;
+ while (taos_fetch_row(pSql) != NULL) {
+ count++;
+ }
+ t = getCurrentTime() - t;
+
+ fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", num_of_tables * num_of_DPT / t, t * 1000);
+ printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t);
+
+ taos_free_result(pSql);
+ }
+ fprintf(fp, "\n");
+ }
+ fclose(fp);
+#endif
+ return NULL;
+}
+
+
+int insertTestProcess() {
+
+ g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a");
+ if (NULL == g_fpOfInsertResult) {
+ fprintf(stderr, "Failed to open %s for save result\n", g_Dbs.resultFile);
+ return 1;
+ };
+
+ setupForAnsiEscape();
+ int ret = printfInsertMeta();
+ resetAfterAnsiEscape();
+ if (ret == -1)
+ exit(EXIT_FAILURE);
+
+ printfInsertMetaToFile(g_fpOfInsertResult);
+
+ if (!g_args.answer_yes) {
+ printf("Press enter key to continue\n\n");
+ (void)getchar();
}
+ init_rand_data();
- if (false == insert_only) {
+ // create database and super tables
+ (void)createDatabases();
+
+ // pretreatement
+ prePareSampleData();
+
+ double start;
+ double end;
+
+ // create child tables
+ start = getCurrentTime();
+ createChildTables();
+ end = getCurrentTime();
+ if (g_totalChildTables > 0) {
+ printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n", end - start, g_totalChildTables, g_Dbs.threadCount);
+ fprintf(g_fpOfInsertResult, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n", end - start, g_totalChildTables, g_Dbs.threadCount);
+ }
+
+ taosMsleep(1000);
+
+ // create sub threads for inserting data
+ //start = getCurrentTime();
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j];
+ startMultiThreadInsertData(g_Dbs.threadCount, g_Dbs.db[i].dbName, g_Dbs.db[i].dbCfg.precision, superTblInfo);
+ }
+ }
+ //end = getCurrentTime();
+
+ //int64_t totalRowsInserted = 0;
+ //int64_t totalAffectedRows = 0;
+ //for (int i = 0; i < g_Dbs.dbCount; i++) {
+ // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ // totalRowsInserted += g_Dbs.db[i].superTbls[j].totalRowsInserted;
+ // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows;
+ //}
+ //printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s)\n\n", end - start, totalRowsInserted, totalAffectedRows, g_Dbs.threadCount);
+ if (NULL == g_args.metaFile && false == g_Dbs.insert_only) {
// query data
pthread_t read_id;
- info *rInfo = malloc(sizeof(info));
- rInfo->start_time = 1500000000000;
+ threadInfo *rInfo = malloc(sizeof(threadInfo));
+ rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
rInfo->start_table_id = 0;
- rInfo->end_table_id = ntables - 1;
- rInfo->do_aggreFunc = do_aggreFunc;
- rInfo->nrecords_per_table = nrecords_per_table;
- rInfo->taos = taos_connect(ip_addr, user, pass, db_name, port);
- strcpy(rInfo->tb_prefix, tb_prefix);
- strcpy(rInfo->fp, arguments.output_file);
+ rInfo->end_table_id = g_Dbs.db[0].superTbls[0].childTblCount - 1;
+ //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
+ //rInfo->nrecords_per_table = g_Dbs.db[0].superTbls[0].insertRows;
+ rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
+ rInfo->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, g_Dbs.db[0].dbName, g_Dbs.port);
+ strcpy(rInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix);
+ strcpy(rInfo->fp, g_Dbs.resultFile);
- if (!use_metric) {
+ if (!g_Dbs.use_metric) {
pthread_create(&read_id, NULL, readTable, rInfo);
} else {
pthread_create(&read_id, NULL, readMetric, rInfo);
}
pthread_join(read_id, NULL);
taos_close(rInfo->taos);
- free(rInfo);
}
- taos_cleanup();
+ postFreeResource();
+
return 0;
}
-#define MAX_SQL_SIZE 65536
-void selectSql(TAOS* taos, char* sqlcmd)
-{
- TAOS_RES *pSql = taos_query(taos, sqlcmd);
- int32_t code = taos_errno(pSql);
+void *superQueryProcess(void *sarg) {
+ threadInfo *winfo = (threadInfo *)sarg;
+
+ //char sqlStr[MAX_TB_NAME_SIZE*2];
+ //sprintf(sqlStr, "use %s", g_queryInfo.dbName);
+ //queryDB(winfo->taos, sqlStr);
- if (code != 0) {
- printf("Failed to sqlcmd:%s, reason:%s\n", sqlcmd, taos_errstr(pSql));
- taos_free_result(pSql);
- exit(1);
+ int64_t st = 0;
+ int64_t et = 0;
+ while (1) {
+ if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) {
+ taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms
+ //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
+ }
+
+ st = taosGetTimestampMs();
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
+ int64_t t1 = taosGetTimestampUs();
+ char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
+ if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
+ sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID);
+ }
+ selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], tmpFile);
+ int64_t t2 = taosGetTimestampUs();
+ printf("=[taosc] thread[%"PRIu64"] complete one sql, Spent %f s\n", taosGetSelfPthreadId(), (t2 - t1)/1000000.0);
+ } else {
+ #ifdef TD_LOWA_CURL
+ int64_t t1 = taosGetTimestampUs();
+ int retCode = curlProceSql(g_queryInfo.host, g_queryInfo.port, g_queryInfo.superQueryInfo.sql[i], winfo->curl_handle);
+ int64_t t2 = taosGetTimestampUs();
+ printf("=[restful] thread[%"PRIu64"] complete one sql, Spent %f s\n", taosGetSelfPthreadId(), (t2 - t1)/1000000.0);
+
+ if (0 != retCode) {
+ printf("====curl return fail, threadID[%d]\n", winfo->threadID);
+ return NULL;
+ }
+ #endif
+ }
+ }
+ et = taosGetTimestampMs();
+ printf("==thread[%"PRIu64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
}
-
- int count = 0;
- while (taos_fetch_row(pSql) != NULL) {
- count++;
- }
-
- taos_free_result(pSql);
- return;
+ return NULL;
}
+void replaceSubTblName(char* inSql, char* outSql, int tblIndex) {
+ char sourceString[32] = "xxxx";
+ char subTblName[MAX_TB_NAME_SIZE*3];
+ sprintf(subTblName, "%s.%s", g_queryInfo.dbName, g_queryInfo.subQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN);
+
+ //printf("inSql: %s\n", inSql);
+
+ char* pos = strstr(inSql, sourceString);
+ if (0 == pos) {
+ return;
+ }
+
+ strncpy(outSql, inSql, pos - inSql);
+ //printf("1: %s\n", outSql);
+ strcat(outSql, subTblName);
+ //printf("2: %s\n", outSql);
+ strcat(outSql, pos+strlen(sourceString));
+ //printf("3: %s\n", outSql);
+}
+
+void *subQueryProcess(void *sarg) {
+ char sqlstr[1024];
+ threadInfo *winfo = (threadInfo *)sarg;
+ int64_t st = 0;
+ int64_t et = g_queryInfo.subQueryInfo.rate*1000;
+ while (1) {
+ if (g_queryInfo.subQueryInfo.rate && (et - st) < g_queryInfo.subQueryInfo.rate*1000) {
+ taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms
+ //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
+ }
+
+ st = taosGetTimestampMs();
+ for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
+ for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) {
+ memset(sqlstr,0,sizeof(sqlstr));
+ replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], sqlstr, i);
+ char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
+ if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
+ sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID);
+ }
+ selectAndGetResult(winfo->taos, sqlstr, tmpFile);
+ }
+ }
+ et = taosGetTimestampMs();
+ printf("####thread[%"PRIu64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", taosGetSelfPthreadId(), winfo->start_table_id, winfo->end_table_id, (double)(et - st)/1000.0);
+ }
+ return NULL;
+}
+
+int queryTestProcess() {
+ TAOS * taos = NULL;
+ taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, NULL, g_queryInfo.port);
+ if (taos == NULL) {
+ fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
+ exit(-1);
+ }
+
+ if (0 != g_queryInfo.subQueryInfo.sqlCount) {
+ (void)getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, g_queryInfo.subQueryInfo.sTblName, &g_queryInfo.subQueryInfo.childTblName, &g_queryInfo.subQueryInfo.childTblCount);
+ }
+
+ printfQueryMeta();
+
+ if (!g_args.answer_yes) {
+ printf("Press enter key to continue\n\n");
+ (void)getchar();
+ }
+
+ printfQuerySystemInfo(taos);
+
+ pthread_t *pids = NULL;
+ threadInfo *infos = NULL;
+ //==== create sub threads for query from specify table
+ if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) {
+
+ pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t));
+ infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo));
+ if ((NULL == pids) || (NULL == infos)) {
+ printf("malloc failed for create threads\n");
+ taos_close(taos);
+ exit(-1);
+ }
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) {
+ threadInfo *t_info = infos + i;
+ t_info->threadID = i;
+
+ if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
+ t_info->taos = taos;
+
+ char sqlStr[MAX_TB_NAME_SIZE*2];
+ sprintf(sqlStr, "use %s", g_queryInfo.dbName);
+ (void)queryDbExec(t_info->taos, sqlStr, NO_INSERT_TYPE);
+ } else {
+ t_info->taos = NULL;
+ #ifdef TD_LOWA_CURL
+ t_info->curl_handle = curl_easy_init();
+ #endif
+ }
+
+ pthread_create(pids + i, NULL, superQueryProcess, t_info);
+ }
+ }else {
+ g_queryInfo.superQueryInfo.concurrent = 0;
+ }
+
+ pthread_t *pidsOfSub = NULL;
+ threadInfo *infosOfSub = NULL;
+ //==== create sub threads for query from all sub table of the super table
+ if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) {
+ pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t));
+ infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo));
+ if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
+ printf("malloc failed for create threads\n");
+ taos_close(taos);
+ exit(-1);
+ }
+
+ int ntables = g_queryInfo.subQueryInfo.childTblCount;
+ int threads = g_queryInfo.subQueryInfo.threadCnt;
+
+ int a = ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
+
+ int b = 0;
+ if (threads != 0) {
+ b = ntables % threads;
+ }
+
+ int last = 0;
+ for (int i = 0; i < threads; i++) {
+ threadInfo *t_info = infosOfSub + i;
+ t_info->threadID = i;
+
+ t_info->start_table_id = last;
+ t_info->end_table_id = i < b ? last + a : last + a - 1;
+ last = t_info->end_table_id + 1;
+ t_info->taos = taos;
+ pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info);
+ }
+
+ g_queryInfo.subQueryInfo.threadCnt = threads;
+ }else {
+ g_queryInfo.subQueryInfo.threadCnt = 0;
+ }
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) {
+ pthread_join(pids[i], NULL);
+ }
+
+ tmfree((char*)pids);
+ tmfree((char*)infos);
+
+ for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) {
+ pthread_join(pidsOfSub[i], NULL);
+ }
+
+ tmfree((char*)pidsOfSub);
+ tmfree((char*)infosOfSub);
+
+ taos_close(taos);
+ return 0;
+}
+
+static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
+ if (res == NULL || taos_errno(res) != 0) {
+ printf("failed to subscribe result, code:%d, reason:%s\n", code, taos_errstr(res));
+ return;
+ }
+
+ getResult(res, (char*)param);
+ taos_free_result(res);
+}
+
+static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) {
+ TAOS_SUB* tsub = NULL;
+
+ if (g_queryInfo.superQueryInfo.subscribeMode) {
+ tsub = taos_subscribe(taos, g_queryInfo.superQueryInfo.subscribeRestart, topic, sql, subscribe_callback, (void*)resultFileName, g_queryInfo.superQueryInfo.subscribeInterval);
+ } else {
+ tsub = taos_subscribe(taos, g_queryInfo.superQueryInfo.subscribeRestart, topic, sql, NULL, NULL, 0);
+ }
+
+ if (tsub == NULL) {
+ printf("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
+ return NULL;
+ }
+
+ return tsub;
+}
+
+void *subSubscribeProcess(void *sarg) {
+ threadInfo *winfo = (threadInfo *)sarg;
+ char subSqlstr[1024];
+
+ char sqlStr[MAX_TB_NAME_SIZE*2];
+ sprintf(sqlStr, "use %s", g_queryInfo.dbName);
+ if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)){
+ return NULL;
+ }
+
+ //int64_t st = 0;
+ //int64_t et = 0;
+ do {
+ //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) {
+ // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms
+ // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
+ //}
+
+ //st = taosGetTimestampMs();
+ char topic[32] = {0};
+ for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) {
+ sprintf(topic, "taosdemo-subscribe-%d", i);
+ memset(subSqlstr,0,sizeof(subSqlstr));
+ replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], subSqlstr, i);
+ char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
+ if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
+ sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID);
+ }
+ g_queryInfo.subQueryInfo.tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile);
+ if (NULL == g_queryInfo.subQueryInfo.tsub[i]) {
+ return NULL;
+ }
+ }
+ //et = taosGetTimestampMs();
+ //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
+ } while (0);
+
+ // start loop to consume result
+ TAOS_RES* res = NULL;
+ while (1) {
+ for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) {
+ if (1 == g_queryInfo.subQueryInfo.subscribeMode) {
+ continue;
+ }
+
+ res = taos_consume(g_queryInfo.subQueryInfo.tsub[i]);
+ if (res) {
+ char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
+ if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
+ sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID);
+ }
+ getResult(res, tmpFile);
+ }
+ }
+ }
+ taos_free_result(res);
+
+ for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) {
+ taos_unsubscribe(g_queryInfo.subQueryInfo.tsub[i], g_queryInfo.subQueryInfo.subscribeKeepProgress);
+ }
+ return NULL;
+}
+
+void *superSubscribeProcess(void *sarg) {
+ threadInfo *winfo = (threadInfo *)sarg;
+
+ char sqlStr[MAX_TB_NAME_SIZE*2];
+ sprintf(sqlStr, "use %s", g_queryInfo.dbName);
+ if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)) {
+ return NULL;
+ }
+
+ //int64_t st = 0;
+ //int64_t et = 0;
+ do {
+ //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) {
+ // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms
+ // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
+ //}
+
+ //st = taosGetTimestampMs();
+ char topic[32] = {0};
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ sprintf(topic, "taosdemo-subscribe-%d", i);
+ char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
+ if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
+ sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID);
+ }
+ g_queryInfo.superQueryInfo.tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.superQueryInfo.sql[i], topic, tmpFile);
+ if (NULL == g_queryInfo.superQueryInfo.tsub[i]) {
+ return NULL;
+ }
+ }
+ //et = taosGetTimestampMs();
+ //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
+ } while (0);
+
+ // start loop to consume result
+ TAOS_RES* res = NULL;
+ while (1) {
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ if (1 == g_queryInfo.superQueryInfo.subscribeMode) {
+ continue;
+ }
+
+ res = taos_consume(g_queryInfo.superQueryInfo.tsub[i]);
+ if (res) {
+ char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
+ if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
+ sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID);
+ }
+ getResult(res, tmpFile);
+ }
+ }
+ }
+ taos_free_result(res);
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ taos_unsubscribe(g_queryInfo.superQueryInfo.tsub[i], g_queryInfo.superQueryInfo.subscribeKeepProgress);
+ }
+ return NULL;
+}
+
+int subscribeTestProcess() {
+ printfQueryMeta();
+
+ if (!g_args.answer_yes) {
+ printf("Press enter key to continue\n\n");
+ (void)getchar();
+ }
+
+ TAOS * taos = NULL;
+ taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, g_queryInfo.dbName, g_queryInfo.port);
+ if (taos == NULL) {
+ fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
+ exit(-1);
+ }
+
+ if (0 != g_queryInfo.subQueryInfo.sqlCount) {
+ (void)getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, g_queryInfo.subQueryInfo.sTblName, &g_queryInfo.subQueryInfo.childTblName, &g_queryInfo.subQueryInfo.childTblCount);
+ }
+
+
+ pthread_t *pids = NULL;
+ threadInfo *infos = NULL;
+ //==== create sub threads for query from super table
+ if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) {
+ pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t));
+ infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo));
+ if ((NULL == pids) || (NULL == infos)) {
+ printf("malloc failed for create threads\n");
+ taos_close(taos);
+ exit(-1);
+ }
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) {
+ threadInfo *t_info = infos + i;
+ t_info->threadID = i;
+ t_info->taos = taos;
+ pthread_create(pids + i, NULL, superSubscribeProcess, t_info);
+ }
+ }
+
+ //==== create sub threads for query from sub table
+ pthread_t *pidsOfSub = NULL;
+ threadInfo *infosOfSub = NULL;
+ if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) {
+ pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t));
+ infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo));
+ if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
+ printf("malloc failed for create threads\n");
+ taos_close(taos);
+ exit(-1);
+ }
+
+ int ntables = g_queryInfo.subQueryInfo.childTblCount;
+ int threads = g_queryInfo.subQueryInfo.threadCnt;
+
+ int a = ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
+
+ int b = 0;
+ if (threads != 0) {
+ b = ntables % threads;
+ }
+
+ int last = 0;
+ for (int i = 0; i < threads; i++) {
+ threadInfo *t_info = infosOfSub + i;
+ t_info->threadID = i;
+
+ t_info->start_table_id = last;
+ t_info->end_table_id = i < b ? last + a : last + a - 1;
+ t_info->taos = taos;
+ pthread_create(pidsOfSub + i, NULL, subSubscribeProcess, t_info);
+ }
+ g_queryInfo.subQueryInfo.threadCnt = threads;
+ }
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) {
+ pthread_join(pids[i], NULL);
+ }
+
+ tmfree((char*)pids);
+ tmfree((char*)infos);
+
+ for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) {
+ pthread_join(pidsOfSub[i], NULL);
+ }
+
+ tmfree((char*)pidsOfSub);
+ tmfree((char*)infosOfSub);
+ taos_close(taos);
+ return 0;
+}
+
+void initOfInsertMeta() {
+ memset(&g_Dbs, 0, sizeof(SDbs));
+
+ // set default values
+ strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
+ g_Dbs.port = 6030;
+ strncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE);
+ strncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE);
+ g_Dbs.threadCount = 2;
+ g_Dbs.use_metric = true;
+}
+
+void initOfQueryMeta() {
+ memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo));
+
+ // set default values
+ strncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE);
+ g_queryInfo.port = 6030;
+ strncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE);
+ strncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE);
+}
+
+void setParaFromArg(){
+ if (g_args.host) {
+ strcpy(g_Dbs.host, g_args.host);
+ } else {
+ strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
+ }
+
+ if (g_args.user) {
+ strcpy(g_Dbs.user, g_args.user);
+ }
+
+ if (g_args.password) {
+ strcpy(g_Dbs.password, g_args.password);
+ }
+
+ if (g_args.port) {
+ g_Dbs.port = g_args.port;
+ }
+
+ g_Dbs.dbCount = 1;
+ g_Dbs.db[0].drop = 1;
+
+ strncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE);
+ g_Dbs.db[0].dbCfg.replica = g_args.replica;
+ strncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
+
+
+ strncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);
+
+ g_Dbs.use_metric = g_args.use_metric;
+ g_Dbs.insert_only = g_args.insert_only;
+
+ g_Dbs.db[0].superTblCount = 1;
+ strncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
+ g_Dbs.threadCount = g_args.num_of_threads;
+ g_Dbs.threadCountByCreateTbl = 1;
+ g_Dbs.queryMode = g_args.mode;
+
+ g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL;
+ g_Dbs.db[0].superTbls[0].superTblExists = TBL_NO_EXISTS;
+ g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS;
+ g_Dbs.db[0].superTbls[0].insertRate = 0;
+ g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange;
+ g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio;
+ strncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
+ strncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
+ strncpy(g_Dbs.db[0].superTbls[0].insertMode, "taosc", MAX_TB_NAME_SIZE);
+ strncpy(g_Dbs.db[0].superTbls[0].startTimestamp, "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].timeStampStep = 10;
+
+ // g_args.num_of_RPR;
+ g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT;
+ g_Dbs.db[0].superTbls[0].maxSqlLen = TSDB_PAYLOAD_SIZE;
+
+ g_Dbs.do_aggreFunc = true;
+
+ char dataString[STRING_LEN];
+ char **data_type = g_args.datatype;
+
+ memset(dataString, 0, STRING_LEN);
+
+ if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0 || strcasecmp(data_type[0], "NCHAR") == 0 ) {
+ g_Dbs.do_aggreFunc = false;
+ }
+
+ g_Dbs.db[0].superTbls[0].columnCount = 0;
+ for (int i = 0; i < MAX_NUM_DATATYPE; i++) {
+ if (data_type[i] == NULL) {
+ break;
+ }
+
+ strncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, data_type[i], MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary;
+ g_Dbs.db[0].superTbls[0].columnCount++;
+ }
+
+ if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) {
+ g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR;
+ } else {
+ for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) {
+ strncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
+ g_Dbs.db[0].superTbls[0].columnCount++;
+ }
+ }
+
+ if (g_Dbs.use_metric) {
+ strncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;
+
+ strncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
+ g_Dbs.db[0].superTbls[0].tagCount = 2;
+ } else {
+ g_Dbs.db[0].superTbls[0].tagCount = 0;
+ }
+}
/* Function to do regular expression check */
static int regexMatch(const char *s, const char *reg, int cflags) {
@@ -996,7 +5130,7 @@ void querySqlFile(TAOS* taos, char* sqlFile)
FILE *fp = fopen(sqlFile, "r");
if (fp == NULL) {
printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno));
- exit(-1);
+ return;
}
int read_len = 0;
@@ -1023,7 +5157,7 @@ void querySqlFile(TAOS* taos, char* sqlFile)
}
memcpy(cmd + cmd_len, line, read_len);
- selectSql(taos, cmd);
+ queryDbExec(taos, cmd, NO_INSERT_TYPE);
memset(cmd, 0, MAX_SQL_SIZE);
cmd_len = 0;
}
@@ -1031,550 +5165,79 @@ void querySqlFile(TAOS* taos, char* sqlFile)
t = getCurrentTime() - t;
printf("run %s took %.6f second(s)\n\n", sqlFile, t);
- free(cmd);
- if (line) free(line);
- fclose(fp);
+ tmfree(cmd);
+ tmfree(line);
+ tmfclose(fp);
return;
}
-void * createTable(void *sarg)
-{
- char command[BUFFER_SIZE] = "\0";
-
- info *winfo = (info *)sarg;
+int main(int argc, char *argv[]) {
+ parse_args(argc, argv, &g_args);
- if (!winfo->use_metric) {
- /* Create all the tables; */
- printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
- for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s);", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
- queryDB(winfo->taos, command);
+ if (g_args.metaFile) {
+ initOfInsertMeta();
+ initOfQueryMeta();
+ if (false == getInfoFromJsonFile(g_args.metaFile)) {
+ printf("Failed to read %s\n", g_args.metaFile);
+ return 1;
}
- } else {
- /* Create all the tables; */
- printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
- for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
- int j;
- if (i % 10 == 0) {
- j = 10;
- } else {
- j = i % 10;
- }
- if (j % 2 == 0) {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "shanghai");
- } else {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "beijing");
+ if (INSERT_MODE == g_jsonType) {
+ if (g_Dbs.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir);
+ (void)insertTestProcess();
+ } else if (QUERY_MODE == g_jsonType) {
+ if (g_queryInfo.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
+ (void)queryTestProcess();
+ } else if (SUBSCRIBE_MODE == g_jsonType) {
+ if (g_queryInfo.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
+ (void)subscribeTestProcess();
+ } else {
+ ;
}
- queryDB(winfo->taos, command);
- }
- }
+ } else {
+
+ memset(&g_Dbs, 0, sizeof(SDbs));
+ g_jsonType = INSERT_MODE;
+ setParaFromArg();
- return NULL;
-}
-
-void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass) {
- double ts = getCurrentTime();
- printf("create table......\n");
- pthread_t *pids = malloc(threads * sizeof(pthread_t));
- info *infos = malloc(threads * sizeof(info));
-
- int a = ntables / threads;
- if (a < 1) {
- threads = ntables;
- a = 1;
- }
-
- int b = 0;
- if (threads != 0)
- b = ntables % threads;
- int last = 0;
- for (int i = 0; i < threads; i++) {
- info *t_info = infos + i;
- t_info->threadID = i;
- tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
- tstrncpy(t_info->tb_prefix, tb_prefix, MAX_TB_NAME_SIZE);
- t_info->taos = taos_connect(ip_addr, user, pass, db_name, port);
- t_info->start_table_id = last;
- t_info->end_table_id = i < b ? last + a : last + a - 1;
- last = t_info->end_table_id + 1;
- t_info->use_metric = use_metric;
- t_info->cols = cols;
- pthread_create(pids + i, NULL, createTable, t_info);
- }
-
- for (int i = 0; i < threads; i++) {
- pthread_join(pids[i], NULL);
- }
-
- double t = getCurrentTime() - ts;
- printf("Spent %.4f seconds to create %d tables with %d connections\n", t, ntables, threads);
-
- for (int i = 0; i < threads; i++) {
- info *t_info = infos + i;
- tsem_destroy(&(t_info->mutex_sem));
- tsem_destroy(&(t_info->lock_sem));
- }
-
- free(pids);
- free(infos);
-
- return ;
-}
-
-void *readTable(void *sarg) {
- info *rinfo = (info *)sarg;
- TAOS *taos = rinfo->taos;
- char command[BUFFER_SIZE] = "\0";
- int64_t sTime = rinfo->start_time;
- char *tb_prefix = rinfo->tb_prefix;
- FILE *fp = fopen(rinfo->fp, "a");
- if (NULL == fp) {
- printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
- return NULL;
- }
-
- int num_of_DPT = rinfo->nrecords_per_table;
- int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1;
- int totalData = num_of_DPT * num_of_tables;
- bool do_aggreFunc = rinfo->do_aggreFunc;
-
- int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2;
- if (!do_aggreFunc) {
- printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
- }
- printf("%d records:\n", totalData);
- fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n");
-
- for (int j = 0; j < n; j++) {
- double totalT = 0;
- int count = 0;
- for (int i = 0; i < num_of_tables; i++) {
- sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime);
-
- double t = getCurrentTime();
- TAOS_RES *pSql = taos_query(taos, command);
- int32_t code = taos_errno(pSql);
-
- if (code != 0) {
- fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
- taos_free_result(pSql);
- taos_close(taos);
- exit(EXIT_FAILURE);
- }
-
- while (taos_fetch_row(pSql) != NULL) {
- count++;
- }
-
- t = getCurrentTime() - t;
- totalT += t;
-
- taos_free_result(pSql);
- }
-
- fprintf(fp, "|%10s | %10d | %12.2f | %10.2f |\n",
- aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData,
- (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000);
- printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT);
- }
- fprintf(fp, "\n");
-
- fclose(fp);
- return NULL;
-}
-
-void *readMetric(void *sarg) {
- info *rinfo = (info *)sarg;
- TAOS *taos = rinfo->taos;
- char command[BUFFER_SIZE] = "\0";
- FILE *fp = fopen(rinfo->fp, "a");
- if (NULL == fp) {
- printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
- return NULL;
- }
-
- int num_of_DPT = rinfo->nrecords_per_table;
- int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1;
- int totalData = num_of_DPT * num_of_tables;
- bool do_aggreFunc = rinfo->do_aggreFunc;
-
- int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2;
- if (!do_aggreFunc) {
- printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
- }
- printf("%d records:\n", totalData);
- fprintf(fp, "Querying On %d records:\n", totalData);
-
- for (int j = 0; j < n; j++) {
- char condition[BUFFER_SIZE - 30] = "\0";
- char tempS[64] = "\0";
-
- int m = 10 < num_of_tables ? 10 : num_of_tables;
-
- for (int i = 1; i <= m; i++) {
- if (i == 1) {
- sprintf(tempS, "areaid = %d", i);
- } else {
- sprintf(tempS, " or areaid = %d ", i);
- }
- strcat(condition, tempS);
-
- sprintf(command, "select %s from meters where %s", aggreFunc[j], condition);
-
- printf("Where condition: %s\n", condition);
- fprintf(fp, "%s\n", command);
-
- double t = getCurrentTime();
-
- TAOS_RES *pSql = taos_query(taos, command);
- int32_t code = taos_errno(pSql);
-
- if (code != 0) {
- fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
- taos_free_result(pSql);
- taos_close(taos);
- exit(1);
- }
- int count = 0;
- while (taos_fetch_row(pSql) != NULL) {
- count++;
- }
- t = getCurrentTime() - t;
-
- fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", num_of_tables * num_of_DPT / t, t * 1000);
- printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t);
-
- taos_free_result(pSql);
- }
- fprintf(fp, "\n");
- }
-
- fclose(fp);
- return NULL;
-}
-
-static int queryDbExec(TAOS *taos, char *command, int type) {
- int i;
- TAOS_RES *res = NULL;
- int32_t code = -1;
-
- for (i = 0; i < 5; i++) {
- if (NULL != res) {
- taos_free_result(res);
- res = NULL;
+ if (NULL != g_args.sqlFile) {
+ TAOS* qtaos = taos_connect(
+ g_Dbs.host, g_Dbs.user, g_Dbs.password, g_Dbs.db[0].dbName, g_Dbs.port);
+ querySqlFile(qtaos, g_args.sqlFile);
+ taos_close(qtaos);
+ return 0;
}
- res = taos_query(taos, command);
- code = taos_errno(res);
- if (0 == code) {
- break;
- }
- }
+ (void)insertTestProcess();
+ if (g_Dbs.insert_only) return 0;
- if (code != 0) {
- fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(res));
- taos_free_result(res);
- //taos_close(taos);
- return -1;
- }
-
- if (1 == type) {
- int affectedRows = taos_affected_rows(res);
- taos_free_result(res);
- return affectedRows;
- }
+ // select
+ if (false == g_Dbs.insert_only) {
+ // query data
- taos_free_result(res);
+ pthread_t read_id;
+ threadInfo *rInfo = malloc(sizeof(threadInfo));
+ rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
+ rInfo->start_table_id = 0;
+ rInfo->end_table_id = g_Dbs.db[0].superTbls[0].childTblCount - 1;
+ //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
+ //rInfo->nrecords_per_table = g_Dbs.db[0].superTbls[0].insertRows;
+ rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
+ rInfo->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, g_Dbs.db[0].dbName, g_Dbs.port);
+ strcpy(rInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix);
+ strcpy(rInfo->fp, g_Dbs.resultFile);
+
+ if (!g_Dbs.use_metric) {
+ pthread_create(&read_id, NULL, readTable, rInfo);
+ } else {
+ pthread_create(&read_id, NULL, readMetric, rInfo);
+ }
+ pthread_join(read_id, NULL);
+ taos_close(rInfo->taos);
+ free(rInfo);
+ }
+ }
+
+ taos_cleanup();
return 0;
}
-void queryDB(TAOS *taos, char *command) {
- int i;
- TAOS_RES *pSql = NULL;
- int32_t code = -1;
-
- for (i = 0; i < 5; i++) {
- if (NULL != pSql) {
- taos_free_result(pSql);
- pSql = NULL;
- }
-
- pSql = taos_query(taos, command);
- code = taos_errno(pSql);
- if (0 == code) {
- break;
- }
- }
-
- if (code != 0) {
- fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql));
- taos_free_result(pSql);
- taos_close(taos);
- exit(EXIT_FAILURE);
- }
-
- taos_free_result(pSql);
-}
-
-// sync insertion
-void *syncWrite(void *sarg) {
- info *winfo = (info *)sarg;
- char buffer[BUFFER_SIZE] = "\0";
- char data[MAX_DATA_SIZE];
- char **data_type = winfo->datatype;
- int len_of_binary = winfo->len_of_binary;
- int ncols_per_record = winfo->ncols_per_record;
- srand((uint32_t)time(NULL));
- int64_t time_counter = winfo->start_time;
- for (int i = 0; i < winfo->nrecords_per_table;) {
- for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) {
- int inserted = i;
- int64_t tmp_time = time_counter;
-
- char *pstr = buffer;
- pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, tID);
- int k;
- for (k = 0; k < winfo->nrecords_per_request;) {
- int rand_num = rand() % 100;
- int len = -1;
- if (winfo->data_of_order ==1 && rand_num < winfo->data_of_rate) {
- int64_t d = tmp_time - rand() % 1000000 + rand_num;
- len = generateData(data, data_type, ncols_per_record, d, len_of_binary);
- } else {
- len = generateData(data, data_type, ncols_per_record, tmp_time += 1000, len_of_binary);
- }
-
- //assert(len + pstr - buffer < BUFFER_SIZE);
- if (len + pstr - buffer >= BUFFER_SIZE) { // too long
- break;
- }
-
- pstr += sprintf(pstr, " %s", data);
- inserted++;
- k++;
-
- if (inserted >= winfo->nrecords_per_table) break;
- }
-
- /* puts(buffer); */
- int64_t startTs;
- int64_t endTs;
- startTs = taosGetTimestampUs();
- //queryDB(winfo->taos, buffer);
- int affectedRows = queryDbExec(winfo->taos, buffer, 1);
-
- if (0 <= affectedRows){
- endTs = taosGetTimestampUs();
- int64_t delay = endTs - startTs;
- if (delay > winfo->maxDelay) winfo->maxDelay = delay;
- if (delay < winfo->minDelay) winfo->minDelay = delay;
- winfo->cntDelay++;
- winfo->totalDelay += delay;
- //winfo->avgDelay = (double)winfo->totalDelay / winfo->cntDelay;
- }
-
- if (tID == winfo->end_table_id) {
- i = inserted;
- time_counter = tmp_time;
- }
- }
- }
- return NULL;
-}
-
-void *asyncWrite(void *sarg) {
- info *winfo = (info *)sarg;
- taos_query_a(winfo->taos, "show databases", callBack, winfo);
-
- tsem_wait(&(winfo->lock_sem));
-
- return NULL;
-}
-
-void callBack(void *param, TAOS_RES *res, int code) {
- info* winfo = (info*)param;
- char **datatype = winfo->datatype;
- int ncols_per_record = winfo->ncols_per_record;
- int len_of_binary = winfo->len_of_binary;
-
- int64_t tmp_time = winfo->start_time;
- char *buffer = calloc(1, BUFFER_SIZE);
- char *data = calloc(1, MAX_DATA_SIZE);
- char *pstr = buffer;
- pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, winfo->start_table_id);
- if (winfo->counter >= winfo->nrecords_per_table) {
- winfo->start_table_id++;
- winfo->counter = 0;
- }
- if (winfo->start_table_id > winfo->end_table_id) {
- tsem_post(&winfo->lock_sem);
- free(buffer);
- free(data);
- taos_free_result(res);
- return;
- }
-
- for (int i = 0; i < winfo->nrecords_per_request; i++) {
- int rand_num = rand() % 100;
- if (winfo->data_of_order ==1 && rand_num < winfo->data_of_rate)
- {
- int64_t d = tmp_time - rand() % 1000000 + rand_num;
- generateData(data, datatype, ncols_per_record, d, len_of_binary);
- } else
- {
- generateData(data, datatype, ncols_per_record, tmp_time += 1000, len_of_binary);
- }
- pstr += sprintf(pstr, "%s", data);
- winfo->counter++;
-
- if (winfo->counter >= winfo->nrecords_per_table) {
- break;
- }
- }
- taos_query_a(winfo->taos, buffer, callBack, winfo);
- free(buffer);
- free(data);
-
- taos_free_result(res);
-}
-
-double getCurrentTime() {
- struct timeval tv;
- if (gettimeofday(&tv, NULL) != 0) {
- perror("Failed to get current time in ms");
- exit(EXIT_FAILURE);
- }
-
- return tv.tv_sec + tv.tv_usec / 1E6;
-}
-
-int32_t randint[MAX_PREPARED_RAND];
-int64_t randbigint[MAX_PREPARED_RAND];
-float randfloat[MAX_PREPARED_RAND];
-double randdouble[MAX_PREPARED_RAND];
-
-int32_t rand_tinyint(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor] % 128;
-
-}
-
-int32_t rand_smallint(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor] % 32767;
-}
-
-int32_t rand_int(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor];
-}
-
-int64_t rand_bigint(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randbigint[cursor];
-
-}
-
-float rand_float(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randfloat[cursor];
-
-}
-
-double rand_double() {
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randdouble[cursor];
-
-}
-
-void init_rand_data(){
- for (int i = 0; i < MAX_PREPARED_RAND; i++){
- randint[i] = (int)(rand() % 10);
- randbigint[i] = (int64_t)(rand() % 2147483648);
- randfloat[i] = (float)(rand() / 1000.0);
- randdouble[i] = (double)(rand() / 1000000.0);
- }
-}
-
-int32_t generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary) {
- memset(res, 0, MAX_DATA_SIZE);
- char *pstr = res;
- pstr += sprintf(pstr, "(%" PRId64, timestamp);
- int c = 0;
-
- for (; c < MAX_NUM_DATATYPE; c++) {
- if (data_type[c] == NULL) {
- break;
- }
- }
-
- if (0 == c) {
- perror("data type error!");
- exit(-1);
- }
-
- for (int i = 0; i < num_of_cols; i++) {
- if (strcasecmp(data_type[i % c], "tinyint") == 0) {
- pstr += sprintf(pstr, ", %d", rand_tinyint() );
- } else if (strcasecmp(data_type[i % c], "smallint") == 0) {
- pstr += sprintf(pstr, ", %d", rand_smallint());
- } else if (strcasecmp(data_type[i % c], "int") == 0) {
- pstr += sprintf(pstr, ", %d", rand_int());
- } else if (strcasecmp(data_type[i % c], "bigint") == 0) {
- pstr += sprintf(pstr, ", %" PRId64, rand_bigint());
- } else if (strcasecmp(data_type[i % c], "float") == 0) {
- pstr += sprintf(pstr, ", %10.4f", rand_float());
- } else if (strcasecmp(data_type[i % c], "double") == 0) {
- double t = rand_double();
- pstr += sprintf(pstr, ", %20.8f", t);
- } else if (strcasecmp(data_type[i % c], "bool") == 0) {
- bool b = rand() & 1;
- pstr += sprintf(pstr, ", %s", b ? "true" : "false");
- } else if (strcasecmp(data_type[i % c], "binary") == 0) {
- char *s = malloc(len_of_binary);
- rand_string(s, len_of_binary);
- pstr += sprintf(pstr, ", \"%s\"", s);
- free(s);
- }else if (strcasecmp(data_type[i % c], "nchar") == 0) {
- char *s = malloc(len_of_binary);
- rand_string(s, len_of_binary);
- pstr += sprintf(pstr, ", \"%s\"", s);
- free(s);
- }
-
- if (pstr - res > MAX_DATA_SIZE) {
- perror("column length too long, abort");
- exit(-1);
- }
- }
-
- pstr += sprintf(pstr, ")");
-
- return (int32_t)(pstr - res);
-}
-
-static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJK1234567890";
-void rand_string(char *str, int size) {
- str[0] = 0;
- if (size > 0) {
- --size;
- int n;
- for (n = 0; n < size; n++) {
- int key = rand() % (int)(sizeof charset - 1);
- str[n] = charset[key];
- }
- str[n] = 0;
- }
-}
diff --git a/src/kit/taosdemox/CMakeLists.txt b/src/kit/taosdemox/CMakeLists.txt
deleted file mode 100644
index abe4e74710..0000000000
--- a/src/kit/taosdemox/CMakeLists.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
-PROJECT(TDengine)
-
-INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
-INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/include)
-
-IF (TD_LINUX)
- AUX_SOURCE_DIRECTORY(. SRC)
- ADD_EXECUTABLE(taosdemox ${SRC})
-
- #find_program(HAVE_CURL NAMES curl)
- IF ((NOT TD_ARM_64) AND (NOT TD_ARM_32))
- ADD_DEFINITIONS(-DTD_LOWA_CURL)
- LINK_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/lib)
- ADD_LIBRARY(curl STATIC IMPORTED)
- SET_PROPERTY(TARGET curl PROPERTY IMPORTED_LOCATION ${TD_COMMUNITY_DIR}/deps/libcurl/lib/libcurl.a)
- TARGET_LINK_LIBRARIES(taosdemox curl)
- ENDIF ()
-
- IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(taosdemox taos_static cJson)
- ELSE ()
- TARGET_LINK_LIBRARIES(taosdemox taos cJson)
- ENDIF ()
-ENDIF ()
-
-IF (TD_DARWIN)
- # missing a few dependencies, such as
- # AUX_SOURCE_DIRECTORY(. SRC)
- # ADD_EXECUTABLE(taosdemox ${SRC})
- #
- # #find_program(HAVE_CURL NAMES curl)
- # IF ((NOT TD_ARM_64) AND (NOT TD_ARM_32))
- # ADD_DEFINITIONS(-DTD_LOWA_CURL)
- # LINK_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/lib)
- # ADD_LIBRARY(curl STATIC IMPORTED)
- # SET_PROPERTY(TARGET curl PROPERTY IMPORTED_LOCATION ${TD_COMMUNITY_DIR}/deps/libcurl/lib/libcurl.a)
- # TARGET_LINK_LIBRARIES(taosdemox curl)
- # ENDIF ()
- #
- # IF (TD_SOMODE_STATIC)
- # TARGET_LINK_LIBRARIES(taosdemox taos_static cJson)
- # ELSE ()
- # TARGET_LINK_LIBRARIES(taosdemox taos cJson)
- # ENDIF ()
-ENDIF ()
-
diff --git a/src/kit/taosdemox/taosdemox.c b/src/kit/taosdemox/taosdemox.c
deleted file mode 100644
index 674c9aa0b8..0000000000
--- a/src/kit/taosdemox/taosdemox.c
+++ /dev/null
@@ -1,5125 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-
-/*
- when in some thread query return error, thread don't exit, but return, otherwise coredump in other thread.
-*/
-
-#define _GNU_SOURCE
-#define CURL_STATICLIB
-
-#ifdef TD_LOWA_CURL
-#include "curl/curl.h"
-#endif
-
-#ifdef LINUX
- #include "os.h"
- #include "cJSON.h"
- #include
- #include
- #include
- #ifndef _ALPINE
- #include
- #endif
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
-#else
- #include
- #include
- #include
- #include "os.h"
-
- #pragma comment ( lib, "libcurl.lib" )
- #pragma comment ( lib, "ws2_32.lib" )
- #pragma comment ( lib, "winmm.lib" )
- #pragma comment ( lib, "wldap32.lib" )
-#endif
-
-#include "taos.h"
-#include "tutil.h"
-
-extern char configDir[];
-
-#define INSERT_JSON_NAME "insert.json"
-#define QUERY_JSON_NAME "query.json"
-#define SUBSCRIBE_JSON_NAME "subscribe.json"
-
-#define INSERT_MODE 0
-#define QUERY_MODE 1
-#define SUBSCRIBE_MODE 2
-
-#define MAX_SQL_SIZE 65536
-#define BUFFER_SIZE (65536*2)
-#define MAX_DB_NAME_SIZE 64
-#define MAX_TB_NAME_SIZE 64
-#define MAX_DATA_SIZE 16000
-#define MAX_NUM_DATATYPE 10
-#define OPT_ABORT 1 /* –abort */
-#define STRING_LEN 60000
-#define MAX_PREPARED_RAND 1000000
-//#define MAX_SQL_SIZE 65536
-#define MAX_FILE_NAME_LEN 256
-
-#define MAX_SAMPLES_ONCE_FROM_FILE 10000
-#define MAX_NUM_DATATYPE 10
-
-#define MAX_DB_COUNT 8
-#define MAX_SUPER_TABLE_COUNT 8
-#define MAX_COLUMN_COUNT 1024
-#define MAX_TAG_COUNT 128
-
-#define MAX_QUERY_SQL_COUNT 10
-#define MAX_QUERY_SQL_LENGTH 256
-
-#define MAX_DATABASE_COUNT 256
-
-typedef enum CREATE_SUB_TALBE_MOD_EN {
- PRE_CREATE_SUBTBL,
- AUTO_CREATE_SUBTBL,
- NO_CREATE_SUBTBL
-} CREATE_SUB_TALBE_MOD_EN;
-
-typedef enum TALBE_EXISTS_EN {
- TBL_ALREADY_EXISTS,
- TBL_NO_EXISTS,
- TBL_EXISTS_BUTT
-} TALBE_EXISTS_EN;
-
-enum MODE {
- SYNC,
- ASYNC,
- MODE_BUT
-};
-
-enum QUERY_TYPE {
- NO_INSERT_TYPE,
- INSERT_TYPE,
- QUERY_TYPE_BUT
-} ;
-
-enum _show_db_index {
- TSDB_SHOW_DB_NAME_INDEX,
- TSDB_SHOW_DB_CREATED_TIME_INDEX,
- TSDB_SHOW_DB_NTABLES_INDEX,
- TSDB_SHOW_DB_VGROUPS_INDEX,
- TSDB_SHOW_DB_REPLICA_INDEX,
- TSDB_SHOW_DB_QUORUM_INDEX,
- TSDB_SHOW_DB_DAYS_INDEX,
- TSDB_SHOW_DB_KEEP_INDEX,
- TSDB_SHOW_DB_CACHE_INDEX,
- TSDB_SHOW_DB_BLOCKS_INDEX,
- TSDB_SHOW_DB_MINROWS_INDEX,
- TSDB_SHOW_DB_MAXROWS_INDEX,
- TSDB_SHOW_DB_WALLEVEL_INDEX,
- TSDB_SHOW_DB_FSYNC_INDEX,
- TSDB_SHOW_DB_COMP_INDEX,
- TSDB_SHOW_DB_CACHELAST_INDEX,
- TSDB_SHOW_DB_PRECISION_INDEX,
- TSDB_SHOW_DB_UPDATE_INDEX,
- TSDB_SHOW_DB_STATUS_INDEX,
- TSDB_MAX_SHOW_DB
-};
-
-// -----------------------------------------SHOW TABLES CONFIGURE -------------------------------------
-enum _show_stables_index {
- TSDB_SHOW_STABLES_NAME_INDEX,
- TSDB_SHOW_STABLES_CREATED_TIME_INDEX,
- TSDB_SHOW_STABLES_COLUMNS_INDEX,
- TSDB_SHOW_STABLES_METRIC_INDEX,
- TSDB_SHOW_STABLES_UID_INDEX,
- TSDB_SHOW_STABLES_TID_INDEX,
- TSDB_SHOW_STABLES_VGID_INDEX,
- TSDB_MAX_SHOW_STABLES
-};
-enum _describe_table_index {
- TSDB_DESCRIBE_METRIC_FIELD_INDEX,
- TSDB_DESCRIBE_METRIC_TYPE_INDEX,
- TSDB_DESCRIBE_METRIC_LENGTH_INDEX,
- TSDB_DESCRIBE_METRIC_NOTE_INDEX,
- TSDB_MAX_DESCRIBE_METRIC
-};
-
-typedef struct {
- char field[TSDB_COL_NAME_LEN + 1];
- char type[16];
- int length;
- char note[128];
-} SColDes;
-
-/* Used by main to communicate with parse_opt. */
-typedef struct SArguments_S {
- char * metaFile;
- char * host;
- uint16_t port;
- char * user;
- char * password;
- char * database;
- int replica;
- char * tb_prefix;
- char * sqlFile;
- bool use_metric;
- bool insert_only;
- bool answer_yes;
- char * output_file;
- int mode;
- char * datatype[MAX_NUM_DATATYPE + 1];
- int len_of_binary;
- int num_of_CPR;
- int num_of_threads;
- int num_of_RPR;
- int num_of_tables;
- int num_of_DPT;
- int abort;
- int disorderRatio;
- int disorderRange;
- int method_of_delete;
- char ** arg_list;
-} SArguments;
-
-typedef struct SColumn_S {
- char field[TSDB_COL_NAME_LEN + 1];
- char dataType[MAX_TB_NAME_SIZE];
- int dataLen;
- char note[128];
-} StrColumn;
-
-typedef struct SSuperTable_S {
- char sTblName[MAX_TB_NAME_SIZE];
- int childTblCount;
- bool superTblExists; // 0: no, 1: yes
- bool childTblExists; // 0: no, 1: yes
- int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
- int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
- char childTblPrefix[MAX_TB_NAME_SIZE];
- char dataSource[MAX_TB_NAME_SIZE]; // rand_gen or sample
- char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful
- int insertRate; // 0: unlimit > 0 rows/s
-
- int multiThreadWriteOneTbl; // 0: no, 1: yes
- int numberOfTblInOneSql; // 0/1: one table, > 1: number of tbl
- int rowsPerTbl; //
- int disorderRatio; // 0: no disorder, >0: x%
- int disorderRange; // ms or us by database precision
- int maxSqlLen; //
-
- int64_t insertRows; // 0: no limit
- int timeStampStep;
- char startTimestamp[MAX_TB_NAME_SIZE]; //
- char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json
- char sampleFile[MAX_FILE_NAME_LEN];
- char tagsFile[MAX_FILE_NAME_LEN];
-
- int columnCount;
- StrColumn columns[MAX_COLUMN_COUNT];
- int tagCount;
- StrColumn tags[MAX_TAG_COUNT];
-
- char* childTblName;
- char* colsOfCreatChildTable;
- int lenOfOneRow;
- int lenOfTagOfOneRow;
-
- char* sampleDataBuf;
- int sampleDataBufSize;
- //int sampleRowCount;
- //int sampleUsePos;
-
- int tagSource; // 0: rand, 1: tag sample
- char* tagDataBuf;
- int tagSampleCount;
- int tagUsePos;
-
- // statistics
- int64_t totalRowsInserted;
- int64_t totalAffectedRows;
-} SSuperTable;
-
-typedef struct {
- char name[TSDB_DB_NAME_LEN + 1];
- char create_time[32];
- int32_t ntables;
- int32_t vgroups;
- int16_t replica;
- int16_t quorum;
- int16_t days;
- char keeplist[32];
- int32_t cache; //MB
- int32_t blocks;
- int32_t minrows;
- int32_t maxrows;
- int8_t wallevel;
- int32_t fsync;
- int8_t comp;
- int8_t cachelast;
- char precision[8]; // time resolution
- int8_t update;
- char status[16];
-} SDbInfo;
-
-typedef struct SDbCfg_S {
-// int maxtablesPerVnode;
- int minRows;
- int maxRows;
- int comp;
- int walLevel;
- int fsync;
- int replica;
- int update;
- int keep;
- int days;
- int cache;
- int blocks;
- int quorum;
- char precision[MAX_TB_NAME_SIZE];
-} SDbCfg;
-
-typedef struct SDataBase_S {
- char dbName[MAX_DB_NAME_SIZE];
- int drop; // 0: use exists, 1: if exists, drop then new create
- SDbCfg dbCfg;
- int superTblCount;
- SSuperTable superTbls[MAX_SUPER_TABLE_COUNT];
-} SDataBase;
-
-typedef struct SDbs_S {
- char cfgDir[MAX_FILE_NAME_LEN];
- char host[MAX_DB_NAME_SIZE];
- uint16_t port;
- char user[MAX_DB_NAME_SIZE];
- char password[MAX_DB_NAME_SIZE];
- char resultFile[MAX_FILE_NAME_LEN];
- bool use_metric;
- bool insert_only;
- bool do_aggreFunc;
- bool queryMode;
-
- int threadCount;
- int threadCountByCreateTbl;
- int dbCount;
- SDataBase db[MAX_DB_COUNT];
-
- // statistics
- int64_t totalRowsInserted;
- int64_t totalAffectedRows;
-} SDbs;
-
-typedef struct SuperQueryInfo_S {
- int rate; // 0: unlimit > 0 loop/s
- int concurrent;
- int sqlCount;
- int subscribeMode; // 0: sync, 1: async
- int subscribeInterval; // ms
- int subscribeRestart;
- int subscribeKeepProgress;
- char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH];
- char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
- TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
-} SuperQueryInfo;
-
-typedef struct SubQueryInfo_S {
- char sTblName[MAX_TB_NAME_SIZE];
- int rate; // 0: unlimit > 0 loop/s
- int threadCnt;
- int subscribeMode; // 0: sync, 1: async
- int subscribeInterval; // ms
- int subscribeRestart;
- int subscribeKeepProgress;
- int childTblCount;
- char childTblPrefix[MAX_TB_NAME_SIZE];
- int sqlCount;
- char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH];
- char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
- TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
-
- char* childTblName;
-} SubQueryInfo;
-
-typedef struct SQueryMetaInfo_S {
- char cfgDir[MAX_FILE_NAME_LEN];
- char host[MAX_DB_NAME_SIZE];
- uint16_t port;
- char user[MAX_DB_NAME_SIZE];
- char password[MAX_DB_NAME_SIZE];
- char dbName[MAX_DB_NAME_SIZE];
- char queryMode[MAX_TB_NAME_SIZE]; // taosc, restful
-
- SuperQueryInfo superQueryInfo;
- SubQueryInfo subQueryInfo;
-} SQueryMetaInfo;
-
-typedef struct SThreadInfo_S {
- TAOS *taos;
- #ifdef TD_LOWA_CURL
- CURL *curl_handle;
- #endif
- int threadID;
- char db_name[MAX_DB_NAME_SIZE];
- char fp[4096];
- char tb_prefix[MAX_TB_NAME_SIZE];
- int start_table_id;
- int end_table_id;
- int data_of_rate;
- int64_t start_time;
- char* cols;
- bool use_metric;
- SSuperTable* superTblInfo;
-
- // for async insert
- tsem_t lock_sem;
- int64_t counter;
- int64_t st;
- int64_t et;
- int64_t lastTs;
- int nrecords_per_request;
-
- // statistics
- int64_t totalRowsInserted;
- int64_t totalAffectedRows;
-} threadInfo;
-
-typedef struct curlMemInfo_S {
- char *buf;
- size_t sizeleft;
- } curlMemInfo;
-
-
-
-#ifdef LINUX
- /* The options we understand. */
- static struct argp_option options[] = {
- {0, 'f', "meta file", 0, "The meta data to the execution procedure, if use -f, all others options invalid. Default is NULL.", 0},
- #ifdef _TD_POWER_
- {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/power/'.", 1},
- {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'powerdb'.", 2},
- #else
- {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 1},
- {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 2},
- #endif
- {0, 'h', "host", 0, "The host to connect to TDengine. Default is localhost.", 2},
- {0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 2},
- {0, 'u', "user", 0, "The TDengine user name to use when connecting to the server. Default is 'root'.", 2},
- {0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
- {0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 4},
- {0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 4},
- {0, 's', "sql file", 0, "The select sql file.", 6},
- {0, 'M', 0, 0, "Use metric flag.", 4},
- {0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 6},
- {0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC.", 4},
- {0, 'b', "type_of_cols", 0, "The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP.", 4},
- {0, 'w', "length_of_chartype", 0, "The length of data_type 'BINARY' or 'NCHAR'. Default is 16", 4},
- {0, 'l', "num_of_cols_per_record", 0, "The number of columns per record. Default is 10.", 4},
- {0, 'T', "num_of_threads", 0, "The number of threads. Default is 10.", 4},
- // {0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 100.", 4},
- {0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 4},
- {0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 10000.", 4},
- {0, 'x', 0, 0, "Not insert only flag.", 4},
- {0, 'y', 0, 0, "Default input yes for prompt.", 4},
- {0, 'O', "disorderRatio", 0, "Insert mode--0: In order, > 0: disorder ratio. Default is in order.", 4},
- {0, 'R', "disorderRang", 0, "Out of order data's range, ms, default is 1000.", 4},
- //{0, 'D', "delete database", 0, "if elete database if exists. 0: no, 1: yes, default is 1", 5},
- {0}};
-
-/* Parse a single option. */
-static error_t parse_opt(int key, char *arg, struct argp_state *state) {
- // Get the input argument from argp_parse, which we know is a pointer to our arguments structure.
- SArguments *arguments = state->input;
- wordexp_t full_path;
- char **sptr;
- switch (key) {
- case 'f':
- arguments->metaFile = arg;
- break;
- case 'h':
- arguments->host = arg;
- break;
- case 'p':
- arguments->port = atoi(arg);
- break;
- case 'u':
- arguments->user = arg;
- break;
- case 'P':
- arguments->password = arg;
- break;
- case 'o':
- arguments->output_file = arg;
- break;
- case 's':
- arguments->sqlFile = arg;
- break;
- case 'q':
- arguments->mode = atoi(arg);
- break;
- case 'T':
- arguments->num_of_threads = atoi(arg);
- break;
- //case 'r':
- // arguments->num_of_RPR = atoi(arg);
- // break;
- case 't':
- arguments->num_of_tables = atoi(arg);
- break;
- case 'n':
- arguments->num_of_DPT = atoi(arg);
- break;
- case 'd':
- arguments->database = arg;
- break;
- case 'l':
- arguments->num_of_CPR = atoi(arg);
- break;
- case 'b':
- sptr = arguments->datatype;
- if (strstr(arg, ",") == NULL) {
- if (strcasecmp(arg, "INT") != 0 && strcasecmp(arg, "FLOAT") != 0 &&
- strcasecmp(arg, "TINYINT") != 0 && strcasecmp(arg, "BOOL") != 0 &&
- strcasecmp(arg, "SMALLINT") != 0 && strcasecmp(arg, "TIMESTAMP") != 0 &&
- strcasecmp(arg, "BIGINT") != 0 && strcasecmp(arg, "DOUBLE") != 0 &&
- strcasecmp(arg, "BINARY") != 0 && strcasecmp(arg, "NCHAR") != 0) {
- argp_error(state, "Invalid data_type!");
- }
- sptr[0] = arg;
- } else {
- int index = 0;
- char *dupstr = strdup(arg);
- char *running = dupstr;
- char *token = strsep(&running, ",");
- while (token != NULL) {
- if (strcasecmp(token, "INT") != 0 && strcasecmp(token, "FLOAT") != 0 &&
- strcasecmp(token, "TINYINT") != 0 && strcasecmp(token, "BOOL") != 0 &&
- strcasecmp(token, "SMALLINT") != 0 && strcasecmp(token, "TIMESTAMP") != 0 &&
- strcasecmp(token, "BIGINT") != 0 && strcasecmp(token, "DOUBLE") != 0 &&
- strcasecmp(token, "BINARY") != 0 && strcasecmp(token, "NCHAR") != 0) {
- argp_error(state, "Invalid data_type!");
- }
- sptr[index++] = token;
- token = strsep(&running, ",");
- if (index >= MAX_NUM_DATATYPE) break;
- }
- }
- break;
- case 'w':
- arguments->len_of_binary = atoi(arg);
- break;
- case 'm':
- arguments->tb_prefix = arg;
- break;
- case 'M':
- arguments->use_metric = true;
- break;
- case 'x':
- arguments->insert_only = false;
- case 'y':
- arguments->answer_yes = true;
- break;
- case 'c':
- if (wordexp(arg, &full_path, 0) != 0) {
- fprintf(stderr, "Invalid path %s\n", arg);
- return -1;
- }
- taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
- wordfree(&full_path);
- break;
- case 'O':
- arguments->disorderRatio = atoi(arg);
- if (arguments->disorderRatio < 0 || arguments->disorderRatio > 100)
- {
- argp_error(state, "Invalid disorder ratio, should 1 ~ 100!");
- }
- break;
- case 'R':
- arguments->disorderRange = atoi(arg);
- break;
- case 'a':
- arguments->replica = atoi(arg);
- if (arguments->replica > 3 || arguments->replica < 1)
- {
- arguments->replica = 1;
- }
- break;
- //case 'D':
- // arguments->method_of_delete = atoi(arg);
- // break;
- case OPT_ABORT:
- arguments->abort = 1;
- break;
- case ARGP_KEY_ARG:
- /*arguments->arg_list = &state->argv[state->next-1];
- state->next = state->argc;*/
- argp_usage(state);
- break;
-
- default:
- return ARGP_ERR_UNKNOWN;
- }
- return 0;
-}
-
-static struct argp argp = {options, parse_opt, 0, 0};
-
-void parse_args(int argc, char *argv[], SArguments *arguments) {
- argp_parse(&argp, argc, argv, 0, 0, arguments);
- if (arguments->abort) {
- #ifndef _ALPINE
- error(10, 0, "ABORTED");
- #else
- abort();
- #endif
- }
-}
-
-#else
- void printHelp() {
- char indent[10] = " ";
- printf("%s%s\n", indent, "-f");
- printf("%s%s%s\n", indent, indent, "The meta file to the execution procedure. Default is './meta.json'.");
- printf("%s%s\n", indent, "-c");
- printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/taos/'.");
- }
-
- void parse_args(int argc, char *argv[], SArguments *arguments) {
- for (int i = 1; i < argc; i++) {
- if (strcmp(argv[i], "-f") == 0) {
- arguments->metaFile = argv[++i];
- } else if (strcmp(argv[i], "-c") == 0) {
- strcpy(configDir, argv[++i]);
- } else if (strcmp(argv[i], "--help") == 0) {
- printHelp();
- exit(EXIT_FAILURE);
- } else {
- fprintf(stderr, "wrong options\n");
- printHelp();
- exit(EXIT_FAILURE);
- }
- }
- }
-#endif
-
-static bool getInfoFromJsonFile(char* file);
-//static int generateOneRowDataForStb(SSuperTable* stbInfo);
-//static int getDataIntoMemForStb(SSuperTable* stbInfo);
-static void init_rand_data();
-static int createDatabases();
-static void createChildTables();
-static int queryDbExec(TAOS *taos, char *command, int type);
-
-/* ************ Global variables ************ */
-
-int32_t randint[MAX_PREPARED_RAND];
-int64_t randbigint[MAX_PREPARED_RAND];
-float randfloat[MAX_PREPARED_RAND];
-double randdouble[MAX_PREPARED_RAND];
-char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", "max(col0)", "min(col0)", "first(col0)", "last(col0)"};
-
-SArguments g_args = {NULL,
- "127.0.0.1", // host
- 6030, // port
- "root", // user
- #ifdef _TD_POWER_
- "powerdb", // password
- #else
- "taosdata", // password
- #endif
- "test", // database
- 1, // replica
- "t", // tb_prefix
- NULL, // sqlFile
- false, // use_metric
- true, // insert_only
- false, // answer_yes;
- "./output.txt", // output_file
- 0, // mode : sync or async
- {
- "TINYINT", // datatype
- "SMALLINT",
- "INT",
- "BIGINT",
- "FLOAT",
- "DOUBLE",
- "BINARY",
- "NCHAR",
- "BOOL",
- "TIMESTAMP"
- },
- 16, // len_of_binary
- 10, // num_of_CPR
- 10, // num_of_connections/thread
- 100, // num_of_RPR
- 10000, // num_of_tables
- 10000, // num_of_DPT
- 0, // abort
- 0, // disorderRatio
- 1000, // disorderRange
- 1, // method_of_delete
- NULL // arg_list
-};
-
-
-static int g_jsonType = 0;
-static SDbs g_Dbs;
-static int g_totalChildTables = 0;
-static SQueryMetaInfo g_queryInfo;
-static FILE * g_fpOfInsertResult = NULL;
-
-
-void tmfclose(FILE *fp) {
- if (NULL != fp) {
- fclose(fp);
- }
-}
-
-void tmfree(char *buf) {
- if (NULL != buf) {
- free(buf);
- }
-}
-
-static int queryDbExec(TAOS *taos, char *command, int type) {
- int i;
- TAOS_RES *res = NULL;
- int32_t code = -1;
-
- for (i = 0; i < 5; i++) {
- if (NULL != res) {
- taos_free_result(res);
- res = NULL;
- }
-
- res = taos_query(taos, command);
- code = taos_errno(res);
- if (0 == code) {
- break;
- }
- }
-
- if (code != 0) {
- fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(res));
- taos_free_result(res);
- //taos_close(taos);
- return -1;
- }
-
- if (INSERT_TYPE == type) {
- int affectedRows = taos_affected_rows(res);
- taos_free_result(res);
- return affectedRows;
- }
-
- taos_free_result(res);
- return 0;
-}
-
-static void getResult(TAOS_RES *res, char* resultFileName) {
- TAOS_ROW row = NULL;
- int num_rows = 0;
- int num_fields = taos_field_count(res);
- TAOS_FIELD *fields = taos_fetch_fields(res);
-
- FILE *fp = NULL;
- if (resultFileName[0] != 0) {
- fp = fopen(resultFileName, "at");
- if (fp == NULL) {
- fprintf(stderr, "failed to open result file: %s, result will not save to file\n", resultFileName);
- }
- }
-
- char* databuf = (char*) calloc(1, 100*1024*1024);
- if (databuf == NULL) {
- fprintf(stderr, "failed to malloc, warning: save result to file slowly!\n");
- return ;
- }
-
- int totalLen = 0;
- char temp[16000];
-
- // fetch the records row by row
- while ((row = taos_fetch_row(res))) {
- if (totalLen >= 100*1024*1024 - 32000) {
- if (fp) fprintf(fp, "%s", databuf);
- totalLen = 0;
- memset(databuf, 0, 100*1024*1024);
- }
- num_rows++;
- int len = taos_print_row(temp, row, fields, num_fields);
- len += sprintf(temp + len, "\n");
- //printf("query result:%s\n", temp);
- memcpy(databuf + totalLen, temp, len);
- totalLen += len;
- }
-
- if (fp) fprintf(fp, "%s", databuf);
- tmfclose(fp);
- free(databuf);
-}
-
-static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) {
- TAOS_RES *res = taos_query(taos, command);
- if (res == NULL || taos_errno(res) != 0) {
- printf("failed to sql:%s, reason:%s\n", command, taos_errstr(res));
- taos_free_result(res);
- return;
- }
-
- getResult(res, resultFileName);
- taos_free_result(res);
-}
-
-double getCurrentTime() {
- struct timeval tv;
- if (gettimeofday(&tv, NULL) != 0) {
- perror("Failed to get current time in ms");
- return 0.0;
- }
-
- return tv.tv_sec + tv.tv_usec / 1E6;
-}
-
-static int32_t rand_bool(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor] % 2;
-}
-
-static int32_t rand_tinyint(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor] % 128;
-}
-
-static int32_t rand_smallint(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor] % 32767;
-}
-
-static int32_t rand_int(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor];
-}
-
-static int64_t rand_bigint(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randbigint[cursor];
-
-}
-
-static float rand_float(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randfloat[cursor];
-}
-
-static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";
-void rand_string(char *str, int size) {
- str[0] = 0;
- if (size > 0) {
- //--size;
- int n;
- for (n = 0; n < size; n++) {
- int key = rand_tinyint() % (int)(sizeof(charset) - 1);
- str[n] = charset[key];
- }
- str[n] = 0;
- }
-}
-
-static double rand_double() {
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randdouble[cursor];
-
-}
-
-static void init_rand_data() {
- for (int i = 0; i < MAX_PREPARED_RAND; i++){
- randint[i] = (int)(rand() % 65535);
- randbigint[i] = (int64_t)(rand() % 2147483648);
- randfloat[i] = (float)(rand() / 1000.0);
- randdouble[i] = (double)(rand() / 1000000.0);
- }
-}
-
-static void printfInsertMeta() {
- printf("\033[1m\033[40;32m================ insert.json parse result START ================\033[0m\n");
- printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port);
- printf("user: \033[33m%s\033[0m\n", g_Dbs.user);
- printf("password: \033[33m%s\033[0m\n", g_Dbs.password);
- printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
- printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
- printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl);
-
- printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
- for (int i = 0; i < g_Dbs.dbCount; i++) {
- printf("database[\033[33m%d\033[0m]:\n", i);
- printf(" database name: \033[33m%s\033[0m\n", g_Dbs.db[i].dbName);
- if (0 == g_Dbs.db[i].drop) {
- printf(" drop: \033[33mno\033[0m\n");
- }else {
- printf(" drop: \033[33myes\033[0m\n");
- }
-
- if (g_Dbs.db[i].dbCfg.blocks > 0) {
- printf(" blocks: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.blocks);
- }
- if (g_Dbs.db[i].dbCfg.cache > 0) {
- printf(" cache: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.cache);
- }
- if (g_Dbs.db[i].dbCfg.days > 0) {
- printf(" days: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.days);
- }
- if (g_Dbs.db[i].dbCfg.keep > 0) {
- printf(" keep: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.keep);
- }
- if (g_Dbs.db[i].dbCfg.replica > 0) {
- printf(" replica: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.replica);
- }
- if (g_Dbs.db[i].dbCfg.update > 0) {
- printf(" update: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.update);
- }
- if (g_Dbs.db[i].dbCfg.minRows > 0) {
- printf(" minRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.minRows);
- }
- if (g_Dbs.db[i].dbCfg.maxRows > 0) {
- printf(" maxRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.maxRows);
- }
- if (g_Dbs.db[i].dbCfg.comp > 0) {
- printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp);
- }
- if (g_Dbs.db[i].dbCfg.walLevel > 0) {
- printf(" walLevel: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.walLevel);
- }
- if (g_Dbs.db[i].dbCfg.fsync > 0) {
- printf(" fsync: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.fsync);
- }
- if (g_Dbs.db[i].dbCfg.quorum > 0) {
- printf(" quorum: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.quorum);
- }
- if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
- if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
- printf(" precision: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision);
- } else {
- printf(" precision error: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision);
- exit(EXIT_FAILURE);
- }
- }
-
- printf(" super table count: \033[33m%d\033[0m\n", g_Dbs.db[i].superTblCount);
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- printf(" super table[\033[33m%d\033[0m]:\n", j);
-
- printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName);
-
- if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
- printf(" autoCreateTable: \033[33m%s\033[0m\n", "no");
- } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
- printf(" autoCreateTable: \033[33m%s\033[0m\n", "yes");
- } else {
- printf(" autoCreateTable: \033[33m%s\033[0m\n", "error");
- }
-
- if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
- printf(" childTblExists: \033[33m%s\033[0m\n", "no");
- } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
- printf(" childTblExists: \033[33m%s\033[0m\n", "yes");
- } else {
- printf(" childTblExists: \033[33m%s\033[0m\n", "error");
- }
-
- printf(" childTblCount: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].childTblCount);
- printf(" childTblPrefix: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].childTblPrefix);
- printf(" dataSource: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].dataSource);
- printf(" insertMode: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].insertMode);
- printf(" insertRate: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].insertRate);
- printf(" insertRows: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertRows);
-
- if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
- printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n");
- }else {
- printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
- }
- printf(" numberOfTblInOneSql: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].numberOfTblInOneSql);
- printf(" rowsPerTbl: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].rowsPerTbl);
- printf(" disorderRange: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRange);
- printf(" disorderRatio: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRatio);
- printf(" maxSqlLen: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].maxSqlLen);
-
- printf(" timeStampStep: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].timeStampStep);
- printf(" startTimestamp: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].startTimestamp);
- printf(" sampleFormat: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sampleFormat);
- printf(" sampleFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sampleFile);
- printf(" tagsFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].tagsFile);
-
- printf(" columnCount: \033[33m%d\033[0m\n ", g_Dbs.db[i].superTbls[j].columnCount);
- for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
- //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
- if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "nchar", 5))) {
- printf("column[\033[33m%d\033[0m]:\033[33m%s(%d)\033[0m ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
- } else {
- printf("column[%d]:\033[33m%s\033[0m ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType);
- }
- }
- printf("\n");
-
- printf(" tagCount: \033[33m%d\033[0m\n ", g_Dbs.db[i].superTbls[j].tagCount);
- for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
- //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
- if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", 5))) {
- printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
- } else {
- printf("tag[%d]:\033[33m%s\033[0m ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType);
- }
- }
- printf("\n");
- }
- printf("\n");
- }
- printf("\033[1m\033[40;32m================ insert.json parse result END================\033[0m\n");
-}
-
-static void printfInsertMetaToFile(FILE* fp) {
- fprintf(fp, "================ insert.json parse result START================\n");
- fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port);
- fprintf(fp, "user: %s\n", g_Dbs.user);
- fprintf(fp, "password: %s\n", g_Dbs.password);
- fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
- fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
- fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
-
- fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
- for (int i = 0; i < g_Dbs.dbCount; i++) {
- fprintf(fp, "database[%d]:\n", i);
- fprintf(fp, " database name: %s\n", g_Dbs.db[i].dbName);
- if (0 == g_Dbs.db[i].drop) {
- fprintf(fp, " drop: no\n");
- }else {
- fprintf(fp, " drop: yes\n");
- }
-
- if (g_Dbs.db[i].dbCfg.blocks > 0) {
- fprintf(fp, " blocks: %d\n", g_Dbs.db[i].dbCfg.blocks);
- }
- if (g_Dbs.db[i].dbCfg.cache > 0) {
- fprintf(fp, " cache: %d\n", g_Dbs.db[i].dbCfg.cache);
- }
- if (g_Dbs.db[i].dbCfg.days > 0) {
- fprintf(fp, " days: %d\n", g_Dbs.db[i].dbCfg.days);
- }
- if (g_Dbs.db[i].dbCfg.keep > 0) {
- fprintf(fp, " keep: %d\n", g_Dbs.db[i].dbCfg.keep);
- }
- if (g_Dbs.db[i].dbCfg.replica > 0) {
- fprintf(fp, " replica: %d\n", g_Dbs.db[i].dbCfg.replica);
- }
- if (g_Dbs.db[i].dbCfg.update > 0) {
- fprintf(fp, " update: %d\n", g_Dbs.db[i].dbCfg.update);
- }
- if (g_Dbs.db[i].dbCfg.minRows > 0) {
- fprintf(fp, " minRows: %d\n", g_Dbs.db[i].dbCfg.minRows);
- }
- if (g_Dbs.db[i].dbCfg.maxRows > 0) {
- fprintf(fp, " maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows);
- }
- if (g_Dbs.db[i].dbCfg.comp > 0) {
- fprintf(fp, " comp: %d\n", g_Dbs.db[i].dbCfg.comp);
- }
- if (g_Dbs.db[i].dbCfg.walLevel > 0) {
- fprintf(fp, " walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel);
- }
- if (g_Dbs.db[i].dbCfg.fsync > 0) {
- fprintf(fp, " fsync: %d\n", g_Dbs.db[i].dbCfg.fsync);
- }
- if (g_Dbs.db[i].dbCfg.quorum > 0) {
- fprintf(fp, " quorum: %d\n", g_Dbs.db[i].dbCfg.quorum);
- }
- if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
- if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
- fprintf(fp, " precision: %s\n", g_Dbs.db[i].dbCfg.precision);
- } else {
- fprintf(fp, " precision error: %s\n", g_Dbs.db[i].dbCfg.precision);
- }
- }
-
- fprintf(fp, " super table count: %d\n", g_Dbs.db[i].superTblCount);
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- fprintf(fp, " super table[%d]:\n", j);
-
- fprintf(fp, " stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName);
-
- if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
- fprintf(fp, " autoCreateTable: %s\n", "no");
- } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
- fprintf(fp, " autoCreateTable: %s\n", "yes");
- } else {
- fprintf(fp, " autoCreateTable: %s\n", "error");
- }
-
- if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
- fprintf(fp, " childTblExists: %s\n", "no");
- } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
- fprintf(fp, " childTblExists: %s\n", "yes");
- } else {
- fprintf(fp, " childTblExists: %s\n", "error");
- }
-
- fprintf(fp, " childTblCount: %d\n", g_Dbs.db[i].superTbls[j].childTblCount);
- fprintf(fp, " childTblPrefix: %s\n", g_Dbs.db[i].superTbls[j].childTblPrefix);
- fprintf(fp, " dataSource: %s\n", g_Dbs.db[i].superTbls[j].dataSource);
- fprintf(fp, " insertMode: %s\n", g_Dbs.db[i].superTbls[j].insertMode);
- fprintf(fp, " insertRate: %d\n", g_Dbs.db[i].superTbls[j].insertRate);
- fprintf(fp, " insertRows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].insertRows);
-
- if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
- fprintf(fp, " multiThreadWriteOneTbl: no\n");
- }else {
- fprintf(fp, " multiThreadWriteOneTbl: yes\n");
- }
- fprintf(fp, " numberOfTblInOneSql: %d\n", g_Dbs.db[i].superTbls[j].numberOfTblInOneSql);
- fprintf(fp, " rowsPerTbl: %d\n", g_Dbs.db[i].superTbls[j].rowsPerTbl);
- fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange);
- fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio);
- fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen);
-
- fprintf(fp, " timeStampStep: %d\n", g_Dbs.db[i].superTbls[j].timeStampStep);
- fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp);
- fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat);
- fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile);
- fprintf(fp, " tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile);
-
- fprintf(fp, " columnCount: %d\n ", g_Dbs.db[i].superTbls[j].columnCount);
- for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
- //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
- if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "nchar", 5))) {
- fprintf(fp, "column[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
- } else {
- fprintf(fp, "column[%d]:%s ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType);
- }
- }
- fprintf(fp, "\n");
-
- fprintf(fp, " tagCount: %d\n ", g_Dbs.db[i].superTbls[j].tagCount);
- for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
- //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
- if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", 5))) {
- fprintf(fp, "tag[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
- } else {
- fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType);
- }
- }
- fprintf(fp, "\n");
- }
- fprintf(fp, "\n");
- }
- fprintf(fp, "================ insert.json parse result END ================\n\n");
-}
-
-static void printfQueryMeta() {
- printf("\033[1m\033[40;32m================ query.json parse result ================\033[0m\n");
- printf("host: \033[33m%s:%u\033[0m\n", g_queryInfo.host, g_queryInfo.port);
- printf("user: \033[33m%s\033[0m\n", g_queryInfo.user);
- printf("password: \033[33m%s\033[0m\n", g_queryInfo.password);
- printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName);
-
- printf("\n");
- printf("specified table query info: \n");
- printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate);
- printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.concurrent);
- printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount);
-
- if (SUBSCRIBE_MODE == g_jsonType) {
- printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeMode);
- printf("interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval);
- printf("restart: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeRestart);
- printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeKeepProgress);
- }
-
-
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]);
- }
- printf("\n");
- printf("super table query info: \n");
- printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.rate);
- printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.threadCnt);
- printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.childTblCount);
- printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.subQueryInfo.sTblName);
-
- if (SUBSCRIBE_MODE == g_jsonType) {
- printf("mod: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeMode);
- printf("interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeInterval);
- printf("restart: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeRestart);
- printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeKeepProgress);
- }
-
- printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.sqlCount);
- for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) {
- printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.subQueryInfo.sql[i]);
- }
- printf("\n");
- printf("\033[1m\033[40;32m================ query.json parse result ================\033[0m\n");
-}
-
-
-static char* xFormatTimestamp(char* buf, int64_t val, int precision) {
- time_t tt;
- if (precision == TSDB_TIME_PRECISION_MICRO) {
- tt = (time_t)(val / 1000000);
- } else {
- tt = (time_t)(val / 1000);
- }
-
-/* comment out as it make testcases like select_with_tags.sim fail.
- but in windows, this may cause the call to localtime crash if tt < 0,
- need to find a better solution.
- if (tt < 0) {
- tt = 0;
- }
- */
-
-#ifdef WINDOWS
- if (tt < 0) tt = 0;
-#endif
-
- struct tm* ptm = localtime(&tt);
- size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);
-
- if (precision == TSDB_TIME_PRECISION_MICRO) {
- sprintf(buf + pos, ".%06d", (int)(val % 1000000));
- } else {
- sprintf(buf + pos, ".%03d", (int)(val % 1000));
- }
-
- return buf;
-}
-
-static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) {
- if (val == NULL) {
- fprintf(fp, "%s", TSDB_DATA_NULL_STR);
- return;
- }
-
- char buf[TSDB_MAX_BYTES_PER_ROW];
- switch (field->type) {
- case TSDB_DATA_TYPE_BOOL:
- fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0));
- break;
- case TSDB_DATA_TYPE_TINYINT:
- fprintf(fp, "%d", *((int8_t *)val));
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- fprintf(fp, "%d", *((int16_t *)val));
- break;
- case TSDB_DATA_TYPE_INT:
- fprintf(fp, "%d", *((int32_t *)val));
- break;
- case TSDB_DATA_TYPE_BIGINT:
- fprintf(fp, "%" PRId64, *((int64_t *)val));
- break;
- case TSDB_DATA_TYPE_FLOAT:
- fprintf(fp, "%.5f", GET_FLOAT_VAL(val));
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- fprintf(fp, "%.9f", GET_DOUBLE_VAL(val));
- break;
- case TSDB_DATA_TYPE_BINARY:
- case TSDB_DATA_TYPE_NCHAR:
- memcpy(buf, val, length);
- buf[length] = 0;
- fprintf(fp, "\'%s\'", buf);
- break;
- case TSDB_DATA_TYPE_TIMESTAMP:
- xFormatTimestamp(buf, *(int64_t*)val, precision);
- fprintf(fp, "'%s'", buf);
- break;
- default:
- break;
- }
-}
-
-static int xDumpResultToFile(const char* fname, TAOS_RES* tres) {
- TAOS_ROW row = taos_fetch_row(tres);
- if (row == NULL) {
- return 0;
- }
-
- FILE* fp = fopen(fname, "at");
- if (fp == NULL) {
- fprintf(stderr, "ERROR: failed to open file: %s\n", fname);
- return -1;
- }
-
- int num_fields = taos_num_fields(tres);
- TAOS_FIELD *fields = taos_fetch_fields(tres);
- int precision = taos_result_precision(tres);
-
- for (int col = 0; col < num_fields; col++) {
- if (col > 0) {
- fprintf(fp, ",");
- }
- fprintf(fp, "%s", fields[col].name);
- }
- fputc('\n', fp);
-
- int numOfRows = 0;
- do {
- int32_t* length = taos_fetch_lengths(tres);
- for (int i = 0; i < num_fields; i++) {
- if (i > 0) {
- fputc(',', fp);
- }
- xDumpFieldToFile(fp, (const char*)row[i], fields +i, length[i], precision);
- }
- fputc('\n', fp);
-
- numOfRows++;
- row = taos_fetch_row(tres);
- } while( row != NULL);
-
- fclose(fp);
-
- return numOfRows;
-}
-
-static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
- TAOS_RES * res;
- TAOS_ROW row = NULL;
- int count = 0;
-
- res = taos_query(taos, "show databases;");
- int32_t code = taos_errno(res);
-
- if (code != 0) {
- fprintf(stderr, "failed to run , reason: %s\n", taos_errstr(res));
- return -1;
- }
-
- TAOS_FIELD *fields = taos_fetch_fields(res);
-
- while ((row = taos_fetch_row(res)) != NULL) {
- // sys database name : 'log'
- if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) continue;
-
- dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
- if (dbInfos[count] == NULL) {
- fprintf(stderr, "failed to allocate memory for some dbInfo[%d]\n", count);
- return -1;
- }
-
- strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
- xFormatTimestamp(dbInfos[count]->create_time, *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], TSDB_TIME_PRECISION_MILLI);
- dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
- dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
- dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
- dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
- dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
-
- strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
- dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
- dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
- dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
- dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
- dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
- dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
- dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
- dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
-
- strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
- dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
- strncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);
-
- count++;
- if (count > MAX_DATABASE_COUNT) {
- fprintf(stderr, "The database count overflow than %d\n", MAX_DATABASE_COUNT);
- break;
- }
- }
-
- return count;
-}
-
-static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int index) {
- FILE *fp = NULL;
- if (filename[0] != 0) {
- fp = fopen(filename, "at");
- if (fp == NULL) {
- fprintf(stderr, "failed to open file: %s\n", filename);
- return;
- }
- }
-
- fprintf(fp, "================ database[%d] ================\n", index);
- fprintf(fp, "name: %s\n", dbInfos->name);
- fprintf(fp, "created_time: %s\n", dbInfos->create_time);
- fprintf(fp, "ntables: %d\n", dbInfos->ntables);
- fprintf(fp, "vgroups: %d\n", dbInfos->vgroups);
- fprintf(fp, "replica: %d\n", dbInfos->replica);
- fprintf(fp, "quorum: %d\n", dbInfos->quorum);
- fprintf(fp, "days: %d\n", dbInfos->days);
- fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist);
- fprintf(fp, "cache(MB): %d\n", dbInfos->cache);
- fprintf(fp, "blocks: %d\n", dbInfos->blocks);
- fprintf(fp, "minrows: %d\n", dbInfos->minrows);
- fprintf(fp, "maxrows: %d\n", dbInfos->maxrows);
- fprintf(fp, "wallevel: %d\n", dbInfos->wallevel);
- fprintf(fp, "fsync: %d\n", dbInfos->fsync);
- fprintf(fp, "comp: %d\n", dbInfos->comp);
- fprintf(fp, "cachelast: %d\n", dbInfos->cachelast);
- fprintf(fp, "precision: %s\n", dbInfos->precision);
- fprintf(fp, "update: %d\n", dbInfos->update);
- fprintf(fp, "status: %s\n", dbInfos->status);
- fprintf(fp, "\n");
-
- fclose(fp);
-}
-
-static void printfQuerySystemInfo(TAOS * taos) {
- char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
- char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
- TAOS_RES* res;
-
- time_t t;
- struct tm* lt;
- time(&t);
- lt = localtime(&t);
- snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec);
-
- // show variables
- res = taos_query(taos, "show variables;");
- //getResult(res, filename);
- xDumpResultToFile(filename, res);
-
- // show dnodes
- res = taos_query(taos, "show dnodes;");
- xDumpResultToFile(filename, res);
- //getResult(res, filename);
-
- // show databases
- res = taos_query(taos, "show databases;");
- SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *));
- if (dbInfos == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- return;
- }
- int dbCount = getDbFromServer(taos, dbInfos);
- if (dbCount <= 0) return;
-
- for (int i = 0; i < dbCount; i++) {
- // printf database info
- printfDbInfoForQueryToFile(filename, dbInfos[i], i);
-
- // show db.vgroups
- snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
- res = taos_query(taos, buffer);
- xDumpResultToFile(filename, res);
-
- // show db.stables
- snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
- res = taos_query(taos, buffer);
- xDumpResultToFile(filename, res);
-
- free(dbInfos[i]);
- }
-
- free(dbInfos);
-
-}
-
-
-#ifdef TD_LOWA_CURL
-static size_t responseCallback(void *contents, size_t size, size_t nmemb, void *userp)
-{
- size_t realsize = size * nmemb;
- curlMemInfo* mem = (curlMemInfo*)userp;
-
- char *ptr = realloc(mem->buf, mem->sizeleft + realsize + 1);
- if(ptr == NULL) {
- /* out of memory! */
- printf("not enough memory (realloc returned NULL)\n");
- return 0;
- }
-
- mem->buf = ptr;
- memcpy(&(mem->buf[mem->sizeleft]), contents, realsize);
- mem->sizeleft += realsize;
- mem->buf[mem->sizeleft] = 0;
-
- //printf("result:%s\n\n", mem->buf);
-
- return realsize;
-}
-
-void curlProceLogin(void)
-{
- CURL *curl_handle;
- CURLcode res;
-
- curlMemInfo chunk;
-
- chunk.buf = malloc(1); /* will be grown as needed by the realloc above */
- chunk.sizeleft = 0; /* no data at this point */
-
- //curl_global_init(CURL_GLOBAL_ALL);
-
- /* init the curl session */
- curl_handle = curl_easy_init();
-
- curl_easy_setopt(curl_handle,CURLOPT_POSTFIELDS,"");
- curl_easy_setopt(curl_handle, CURLOPT_POST, 1);
-
- char dstUrl[128] = {0};
- snprintf(dstUrl, 128, "http://%s:6041/rest/login/root/taosdata", g_Dbs.host);
-
- /* specify URL to get */
- curl_easy_setopt(curl_handle, CURLOPT_URL, dstUrl);
-
- /* send all data to this function */
- curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, responseCallback);
-
- /* we pass our 'chunk' struct to the callback function */
- curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *)&chunk);
-
- /* do it! */
- res = curl_easy_perform(curl_handle);
-
- /* check for errors */
- if(res != CURLE_OK) {
- fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
- }
- else {
- //printf("response len:%lu, content: %s \n", (unsigned long)chunk.sizeleft, chunk.buf);
- ;
- }
-
- /* cleanup curl stuff */
- curl_easy_cleanup(curl_handle);
-
- free(chunk.buf);
-
- /* we're done with libcurl, so clean it up */
- //curl_global_cleanup();
-
- return;
-}
-
-int curlProceSql(char* host, uint16_t port, char* sqlstr, CURL *curl_handle)
-{
- //curlProceLogin();
-
- //CURL *curl_handle;
- CURLcode res;
-
- curlMemInfo chunk;
-
- chunk.buf = malloc(1); /* will be grown as needed by the realloc above */
- chunk.sizeleft = 0; /* no data at this point */
-
-
- char dstUrl[128] = {0};
- snprintf(dstUrl, 128, "http://%s:%u/rest/sql", host, port+TSDB_PORT_HTTP);
-
- //curl_global_init(CURL_GLOBAL_ALL);
-
- /* init the curl session */
- //curl_handle = curl_easy_init();
-
- //curl_easy_setopt(curl_handle,CURLOPT_POSTFIELDS,"");
- curl_easy_setopt(curl_handle, CURLOPT_POST, 1L);
-
- /* specify URL to get */
- curl_easy_setopt(curl_handle, CURLOPT_URL, dstUrl);
-
- /* enable TCP keep-alive for this transfer */
- curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPALIVE, 1L);
- /* keep-alive idle time to 120 seconds */
- curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPIDLE, 120L);
- /* interval time between keep-alive probes: 60 seconds */
- curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPINTVL, 60L);
-
- /* send all data to this function */
- curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, responseCallback);
-
- /* we pass our 'chunk' struct to the callback function */
- curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *)&chunk);
-
- struct curl_slist *list = NULL;
- list = curl_slist_append(list, "Authorization: Basic cm9vdDp0YW9zZGF0YQ==");
- curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, list);
- curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, list);
-
- /* Set the expected upload size. */
- curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)strlen(sqlstr));
- curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, sqlstr);
-
- /* get it! */
- res = curl_easy_perform(curl_handle);
-
- /* check for errors */
- if(res != CURLE_OK) {
- fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
- return -1;
- }
- else {
- /* curl_easy_perform() block end and return result */
- //printf("[%32.32s] sql response len:%lu, content: %s \n\n", sqlstr, (unsigned long)chunk.sizeleft, chunk.buf);
- ;
- }
-
- curl_slist_free_all(list); /* free the list again */
-
- /* cleanup curl stuff */
- //curl_easy_cleanup(curl_handle);
-
- free(chunk.buf);
-
- /* we're done with libcurl, so clean it up */
- //curl_global_cleanup();
-
- return 0;
-}
-#endif
-
-char* getTagValueFromTagSample( SSuperTable* stbInfo, int tagUsePos) {
- char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
- if (NULL == dataBuf) {
- printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1);
- return NULL;
- }
-
- int dataLen = 0;
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos);
-
- return dataBuf;
-}
-
-char* generateTagVaulesForStb(SSuperTable* stbInfo) {
- char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
- if (NULL == dataBuf) {
- printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1);
- return NULL;
- }
-
- int dataLen = 0;
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "(");
- for (int i = 0; i < stbInfo->tagCount; i++) {
- if ((0 == strncasecmp(stbInfo->tags[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", 5))) {
- if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) {
- printf("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN);
- tmfree(dataBuf);
- return NULL;
- }
-
- char* buf = (char*)calloc(stbInfo->tags[i].dataLen+1, 1);
- if (NULL == buf) {
- printf("calloc failed! size:%d\n", stbInfo->tags[i].dataLen);
- tmfree(dataBuf);
- return NULL;
- }
- rand_string(buf, stbInfo->tags[i].dataLen);
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "\'%s\', ", buf);
- tmfree(buf);
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "int", 3)) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_int());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bigint", 6)) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64", ", rand_bigint());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "float", 5)) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%f, ", rand_float());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "double", 6)) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%f, ", rand_double());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "smallint", 8)) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_smallint());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "tinyint", 7)) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_tinyint());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bool", 4)) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_bool());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "timestamp", 4)) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64", ", rand_bigint());
- } else {
- printf("No support data type: %s\n", stbInfo->tags[i].dataType);
- tmfree(dataBuf);
- return NULL;
- }
- }
- dataLen -= 2;
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")");
- return dataBuf;
-}
-
-static int calcRowLen(SSuperTable* superTbls) {
- int colIndex;
- int lenOfOneRow = 0;
-
- for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) {
- char* dataType = superTbls->columns[colIndex].dataType;
-
- if (strcasecmp(dataType, "BINARY") == 0) {
- lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "NCHAR") == 0) {
- lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "INT") == 0) {
- lenOfOneRow += 11;
- } else if (strcasecmp(dataType, "BIGINT") == 0) {
- lenOfOneRow += 21;
- } else if (strcasecmp(dataType, "SMALLINT") == 0) {
- lenOfOneRow += 6;
- } else if (strcasecmp(dataType, "TINYINT") == 0) {
- lenOfOneRow += 4;
- } else if (strcasecmp(dataType, "BOOL") == 0) {
- lenOfOneRow += 6;
- } else if (strcasecmp(dataType, "FLOAT") == 0) {
- lenOfOneRow += 22;
- } else if (strcasecmp(dataType, "DOUBLE") == 0) {
- lenOfOneRow += 42;
- } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
- lenOfOneRow += 21;
- } else {
- printf("get error data type : %s\n", dataType);
- exit(-1);
- }
- }
-
- superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp
-
- int tagIndex;
- int lenOfTagOfOneRow = 0;
- for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) {
- char* dataType = superTbls->tags[tagIndex].dataType;
-
- if (strcasecmp(dataType, "BINARY") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "NCHAR") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "INT") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11;
- } else if (strcasecmp(dataType, "BIGINT") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21;
- } else if (strcasecmp(dataType, "SMALLINT") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
- } else if (strcasecmp(dataType, "TINYINT") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4;
- } else if (strcasecmp(dataType, "BOOL") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
- } else if (strcasecmp(dataType, "FLOAT") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22;
- } else if (strcasecmp(dataType, "DOUBLE") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42;
- } else {
- printf("get error tag type : %s\n", dataType);
- exit(-1);
- }
- }
-
- superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow;
-
- return 0;
-}
-
-
-static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, int* childTblCountOfSuperTbl) {
- char command[BUFFER_SIZE] = "\0";
- TAOS_RES * res;
- TAOS_ROW row = NULL;
-
- char* childTblName = *childTblNameOfSuperTbl;
-
- //get all child table name use cmd: select tbname from superTblName;
- snprintf(command, BUFFER_SIZE, "select tbname from %s.%s", dbName, sTblName);
- res = taos_query(taos, command);
- int32_t code = taos_errno(res);
- if (code != 0) {
- printf("failed to run command %s\n", command);
- taos_free_result(res);
- taos_close(taos);
- exit(-1);
- }
-
- int childTblCount = 10000;
- int count = 0;
- childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
- char* pTblName = childTblName;
- while ((row = taos_fetch_row(res)) != NULL) {
- int32_t* len = taos_fetch_lengths(res);
- strncpy(pTblName, (char *)row[0], len[0]);
- //printf("==== sub table name: %s\n", pTblName);
- count++;
- if (count >= childTblCount - 1) {
- char *tmp = realloc(childTblName, (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1);
- if (tmp != NULL) {
- childTblName = tmp;
- childTblCount = (int)(childTblCount*1.5);
- memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN));
- } else {
- // exit, if allocate more memory failed
- printf("realloc fail for save child table name of %s.%s\n", dbName, sTblName);
- tmfree(childTblName);
- taos_free_result(res);
- taos_close(taos);
- exit(-1);
- }
- }
- pTblName = childTblName + count * TSDB_TABLE_NAME_LEN;
- }
-
- *childTblCountOfSuperTbl = count;
- *childTblNameOfSuperTbl = childTblName;
-
- taos_free_result(res);
- return 0;
-}
-
-static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* superTbls) {
- char command[BUFFER_SIZE] = "\0";
- TAOS_RES * res;
- TAOS_ROW row = NULL;
- int count = 0;
-
- //get schema use cmd: describe superTblName;
- snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName);
- res = taos_query(taos, command);
- int32_t code = taos_errno(res);
- if (code != 0) {
- printf("failed to run command %s\n", command);
- taos_free_result(res);
- return -1;
- }
-
- int tagIndex = 0;
- int columnIndex = 0;
- TAOS_FIELD *fields = taos_fetch_fields(res);
- while ((row = taos_fetch_row(res)) != NULL) {
- if (0 == count) {
- count++;
- continue;
- }
-
- if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) {
- strncpy(superTbls->tags[tagIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
- strncpy(superTbls->tags[tagIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
- superTbls->tags[tagIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
- strncpy(superTbls->tags[tagIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
- tagIndex++;
- } else {
- strncpy(superTbls->columns[columnIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
- strncpy(superTbls->columns[columnIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
- superTbls->columns[columnIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
- strncpy(superTbls->columns[columnIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
- columnIndex++;
- }
- count++;
- }
-
- superTbls->columnCount = columnIndex;
- superTbls->tagCount = tagIndex;
- taos_free_result(res);
-
- calcRowLen(superTbls);
-
- if (TBL_ALREADY_EXISTS == superTbls->childTblExists) {
- //get all child table name use cmd: select tbname from superTblName;
- getAllChildNameOfSuperTable(taos, dbName, superTbls->sTblName, &superTbls->childTblName, &superTbls->childTblCount);
- }
- return 0;
-}
-
-static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls, bool use_metric) {
- char command[BUFFER_SIZE] = "\0";
-
- char cols[STRING_LEN] = "\0";
- int colIndex;
- int len = 0;
-
- int lenOfOneRow = 0;
- for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) {
- char* dataType = superTbls->columns[colIndex].dataType;
-
- if (strcasecmp(dataType, "BINARY") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "BINARY", superTbls->columns[colIndex].dataLen);
- lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "NCHAR") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "NCHAR", superTbls->columns[colIndex].dataLen);
- lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "INT") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT");
- lenOfOneRow += 11;
- } else if (strcasecmp(dataType, "BIGINT") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BIGINT");
- lenOfOneRow += 21;
- } else if (strcasecmp(dataType, "SMALLINT") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "SMALLINT");
- lenOfOneRow += 6;
- } else if (strcasecmp(dataType, "TINYINT") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TINYINT");
- lenOfOneRow += 4;
- } else if (strcasecmp(dataType, "BOOL") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BOOL");
- lenOfOneRow += 6;
- } else if (strcasecmp(dataType, "FLOAT") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "FLOAT");
- lenOfOneRow += 22;
- } else if (strcasecmp(dataType, "DOUBLE") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "DOUBLE");
- lenOfOneRow += 42;
- } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TIMESTAMP");
- lenOfOneRow += 21;
- } else {
- taos_close(taos);
- printf("config error data type : %s\n", dataType);
- exit(-1);
- }
- }
-
- superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp
- //printf("%s.%s column count:%d, column length:%d\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName, g_Dbs.db[i].superTbls[j].columnCount, lenOfOneRow);
-
- // save for creating child table
- superTbls->colsOfCreatChildTable = (char*)calloc(len+20, 1);
- if (NULL == superTbls->colsOfCreatChildTable) {
- printf("Failed when calloc, size:%d", len+1);
- taos_close(taos);
- exit(-1);
- }
- snprintf(superTbls->colsOfCreatChildTable, len+20, "(ts timestamp%s)", cols);
-
- if (use_metric) {
- char tags[STRING_LEN] = "\0";
- int tagIndex;
- len = 0;
-
- int lenOfTagOfOneRow = 0;
- len += snprintf(tags + len, STRING_LEN - len, "(");
- for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) {
- char* dataType = superTbls->tags[tagIndex].dataType;
-
- if (strcasecmp(dataType, "BINARY") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, "BINARY", superTbls->tags[tagIndex].dataLen);
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "NCHAR") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, "NCHAR", superTbls->tags[tagIndex].dataLen);
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "INT") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "INT");
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11;
- } else if (strcasecmp(dataType, "BIGINT") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BIGINT");
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21;
- } else if (strcasecmp(dataType, "SMALLINT") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "SMALLINT");
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
- } else if (strcasecmp(dataType, "TINYINT") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "TINYINT");
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4;
- } else if (strcasecmp(dataType, "BOOL") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BOOL");
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
- } else if (strcasecmp(dataType, "FLOAT") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "FLOAT");
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22;
- } else if (strcasecmp(dataType, "DOUBLE") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "DOUBLE");
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42;
- } else {
- taos_close(taos);
- printf("config error tag type : %s\n", dataType);
- exit(-1);
- }
- }
- len -= 2;
- len += snprintf(tags + len, STRING_LEN - len, ")");
-
- superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow;
-
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s (ts timestamp%s) tags %s", dbName, superTbls->sTblName, cols, tags);
- if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
- return -1;
- }
- printf("\ncreate supertable %s success!\n\n", superTbls->sTblName);
- }
- return 0;
-}
-
-
-static int createDatabases() {
- TAOS * taos = NULL;
- int ret = 0;
- if (taos_init()) {
- fprintf(stderr, "Failed to init taos\n");
- exit(-1);
- }
-
- taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port);
- if (taos == NULL) {
- fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
- exit(-1);
- }
- char command[BUFFER_SIZE] = "\0";
-
-
- for (int i = 0; i < g_Dbs.dbCount; i++) {
- if (g_Dbs.db[i].drop) {
- sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName);
- if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
- taos_close(taos);
- return -1;
- }
- }
-
- int dataLen = 0;
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "create database if not exists %s ", g_Dbs.db[i].dbName);
-
- if (g_Dbs.db[i].dbCfg.blocks > 0) {
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "blocks %d ", g_Dbs.db[i].dbCfg.blocks);
- }
- if (g_Dbs.db[i].dbCfg.cache > 0) {
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "cache %d ", g_Dbs.db[i].dbCfg.cache);
- }
- if (g_Dbs.db[i].dbCfg.days > 0) {
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "days %d ", g_Dbs.db[i].dbCfg.days);
- }
- if (g_Dbs.db[i].dbCfg.keep > 0) {
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "keep %d ", g_Dbs.db[i].dbCfg.keep);
- }
- if (g_Dbs.db[i].dbCfg.replica > 0) {
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "replica %d ", g_Dbs.db[i].dbCfg.replica);
- }
- if (g_Dbs.db[i].dbCfg.update > 0) {
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "update %d ", g_Dbs.db[i].dbCfg.update);
- }
- //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) {
- // dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode);
- //}
- if (g_Dbs.db[i].dbCfg.minRows > 0) {
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "minrows %d ", g_Dbs.db[i].dbCfg.minRows);
- }
- if (g_Dbs.db[i].dbCfg.maxRows > 0) {
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "maxrows %d ", g_Dbs.db[i].dbCfg.maxRows);
- }
- if (g_Dbs.db[i].dbCfg.comp > 0) {
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "comp %d ", g_Dbs.db[i].dbCfg.comp);
- }
- if (g_Dbs.db[i].dbCfg.walLevel > 0) {
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "wal %d ", g_Dbs.db[i].dbCfg.walLevel);
- }
- if (g_Dbs.db[i].dbCfg.fsync > 0) {
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "fsync %d ", g_Dbs.db[i].dbCfg.fsync);
- }
- if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
- dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "precision \'%s\';", g_Dbs.db[i].dbCfg.precision);
- }
-
- if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
- taos_close(taos);
- return -1;
- }
- printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName);
-
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- // describe super table, if exists
- sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName);
- if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
- g_Dbs.db[i].superTbls[j].superTblExists = TBL_NO_EXISTS;
- ret = createSuperTable(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j], g_Dbs.use_metric);
- } else {
- g_Dbs.db[i].superTbls[j].superTblExists = TBL_ALREADY_EXISTS;
- ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j]);
- }
-
- if (0 != ret) {
- taos_close(taos);
- return -1;
- }
- }
- }
-
- taos_close(taos);
- return 0;
-}
-
-
-void * createTable(void *sarg)
-{
- threadInfo *winfo = (threadInfo *)sarg;
- SSuperTable* superTblInfo = winfo->superTblInfo;
-
- int64_t lastPrintTime = taosGetTimestampMs();
-
- char* buffer = calloc(superTblInfo->maxSqlLen, 1);
-
- int len = 0;
- int batchNum = 0;
- //printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
- for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
- if (0 == g_Dbs.use_metric) {
- snprintf(buffer, BUFFER_SIZE, "create table if not exists %s.%s%d %s;", winfo->db_name, superTblInfo->childTblPrefix, i, superTblInfo->colsOfCreatChildTable);
- } else {
- if (0 == len) {
- batchNum = 0;
- memset(buffer, 0, superTblInfo->maxSqlLen);
- len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "create table ");
- }
-
- char* tagsValBuf = NULL;
- if (0 == superTblInfo->tagSource) {
- tagsValBuf = generateTagVaulesForStb(superTblInfo);
- } else {
- tagsValBuf = getTagValueFromTagSample(superTblInfo, i % superTblInfo->tagSampleCount);
- }
- if (NULL == tagsValBuf) {
- free(buffer);
- return NULL;
- }
-
- len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "if not exists %s.%s%d using %s.%s tags %s ", winfo->db_name, superTblInfo->childTblPrefix, i, winfo->db_name, superTblInfo->sTblName, tagsValBuf);
- free(tagsValBuf);
- batchNum++;
-
- if ((batchNum < superTblInfo->batchCreateTableNum) && ((superTblInfo->maxSqlLen - len) >= (superTblInfo->lenOfTagOfOneRow + 256))) {
- continue;
- }
- }
-
- len = 0;
- if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)){
- free(buffer);
- return NULL;
- }
-
- int64_t currentPrintTime = taosGetTimestampMs();
- if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] already create %d - %d tables\n", winfo->threadID, winfo->start_table_id, i);
- lastPrintTime = currentPrintTime;
- }
- }
-
- if (0 != len) {
- (void)queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE);
- }
-
- free(buffer);
- return NULL;
-}
-
-void startMultiThreadCreateChildTable(char* cols, int threads, int ntables, char* db_name, SSuperTable* superTblInfo) {
- pthread_t *pids = malloc(threads * sizeof(pthread_t));
- threadInfo *infos = malloc(threads * sizeof(threadInfo));
-
- if ((NULL == pids) || (NULL == infos)) {
- printf("malloc failed\n");
- exit(-1);
- }
-
- if (threads < 1) {
- threads = 1;
- }
-
- int a = ntables / threads;
- if (a < 1) {
- threads = ntables;
- a = 1;
- }
-
- int b = 0;
- b = ntables % threads;
-
- int last = 0;
- for (int i = 0; i < threads; i++) {
- threadInfo *t_info = infos + i;
- t_info->threadID = i;
- tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
- t_info->superTblInfo = superTblInfo;
- t_info->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port);
- t_info->start_table_id = last;
- t_info->end_table_id = i < b ? last + a : last + a - 1;
- last = t_info->end_table_id + 1;
- t_info->use_metric = 1;
- t_info->cols = cols;
- pthread_create(pids + i, NULL, createTable, t_info);
- }
-
- for (int i = 0; i < threads; i++) {
- pthread_join(pids[i], NULL);
- }
-
- for (int i = 0; i < threads; i++) {
- threadInfo *t_info = infos + i;
- taos_close(t_info->taos);
- }
-
- free(pids);
- free(infos);
-}
-
-
-static void createChildTables() {
- for (int i = 0; i < g_Dbs.dbCount; i++) {
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) {
- continue;
- }
- startMultiThreadCreateChildTable(g_Dbs.db[i].superTbls[j].colsOfCreatChildTable, g_Dbs.threadCountByCreateTbl, g_Dbs.db[i].superTbls[j].childTblCount, g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j]));
- g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
- }
- }
-}
-
-/*
-static int taosGetLineNum(const char *fileName)
-{
- int lineNum = 0;
- char cmd[1024] = { 0 };
- char buf[1024] = { 0 };
- sprintf(cmd, "wc -l %s", fileName);
-
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno));
- return lineNum;
- }
-
- if (fgets(buf, sizeof(buf), fp)) {
- int index = strchr((const char*)buf, ' ') - buf;
- buf[index] = '\0';
- lineNum = atoi(buf);
- }
- pclose(fp);
- return lineNum;
-}
-*/
-
-/*
- Read 10000 lines at most. If more than 10000 lines, continue to read after using
-*/
-int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
- size_t n = 0;
- ssize_t readLen = 0;
- char * line = NULL;
-
- FILE *fp = fopen(superTblInfo->tagsFile, "r");
- if (fp == NULL) {
- printf("Failed to open tags file: %s, reason:%s\n", superTblInfo->tagsFile, strerror(errno));
- return -1;
- }
-
- if (superTblInfo->tagDataBuf) {
- free(superTblInfo->tagDataBuf);
- superTblInfo->tagDataBuf = NULL;
- }
-
- int tagCount = 10000;
- int count = 0;
- char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount);
- if (tagDataBuf == NULL) {
- printf("Failed to calloc, reason:%s\n", strerror(errno));
- fclose(fp);
- return -1;
- }
-
- while ((readLen = getline(&line, &n, fp)) != -1) {
- if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
- line[--readLen] = 0;
- }
-
- if (readLen == 0) {
- continue;
- }
-
- memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen);
- count++;
-
- if (count >= tagCount - 1) {
- char *tmp = realloc(tagDataBuf, (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow);
- if (tmp != NULL) {
- tagDataBuf = tmp;
- tagCount = (int)(tagCount*1.5);
- memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow));
- } else {
- // exit, if allocate more memory failed
- printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile);
- tmfree(tagDataBuf);
- free(line);
- fclose(fp);
- return -1;
- }
- }
- }
-
- superTblInfo->tagDataBuf = tagDataBuf;
- superTblInfo->tagSampleCount = count;
-
- free(line);
- fclose(fp);
- return 0;
-}
-
-int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) {
- // TODO
- return 0;
-}
-
-
-/*
- Read 10000 lines at most. If more than 10000 lines, continue to read after using
-*/
-int readSampleFromCsvFileToMem(FILE *fp, SSuperTable* superTblInfo, char* sampleBuf) {
- size_t n = 0;
- ssize_t readLen = 0;
- char * line = NULL;
- int getRows = 0;
-
- memset(sampleBuf, 0, MAX_SAMPLES_ONCE_FROM_FILE* superTblInfo->lenOfOneRow);
- while (1) {
- readLen = getline(&line, &n, fp);
- if (-1 == readLen) {
- if(0 != fseek(fp, 0, SEEK_SET)) {
- printf("Failed to fseek file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno));
- return -1;
- }
- continue;
- }
-
- if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
- line[--readLen] = 0;
- }
-
- if (readLen == 0) {
- continue;
- }
-
- if (readLen > superTblInfo->lenOfOneRow) {
- printf("sample row len[%d] overflow define schema len[%d], so discard this row\n", (int32_t)readLen, superTblInfo->lenOfOneRow);
- continue;
- }
-
- memcpy(sampleBuf + getRows * superTblInfo->lenOfOneRow, line, readLen);
- getRows++;
-
- if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) {
- break;
- }
- }
-
- tmfree(line);
- return 0;
-}
-
-/*
-void readSampleFromFileToMem(SSuperTable * supterTblInfo) {
- int ret;
- if (0 == strncasecmp(supterTblInfo->sampleFormat, "csv", 3)) {
- ret = readSampleFromCsvFileToMem(supterTblInfo);
- } else if (0 == strncasecmp(supterTblInfo->sampleFormat, "json", 4)) {
- ret = readSampleFromJsonFileToMem(supterTblInfo);
- }
-
- if (0 != ret) {
- exit(-1);
- }
-}
-*/
-static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* superTbls) {
- bool ret = false;
-
- // columns
- cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns");
- if (columns && columns->type != cJSON_Array) {
- printf("failed to read json, columns not found\n");
- goto PARSE_OVER;
- } else if (NULL == columns) {
- superTbls->columnCount = 0;
- superTbls->tagCount = 0;
- return true;
- }
-
- int columnSize = cJSON_GetArraySize(columns);
- if (columnSize > MAX_COLUMN_COUNT) {
- printf("failed to read json, column size overflow, max column size is %d\n", MAX_COLUMN_COUNT);
- goto PARSE_OVER;
- }
-
- int count = 1;
- int index = 0;
- StrColumn columnCase;
-
- //superTbls->columnCount = columnSize;
- for (int k = 0; k < columnSize; ++k) {
- cJSON* column = cJSON_GetArrayItem(columns, k);
- if (column == NULL) continue;
-
- count = 1;
- cJSON* countObj = cJSON_GetObjectItem(column, "count");
- if (countObj && countObj->type == cJSON_Number) {
- count = countObj->valueint;
- } else if (countObj && countObj->type != cJSON_Number) {
- printf("failed to read json, column count not found");
- goto PARSE_OVER;
- } else {
- count = 1;
- }
-
- // column info
- memset(&columnCase, 0, sizeof(StrColumn));
- cJSON *dataType = cJSON_GetObjectItem(column, "type");
- if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) {
- printf("failed to read json, column type not found");
- goto PARSE_OVER;
- }
- //strncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
- strncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
-
- cJSON* dataLen = cJSON_GetObjectItem(column, "len");
- if (dataLen && dataLen->type == cJSON_Number) {
- columnCase.dataLen = dataLen->valueint;
- } else if (dataLen && dataLen->type != cJSON_Number) {
- printf("failed to read json, column len not found");
- goto PARSE_OVER;
- } else {
- columnCase.dataLen = 8;
- }
-
- for (int n = 0; n < count; ++n) {
- strncpy(superTbls->columns[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
- superTbls->columns[index].dataLen = columnCase.dataLen;
- index++;
- }
- }
- superTbls->columnCount = index;
-
- count = 1;
- index = 0;
- // tags
- cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags");
- if (!tags || tags->type != cJSON_Array) {
- printf("failed to read json, tags not found");
- goto PARSE_OVER;
- }
-
- int tagSize = cJSON_GetArraySize(tags);
- if (tagSize > MAX_TAG_COUNT) {
- printf("failed to read json, tags size overflow, max tag size is %d\n", MAX_TAG_COUNT);
- goto PARSE_OVER;
- }
-
- //superTbls->tagCount = tagSize;
- for (int k = 0; k < tagSize; ++k) {
- cJSON* tag = cJSON_GetArrayItem(tags, k);
- if (tag == NULL) continue;
-
- count = 1;
- cJSON* countObj = cJSON_GetObjectItem(tag, "count");
- if (countObj && countObj->type == cJSON_Number) {
- count = countObj->valueint;
- } else if (countObj && countObj->type != cJSON_Number) {
- printf("failed to read json, column count not found");
- goto PARSE_OVER;
- } else {
- count = 1;
- }
-
- // column info
- memset(&columnCase, 0, sizeof(StrColumn));
- cJSON *dataType = cJSON_GetObjectItem(tag, "type");
- if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) {
- printf("failed to read json, tag type not found");
- goto PARSE_OVER;
- }
- strncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
-
- cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
- if (dataLen && dataLen->type == cJSON_Number) {
- columnCase.dataLen = dataLen->valueint;
- } else if (dataLen && dataLen->type != cJSON_Number) {
- printf("failed to read json, column len not found");
- goto PARSE_OVER;
- } else {
- columnCase.dataLen = 0;
- }
-
- for (int n = 0; n < count; ++n) {
- strncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
- superTbls->tags[index].dataLen = columnCase.dataLen;
- index++;
- }
- }
- superTbls->tagCount = index;
-
- ret = true;
-
-PARSE_OVER:
- //free(content);
- //cJSON_Delete(root);
- //fclose(fp);
- return ret;
-}
-
-static bool getMetaFromInsertJsonFile(cJSON* root) {
- bool ret = false;
-
- cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir");
- if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) {
- strncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
- }
-
- cJSON* host = cJSON_GetObjectItem(root, "host");
- if (host && host->type == cJSON_String && host->valuestring != NULL) {
- strncpy(g_Dbs.host, host->valuestring, MAX_DB_NAME_SIZE);
- } else if (!host) {
- strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
- } else {
- printf("failed to read json, host not found\n");
- goto PARSE_OVER;
- }
-
- cJSON* port = cJSON_GetObjectItem(root, "port");
- if (port && port->type == cJSON_Number) {
- g_Dbs.port = port->valueint;
- } else if (!port) {
- g_Dbs.port = 6030;
- }
-
- cJSON* user = cJSON_GetObjectItem(root, "user");
- if (user && user->type == cJSON_String && user->valuestring != NULL) {
- strncpy(g_Dbs.user, user->valuestring, MAX_DB_NAME_SIZE);
- } else if (!user) {
- strncpy(g_Dbs.user, "root", MAX_DB_NAME_SIZE);
- }
-
- cJSON* password = cJSON_GetObjectItem(root, "password");
- if (password && password->type == cJSON_String && password->valuestring != NULL) {
- strncpy(g_Dbs.password, password->valuestring, MAX_DB_NAME_SIZE);
- } else if (!password) {
- strncpy(g_Dbs.password, "taosdata", MAX_DB_NAME_SIZE);
- }
-
- cJSON* resultfile = cJSON_GetObjectItem(root, "result_file");
- if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) {
- strncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN);
- } else if (!resultfile) {
- strncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN);
- }
-
- cJSON* threads = cJSON_GetObjectItem(root, "thread_count");
- if (threads && threads->type == cJSON_Number) {
- g_Dbs.threadCount = threads->valueint;
- } else if (!threads) {
- g_Dbs.threadCount = 1;
- } else {
- printf("failed to read json, threads not found");
- goto PARSE_OVER;
- }
-
- cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl");
- if (threads2 && threads2->type == cJSON_Number) {
- g_Dbs.threadCountByCreateTbl = threads2->valueint;
- } else if (!threads2) {
- g_Dbs.threadCountByCreateTbl = 1;
- } else {
- printf("failed to read json, threads2 not found");
- goto PARSE_OVER;
- }
-
- cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
- if (answerPrompt && answerPrompt->type == cJSON_String && answerPrompt->valuestring != NULL) {
- if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) {
- g_args.answer_yes = false;
- } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) {
- g_args.answer_yes = true;
- } else {
- g_args.answer_yes = false;
- }
- } else if (!answerPrompt) {
- g_args.answer_yes = false;
- } else {
- printf("failed to read json, confirm_parameter_prompt not found");
- goto PARSE_OVER;
- }
-
- cJSON* dbs = cJSON_GetObjectItem(root, "databases");
- if (!dbs || dbs->type != cJSON_Array) {
- printf("failed to read json, databases not found\n");
- goto PARSE_OVER;
- }
-
- int dbSize = cJSON_GetArraySize(dbs);
- if (dbSize > MAX_DB_COUNT) {
- printf("failed to read json, databases size overflow, max database is %d\n", MAX_DB_COUNT);
- goto PARSE_OVER;
- }
-
- g_Dbs.dbCount = dbSize;
- for (int i = 0; i < dbSize; ++i) {
- cJSON* dbinfos = cJSON_GetArrayItem(dbs, i);
- if (dbinfos == NULL) continue;
-
- // dbinfo
- cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo");
- if (!dbinfo || dbinfo->type != cJSON_Object) {
- printf("failed to read json, dbinfo not found");
- goto PARSE_OVER;
- }
-
- cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name");
- if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) {
- printf("failed to read json, db name not found");
- goto PARSE_OVER;
- }
- strncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE);
-
- cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop");
- if (drop && drop->type == cJSON_String && drop->valuestring != NULL) {
- if (0 == strncasecmp(drop->valuestring, "yes", 3)) {
- g_Dbs.db[i].drop = 1;
- } else {
- g_Dbs.db[i].drop = 0;
- }
- } else if (!drop) {
- g_Dbs.db[i].drop = 0;
- } else {
- printf("failed to read json, drop not found");
- goto PARSE_OVER;
- }
-
- cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision");
- if (precision && precision->type == cJSON_String && precision->valuestring != NULL) {
- strncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, MAX_DB_NAME_SIZE);
- } else if (!precision) {
- //strncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
- memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE);
- } else {
- printf("failed to read json, precision not found");
- goto PARSE_OVER;
- }
-
- cJSON* update = cJSON_GetObjectItem(dbinfo, "update");
- if (update && update->type == cJSON_Number) {
- g_Dbs.db[i].dbCfg.update = update->valueint;
- } else if (!update) {
- g_Dbs.db[i].dbCfg.update = -1;
- } else {
- printf("failed to read json, update not found");
- goto PARSE_OVER;
- }
-
- cJSON* replica = cJSON_GetObjectItem(dbinfo, "replica");
- if (replica && replica->type == cJSON_Number) {
- g_Dbs.db[i].dbCfg.replica = replica->valueint;
- } else if (!replica) {
- g_Dbs.db[i].dbCfg.replica = -1;
- } else {
- printf("failed to read json, replica not found");
- goto PARSE_OVER;
- }
-
- cJSON* keep = cJSON_GetObjectItem(dbinfo, "keep");
- if (keep && keep->type == cJSON_Number) {
- g_Dbs.db[i].dbCfg.keep = keep->valueint;
- } else if (!keep) {
- g_Dbs.db[i].dbCfg.keep = -1;
- } else {
- printf("failed to read json, keep not found");
- goto PARSE_OVER;
- }
-
- cJSON* days = cJSON_GetObjectItem(dbinfo, "days");
- if (days && days->type == cJSON_Number) {
- g_Dbs.db[i].dbCfg.days = days->valueint;
- } else if (!days) {
- g_Dbs.db[i].dbCfg.days = -1;
- } else {
- printf("failed to read json, days not found");
- goto PARSE_OVER;
- }
-
- cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache");
- if (cache && cache->type == cJSON_Number) {
- g_Dbs.db[i].dbCfg.cache = cache->valueint;
- } else if (!cache) {
- g_Dbs.db[i].dbCfg.cache = -1;
- } else {
- printf("failed to read json, cache not found");
- goto PARSE_OVER;
- }
-
- cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks");
- if (blocks && blocks->type == cJSON_Number) {
- g_Dbs.db[i].dbCfg.blocks = blocks->valueint;
- } else if (!blocks) {
- g_Dbs.db[i].dbCfg.blocks = -1;
- } else {
- printf("failed to read json, block not found");
- goto PARSE_OVER;
- }
-
- //cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode");
- //if (maxtablesPerVnode && maxtablesPerVnode->type == cJSON_Number) {
- // g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint;
- //} else if (!maxtablesPerVnode) {
- // g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES;
- //} else {
- // printf("failed to read json, maxtablesPerVnode not found");
- // goto PARSE_OVER;
- //}
-
- cJSON* minRows= cJSON_GetObjectItem(dbinfo, "minRows");
- if (minRows && minRows->type == cJSON_Number) {
- g_Dbs.db[i].dbCfg.minRows = minRows->valueint;
- } else if (!minRows) {
- g_Dbs.db[i].dbCfg.minRows = -1;
- } else {
- printf("failed to read json, minRows not found");
- goto PARSE_OVER;
- }
-
- cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows");
- if (maxRows && maxRows->type == cJSON_Number) {
- g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint;
- } else if (!maxRows) {
- g_Dbs.db[i].dbCfg.maxRows = -1;
- } else {
- printf("failed to read json, maxRows not found");
- goto PARSE_OVER;
- }
-
- cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp");
- if (comp && comp->type == cJSON_Number) {
- g_Dbs.db[i].dbCfg.comp = comp->valueint;
- } else if (!comp) {
- g_Dbs.db[i].dbCfg.comp = -1;
- } else {
- printf("failed to read json, comp not found");
- goto PARSE_OVER;
- }
-
- cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel");
- if (walLevel && walLevel->type == cJSON_Number) {
- g_Dbs.db[i].dbCfg.walLevel = walLevel->valueint;
- } else if (!walLevel) {
- g_Dbs.db[i].dbCfg.walLevel = -1;
- } else {
- printf("failed to read json, walLevel not found");
- goto PARSE_OVER;
- }
-
- cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum");
- if (quorum && quorum->type == cJSON_Number) {
- g_Dbs.db[i].dbCfg.quorum = quorum->valueint;
- } else if (!quorum) {
- g_Dbs.db[i].dbCfg.quorum = -1;
- } else {
- printf("failed to read json, walLevel not found");
- goto PARSE_OVER;
- }
-
- cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync");
- if (fsync && fsync->type == cJSON_Number) {
- g_Dbs.db[i].dbCfg.fsync = fsync->valueint;
- } else if (!fsync) {
- g_Dbs.db[i].dbCfg.fsync = -1;
- } else {
- printf("failed to read json, fsync not found");
- goto PARSE_OVER;
- }
-
- // super_talbes
- cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables");
- if (!stables || stables->type != cJSON_Array) {
- printf("failed to read json, super_tables not found");
- goto PARSE_OVER;
- }
-
- int stbSize = cJSON_GetArraySize(stables);
- if (stbSize > MAX_SUPER_TABLE_COUNT) {
- printf("failed to read json, databases size overflow, max database is %d\n", MAX_SUPER_TABLE_COUNT);
- goto PARSE_OVER;
- }
-
- g_Dbs.db[i].superTblCount = stbSize;
- for (int j = 0; j < stbSize; ++j) {
- cJSON* stbInfo = cJSON_GetArrayItem(stables, j);
- if (stbInfo == NULL) continue;
-
- // dbinfo
- cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name");
- if (!stbName || stbName->type != cJSON_String || stbName->valuestring == NULL) {
- printf("failed to read json, stb name not found");
- goto PARSE_OVER;
- }
- strncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, MAX_TB_NAME_SIZE);
-
- cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
- if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
- printf("failed to read json, childtable_prefix not found");
- goto PARSE_OVER;
- }
- strncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, MAX_DB_NAME_SIZE);
-
- cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); // yes, no, null
- if (autoCreateTbl && autoCreateTbl->type == cJSON_String && autoCreateTbl->valuestring != NULL) {
- if (0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) {
- g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL;
- } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) {
- g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
- } else {
- g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
- }
- } else if (!autoCreateTbl) {
- g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
- } else {
- printf("failed to read json, auto_create_table not found");
- goto PARSE_OVER;
- }
-
- cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num");
- if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
- g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;
- } else if (!batchCreateTbl) {
- g_Dbs.db[i].superTbls[j].batchCreateTableNum = 2000;
- } else {
- printf("failed to read json, batch_create_tbl_num not found");
- goto PARSE_OVER;
- }
-
- cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no
- if (childTblExists && childTblExists->type == cJSON_String && childTblExists->valuestring != NULL) {
- if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) {
- g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS;
- } else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) {
- g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
- } else {
- g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
- }
- } else if (!childTblExists) {
- g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
- } else {
- printf("failed to read json, child_table_exists not found");
- goto PARSE_OVER;
- }
-
- cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
- if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
- printf("failed to read json, childtable_count not found");
- goto PARSE_OVER;
- }
- g_Dbs.db[i].superTbls[j].childTblCount = count->valueint;
-
- cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source");
- if (dataSource && dataSource->type == cJSON_String && dataSource->valuestring != NULL) {
- strncpy(g_Dbs.db[i].superTbls[j].dataSource, dataSource->valuestring, MAX_DB_NAME_SIZE);
- } else if (!dataSource) {
- strncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE);
- } else {
- printf("failed to read json, data_source not found");
- goto PARSE_OVER;
- }
-
- cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , restful
- if (insertMode && insertMode->type == cJSON_String && insertMode->valuestring != NULL) {
- strncpy(g_Dbs.db[i].superTbls[j].insertMode, insertMode->valuestring, MAX_DB_NAME_SIZE);
- #ifndef TD_LOWA_CURL
- if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 7)) {
- printf("There no libcurl, so no support resetful test! please use taosc mode.\n");
- goto PARSE_OVER;
- }
- #endif
- } else if (!insertMode) {
- strncpy(g_Dbs.db[i].superTbls[j].insertMode, "taosc", MAX_DB_NAME_SIZE);
- } else {
- printf("failed to read json, insert_mode not found");
- goto PARSE_OVER;
- }
-
- cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp");
- if (ts && ts->type == cJSON_String && ts->valuestring != NULL) {
- strncpy(g_Dbs.db[i].superTbls[j].startTimestamp, ts->valuestring, MAX_DB_NAME_SIZE);
- } else if (!ts) {
- strncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", MAX_DB_NAME_SIZE);
- } else {
- printf("failed to read json, start_timestamp not found");
- goto PARSE_OVER;
- }
-
- cJSON* timestampStep = cJSON_GetObjectItem(stbInfo, "timestamp_step");
- if (timestampStep && timestampStep->type == cJSON_Number) {
- g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint;
- } else if (!timestampStep) {
- g_Dbs.db[i].superTbls[j].timeStampStep = 1000;
- } else {
- printf("failed to read json, timestamp_step not found");
- goto PARSE_OVER;
- }
-
- cJSON* sampleDataBufSize = cJSON_GetObjectItem(stbInfo, "sample_buf_size");
- if (sampleDataBufSize && sampleDataBufSize->type == cJSON_Number) {
- g_Dbs.db[i].superTbls[j].sampleDataBufSize = sampleDataBufSize->valueint;
- if (g_Dbs.db[i].superTbls[j].sampleDataBufSize < 1024*1024) {
- g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024;
- }
- } else if (!sampleDataBufSize) {
- g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024;
- } else {
- printf("failed to read json, sample_buf_size not found");
- goto PARSE_OVER;
- }
-
- cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format");
- if (sampleFormat && sampleFormat->type == cJSON_String && sampleFormat->valuestring != NULL) {
- strncpy(g_Dbs.db[i].superTbls[j].sampleFormat, sampleFormat->valuestring, MAX_DB_NAME_SIZE);
- } else if (!sampleFormat) {
- strncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE);
- } else {
- printf("failed to read json, sample_format not found");
- goto PARSE_OVER;
- }
-
- cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file");
- if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) {
- strncpy(g_Dbs.db[i].superTbls[j].sampleFile, sampleFile->valuestring, MAX_FILE_NAME_LEN);
- } else if (!sampleFile) {
- memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN);
- } else {
- printf("failed to read json, sample_file not found");
- goto PARSE_OVER;
- }
-
- cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file");
- if (tagsFile && tagsFile->type == cJSON_String && tagsFile->valuestring != NULL) {
- strncpy(g_Dbs.db[i].superTbls[j].tagsFile, tagsFile->valuestring, MAX_FILE_NAME_LEN);
- if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) {
- g_Dbs.db[i].superTbls[j].tagSource = 0;
- } else {
- g_Dbs.db[i].superTbls[j].tagSource = 1;
- }
- } else if (!tagsFile) {
- memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN);
- g_Dbs.db[i].superTbls[j].tagSource = 0;
- } else {
- printf("failed to read json, tags_file not found");
- goto PARSE_OVER;
- }
-
- cJSON* maxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len");
- if (maxSqlLen && maxSqlLen->type == cJSON_Number) {
- int32_t len = maxSqlLen->valueint;
- if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
- len = TSDB_MAX_ALLOWED_SQL_LEN;
- } else if (len < TSDB_MAX_SQL_LEN) {
- len = TSDB_MAX_SQL_LEN;
- }
- g_Dbs.db[i].superTbls[j].maxSqlLen = len;
- } else if (!maxSqlLen) {
- g_Dbs.db[i].superTbls[j].maxSqlLen = TSDB_MAX_SQL_LEN;
- } else {
- printf("failed to read json, maxSqlLen not found");
- goto PARSE_OVER;
- }
-
- cJSON *multiThreadWriteOneTbl = cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes
- if (multiThreadWriteOneTbl && multiThreadWriteOneTbl->type == cJSON_String && multiThreadWriteOneTbl->valuestring != NULL) {
- if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", 3)) {
- g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1;
- } else {
- g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
- }
- } else if (!multiThreadWriteOneTbl) {
- g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
- } else {
- printf("failed to read json, multiThreadWriteOneTbl not found");
- goto PARSE_OVER;
- }
-
- cJSON* numberOfTblInOneSql = cJSON_GetObjectItem(stbInfo, "number_of_tbl_in_one_sql");
- if (numberOfTblInOneSql && numberOfTblInOneSql->type == cJSON_Number) {
- g_Dbs.db[i].superTbls[j].numberOfTblInOneSql = numberOfTblInOneSql->valueint;
- } else if (!numberOfTblInOneSql) {
- g_Dbs.db[i].superTbls[j].numberOfTblInOneSql = 0;
- } else {
- printf("failed to read json, numberOfTblInOneSql not found");
- goto PARSE_OVER;
- }
-
- cJSON* rowsPerTbl = cJSON_GetObjectItem(stbInfo, "rows_per_tbl");
- if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) {
- g_Dbs.db[i].superTbls[j].rowsPerTbl = rowsPerTbl->valueint;
- } else if (!rowsPerTbl) {
- g_Dbs.db[i].superTbls[j].rowsPerTbl = 1;
- } else {
- printf("failed to read json, rowsPerTbl not found");
- goto PARSE_OVER;
- }
-
- cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio");
- if (disorderRatio && disorderRatio->type == cJSON_Number) {
- g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint;
- } else if (!disorderRatio) {
- g_Dbs.db[i].superTbls[j].disorderRatio = 0;
- } else {
- printf("failed to read json, disorderRatio not found");
- goto PARSE_OVER;
- }
-
- cJSON* disorderRange = cJSON_GetObjectItem(stbInfo, "disorder_range");
- if (disorderRange && disorderRange->type == cJSON_Number) {
- g_Dbs.db[i].superTbls[j].disorderRange = disorderRange->valueint;
- } else if (!disorderRange) {
- g_Dbs.db[i].superTbls[j].disorderRange = 1000;
- } else {
- printf("failed to read json, disorderRange not found");
- goto PARSE_OVER;
- }
-
- cJSON* insertRate = cJSON_GetObjectItem(stbInfo, "insert_rate");
- if (insertRate && insertRate->type == cJSON_Number) {
- g_Dbs.db[i].superTbls[j].insertRate = insertRate->valueint;
- } else if (!insertRate) {
- g_Dbs.db[i].superTbls[j].insertRate = 0;
- } else {
- printf("failed to read json, insert_rate not found");
- goto PARSE_OVER;
- }
-
- cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
- if (insertRows && insertRows->type == cJSON_Number) {
- g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
- if (0 == g_Dbs.db[i].superTbls[j].insertRows) {
- g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
- }
- } else if (!insertRows) {
- g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
- } else {
- printf("failed to read json, insert_rows not found");
- goto PARSE_OVER;
- }
-
- if (NO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) {
- continue;
- }
-
- int retVal = getColumnAndTagTypeFromInsertJsonFile(stbInfo, &g_Dbs.db[i].superTbls[j]);
- if (false == retVal) {
- goto PARSE_OVER;
- }
- }
- }
-
- ret = true;
-
-PARSE_OVER:
- //free(content);
- //cJSON_Delete(root);
- //fclose(fp);
- return ret;
-}
-
-static bool getMetaFromQueryJsonFile(cJSON* root) {
- bool ret = false;
-
- cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir");
- if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) {
- strncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
- }
-
- cJSON* host = cJSON_GetObjectItem(root, "host");
- if (host && host->type == cJSON_String && host->valuestring != NULL) {
- strncpy(g_queryInfo.host, host->valuestring, MAX_DB_NAME_SIZE);
- } else if (!host) {
- strncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE);
- } else {
- printf("failed to read json, host not found\n");
- goto PARSE_OVER;
- }
-
- cJSON* port = cJSON_GetObjectItem(root, "port");
- if (port && port->type == cJSON_Number) {
- g_queryInfo.port = port->valueint;
- } else if (!port) {
- g_queryInfo.port = 6030;
- }
-
- cJSON* user = cJSON_GetObjectItem(root, "user");
- if (user && user->type == cJSON_String && user->valuestring != NULL) {
- strncpy(g_queryInfo.user, user->valuestring, MAX_DB_NAME_SIZE);
- } else if (!user) {
- strncpy(g_queryInfo.user, "root", MAX_DB_NAME_SIZE); ;
- }
-
- cJSON* password = cJSON_GetObjectItem(root, "password");
- if (password && password->type == cJSON_String && password->valuestring != NULL) {
- strncpy(g_queryInfo.password, password->valuestring, MAX_DB_NAME_SIZE);
- } else if (!password) {
- strncpy(g_queryInfo.password, "taosdata", MAX_DB_NAME_SIZE);;
- }
-
- cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
- if (answerPrompt && answerPrompt->type == cJSON_String && answerPrompt->valuestring != NULL) {
- if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) {
- g_args.answer_yes = false;
- } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) {
- g_args.answer_yes = true;
- } else {
- g_args.answer_yes = false;
- }
- } else if (!answerPrompt) {
- g_args.answer_yes = false;
- } else {
- printf("failed to read json, confirm_parameter_prompt not found");
- goto PARSE_OVER;
- }
-
- cJSON* dbs = cJSON_GetObjectItem(root, "databases");
- if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
- strncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE);
- } else if (!dbs) {
- printf("failed to read json, databases not found\n");
- goto PARSE_OVER;
- }
-
- cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode");
- if (queryMode && queryMode->type == cJSON_String && queryMode->valuestring != NULL) {
- strncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE);
- } else if (!queryMode) {
- strncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE);
- } else {
- printf("failed to read json, query_mode not found\n");
- goto PARSE_OVER;
- }
-
- // super_table_query
- cJSON *superQuery = cJSON_GetObjectItem(root, "specified_table_query");
- if (!superQuery) {
- g_queryInfo.superQueryInfo.concurrent = 0;
- g_queryInfo.superQueryInfo.sqlCount = 0;
- } else if (superQuery->type != cJSON_Object) {
- printf("failed to read json, super_table_query not found");
- goto PARSE_OVER;
- } else {
- cJSON* rate = cJSON_GetObjectItem(superQuery, "query_interval");
- if (rate && rate->type == cJSON_Number) {
- g_queryInfo.superQueryInfo.rate = rate->valueint;
- } else if (!rate) {
- g_queryInfo.superQueryInfo.rate = 0;
- }
-
- cJSON* concurrent = cJSON_GetObjectItem(superQuery, "concurrent");
- if (concurrent && concurrent->type == cJSON_Number) {
- g_queryInfo.superQueryInfo.concurrent = concurrent->valueint;
- } else if (!concurrent) {
- g_queryInfo.superQueryInfo.concurrent = 1;
- }
-
- cJSON* mode = cJSON_GetObjectItem(superQuery, "mode");
- if (mode && mode->type == cJSON_String && mode->valuestring != NULL) {
- if (0 == strcmp("sync", mode->valuestring)) {
- g_queryInfo.superQueryInfo.subscribeMode = 0;
- } else if (0 == strcmp("async", mode->valuestring)) {
- g_queryInfo.superQueryInfo.subscribeMode = 1;
- } else {
- printf("failed to read json, subscribe mod error\n");
- goto PARSE_OVER;
- }
- } else {
- g_queryInfo.superQueryInfo.subscribeMode = 0;
- }
-
- cJSON* interval = cJSON_GetObjectItem(superQuery, "interval");
- if (interval && interval->type == cJSON_Number) {
- g_queryInfo.superQueryInfo.subscribeInterval = interval->valueint;
- } else if (!interval) {
- //printf("failed to read json, subscribe interval no found\n");
- //goto PARSE_OVER;
- g_queryInfo.superQueryInfo.subscribeInterval = 10000;
- }
-
- cJSON* restart = cJSON_GetObjectItem(superQuery, "restart");
- if (restart && restart->type == cJSON_String && restart->valuestring != NULL) {
- if (0 == strcmp("yes", restart->valuestring)) {
- g_queryInfo.superQueryInfo.subscribeRestart = 1;
- } else if (0 == strcmp("no", restart->valuestring)) {
- g_queryInfo.superQueryInfo.subscribeRestart = 0;
- } else {
- printf("failed to read json, subscribe restart error\n");
- goto PARSE_OVER;
- }
- } else {
- g_queryInfo.superQueryInfo.subscribeRestart = 1;
- }
-
- cJSON* keepProgress = cJSON_GetObjectItem(superQuery, "keepProgress");
- if (keepProgress && keepProgress->type == cJSON_String && keepProgress->valuestring != NULL) {
- if (0 == strcmp("yes", keepProgress->valuestring)) {
- g_queryInfo.superQueryInfo.subscribeKeepProgress = 1;
- } else if (0 == strcmp("no", keepProgress->valuestring)) {
- g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
- } else {
- printf("failed to read json, subscribe keepProgress error\n");
- goto PARSE_OVER;
- }
- } else {
- g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
- }
-
- // sqls
- cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls");
- if (!superSqls) {
- g_queryInfo.superQueryInfo.sqlCount = 0;
- } else if (superSqls->type != cJSON_Array) {
- printf("failed to read json, super sqls not found\n");
- goto PARSE_OVER;
- } else {
- int superSqlSize = cJSON_GetArraySize(superSqls);
- if (superSqlSize > MAX_QUERY_SQL_COUNT) {
- printf("failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT);
- goto PARSE_OVER;
- }
-
- g_queryInfo.superQueryInfo.sqlCount = superSqlSize;
- for (int j = 0; j < superSqlSize; ++j) {
- cJSON* sql = cJSON_GetArrayItem(superSqls, j);
- if (sql == NULL) continue;
-
- cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
- if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) {
- printf("failed to read json, sql not found\n");
- goto PARSE_OVER;
- }
- strncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
-
- cJSON *result = cJSON_GetObjectItem(sql, "result");
- if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) {
- strncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
- } else if (NULL == result) {
- memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
- } else {
- printf("failed to read json, super query result file not found\n");
- goto PARSE_OVER;
- }
- }
- }
- }
-
- // sub_table_query
- cJSON *subQuery = cJSON_GetObjectItem(root, "super_table_query");
- if (!subQuery) {
- g_queryInfo.subQueryInfo.threadCnt = 0;
- g_queryInfo.subQueryInfo.sqlCount = 0;
- } else if (subQuery->type != cJSON_Object) {
- printf("failed to read json, sub_table_query not found");
- ret = true;
- goto PARSE_OVER;
- } else {
- cJSON* subrate = cJSON_GetObjectItem(subQuery, "query_interval");
- if (subrate && subrate->type == cJSON_Number) {
- g_queryInfo.subQueryInfo.rate = subrate->valueint;
- } else if (!subrate) {
- g_queryInfo.subQueryInfo.rate = 0;
- }
-
- cJSON* threads = cJSON_GetObjectItem(subQuery, "threads");
- if (threads && threads->type == cJSON_Number) {
- g_queryInfo.subQueryInfo.threadCnt = threads->valueint;
- } else if (!threads) {
- g_queryInfo.subQueryInfo.threadCnt = 1;
- }
-
- //cJSON* subTblCnt = cJSON_GetObjectItem(subQuery, "childtable_count");
- //if (subTblCnt && subTblCnt->type == cJSON_Number) {
- // g_queryInfo.subQueryInfo.childTblCount = subTblCnt->valueint;
- //} else if (!subTblCnt) {
- // g_queryInfo.subQueryInfo.childTblCount = 0;
- //}
-
- cJSON* stblname = cJSON_GetObjectItem(subQuery, "stblname");
- if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) {
- strncpy(g_queryInfo.subQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE);
- } else {
- printf("failed to read json, super table name not found\n");
- goto PARSE_OVER;
- }
-
- cJSON* submode = cJSON_GetObjectItem(subQuery, "mode");
- if (submode && submode->type == cJSON_String && submode->valuestring != NULL) {
- if (0 == strcmp("sync", submode->valuestring)) {
- g_queryInfo.subQueryInfo.subscribeMode = 0;
- } else if (0 == strcmp("async", submode->valuestring)) {
- g_queryInfo.subQueryInfo.subscribeMode = 1;
- } else {
- printf("failed to read json, subscribe mod error\n");
- goto PARSE_OVER;
- }
- } else {
- g_queryInfo.subQueryInfo.subscribeMode = 0;
- }
-
- cJSON* subinterval = cJSON_GetObjectItem(subQuery, "interval");
- if (subinterval && subinterval->type == cJSON_Number) {
- g_queryInfo.subQueryInfo.subscribeInterval = subinterval->valueint;
- } else if (!subinterval) {
- //printf("failed to read json, subscribe interval no found\n");
- //goto PARSE_OVER;
- g_queryInfo.subQueryInfo.subscribeInterval = 10000;
- }
-
- cJSON* subrestart = cJSON_GetObjectItem(subQuery, "restart");
- if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) {
- if (0 == strcmp("yes", subrestart->valuestring)) {
- g_queryInfo.subQueryInfo.subscribeRestart = 1;
- } else if (0 == strcmp("no", subrestart->valuestring)) {
- g_queryInfo.subQueryInfo.subscribeRestart = 0;
- } else {
- printf("failed to read json, subscribe restart error\n");
- goto PARSE_OVER;
- }
- } else {
- g_queryInfo.subQueryInfo.subscribeRestart = 1;
- }
-
- cJSON* subkeepProgress = cJSON_GetObjectItem(subQuery, "keepProgress");
- if (subkeepProgress && subkeepProgress->type == cJSON_String && subkeepProgress->valuestring != NULL) {
- if (0 == strcmp("yes", subkeepProgress->valuestring)) {
- g_queryInfo.subQueryInfo.subscribeKeepProgress = 1;
- } else if (0 == strcmp("no", subkeepProgress->valuestring)) {
- g_queryInfo.subQueryInfo.subscribeKeepProgress = 0;
- } else {
- printf("failed to read json, subscribe keepProgress error\n");
- goto PARSE_OVER;
- }
- } else {
- g_queryInfo.subQueryInfo.subscribeKeepProgress = 0;
- }
-
- // sqls
- cJSON* subsqls = cJSON_GetObjectItem(subQuery, "sqls");
- if (!subsqls) {
- g_queryInfo.subQueryInfo.sqlCount = 0;
- } else if (subsqls->type != cJSON_Array) {
- printf("failed to read json, super sqls not found\n");
- goto PARSE_OVER;
- } else {
- int superSqlSize = cJSON_GetArraySize(subsqls);
- if (superSqlSize > MAX_QUERY_SQL_COUNT) {
- printf("failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT);
- goto PARSE_OVER;
- }
-
- g_queryInfo.subQueryInfo.sqlCount = superSqlSize;
- for (int j = 0; j < superSqlSize; ++j) {
- cJSON* sql = cJSON_GetArrayItem(subsqls, j);
- if (sql == NULL) continue;
-
- cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
- if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) {
- printf("failed to read json, sql not found\n");
- goto PARSE_OVER;
- }
- strncpy(g_queryInfo.subQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
-
- cJSON *result = cJSON_GetObjectItem(sql, "result");
- if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){
- strncpy(g_queryInfo.subQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
- } else if (NULL == result) {
- memset(g_queryInfo.subQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
- } else {
- printf("failed to read json, sub query result file not found\n");
- goto PARSE_OVER;
- }
- }
- }
- }
-
- ret = true;
-
-PARSE_OVER:
- //free(content);
- //cJSON_Delete(root);
- //fclose(fp);
- return ret;
-}
-
-static bool getInfoFromJsonFile(char* file) {
- FILE *fp = fopen(file, "r");
- if (!fp) {
- printf("failed to read %s, reason:%s\n", file, strerror(errno));
- return false;
- }
-
- bool ret = false;
- int maxLen = 64000;
- char *content = calloc(1, maxLen + 1);
- int len = fread(content, 1, maxLen, fp);
- if (len <= 0) {
- free(content);
- fclose(fp);
- printf("failed to read %s, content is null", file);
- return false;
- }
-
- content[len] = 0;
- cJSON* root = cJSON_Parse(content);
- if (root == NULL) {
- printf("failed to cjson parse %s, invalid json format", file);
- goto PARSE_OVER;
- }
-
- cJSON* filetype = cJSON_GetObjectItem(root, "filetype");
- if (filetype && filetype->type == cJSON_String && filetype->valuestring != NULL) {
- if (0 == strcasecmp("insert", filetype->valuestring)) {
- g_jsonType = INSERT_MODE;
- } else if (0 == strcasecmp("query", filetype->valuestring)) {
- g_jsonType = QUERY_MODE;
- } else if (0 == strcasecmp("subscribe", filetype->valuestring)) {
- g_jsonType = SUBSCRIBE_MODE;
- } else {
- printf("failed to read json, filetype not support\n");
- goto PARSE_OVER;
- }
- } else if (!filetype) {
- g_jsonType = INSERT_MODE;
- } else {
- printf("failed to read json, filetype not found\n");
- goto PARSE_OVER;
- }
-
- if (INSERT_MODE == g_jsonType) {
- ret = getMetaFromInsertJsonFile(root);
- } else if (QUERY_MODE == g_jsonType) {
- ret = getMetaFromQueryJsonFile(root);
- } else if (SUBSCRIBE_MODE == g_jsonType) {
- ret = getMetaFromQueryJsonFile(root);
- } else {
- printf("input json file type error! please input correct file type: insert or query or subscribe\n");
- goto PARSE_OVER;
- }
-
-PARSE_OVER:
- free(content);
- cJSON_Delete(root);
- fclose(fp);
- return ret;
-}
-
-
-void prePareSampleData() {
- for (int i = 0; i < g_Dbs.dbCount; i++) {
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- //if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].dataSource, "sample", 6)) {
- // readSampleFromFileToMem(&g_Dbs.db[i].superTbls[j]);
- //}
-
- if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) {
- (void)readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]);
- }
-
- #ifdef TD_LOWA_CURL
- if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 8)) {
- curl_global_init(CURL_GLOBAL_ALL);
- }
- #endif
- }
- }
-}
-
-void postFreeResource() {
- tmfclose(g_fpOfInsertResult);
- for (int i = 0; i < g_Dbs.dbCount; i++) {
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- if (0 != g_Dbs.db[i].superTbls[j].colsOfCreatChildTable) {
- free(g_Dbs.db[i].superTbls[j].colsOfCreatChildTable);
- g_Dbs.db[i].superTbls[j].colsOfCreatChildTable = NULL;
- }
- if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) {
- free(g_Dbs.db[i].superTbls[j].sampleDataBuf);
- g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL;
- }
- if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) {
- free(g_Dbs.db[i].superTbls[j].tagDataBuf);
- g_Dbs.db[i].superTbls[j].tagDataBuf = NULL;
- }
- if (0 != g_Dbs.db[i].superTbls[j].childTblName) {
- free(g_Dbs.db[i].superTbls[j].childTblName);
- g_Dbs.db[i].superTbls[j].childTblName = NULL;
- }
-
- #ifdef TD_LOWA_CURL
- if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 8)) {
- curl_global_cleanup();
- }
- #endif
- }
- }
-}
-
-int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* superTblInfo, int* sampleUsePos, FILE *fp, char* sampleBuf) {
- if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) {
- int ret = readSampleFromCsvFileToMem(fp, superTblInfo, sampleBuf);
- if (0 != ret) {
- return -1;
- }
- *sampleUsePos = 0;
- }
-
- int dataLen = 0;
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp);
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%s", sampleBuf + superTblInfo->lenOfOneRow * (*sampleUsePos));
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")");
-
- (*sampleUsePos)++;
-
- return dataLen;
-}
-
-int generateRowData(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* stbInfo) {
- int dataLen = 0;
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp);
- for (int i = 0; i < stbInfo->columnCount; i++) {
- if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) {
- if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
- printf("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN);
- return (-1);
- }
-
- char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1);
- if (NULL == buf) {
- printf("calloc failed! size:%d\n", stbInfo->columns[i].dataLen);
- return (-1);
- }
- rand_string(buf, stbInfo->columns[i].dataLen);
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "\'%s\', ", buf);
- tmfree(buf);
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "int", 3)) {
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_int());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bigint", 6)) {
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "float", 5)) {
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%f, ", rand_float());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "double", 6)) {
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%f, ", rand_double());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "smallint", 8)) {
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_smallint());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "tinyint", 7)) {
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_tinyint());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bool", 4)) {
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_bool());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) {
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint());
- } else {
- printf("No support data type: %s\n", stbInfo->columns[i].dataType);
- return (-1);
- }
- }
- dataLen -= 2;
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")");
-
- return dataLen;
-}
-
-void syncWriteForNumberOfTblInOneSql(threadInfo *winfo, FILE *fp, char* sampleDataBuf) {
- SSuperTable* superTblInfo = winfo->superTblInfo;
-
- int samplePos = 0;
-
- //printf("========threadID[%d], table rang: %d - %d \n", winfo->threadID, winfo->start_table_id, winfo->end_table_id);
- int64_t totalRowsInserted = 0;
- int64_t totalAffectedRows = 0;
- int64_t lastPrintTime = taosGetTimestampMs();
-
- char* buffer = calloc(superTblInfo->maxSqlLen+1, 1);
- if (NULL == buffer) {
- printf("========calloc size[ %d ] fail!\n", superTblInfo->maxSqlLen);
- return;
- }
-
- int32_t numberOfTblInOneSql = superTblInfo->numberOfTblInOneSql;
- int32_t tbls = winfo->end_table_id - winfo->start_table_id + 1;
- if (numberOfTblInOneSql > tbls) {
- numberOfTblInOneSql = tbls;
- }
-
- int64_t time_counter = winfo->start_time;
- int64_t tmp_time;
- int sampleUsePos;
-
- int64_t st = 0;
- int64_t et = 0;
- for (int i = 0; i < superTblInfo->insertRows;) {
- if (superTblInfo->insertRate && (et - st) < 1000) {
- taosMsleep(1000 - (et - st)); // ms
- //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
- }
-
- if (superTblInfo->insertRate) {
- st = taosGetTimestampMs();
- }
-
- int32_t tbl_id = 0;
- for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; ) {
- int inserted = i;
-
- int k = 0;
- int batchRowsSql = 0;
- while (1)
- {
- int len = 0;
- memset(buffer, 0, superTblInfo->maxSqlLen);
- char *pstr = buffer;
-
- int32_t end_tbl_id = tID + numberOfTblInOneSql;
- if (end_tbl_id > winfo->end_table_id) {
- end_tbl_id = winfo->end_table_id+1;
- }
- for (tbl_id = tID; tbl_id < end_tbl_id; tbl_id++) {
- sampleUsePos = samplePos;
- if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
- char* tagsValBuf = NULL;
- if (0 == superTblInfo->tagSource) {
- tagsValBuf = generateTagVaulesForStb(superTblInfo);
- } else {
- tagsValBuf = getTagValueFromTagSample(superTblInfo, tbl_id % superTblInfo->tagSampleCount);
- }
- if (NULL == tagsValBuf) {
- goto free_and_statistics;
- }
-
- if (0 == len) {
- len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d using %s.%s tags %s values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id, winfo->db_name, superTblInfo->sTblName, tagsValBuf);
- } else {
- len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s%d using %s.%s tags %s values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id, winfo->db_name, superTblInfo->sTblName, tagsValBuf);
- }
- tmfree(tagsValBuf);
- } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) {
- if (0 == len) {
- len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s values ", winfo->db_name, superTblInfo->childTblName + tbl_id * TSDB_TABLE_NAME_LEN);
- } else {
- len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s values ", winfo->db_name, superTblInfo->childTblName + tbl_id * TSDB_TABLE_NAME_LEN);
- }
- } else { // pre-create child table
- if (0 == len) {
- len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id);
- } else {
- len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s%d values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id);
- }
- }
-
- tmp_time = time_counter;
- for (k = 0; k < superTblInfo->rowsPerTbl;) {
- int retLen = 0;
- if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) {
- retLen = getRowDataFromSample(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo, &sampleUsePos, fp, sampleDataBuf);
- if (retLen < 0) {
- goto free_and_statistics;
- }
- } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", 8)) {
- int rand_num = rand_tinyint() % 100;
- if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) {
- int64_t d = tmp_time - rand() % superTblInfo->disorderRange;
- retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, d, superTblInfo);
- } else {
- retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo);
- }
- if (retLen < 0) {
- goto free_and_statistics;
- }
- }
- len += retLen;
- //inserted++;
- k++;
- totalRowsInserted++;
- batchRowsSql++;
-
- if (inserted >= superTblInfo->insertRows || (superTblInfo->maxSqlLen - len) < (superTblInfo->lenOfOneRow + 128) || batchRowsSql >= INT16_MAX - 1) {
- tID = tbl_id + 1;
- printf("config rowsPerTbl and numberOfTblInOneSql not match with max_sql_lenth, please reconfig![lenOfOneRow:%d]\n", superTblInfo->lenOfOneRow);
- goto send_to_server;
- }
- }
-
- }
-
- tID = tbl_id;
- inserted += superTblInfo->rowsPerTbl;
-
- send_to_server:
- batchRowsSql = 0;
- if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
- //printf("multi table===== sql: %s \n\n", buffer);
- //int64_t t1 = taosGetTimestampMs();
- int affectedRows = queryDbExec(winfo->taos, buffer, INSERT_TYPE);
- if (0 > affectedRows) {
- goto free_and_statistics;
- }
- totalAffectedRows += affectedRows;
-
- int64_t currentPrintTime = taosGetTimestampMs();
- if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", winfo->threadID, totalRowsInserted, totalAffectedRows);
- lastPrintTime = currentPrintTime;
- }
- //int64_t t2 = taosGetTimestampMs();
- //printf("taosc insert sql return, Spent %.4f seconds \n", (double)(t2 - t1)/1000.0);
- } else {
- #ifdef TD_LOWA_CURL
- //int64_t t1 = taosGetTimestampMs();
- int retCode = curlProceSql(g_Dbs.host, g_Dbs.port, buffer, winfo->curl_handle);
- //int64_t t2 = taosGetTimestampMs();
- //printf("http insert sql return, Spent %ld ms \n", t2 - t1);
-
- if (0 != retCode) {
- printf("========curl return fail, threadID[%d]\n", winfo->threadID);
- goto free_and_statistics;
- }
- #else
- printf("========no use http mode for no curl lib!\n");
- goto free_and_statistics;
- #endif
- }
-
- //printf("========tID:%d, k:%d, loop_cnt:%d\n", tID, k, loop_cnt);
- break;
- }
-
- if (tID > winfo->end_table_id) {
- if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) {
- samplePos = sampleUsePos;
- }
- i = inserted;
- time_counter = tmp_time;
- }
- }
-
- if (superTblInfo->insertRate) {
- et = taosGetTimestampMs();
- }
- //printf("========loop %d childTables duration:%"PRId64 "========inserted rows:%d\n", winfo->end_table_id - winfo->start_table_id, et - st, i);
- }
-
- free_and_statistics:
- tmfree(buffer);
- winfo->totalRowsInserted = totalRowsInserted;
- winfo->totalAffectedRows = totalAffectedRows;
- printf("====thread[%d] completed total inserted rows: %"PRId64 ", affected rows: %"PRId64 "====\n", winfo->threadID, totalRowsInserted, totalAffectedRows);
- return;
-}
-
-// sync insertion
-/*
- 1 thread: 100 tables * 2000 rows/s
- 1 thread: 10 tables * 20000 rows/s
- 6 thread: 300 tables * 2000 rows/s
-
- 2 taosinsertdata , 1 thread: 10 tables * 20000 rows/s
-*/
-void *syncWrite(void *sarg) {
- int64_t totalRowsInserted = 0;
- int64_t totalAffectedRows = 0;
- int64_t lastPrintTime = taosGetTimestampMs();
-
- threadInfo *winfo = (threadInfo *)sarg;
- SSuperTable* superTblInfo = winfo->superTblInfo;
-
- FILE *fp = NULL;
- char* sampleDataBuf = NULL;
- int samplePos = 0;
-
- // each thread read sample data from csv file
- if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) {
- sampleDataBuf = calloc(superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1);
- if (sampleDataBuf == NULL) {
- printf("Failed to calloc %d Bytes, reason:%s\n", superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno));
- return NULL;
- }
-
- fp = fopen(superTblInfo->sampleFile, "r");
- if (fp == NULL) {
- printf("Failed to open sample file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno));
- tmfree(sampleDataBuf);
- return NULL;
- }
- int ret = readSampleFromCsvFileToMem(fp, superTblInfo, sampleDataBuf);
- if (0 != ret) {
- tmfree(sampleDataBuf);
- tmfclose(fp);
- return NULL;
- }
- }
-
- if (superTblInfo->numberOfTblInOneSql > 0) {
- syncWriteForNumberOfTblInOneSql(winfo, fp, sampleDataBuf);
- tmfree(sampleDataBuf);
- tmfclose(fp);
- return NULL;
- }
-
- //printf("========threadID[%d], table rang: %d - %d \n", winfo->threadID, winfo->start_table_id, winfo->end_table_id);
-
- char* buffer = calloc(superTblInfo->maxSqlLen, 1);
-
- int nrecords_per_request = 0;
- if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
- nrecords_per_request = (superTblInfo->maxSqlLen - 1280 - superTblInfo->lenOfTagOfOneRow) / superTblInfo->lenOfOneRow;
- } else {
- nrecords_per_request = (superTblInfo->maxSqlLen - 1280) / superTblInfo->lenOfOneRow;
- }
-
- int nrecords_no_last_req = nrecords_per_request;
- int nrecords_last_req = 0;
- int loop_cnt = 0;
- if (0 != superTblInfo->insertRate) {
- if (nrecords_no_last_req >= superTblInfo->insertRate) {
- nrecords_no_last_req = superTblInfo->insertRate;
- } else {
- nrecords_last_req = superTblInfo->insertRate % nrecords_per_request;
- loop_cnt = (superTblInfo->insertRate / nrecords_per_request) + (superTblInfo->insertRate % nrecords_per_request ? 1 : 0) ;
- }
- }
-
- if (nrecords_no_last_req <= 0) {
- nrecords_no_last_req = 1;
- }
-
- if (nrecords_no_last_req >= INT16_MAX) {
- nrecords_no_last_req = INT16_MAX - 1;
- }
-
- if (nrecords_last_req >= INT16_MAX) {
- nrecords_last_req = INT16_MAX - 1;
- }
-
- int nrecords_cur_req = nrecords_no_last_req;
- int loop_cnt_orig = loop_cnt;
-
- //printf("========nrecords_per_request:%d, nrecords_no_last_req:%d, nrecords_last_req:%d, loop_cnt:%d\n", nrecords_per_request, nrecords_no_last_req, nrecords_last_req, loop_cnt);
-
- int64_t time_counter = winfo->start_time;
-
- int64_t st = 0;
- int64_t et = 0;
- for (int i = 0; i < superTblInfo->insertRows;) {
- if (superTblInfo->insertRate && (et - st) < 1000) {
- taosMsleep(1000 - (et - st)); // ms
- //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
- }
-
- if (superTblInfo->insertRate) {
- st = taosGetTimestampMs();
- }
-
- for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) {
- int inserted = i;
- int64_t tmp_time = time_counter;
-
- int sampleUsePos = samplePos;
- int k = 0;
- while (1)
- {
- int len = 0;
- memset(buffer, 0, superTblInfo->maxSqlLen);
- char *pstr = buffer;
-
- if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
- char* tagsValBuf = NULL;
- if (0 == superTblInfo->tagSource) {
- tagsValBuf = generateTagVaulesForStb(superTblInfo);
- } else {
- tagsValBuf = getTagValueFromTagSample(superTblInfo, tID % superTblInfo->tagSampleCount);
- }
- if (NULL == tagsValBuf) {
- goto free_and_statistics_2;
- }
-
- len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d using %s.%s tags %s values", winfo->db_name, superTblInfo->childTblPrefix, tID, winfo->db_name, superTblInfo->sTblName, tagsValBuf);
- tmfree(tagsValBuf);
- } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) {
- len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s values", winfo->db_name, superTblInfo->childTblName + tID * TSDB_TABLE_NAME_LEN);
- } else {
- len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d values", winfo->db_name, superTblInfo->childTblPrefix, tID);
- }
-
- for (k = 0; k < nrecords_cur_req;) {
- int retLen = 0;
- if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) {
- retLen = getRowDataFromSample(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo, &sampleUsePos, fp, sampleDataBuf);
- if (retLen < 0) {
- goto free_and_statistics_2;
- }
- } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", 8)) {
- int rand_num = rand_tinyint() % 100;
- if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) {
- int64_t d = tmp_time - rand() % superTblInfo->disorderRange;
- retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, d, superTblInfo);
- //printf("disorder rows, rand_num:%d, last ts:%"PRId64" current ts:%"PRId64"\n", rand_num, tmp_time, d);
- } else {
- retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo);
- }
- if (retLen < 0) {
- goto free_and_statistics_2;
- }
- }
- len += retLen;
- inserted++;
- k++;
- totalRowsInserted++;
-
- if (inserted >= superTblInfo->insertRows || (superTblInfo->maxSqlLen - len) < (superTblInfo->lenOfOneRow + 128)) break;
- }
-
- if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
- //printf("===== sql: %s \n\n", buffer);
- //int64_t t1 = taosGetTimestampMs();
- int affectedRows = queryDbExec(winfo->taos, buffer, INSERT_TYPE);
- if (0 > affectedRows){
- goto free_and_statistics_2;
- }
- totalAffectedRows += affectedRows;
-
- int64_t currentPrintTime = taosGetTimestampMs();
- if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", winfo->threadID, totalRowsInserted, totalAffectedRows);
- lastPrintTime = currentPrintTime;
- }
- //int64_t t2 = taosGetTimestampMs();
- //printf("taosc insert sql return, Spent %.4f seconds \n", (double)(t2 - t1)/1000.0);
- } else {
- #ifdef TD_LOWA_CURL
- //int64_t t1 = taosGetTimestampMs();
- int retCode = curlProceSql(g_Dbs.host, g_Dbs.port, buffer, winfo->curl_handle);
- //int64_t t2 = taosGetTimestampMs();
- //printf("http insert sql return, Spent %ld ms \n", t2 - t1);
-
- if (0 != retCode) {
- printf("========curl return fail, threadID[%d]\n", winfo->threadID);
- goto free_and_statistics_2;
- }
- #else
- printf("========no use http mode for no curl lib!\n");
- goto free_and_statistics_2;
- #endif
- }
-
- //printf("========tID:%d, k:%d, loop_cnt:%d\n", tID, k, loop_cnt);
-
- if (loop_cnt) {
- loop_cnt--;
- if ((1 == loop_cnt) && (0 != nrecords_last_req)) {
- nrecords_cur_req = nrecords_last_req;
- } else if (0 == loop_cnt){
- nrecords_cur_req = nrecords_no_last_req;
- loop_cnt = loop_cnt_orig;
- break;
- }
- } else {
- break;
- }
- }
-
- if (tID == winfo->end_table_id) {
- if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) {
- samplePos = sampleUsePos;
- }
- i = inserted;
- time_counter = tmp_time;
- }
- }
-
- if (superTblInfo->insertRate) {
- et = taosGetTimestampMs();
- }
- //printf("========loop %d childTables duration:%"PRId64 "========inserted rows:%d\n", winfo->end_table_id - winfo->start_table_id, et - st, i);
- }
-
- free_and_statistics_2:
- tmfree(buffer);
- tmfree(sampleDataBuf);
- tmfclose(fp);
-
- winfo->totalRowsInserted = totalRowsInserted;
- winfo->totalAffectedRows = totalAffectedRows;
-
- printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", winfo->threadID, totalRowsInserted, totalAffectedRows);
- return NULL;
-}
-
-void callBack(void *param, TAOS_RES *res, int code) {
- threadInfo* winfo = (threadInfo*)param;
-
- if (winfo->superTblInfo->insertRate) {
- winfo->et = taosGetTimestampMs();
- if (winfo->et - winfo->st < 1000) {
- taosMsleep(1000 - (winfo->et - winfo->st)); // ms
- }
- }
-
- char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen);
- char *data = calloc(1, MAX_DATA_SIZE);
- char *pstr = buffer;
- pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, winfo->start_table_id);
- if (winfo->counter >= winfo->superTblInfo->insertRows) {
- winfo->start_table_id++;
- winfo->counter = 0;
- }
- if (winfo->start_table_id > winfo->end_table_id) {
- tsem_post(&winfo->lock_sem);
- free(buffer);
- free(data);
- taos_free_result(res);
- return;
- }
-
- for (int i = 0; i < winfo->nrecords_per_request; i++) {
- int rand_num = rand() % 100;
- if (0 != winfo->superTblInfo->disorderRatio && rand_num < winfo->superTblInfo->disorderRatio)
- {
- int64_t d = winfo->lastTs - rand() % 1000000 + rand_num;
- //generateData(data, datatype, ncols_per_record, d, len_of_binary);
- (void)generateRowData(data, MAX_DATA_SIZE, d, winfo->superTblInfo);
- } else {
- //generateData(data, datatype, ncols_per_record, tmp_time += 1000, len_of_binary);
- (void)generateRowData(data, MAX_DATA_SIZE, winfo->lastTs += 1000, winfo->superTblInfo);
- }
- pstr += sprintf(pstr, "%s", data);
- winfo->counter++;
-
- if (winfo->counter >= winfo->superTblInfo->insertRows) {
- break;
- }
- }
-
- if (winfo->superTblInfo->insertRate) {
- winfo->st = taosGetTimestampMs();
- }
- taos_query_a(winfo->taos, buffer, callBack, winfo);
- free(buffer);
- free(data);
-
- taos_free_result(res);
-}
-
-void *asyncWrite(void *sarg) {
- threadInfo *winfo = (threadInfo *)sarg;
-
- winfo->nrecords_per_request = 0;
- //if (AUTO_CREATE_SUBTBL == winfo->superTblInfo->autoCreateTable) {
- winfo->nrecords_per_request = (winfo->superTblInfo->maxSqlLen - 1280 - winfo->superTblInfo->lenOfTagOfOneRow) / winfo->superTblInfo->lenOfOneRow;
- //} else {
- // winfo->nrecords_per_request = (winfo->superTblInfo->maxSqlLen - 1280) / winfo->superTblInfo->lenOfOneRow;
- //}
-
- if (0 != winfo->superTblInfo->insertRate) {
- if (winfo->nrecords_per_request >= winfo->superTblInfo->insertRate) {
- winfo->nrecords_per_request = winfo->superTblInfo->insertRate;
- }
- }
-
- if (winfo->nrecords_per_request <= 0) {
- winfo->nrecords_per_request = 1;
- }
-
- if (winfo->nrecords_per_request >= INT16_MAX) {
- winfo->nrecords_per_request = INT16_MAX - 1;
- }
-
- if (winfo->nrecords_per_request >= INT16_MAX) {
- winfo->nrecords_per_request = INT16_MAX - 1;
- }
-
- winfo->st = 0;
- winfo->et = 0;
- winfo->lastTs = winfo->start_time;
-
- if (winfo->superTblInfo->insertRate) {
- winfo->st = taosGetTimestampMs();
- }
- taos_query_a(winfo->taos, "show databases", callBack, winfo);
-
- tsem_wait(&(winfo->lock_sem));
-
- return NULL;
-}
-
-void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSuperTable* superTblInfo) {
- pthread_t *pids = malloc(threads * sizeof(pthread_t));
- threadInfo *infos = malloc(threads * sizeof(threadInfo));
- memset(pids, 0, threads * sizeof(pthread_t));
- memset(infos, 0, threads * sizeof(threadInfo));
- int ntables = superTblInfo->childTblCount;
-
- int a = ntables / threads;
- if (a < 1) {
- threads = ntables;
- a = 1;
- }
-
- int b = 0;
- if (threads != 0) {
- b = ntables % threads;
- }
-
- //TAOS* taos;
- //if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
- // taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port);
- // if (NULL == taos) {
- // printf("connect to server fail, reason: %s\n", taos_errstr(NULL));
- // exit(-1);
- // }
- //}
-
- int32_t timePrec = TSDB_TIME_PRECISION_MILLI;
- if (0 != precision[0]) {
- if (0 == strncasecmp(precision, "ms", 2)) {
- timePrec = TSDB_TIME_PRECISION_MILLI;
- } else if (0 == strncasecmp(precision, "us", 2)) {
- timePrec = TSDB_TIME_PRECISION_MICRO;
- } else {
- printf("No support precision: %s\n", precision);
- exit(-1);
- }
- }
-
- int64_t start_time;
- if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
- start_time = taosGetTimestamp(timePrec);
- } else {
- (void)taosParseTime(superTblInfo->startTimestamp, &start_time, strlen(superTblInfo->startTimestamp), timePrec, 0);
- }
-
- double start = getCurrentTime();
-
- int last = 0;
- for (int i = 0; i < threads; i++) {
- threadInfo *t_info = infos + i;
- t_info->threadID = i;
- tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
- t_info->superTblInfo = superTblInfo;
-
- t_info->start_time = start_time;
-
- if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
- //t_info->taos = taos;
- t_info->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port);
- if (NULL == t_info->taos) {
- printf("connect to server fail from insert sub thread, reason: %s\n", taos_errstr(NULL));
- exit(-1);
- }
- } else {
- t_info->taos = NULL;
- #ifdef TD_LOWA_CURL
- t_info->curl_handle = curl_easy_init();
- #endif
- }
-
- if (0 == superTblInfo->multiThreadWriteOneTbl) {
- t_info->start_table_id = last;
- t_info->end_table_id = i < b ? last + a : last + a - 1;
- last = t_info->end_table_id + 1;
- } else {
- t_info->start_table_id = 0;
- t_info->end_table_id = superTblInfo->childTblCount - 1;
- t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint();
- }
-
- tsem_init(&(t_info->lock_sem), 0, 0);
-
- if (SYNC == g_Dbs.queryMode) {
- pthread_create(pids + i, NULL, syncWrite, t_info);
- } else {
- pthread_create(pids + i, NULL, asyncWrite, t_info);
- }
- }
-
- for (int i = 0; i < threads; i++) {
- pthread_join(pids[i], NULL);
- }
-
- for (int i = 0; i < threads; i++) {
- threadInfo *t_info = infos + i;
-
- tsem_destroy(&(t_info->lock_sem));
- taos_close(t_info->taos);
-
- superTblInfo->totalAffectedRows += t_info->totalAffectedRows;
- superTblInfo->totalRowsInserted += t_info->totalRowsInserted;
- #ifdef TD_LOWA_CURL
- if (t_info->curl_handle) {
- curl_easy_cleanup(t_info->curl_handle);
- }
- #endif
- }
-
- double end = getCurrentTime();
-
- //taos_close(taos);
-
- free(pids);
- free(infos);
-
- printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s\n\n",
- end - start, superTblInfo->totalRowsInserted, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName);
- fprintf(g_fpOfInsertResult, "Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s\n\n",
- end - start, superTblInfo->totalRowsInserted, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName);
-}
-
-
-void *readTable(void *sarg) {
-#if 1
- threadInfo *rinfo = (threadInfo *)sarg;
- TAOS *taos = rinfo->taos;
- char command[BUFFER_SIZE] = "\0";
- int64_t sTime = rinfo->start_time;
- char *tb_prefix = rinfo->tb_prefix;
- FILE *fp = fopen(rinfo->fp, "a");
- if (NULL == fp) {
- printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
- return NULL;
- }
-
- int num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table;
- int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1;
- int totalData = num_of_DPT * num_of_tables;
- bool do_aggreFunc = g_Dbs.do_aggreFunc;
-
- int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2;
- if (!do_aggreFunc) {
- printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
- }
- printf("%d records:\n", totalData);
- fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n");
-
- for (int j = 0; j < n; j++) {
- double totalT = 0;
- int count = 0;
- for (int i = 0; i < num_of_tables; i++) {
- sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime);
-
- double t = getCurrentTime();
- TAOS_RES *pSql = taos_query(taos, command);
- int32_t code = taos_errno(pSql);
-
- if (code != 0) {
- fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
- taos_free_result(pSql);
- taos_close(taos);
- return NULL;
- }
-
- while (taos_fetch_row(pSql) != NULL) {
- count++;
- }
-
- t = getCurrentTime() - t;
- totalT += t;
-
- taos_free_result(pSql);
- }
-
- fprintf(fp, "|%10s | %10d | %12.2f | %10.2f |\n",
- aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData,
- (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000);
- printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT);
- }
- fprintf(fp, "\n");
- fclose(fp);
-#endif
- return NULL;
-}
-
-void *readMetric(void *sarg) {
-#if 1
- threadInfo *rinfo = (threadInfo *)sarg;
- TAOS *taos = rinfo->taos;
- char command[BUFFER_SIZE] = "\0";
- FILE *fp = fopen(rinfo->fp, "a");
- if (NULL == fp) {
- printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
- return NULL;
- }
-
- int num_of_DPT = rinfo->superTblInfo->insertRows;
- int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1;
- int totalData = num_of_DPT * num_of_tables;
- bool do_aggreFunc = g_Dbs.do_aggreFunc;
-
- int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2;
- if (!do_aggreFunc) {
- printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
- }
- printf("%d records:\n", totalData);
- fprintf(fp, "Querying On %d records:\n", totalData);
-
- for (int j = 0; j < n; j++) {
- char condition[BUFFER_SIZE - 30] = "\0";
- char tempS[64] = "\0";
-
- int m = 10 < num_of_tables ? 10 : num_of_tables;
-
- for (int i = 1; i <= m; i++) {
- if (i == 1) {
- sprintf(tempS, "t1 = %d", i);
- } else {
- sprintf(tempS, " or t1 = %d ", i);
- }
- strcat(condition, tempS);
-
- sprintf(command, "select %s from meters where %s", aggreFunc[j], condition);
-
- printf("Where condition: %s\n", condition);
- fprintf(fp, "%s\n", command);
-
- double t = getCurrentTime();
-
- TAOS_RES *pSql = taos_query(taos, command);
- int32_t code = taos_errno(pSql);
-
- if (code != 0) {
- fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
- taos_free_result(pSql);
- taos_close(taos);
- return NULL;
- }
- int count = 0;
- while (taos_fetch_row(pSql) != NULL) {
- count++;
- }
- t = getCurrentTime() - t;
-
- fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", num_of_tables * num_of_DPT / t, t * 1000);
- printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t);
-
- taos_free_result(pSql);
- }
- fprintf(fp, "\n");
- }
- fclose(fp);
-#endif
- return NULL;
-}
-
-
-int insertTestProcess() {
-
- g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a");
- if (NULL == g_fpOfInsertResult) {
- fprintf(stderr, "Failed to open %s for save result\n", g_Dbs.resultFile);
- return 1;
- };
-
- printfInsertMeta();
- printfInsertMetaToFile(g_fpOfInsertResult);
-
- if (!g_args.answer_yes) {
- printf("Press enter key to continue\n\n");
- (void)getchar();
- }
-
- init_rand_data();
-
- // create database and super tables
- (void)createDatabases();
-
- // pretreatement
- prePareSampleData();
-
- double start;
- double end;
-
- // create child tables
- start = getCurrentTime();
- createChildTables();
- end = getCurrentTime();
- if (g_totalChildTables > 0) {
- printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n", end - start, g_totalChildTables, g_Dbs.threadCount);
- fprintf(g_fpOfInsertResult, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n", end - start, g_totalChildTables, g_Dbs.threadCount);
- }
-
- usleep(1000*1000);
-
- // create sub threads for inserting data
- //start = getCurrentTime();
- for (int i = 0; i < g_Dbs.dbCount; i++) {
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j];
- startMultiThreadInsertData(g_Dbs.threadCount, g_Dbs.db[i].dbName, g_Dbs.db[i].dbCfg.precision, superTblInfo);
- }
- }
- //end = getCurrentTime();
-
- //int64_t totalRowsInserted = 0;
- //int64_t totalAffectedRows = 0;
- //for (int i = 0; i < g_Dbs.dbCount; i++) {
- // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- // totalRowsInserted += g_Dbs.db[i].superTbls[j].totalRowsInserted;
- // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows;
- //}
- //printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s)\n\n", end - start, totalRowsInserted, totalAffectedRows, g_Dbs.threadCount);
- if (NULL == g_args.metaFile && false == g_Dbs.insert_only) {
- // query data
- pthread_t read_id;
- threadInfo *rInfo = malloc(sizeof(threadInfo));
- rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
- rInfo->start_table_id = 0;
- rInfo->end_table_id = g_Dbs.db[0].superTbls[0].childTblCount - 1;
- //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
- //rInfo->nrecords_per_table = g_Dbs.db[0].superTbls[0].insertRows;
- rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
- rInfo->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, g_Dbs.db[0].dbName, g_Dbs.port);
- strcpy(rInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix);
- strcpy(rInfo->fp, g_Dbs.resultFile);
-
- if (!g_Dbs.use_metric) {
- pthread_create(&read_id, NULL, readTable, rInfo);
- } else {
- pthread_create(&read_id, NULL, readMetric, rInfo);
- }
- pthread_join(read_id, NULL);
- taos_close(rInfo->taos);
- }
-
- postFreeResource();
-
- return 0;
-}
-
-void *superQueryProcess(void *sarg) {
- threadInfo *winfo = (threadInfo *)sarg;
-
- //char sqlStr[MAX_TB_NAME_SIZE*2];
- //sprintf(sqlStr, "use %s", g_queryInfo.dbName);
- //queryDB(winfo->taos, sqlStr);
-
- int64_t st = 0;
- int64_t et = 0;
- while (1) {
- if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) {
- taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms
- //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
- }
-
- st = taosGetTimestampMs();
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
- int64_t t1 = taosGetTimestampUs();
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
- sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID);
- }
- selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], tmpFile);
- int64_t t2 = taosGetTimestampUs();
- printf("=[taosc] thread[%"PRIu64"] complete one sql, Spent %f s\n", (uint64_t)pthread_self(), (t2 - t1)/1000000.0);
- } else {
- #ifdef TD_LOWA_CURL
- int64_t t1 = taosGetTimestampUs();
- int retCode = curlProceSql(g_queryInfo.host, g_queryInfo.port, g_queryInfo.superQueryInfo.sql[i], winfo->curl_handle);
- int64_t t2 = taosGetTimestampUs();
- printf("=[restful] thread[%"PRIu64"] complete one sql, Spent %f s\n", (uint64_t)pthread_self(), (t2 - t1)/1000000.0);
-
- if (0 != retCode) {
- printf("====curl return fail, threadID[%d]\n", winfo->threadID);
- return NULL;
- }
- #endif
- }
- }
- et = taosGetTimestampMs();
- printf("==thread[%"PRIu64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", (uint64_t)pthread_self(), (double)(et - st)/1000.0);
- }
- return NULL;
-}
-
-void replaceSubTblName(char* inSql, char* outSql, int tblIndex) {
- char sourceString[32] = "xxxx";
- char subTblName[MAX_TB_NAME_SIZE*3];
- sprintf(subTblName, "%s.%s", g_queryInfo.dbName, g_queryInfo.subQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN);
-
- //printf("inSql: %s\n", inSql);
-
- char* pos = strstr(inSql, sourceString);
- if (0 == pos) {
- return;
- }
-
- strncpy(outSql, inSql, pos - inSql);
- //printf("1: %s\n", outSql);
- strcat(outSql, subTblName);
- //printf("2: %s\n", outSql);
- strcat(outSql, pos+strlen(sourceString));
- //printf("3: %s\n", outSql);
-}
-
-void *subQueryProcess(void *sarg) {
- char sqlstr[1024];
- threadInfo *winfo = (threadInfo *)sarg;
- int64_t st = 0;
- int64_t et = g_queryInfo.subQueryInfo.rate*1000;
- while (1) {
- if (g_queryInfo.subQueryInfo.rate && (et - st) < g_queryInfo.subQueryInfo.rate*1000) {
- taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms
- //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
- }
-
- st = taosGetTimestampMs();
- for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
- for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) {
- memset(sqlstr,0,sizeof(sqlstr));
- replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], sqlstr, i);
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
- sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID);
- }
- selectAndGetResult(winfo->taos, sqlstr, tmpFile);
- }
- }
- et = taosGetTimestampMs();
- printf("####thread[%"PRIu64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", (uint64_t)pthread_self(), winfo->start_table_id, winfo->end_table_id, (double)(et - st)/1000.0);
- }
- return NULL;
-}
-
-int queryTestProcess() {
- TAOS * taos = NULL;
- if (taos_init()) {
- fprintf(stderr, "Failed to init taos\n");
- exit(-1);
- }
-
- taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, NULL, g_queryInfo.port);
- if (taos == NULL) {
- fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
- exit(-1);
- }
-
- if (0 != g_queryInfo.subQueryInfo.sqlCount) {
- (void)getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, g_queryInfo.subQueryInfo.sTblName, &g_queryInfo.subQueryInfo.childTblName, &g_queryInfo.subQueryInfo.childTblCount);
- }
-
- printfQueryMeta();
-
- if (!g_args.answer_yes) {
- printf("Press enter key to continue\n\n");
- (void)getchar();
- }
-
- printfQuerySystemInfo(taos);
-
- pthread_t *pids = NULL;
- threadInfo *infos = NULL;
- //==== create sub threads for query from specify table
- if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) {
-
- pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t));
- infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo));
- if ((NULL == pids) || (NULL == infos)) {
- printf("malloc failed for create threads\n");
- taos_close(taos);
- exit(-1);
- }
-
- for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) {
- threadInfo *t_info = infos + i;
- t_info->threadID = i;
-
- if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
- t_info->taos = taos;
-
- char sqlStr[MAX_TB_NAME_SIZE*2];
- sprintf(sqlStr, "use %s", g_queryInfo.dbName);
- (void)queryDbExec(t_info->taos, sqlStr, NO_INSERT_TYPE);
- } else {
- t_info->taos = NULL;
- #ifdef TD_LOWA_CURL
- t_info->curl_handle = curl_easy_init();
- #endif
- }
-
- pthread_create(pids + i, NULL, superQueryProcess, t_info);
- }
- }else {
- g_queryInfo.superQueryInfo.concurrent = 0;
- }
-
- pthread_t *pidsOfSub = NULL;
- threadInfo *infosOfSub = NULL;
- //==== create sub threads for query from all sub table of the super table
- if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) {
- pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t));
- infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo));
- if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
- printf("malloc failed for create threads\n");
- taos_close(taos);
- exit(-1);
- }
-
- int ntables = g_queryInfo.subQueryInfo.childTblCount;
- int threads = g_queryInfo.subQueryInfo.threadCnt;
-
- int a = ntables / threads;
- if (a < 1) {
- threads = ntables;
- a = 1;
- }
-
- int b = 0;
- if (threads != 0) {
- b = ntables % threads;
- }
-
- int last = 0;
- for (int i = 0; i < threads; i++) {
- threadInfo *t_info = infosOfSub + i;
- t_info->threadID = i;
-
- t_info->start_table_id = last;
- t_info->end_table_id = i < b ? last + a : last + a - 1;
- last = t_info->end_table_id + 1;
- t_info->taos = taos;
- pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info);
- }
-
- g_queryInfo.subQueryInfo.threadCnt = threads;
- }else {
- g_queryInfo.subQueryInfo.threadCnt = 0;
- }
-
- for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) {
- pthread_join(pids[i], NULL);
- }
-
- tmfree((char*)pids);
- tmfree((char*)infos);
-
- for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) {
- pthread_join(pidsOfSub[i], NULL);
- }
-
- tmfree((char*)pidsOfSub);
- tmfree((char*)infosOfSub);
-
- taos_close(taos);
- return 0;
-}
-
-static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
- if (res == NULL || taos_errno(res) != 0) {
- printf("failed to subscribe result, code:%d, reason:%s\n", code, taos_errstr(res));
- return;
- }
-
- getResult(res, (char*)param);
- taos_free_result(res);
-}
-
-static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) {
- TAOS_SUB* tsub = NULL;
-
- if (g_queryInfo.superQueryInfo.subscribeMode) {
- tsub = taos_subscribe(taos, g_queryInfo.superQueryInfo.subscribeRestart, topic, sql, subscribe_callback, (void*)resultFileName, g_queryInfo.superQueryInfo.subscribeInterval);
- } else {
- tsub = taos_subscribe(taos, g_queryInfo.superQueryInfo.subscribeRestart, topic, sql, NULL, NULL, 0);
- }
-
- if (tsub == NULL) {
- printf("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
- return NULL;
- }
-
- return tsub;
-}
-
-void *subSubscribeProcess(void *sarg) {
- threadInfo *winfo = (threadInfo *)sarg;
- char subSqlstr[1024];
-
- char sqlStr[MAX_TB_NAME_SIZE*2];
- sprintf(sqlStr, "use %s", g_queryInfo.dbName);
- if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)){
- return NULL;
- }
-
- //int64_t st = 0;
- //int64_t et = 0;
- do {
- //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) {
- // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms
- // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
- //}
-
- //st = taosGetTimestampMs();
- char topic[32] = {0};
- for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) {
- sprintf(topic, "taosdemo-subscribe-%d", i);
- memset(subSqlstr,0,sizeof(subSqlstr));
- replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], subSqlstr, i);
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
- sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID);
- }
- g_queryInfo.subQueryInfo.tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile);
- if (NULL == g_queryInfo.subQueryInfo.tsub[i]) {
- return NULL;
- }
- }
- //et = taosGetTimestampMs();
- //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", pthread_self(), (double)(et - st)/1000.0);
- } while (0);
-
- // start loop to consume result
- TAOS_RES* res = NULL;
- while (1) {
- for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) {
- if (1 == g_queryInfo.subQueryInfo.subscribeMode) {
- continue;
- }
-
- res = taos_consume(g_queryInfo.subQueryInfo.tsub[i]);
- if (res) {
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
- sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID);
- }
- getResult(res, tmpFile);
- }
- }
- }
- taos_free_result(res);
-
- for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) {
- taos_unsubscribe(g_queryInfo.subQueryInfo.tsub[i], g_queryInfo.subQueryInfo.subscribeKeepProgress);
- }
- return NULL;
-}
-
-void *superSubscribeProcess(void *sarg) {
- threadInfo *winfo = (threadInfo *)sarg;
-
- char sqlStr[MAX_TB_NAME_SIZE*2];
- sprintf(sqlStr, "use %s", g_queryInfo.dbName);
- if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)) {
- return NULL;
- }
-
- //int64_t st = 0;
- //int64_t et = 0;
- do {
- //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) {
- // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms
- // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
- //}
-
- //st = taosGetTimestampMs();
- char topic[32] = {0};
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- sprintf(topic, "taosdemo-subscribe-%d", i);
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
- sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID);
- }
- g_queryInfo.superQueryInfo.tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.superQueryInfo.sql[i], topic, tmpFile);
- if (NULL == g_queryInfo.superQueryInfo.tsub[i]) {
- return NULL;
- }
- }
- //et = taosGetTimestampMs();
- //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", pthread_self(), (double)(et - st)/1000.0);
- } while (0);
-
- // start loop to consume result
- TAOS_RES* res = NULL;
- while (1) {
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- if (1 == g_queryInfo.superQueryInfo.subscribeMode) {
- continue;
- }
-
- res = taos_consume(g_queryInfo.superQueryInfo.tsub[i]);
- if (res) {
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
- sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID);
- }
- getResult(res, tmpFile);
- }
- }
- }
- taos_free_result(res);
-
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- taos_unsubscribe(g_queryInfo.superQueryInfo.tsub[i], g_queryInfo.superQueryInfo.subscribeKeepProgress);
- }
- return NULL;
-}
-
-int subscribeTestProcess() {
- printfQueryMeta();
-
- if (!g_args.answer_yes) {
- printf("Press enter key to continue\n\n");
- (void)getchar();
- }
-
- TAOS * taos = NULL;
- if (taos_init()) {
- fprintf(stderr, "Failed to init taos\n");
- exit(-1);
- }
-
- taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, g_queryInfo.dbName, g_queryInfo.port);
- if (taos == NULL) {
- fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
- exit(-1);
- }
-
- if (0 != g_queryInfo.subQueryInfo.sqlCount) {
- (void)getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, g_queryInfo.subQueryInfo.sTblName, &g_queryInfo.subQueryInfo.childTblName, &g_queryInfo.subQueryInfo.childTblCount);
- }
-
-
- pthread_t *pids = NULL;
- threadInfo *infos = NULL;
- //==== create sub threads for query from super table
- if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) {
- pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t));
- infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo));
- if ((NULL == pids) || (NULL == infos)) {
- printf("malloc failed for create threads\n");
- taos_close(taos);
- exit(-1);
- }
-
- for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) {
- threadInfo *t_info = infos + i;
- t_info->threadID = i;
- t_info->taos = taos;
- pthread_create(pids + i, NULL, superSubscribeProcess, t_info);
- }
- }
-
- //==== create sub threads for query from sub table
- pthread_t *pidsOfSub = NULL;
- threadInfo *infosOfSub = NULL;
- if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) {
- pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t));
- infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo));
- if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
- printf("malloc failed for create threads\n");
- taos_close(taos);
- exit(-1);
- }
-
- int ntables = g_queryInfo.subQueryInfo.childTblCount;
- int threads = g_queryInfo.subQueryInfo.threadCnt;
-
- int a = ntables / threads;
- if (a < 1) {
- threads = ntables;
- a = 1;
- }
-
- int b = 0;
- if (threads != 0) {
- b = ntables % threads;
- }
-
- int last = 0;
- for (int i = 0; i < threads; i++) {
- threadInfo *t_info = infosOfSub + i;
- t_info->threadID = i;
-
- t_info->start_table_id = last;
- t_info->end_table_id = i < b ? last + a : last + a - 1;
- t_info->taos = taos;
- pthread_create(pidsOfSub + i, NULL, subSubscribeProcess, t_info);
- }
- g_queryInfo.subQueryInfo.threadCnt = threads;
- }
-
- for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) {
- pthread_join(pids[i], NULL);
- }
-
- tmfree((char*)pids);
- tmfree((char*)infos);
-
- for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) {
- pthread_join(pidsOfSub[i], NULL);
- }
-
- tmfree((char*)pidsOfSub);
- tmfree((char*)infosOfSub);
- taos_close(taos);
- return 0;
-}
-
-void initOfInsertMeta() {
- memset(&g_Dbs, 0, sizeof(SDbs));
-
- // set default values
- strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
- g_Dbs.port = 6030;
- strncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE);
- strncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE);
- g_Dbs.threadCount = 2;
- g_Dbs.use_metric = true;
-}
-
-void initOfQueryMeta() {
- memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo));
-
- // set default values
- strncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE);
- g_queryInfo.port = 6030;
- strncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE);
- strncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE);
-}
-
-void setParaFromArg(){
- if (g_args.host) {
- strcpy(g_Dbs.host, g_args.host);
- } else {
- strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
- }
-
- if (g_args.user) {
- strcpy(g_Dbs.user, g_args.user);
- }
-
- if (g_args.password) {
- strcpy(g_Dbs.password, g_args.password);
- }
-
- if (g_args.port) {
- g_Dbs.port = g_args.port;
- }
-
- g_Dbs.dbCount = 1;
- g_Dbs.db[0].drop = 1;
-
- strncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE);
- g_Dbs.db[0].dbCfg.replica = g_args.replica;
- strncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
-
-
- strncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);
-
- g_Dbs.use_metric = g_args.use_metric;
- g_Dbs.insert_only = g_args.insert_only;
-
- g_Dbs.db[0].superTblCount = 1;
- strncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE);
- g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
- g_Dbs.threadCount = g_args.num_of_threads;
- g_Dbs.threadCountByCreateTbl = 1;
- g_Dbs.queryMode = g_args.mode;
-
- g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL;
- g_Dbs.db[0].superTbls[0].superTblExists = TBL_NO_EXISTS;
- g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS;
- g_Dbs.db[0].superTbls[0].insertRate = 0;
- g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange;
- g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio;
- strncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
- strncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
- strncpy(g_Dbs.db[0].superTbls[0].insertMode, "taosc", MAX_TB_NAME_SIZE);
- strncpy(g_Dbs.db[0].superTbls[0].startTimestamp, "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
- g_Dbs.db[0].superTbls[0].timeStampStep = 10;
-
- // g_args.num_of_RPR;
- g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT;
- g_Dbs.db[0].superTbls[0].maxSqlLen = TSDB_PAYLOAD_SIZE;
-
- g_Dbs.do_aggreFunc = true;
-
- char dataString[STRING_LEN];
- char **data_type = g_args.datatype;
-
- memset(dataString, 0, STRING_LEN);
-
- if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0 || strcasecmp(data_type[0], "NCHAR") == 0 ) {
- g_Dbs.do_aggreFunc = false;
- }
-
- g_Dbs.db[0].superTbls[0].columnCount = 0;
- for (int i = 0; i < MAX_NUM_DATATYPE; i++) {
- if (data_type[i] == NULL) {
- break;
- }
-
- strncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, data_type[i], MAX_TB_NAME_SIZE);
- g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary;
- g_Dbs.db[0].superTbls[0].columnCount++;
- }
-
- if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) {
- g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR;
- } else {
- for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) {
- strncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", MAX_TB_NAME_SIZE);
- g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
- g_Dbs.db[0].superTbls[0].columnCount++;
- }
- }
-
- if (g_Dbs.use_metric) {
- strncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", MAX_TB_NAME_SIZE);
- g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;
-
- strncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE);
- g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
- g_Dbs.db[0].superTbls[0].tagCount = 2;
- } else {
- g_Dbs.db[0].superTbls[0].tagCount = 0;
- }
-}
-
-/* Function to do regular expression check */
-static int regexMatch(const char *s, const char *reg, int cflags) {
- regex_t regex;
- char msgbuf[100] = {0};
-
- /* Compile regular expression */
- if (regcomp(®ex, reg, cflags) != 0) {
- printf("Fail to compile regex\n");
- exit(-1);
- }
-
- /* Execute regular expression */
- int reti = regexec(®ex, s, 0, NULL, 0);
- if (!reti) {
- regfree(®ex);
- return 1;
- } else if (reti == REG_NOMATCH) {
- regfree(®ex);
- return 0;
- } else {
- regerror(reti, ®ex, msgbuf, sizeof(msgbuf));
- printf("Regex match failed: %s\n", msgbuf);
- regfree(®ex);
- exit(-1);
- }
-
- return 0;
-}
-
-static int isCommentLine(char *line) {
- if (line == NULL) return 1;
-
- return regexMatch(line, "^\\s*#.*", REG_EXTENDED);
-}
-
-void querySqlFile(TAOS* taos, char* sqlFile)
-{
- FILE *fp = fopen(sqlFile, "r");
- if (fp == NULL) {
- printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno));
- return;
- }
-
- int read_len = 0;
- char * cmd = calloc(1, MAX_SQL_SIZE);
- size_t cmd_len = 0;
- char * line = NULL;
- size_t line_len = 0;
-
- double t = getCurrentTime();
-
- while ((read_len = tgetline(&line, &line_len, fp)) != -1) {
- if (read_len >= MAX_SQL_SIZE) continue;
- line[--read_len] = '\0';
-
- if (read_len == 0 || isCommentLine(line)) { // line starts with #
- continue;
- }
-
- if (line[read_len - 1] == '\\') {
- line[read_len - 1] = ' ';
- memcpy(cmd + cmd_len, line, read_len);
- cmd_len += read_len;
- continue;
- }
-
- memcpy(cmd + cmd_len, line, read_len);
- queryDbExec(taos, cmd, NO_INSERT_TYPE);
- memset(cmd, 0, MAX_SQL_SIZE);
- cmd_len = 0;
- }
-
- t = getCurrentTime() - t;
- printf("run %s took %.6f second(s)\n\n", sqlFile, t);
-
- tmfree(cmd);
- tmfree(line);
- tmfclose(fp);
- return;
-}
-
-int main(int argc, char *argv[]) {
- parse_args(argc, argv, &g_args);
-
- if (g_args.metaFile) {
- initOfInsertMeta();
- initOfQueryMeta();
- if (false == getInfoFromJsonFile(g_args.metaFile)) {
- printf("Failed to read %s\n", g_args.metaFile);
- return 1;
- }
- } else {
-
- memset(&g_Dbs, 0, sizeof(SDbs));
- g_jsonType = INSERT_MODE;
- setParaFromArg();
-
- if (NULL != g_args.sqlFile) {
- TAOS* qtaos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, g_Dbs.db[0].dbName, g_Dbs.port);
- querySqlFile(qtaos, g_args.sqlFile);
- taos_close(qtaos);
- return 0;
- }
-
- (void)insertTestProcess();
- if (g_Dbs.insert_only) return 0;
-
- // select
-
- //printf("At present, there is no integration of taosdemo, please wait patiently!\n");
- return 0;
- }
-
- if (INSERT_MODE == g_jsonType) {
- if (g_Dbs.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir);
- (void)insertTestProcess();
- } else if (QUERY_MODE == g_jsonType) {
- if (g_queryInfo.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
- (void)queryTestProcess();
- } else if (SUBSCRIBE_MODE == g_jsonType) {
- if (g_queryInfo.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
- (void)subscribeTestProcess();
- } else {
- ;
- }
-
- taos_cleanup();
- return 0;
-}
-
diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c
index 0b7b5ca487..538ed37879 100644
--- a/src/os/src/detail/osFile.c
+++ b/src/os/src/detail/osFile.c
@@ -25,7 +25,8 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) {
char tmpPath[PATH_MAX];
int32_t len = strlen(tsTempDir);
memcpy(tmpPath, tsTempDir, len);
-
+ static uint64_t seqId = 0;
+
if (tmpPath[len - 1] != '/') {
tmpPath[len++] = '/';
}
@@ -36,8 +37,10 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) {
strcat(tmpPath, "-%d-%s");
}
- char rand[8] = {0};
- taosRandStr(rand, tListLen(rand) - 1);
+ char rand[32] = {0};
+
+ sprintf(rand, "%"PRIu64, atomic_add_fetch_64(&seqId, 1));
+
snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand);
}
diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c
index ec898f51cc..13f706af65 100644
--- a/src/plugins/http/src/httpContext.c
+++ b/src/plugins/http/src/httpContext.c
@@ -154,7 +154,9 @@ void httpReleaseContext(HttpContext *pContext, bool clearRes) {
}
if (clearRes) {
- httpClearParser(pContext->parser);
+ if (pContext->parser) {
+ httpClearParser(pContext->parser);
+ }
memset(&pContext->singleCmd, 0, sizeof(HttpSqlCmd));
}
diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h
index a31c032bf5..bcc876c953 100644
--- a/src/query/inc/qSqlparser.h
+++ b/src/query/inc/qSqlparser.h
@@ -232,6 +232,8 @@ SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t s
tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType);
+tSQLExpr *tSqlExprClone(tSQLExpr *pSrc);
+
void tSqlExprDestroy(tSQLExpr *pExpr);
tSQLExprList *tSqlExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken *pDistinct, SStrToken *pToken);
diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y
index 16492c2666..8a01a736b7 100644
--- a/src/query/inc/sql.y
+++ b/src/query/inc/sql.y
@@ -674,6 +674,8 @@ expr(A) ::= expr(X) GE expr(Y). {A = tSqlExprCreate(X, Y, TK_GE);}
expr(A) ::= expr(X) NE expr(Y). {A = tSqlExprCreate(X, Y, TK_NE);}
expr(A) ::= expr(X) EQ expr(Y). {A = tSqlExprCreate(X, Y, TK_EQ);}
+expr(A) ::= expr(X) BETWEEN expr(Y) AND expr(Z). { tSQLExpr* X2 = tSqlExprClone(X); A = tSqlExprCreate(tSqlExprCreate(X, Y, TK_GE), tSqlExprCreate(X2, Z, TK_LE), TK_AND);}
+
expr(A) ::= expr(X) AND expr(Y). {A = tSqlExprCreate(X, Y, TK_AND);}
expr(A) ::= expr(X) OR expr(Y). {A = tSqlExprCreate(X, Y, TK_OR); }
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index f7c7f9ddc0..2d5287fb93 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -3687,6 +3687,14 @@ static void spread_function(SQLFunctionCtx *pCtx) {
LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, double, pCtx->inputType, numOfElems);
} else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, float, pCtx->inputType, numOfElems);
+ } else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) {
+ LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, uint8_t, pCtx->inputType, numOfElems);
+ } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) {
+ LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, uint16_t, pCtx->inputType, numOfElems);
+ } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) {
+ LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, uint32_t, pCtx->inputType, numOfElems);
+ } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) {
+ LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, uint64_t, pCtx->inputType, numOfElems);
}
if (!pCtx->hasNull) {
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index ec1c2fdb6e..3d3e7295b9 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -5162,6 +5162,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
assert(pQuery->prjInfo.vgroupLimit == -1);
}
+ setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
+
bool hasMoreBlock = true;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
SQueryCostInfo *summary = &pRuntimeEnv->summary;
diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c
index aa05c655c5..07bb307aba 100644
--- a/src/query/src/qParserImpl.c
+++ b/src/query/src/qParserImpl.c
@@ -289,6 +289,28 @@ tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) {
return pExpr;
}
+
+
+tSQLExpr *tSqlExprClone(tSQLExpr *pSrc) {
+ tSQLExpr *pExpr = calloc(1, sizeof(tSQLExpr));
+
+ memcpy(pExpr, pSrc, sizeof(*pSrc));
+
+ if (pSrc->pLeft) {
+ pExpr->pLeft = tSqlExprClone(pSrc->pLeft);
+ }
+
+ if (pSrc->pRight) {
+ pExpr->pRight = tSqlExprClone(pSrc->pRight);
+ }
+
+ //we don't clone pParam now because clone is only used for between/and
+ assert(pSrc->pParam == NULL);
+
+ return pExpr;
+}
+
+
void tSqlExprNodeDestroy(tSQLExpr *pExpr) {
if (pExpr == NULL) {
return;
@@ -309,8 +331,9 @@ void tSqlExprDestroy(tSQLExpr *pExpr) {
}
tSqlExprDestroy(pExpr->pLeft);
+ pExpr->pLeft = NULL;
tSqlExprDestroy(pExpr->pRight);
-
+ pExpr->pRight = NULL;
tSqlExprNodeDestroy(pExpr);
}
diff --git a/src/query/src/sql.c b/src/query/src/sql.c
index 2e5b66a3f5..2b1109688d 100644
--- a/src/query/src/sql.c
+++ b/src/query/src/sql.c
@@ -136,18 +136,18 @@ typedef union {
#define ParseCTX_FETCH
#define ParseCTX_STORE
#define YYFALLBACK 1
-#define YYNSTATE 290
-#define YYNRULE 253
-#define YYNRULE_WITH_ACTION 253
+#define YYNSTATE 294
+#define YYNRULE 254
+#define YYNRULE_WITH_ACTION 254
#define YYNTOKEN 210
-#define YY_MAX_SHIFT 289
-#define YY_MIN_SHIFTREDUCE 473
-#define YY_MAX_SHIFTREDUCE 725
-#define YY_ERROR_ACTION 726
-#define YY_ACCEPT_ACTION 727
-#define YY_NO_ACTION 728
-#define YY_MIN_REDUCE 729
-#define YY_MAX_REDUCE 981
+#define YY_MAX_SHIFT 293
+#define YY_MIN_SHIFTREDUCE 477
+#define YY_MAX_SHIFTREDUCE 730
+#define YY_ERROR_ACTION 731
+#define YY_ACCEPT_ACTION 732
+#define YY_NO_ACTION 733
+#define YY_MIN_REDUCE 734
+#define YY_MAX_REDUCE 987
/************* End control #defines *******************************************/
#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
@@ -214,139 +214,142 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (627)
+#define YY_ACTTAB_COUNT (651)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 888, 517, 727, 289, 517, 182, 287, 590, 28, 518,
- /* 10 */ 15, 161, 518, 43, 44, 771, 45, 46, 150, 162,
- /* 20 */ 194, 37, 126, 517, 238, 49, 47, 51, 48, 963,
- /* 30 */ 166, 518, 186, 42, 41, 264, 263, 40, 39, 38,
- /* 40 */ 43, 44, 877, 45, 46, 877, 184, 194, 37, 863,
- /* 50 */ 121, 238, 49, 47, 51, 48, 183, 866, 885, 221,
- /* 60 */ 42, 41, 126, 126, 40, 39, 38, 474, 475, 476,
- /* 70 */ 477, 478, 479, 480, 481, 482, 483, 484, 485, 288,
- /* 80 */ 43, 44, 211, 45, 46, 916, 254, 194, 37, 162,
- /* 90 */ 630, 238, 49, 47, 51, 48, 71, 94, 189, 964,
- /* 100 */ 42, 41, 274, 960, 40, 39, 38, 64, 65, 226,
- /* 110 */ 21, 252, 282, 281, 251, 250, 249, 280, 248, 279,
- /* 120 */ 278, 277, 247, 276, 275, 917, 70, 233, 830, 674,
- /* 130 */ 818, 819, 820, 821, 822, 823, 824, 825, 826, 827,
- /* 140 */ 828, 829, 831, 832, 44, 199, 45, 46, 274, 28,
- /* 150 */ 194, 37, 162, 959, 238, 49, 47, 51, 48, 860,
- /* 160 */ 201, 188, 964, 42, 41, 634, 214, 40, 39, 38,
- /* 170 */ 866, 45, 46, 218, 217, 194, 37, 958, 72, 238,
- /* 180 */ 49, 47, 51, 48, 16, 866, 205, 197, 42, 41,
- /* 190 */ 863, 283, 40, 39, 38, 193, 687, 22, 200, 678,
- /* 200 */ 170, 681, 203, 684, 178, 34, 171, 849, 850, 27,
- /* 210 */ 853, 106, 105, 169, 193, 687, 866, 179, 678, 75,
- /* 220 */ 681, 780, 684, 21, 150, 282, 281, 190, 191, 164,
- /* 230 */ 280, 237, 279, 278, 277, 614, 276, 275, 611, 10,
- /* 240 */ 612, 22, 613, 74, 165, 136, 190, 191, 63, 34,
- /* 250 */ 836, 854, 207, 834, 835, 261, 260, 167, 837, 852,
- /* 260 */ 839, 840, 838, 126, 841, 842, 208, 209, 204, 168,
- /* 270 */ 220, 256, 49, 47, 51, 48, 28, 177, 851, 927,
- /* 280 */ 42, 41, 92, 96, 40, 39, 38, 28, 86, 101,
- /* 290 */ 104, 95, 3, 140, 28, 50, 28, 98, 31, 81,
- /* 300 */ 77, 80, 28, 156, 152, 119, 28, 206, 686, 154,
- /* 310 */ 109, 108, 107, 34, 50, 42, 41, 862, 224, 40,
- /* 320 */ 39, 38, 29, 685, 235, 198, 69, 686, 863, 40,
- /* 330 */ 39, 38, 257, 192, 258, 863, 254, 863, 676, 864,
- /* 340 */ 262, 615, 685, 863, 266, 655, 656, 863, 286, 285,
- /* 350 */ 113, 772, 627, 622, 150, 680, 642, 683, 646, 23,
- /* 360 */ 123, 223, 54, 647, 706, 688, 239, 18, 17, 17,
- /* 370 */ 679, 55, 682, 26, 677, 4, 244, 58, 600, 241,
- /* 380 */ 602, 243, 29, 29, 54, 73, 601, 174, 85, 84,
- /* 390 */ 54, 175, 56, 12, 11, 173, 59, 91, 90, 61,
- /* 400 */ 973, 589, 14, 13, 618, 616, 619, 617, 103, 102,
- /* 410 */ 118, 116, 160, 172, 163, 865, 926, 195, 923, 922,
- /* 420 */ 196, 265, 879, 120, 887, 35, 909, 894, 896, 908,
- /* 430 */ 122, 137, 859, 135, 34, 138, 139, 782, 222, 246,
- /* 440 */ 158, 32, 255, 779, 117, 978, 641, 82, 977, 975,
- /* 450 */ 141, 259, 972, 88, 971, 227, 969, 142, 800, 690,
- /* 460 */ 185, 33, 30, 159, 231, 769, 97, 60, 876, 128,
- /* 470 */ 767, 99, 57, 127, 236, 52, 234, 232, 230, 100,
- /* 480 */ 765, 764, 130, 210, 228, 36, 151, 93, 762, 267,
- /* 490 */ 268, 269, 270, 761, 760, 271, 759, 272, 273, 758,
- /* 500 */ 153, 155, 755, 753, 751, 284, 749, 747, 157, 725,
- /* 510 */ 225, 66, 212, 67, 910, 213, 724, 215, 216, 723,
- /* 520 */ 180, 202, 711, 245, 219, 181, 176, 223, 78, 624,
- /* 530 */ 62, 763, 6, 240, 110, 111, 68, 757, 145, 643,
- /* 540 */ 144, 801, 143, 146, 147, 149, 148, 756, 1, 124,
- /* 550 */ 112, 187, 748, 229, 125, 2, 648, 861, 7, 8,
- /* 560 */ 689, 24, 133, 131, 129, 132, 134, 25, 5, 9,
- /* 570 */ 691, 19, 20, 242, 76, 558, 554, 74, 552, 551,
- /* 580 */ 550, 547, 253, 521, 83, 29, 79, 592, 53, 591,
- /* 590 */ 87, 89, 588, 542, 540, 532, 538, 534, 536, 530,
- /* 600 */ 528, 560, 559, 557, 556, 555, 553, 549, 548, 54,
- /* 610 */ 519, 489, 487, 729, 728, 728, 728, 728, 728, 728,
- /* 620 */ 728, 728, 728, 728, 728, 114, 115,
+ /* 0 */ 74, 521, 732, 293, 521, 165, 186, 291, 28, 522,
+ /* 10 */ 190, 893, 522, 43, 44, 969, 47, 48, 15, 776,
+ /* 20 */ 198, 37, 152, 46, 242, 51, 49, 53, 50, 854,
+ /* 30 */ 855, 27, 858, 42, 41, 871, 128, 40, 39, 38,
+ /* 40 */ 43, 44, 882, 47, 48, 882, 188, 198, 37, 868,
+ /* 50 */ 46, 242, 51, 49, 53, 50, 187, 128, 203, 225,
+ /* 60 */ 42, 41, 979, 165, 40, 39, 38, 43, 44, 890,
+ /* 70 */ 47, 48, 193, 970, 198, 37, 165, 46, 242, 51,
+ /* 80 */ 49, 53, 50, 871, 128, 192, 970, 42, 41, 258,
+ /* 90 */ 521, 40, 39, 38, 290, 289, 115, 239, 522, 71,
+ /* 100 */ 77, 43, 45, 128, 47, 48, 205, 66, 198, 37,
+ /* 110 */ 28, 46, 242, 51, 49, 53, 50, 40, 39, 38,
+ /* 120 */ 921, 42, 41, 278, 65, 40, 39, 38, 865, 678,
+ /* 130 */ 287, 871, 859, 210, 478, 479, 480, 481, 482, 483,
+ /* 140 */ 484, 485, 486, 487, 488, 489, 292, 72, 201, 215,
+ /* 150 */ 44, 868, 47, 48, 856, 871, 198, 37, 209, 46,
+ /* 160 */ 242, 51, 49, 53, 50, 869, 922, 204, 237, 42,
+ /* 170 */ 41, 96, 163, 40, 39, 38, 278, 21, 256, 286,
+ /* 180 */ 285, 255, 254, 253, 284, 252, 283, 282, 281, 251,
+ /* 190 */ 280, 279, 835, 594, 823, 824, 825, 826, 827, 828,
+ /* 200 */ 829, 830, 831, 832, 833, 834, 836, 837, 47, 48,
+ /* 210 */ 87, 86, 198, 37, 28, 46, 242, 51, 49, 53,
+ /* 220 */ 50, 268, 267, 16, 211, 42, 41, 265, 264, 40,
+ /* 230 */ 39, 38, 197, 691, 28, 634, 682, 207, 685, 174,
+ /* 240 */ 688, 22, 42, 41, 73, 175, 40, 39, 38, 34,
+ /* 250 */ 108, 107, 173, 197, 691, 867, 67, 682, 28, 685,
+ /* 260 */ 21, 688, 286, 285, 194, 195, 169, 284, 241, 283,
+ /* 270 */ 282, 281, 202, 280, 279, 868, 618, 28, 60, 615,
+ /* 280 */ 22, 616, 631, 617, 218, 194, 195, 123, 34, 23,
+ /* 290 */ 841, 222, 221, 839, 840, 857, 261, 61, 842, 868,
+ /* 300 */ 844, 845, 843, 208, 846, 847, 260, 212, 213, 224,
+ /* 310 */ 638, 51, 49, 53, 50, 262, 181, 28, 868, 42,
+ /* 320 */ 41, 94, 98, 40, 39, 38, 28, 88, 103, 106,
+ /* 330 */ 97, 10, 52, 3, 142, 76, 100, 138, 680, 31,
+ /* 340 */ 83, 79, 82, 158, 154, 690, 230, 659, 660, 156,
+ /* 350 */ 111, 110, 109, 52, 785, 266, 777, 152, 868, 152,
+ /* 360 */ 689, 626, 121, 684, 270, 687, 690, 868, 196, 227,
+ /* 370 */ 34, 228, 258, 646, 681, 29, 683, 125, 686, 650,
+ /* 380 */ 651, 689, 619, 56, 18, 711, 692, 243, 966, 17,
+ /* 390 */ 17, 57, 604, 245, 606, 247, 29, 29, 56, 75,
+ /* 400 */ 605, 63, 26, 593, 56, 248, 12, 11, 93, 92,
+ /* 410 */ 4, 965, 58, 14, 13, 622, 620, 623, 621, 105,
+ /* 420 */ 104, 120, 118, 932, 964, 182, 183, 167, 168, 170,
+ /* 430 */ 164, 171, 172, 178, 179, 177, 162, 176, 166, 870,
+ /* 440 */ 931, 199, 928, 927, 884, 200, 269, 122, 892, 35,
+ /* 450 */ 899, 901, 124, 139, 864, 914, 140, 141, 913, 137,
+ /* 460 */ 787, 250, 160, 32, 259, 34, 784, 984, 84, 983,
+ /* 470 */ 981, 143, 226, 119, 231, 263, 978, 90, 977, 975,
+ /* 480 */ 694, 144, 645, 805, 189, 235, 62, 881, 129, 33,
+ /* 490 */ 59, 240, 30, 54, 161, 132, 130, 238, 236, 131,
+ /* 500 */ 774, 99, 772, 133, 234, 134, 232, 101, 36, 102,
+ /* 510 */ 770, 95, 769, 271, 272, 214, 153, 767, 766, 765,
+ /* 520 */ 764, 763, 273, 155, 157, 760, 758, 756, 754, 752,
+ /* 530 */ 159, 274, 229, 68, 69, 915, 275, 276, 277, 184,
+ /* 540 */ 206, 249, 730, 185, 180, 288, 80, 216, 217, 768,
+ /* 550 */ 729, 220, 219, 112, 728, 762, 147, 761, 146, 806,
+ /* 560 */ 145, 148, 149, 151, 150, 113, 114, 753, 1, 716,
+ /* 570 */ 2, 223, 227, 628, 64, 6, 866, 244, 70, 647,
+ /* 580 */ 126, 135, 136, 191, 24, 233, 7, 652, 127, 8,
+ /* 590 */ 693, 5, 25, 9, 19, 246, 20, 695, 78, 76,
+ /* 600 */ 562, 558, 556, 555, 554, 551, 525, 257, 81, 85,
+ /* 610 */ 29, 55, 596, 595, 89, 91, 592, 546, 544, 536,
+ /* 620 */ 542, 538, 540, 534, 532, 564, 563, 561, 560, 559,
+ /* 630 */ 557, 553, 552, 56, 523, 493, 491, 734, 733, 733,
+ /* 640 */ 733, 733, 733, 733, 733, 733, 733, 733, 733, 116,
+ /* 650 */ 117,
};
static const YYCODETYPE yy_lookahead[] = {
- /* 0 */ 213, 1, 210, 211, 1, 212, 213, 5, 213, 9,
- /* 10 */ 270, 270, 9, 13, 14, 217, 16, 17, 220, 270,
- /* 20 */ 20, 21, 213, 1, 24, 25, 26, 27, 28, 280,
- /* 30 */ 270, 9, 230, 33, 34, 33, 34, 37, 38, 39,
+ /* 0 */ 218, 1, 210, 211, 1, 270, 212, 213, 213, 9,
+ /* 10 */ 230, 213, 9, 13, 14, 280, 16, 17, 270, 217,
+ /* 20 */ 20, 21, 220, 23, 24, 25, 26, 27, 28, 247,
+ /* 30 */ 248, 249, 250, 33, 34, 255, 213, 37, 38, 39,
/* 40 */ 13, 14, 253, 16, 17, 253, 251, 20, 21, 254,
- /* 50 */ 213, 24, 25, 26, 27, 28, 267, 255, 271, 267,
- /* 60 */ 33, 34, 213, 213, 37, 38, 39, 45, 46, 47,
- /* 70 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- /* 80 */ 13, 14, 60, 16, 17, 276, 77, 20, 21, 270,
- /* 90 */ 37, 24, 25, 26, 27, 28, 256, 74, 279, 280,
- /* 100 */ 33, 34, 79, 270, 37, 38, 39, 107, 268, 272,
- /* 110 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
- /* 120 */ 96, 97, 98, 99, 100, 276, 276, 278, 229, 102,
- /* 130 */ 231, 232, 233, 234, 235, 236, 237, 238, 239, 240,
- /* 140 */ 241, 242, 243, 244, 14, 230, 16, 17, 79, 213,
- /* 150 */ 20, 21, 270, 270, 24, 25, 26, 27, 28, 213,
- /* 160 */ 230, 279, 280, 33, 34, 112, 131, 37, 38, 39,
- /* 170 */ 255, 16, 17, 138, 139, 20, 21, 270, 218, 24,
- /* 180 */ 25, 26, 27, 28, 44, 255, 66, 251, 33, 34,
- /* 190 */ 254, 230, 37, 38, 39, 1, 2, 101, 252, 5,
- /* 200 */ 60, 7, 66, 9, 270, 109, 66, 247, 248, 249,
- /* 210 */ 250, 71, 72, 73, 1, 2, 255, 270, 5, 218,
- /* 220 */ 7, 217, 9, 86, 220, 88, 89, 33, 34, 270,
- /* 230 */ 93, 37, 95, 96, 97, 2, 99, 100, 5, 101,
- /* 240 */ 7, 101, 9, 105, 270, 107, 33, 34, 218, 109,
- /* 250 */ 229, 250, 132, 232, 233, 135, 136, 270, 237, 0,
- /* 260 */ 239, 240, 241, 213, 243, 244, 33, 34, 132, 270,
- /* 270 */ 130, 135, 25, 26, 27, 28, 213, 137, 248, 246,
- /* 280 */ 33, 34, 61, 62, 37, 38, 39, 213, 67, 68,
- /* 290 */ 69, 70, 61, 62, 213, 101, 213, 76, 67, 68,
- /* 300 */ 69, 70, 213, 61, 62, 101, 213, 213, 114, 67,
- /* 310 */ 68, 69, 70, 109, 101, 33, 34, 254, 102, 37,
- /* 320 */ 38, 39, 106, 129, 274, 251, 276, 114, 254, 37,
- /* 330 */ 38, 39, 251, 59, 251, 254, 77, 254, 1, 245,
- /* 340 */ 251, 108, 129, 254, 251, 120, 121, 254, 63, 64,
- /* 350 */ 65, 217, 106, 102, 220, 5, 102, 7, 102, 113,
- /* 360 */ 106, 110, 106, 102, 102, 102, 15, 106, 106, 106,
- /* 370 */ 5, 106, 7, 101, 37, 101, 104, 106, 102, 102,
- /* 380 */ 102, 102, 106, 106, 106, 106, 102, 270, 133, 134,
- /* 390 */ 106, 270, 127, 133, 134, 270, 125, 133, 134, 101,
- /* 400 */ 255, 103, 133, 134, 5, 5, 7, 7, 74, 75,
- /* 410 */ 61, 62, 270, 270, 270, 255, 246, 246, 246, 246,
- /* 420 */ 246, 246, 253, 213, 213, 269, 277, 213, 213, 277,
- /* 430 */ 213, 213, 213, 257, 109, 213, 213, 213, 253, 213,
- /* 440 */ 213, 213, 213, 213, 59, 213, 114, 213, 213, 213,
- /* 450 */ 213, 213, 213, 213, 213, 273, 213, 213, 213, 108,
- /* 460 */ 273, 213, 213, 213, 273, 213, 213, 124, 266, 264,
- /* 470 */ 213, 213, 126, 265, 118, 123, 122, 117, 116, 213,
- /* 480 */ 213, 213, 262, 213, 115, 128, 213, 85, 213, 84,
- /* 490 */ 49, 81, 83, 213, 213, 53, 213, 82, 80, 213,
- /* 500 */ 213, 213, 213, 213, 213, 77, 213, 213, 213, 5,
- /* 510 */ 214, 214, 140, 214, 214, 5, 5, 140, 5, 5,
- /* 520 */ 214, 214, 87, 214, 131, 214, 214, 110, 218, 102,
- /* 530 */ 111, 214, 101, 104, 215, 215, 106, 214, 222, 102,
- /* 540 */ 226, 228, 227, 225, 223, 221, 224, 214, 219, 101,
- /* 550 */ 215, 1, 214, 101, 101, 216, 102, 253, 119, 119,
- /* 560 */ 102, 106, 259, 261, 263, 260, 258, 106, 101, 101,
- /* 570 */ 108, 101, 101, 104, 74, 9, 5, 105, 5, 5,
- /* 580 */ 5, 5, 15, 78, 134, 106, 74, 5, 16, 5,
- /* 590 */ 134, 134, 102, 5, 5, 5, 5, 5, 5, 5,
- /* 600 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 106,
- /* 610 */ 78, 59, 58, 0, 281, 281, 281, 281, 281, 281,
- /* 620 */ 281, 281, 281, 281, 281, 21, 21, 281, 281, 281,
- /* 630 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
- /* 640 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
- /* 650 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
+ /* 50 */ 23, 24, 25, 26, 27, 28, 267, 213, 230, 267,
+ /* 60 */ 33, 34, 255, 270, 37, 38, 39, 13, 14, 271,
+ /* 70 */ 16, 17, 279, 280, 20, 21, 270, 23, 24, 25,
+ /* 80 */ 26, 27, 28, 255, 213, 279, 280, 33, 34, 77,
+ /* 90 */ 1, 37, 38, 39, 63, 64, 65, 274, 9, 276,
+ /* 100 */ 218, 13, 14, 213, 16, 17, 230, 107, 20, 21,
+ /* 110 */ 213, 23, 24, 25, 26, 27, 28, 37, 38, 39,
+ /* 120 */ 276, 33, 34, 79, 218, 37, 38, 39, 213, 102,
+ /* 130 */ 230, 255, 250, 213, 45, 46, 47, 48, 49, 50,
+ /* 140 */ 51, 52, 53, 54, 55, 56, 57, 276, 251, 60,
+ /* 150 */ 14, 254, 16, 17, 248, 255, 20, 21, 66, 23,
+ /* 160 */ 24, 25, 26, 27, 28, 245, 276, 252, 278, 33,
+ /* 170 */ 34, 74, 270, 37, 38, 39, 79, 86, 87, 88,
+ /* 180 */ 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ /* 190 */ 99, 100, 229, 5, 231, 232, 233, 234, 235, 236,
+ /* 200 */ 237, 238, 239, 240, 241, 242, 243, 244, 16, 17,
+ /* 210 */ 133, 134, 20, 21, 213, 23, 24, 25, 26, 27,
+ /* 220 */ 28, 33, 34, 44, 132, 33, 34, 135, 136, 37,
+ /* 230 */ 38, 39, 1, 2, 213, 37, 5, 66, 7, 60,
+ /* 240 */ 9, 101, 33, 34, 256, 66, 37, 38, 39, 109,
+ /* 250 */ 71, 72, 73, 1, 2, 254, 268, 5, 213, 7,
+ /* 260 */ 86, 9, 88, 89, 33, 34, 270, 93, 37, 95,
+ /* 270 */ 96, 97, 251, 99, 100, 254, 2, 213, 106, 5,
+ /* 280 */ 101, 7, 106, 9, 131, 33, 34, 213, 109, 113,
+ /* 290 */ 229, 138, 139, 232, 233, 0, 251, 125, 237, 254,
+ /* 300 */ 239, 240, 241, 132, 243, 244, 135, 33, 34, 130,
+ /* 310 */ 112, 25, 26, 27, 28, 251, 137, 213, 254, 33,
+ /* 320 */ 34, 61, 62, 37, 38, 39, 213, 67, 68, 69,
+ /* 330 */ 70, 101, 101, 61, 62, 105, 76, 107, 1, 67,
+ /* 340 */ 68, 69, 70, 61, 62, 114, 272, 120, 121, 67,
+ /* 350 */ 68, 69, 70, 101, 217, 251, 217, 220, 254, 220,
+ /* 360 */ 129, 102, 101, 5, 251, 7, 114, 254, 59, 110,
+ /* 370 */ 109, 102, 77, 102, 37, 106, 5, 106, 7, 102,
+ /* 380 */ 102, 129, 108, 106, 106, 102, 102, 15, 270, 106,
+ /* 390 */ 106, 106, 102, 102, 102, 102, 106, 106, 106, 106,
+ /* 400 */ 102, 101, 101, 103, 106, 104, 133, 134, 133, 134,
+ /* 410 */ 101, 270, 127, 133, 134, 5, 5, 7, 7, 74,
+ /* 420 */ 75, 61, 62, 246, 270, 270, 270, 270, 270, 270,
+ /* 430 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 255,
+ /* 440 */ 246, 246, 246, 246, 253, 246, 246, 213, 213, 269,
+ /* 450 */ 213, 213, 213, 213, 213, 277, 213, 213, 277, 257,
+ /* 460 */ 213, 213, 213, 213, 213, 109, 213, 213, 213, 213,
+ /* 470 */ 213, 213, 253, 59, 273, 213, 213, 213, 213, 213,
+ /* 480 */ 108, 213, 114, 213, 273, 273, 124, 266, 265, 213,
+ /* 490 */ 126, 118, 213, 123, 213, 262, 264, 122, 117, 263,
+ /* 500 */ 213, 213, 213, 261, 116, 260, 115, 213, 128, 213,
+ /* 510 */ 213, 85, 213, 84, 49, 213, 213, 213, 213, 213,
+ /* 520 */ 213, 213, 81, 213, 213, 213, 213, 213, 213, 213,
+ /* 530 */ 213, 83, 214, 214, 214, 214, 53, 82, 80, 214,
+ /* 540 */ 214, 214, 5, 214, 214, 77, 218, 140, 5, 214,
+ /* 550 */ 5, 5, 140, 215, 5, 214, 222, 214, 226, 228,
+ /* 560 */ 227, 225, 223, 221, 224, 215, 215, 214, 219, 87,
+ /* 570 */ 216, 131, 110, 102, 111, 101, 253, 104, 106, 102,
+ /* 580 */ 101, 259, 258, 1, 106, 101, 119, 102, 101, 119,
+ /* 590 */ 102, 101, 106, 101, 101, 104, 101, 108, 74, 105,
+ /* 600 */ 9, 5, 5, 5, 5, 5, 78, 15, 74, 134,
+ /* 610 */ 106, 16, 5, 5, 134, 134, 102, 5, 5, 5,
+ /* 620 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ /* 630 */ 5, 5, 5, 106, 78, 59, 58, 0, 281, 281,
+ /* 640 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 21,
+ /* 650 */ 21, 281, 281, 281, 281, 281, 281, 281, 281, 281,
/* 660 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
/* 670 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
/* 680 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
@@ -364,93 +367,99 @@ static const YYCODETYPE yy_lookahead[] = {
/* 800 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
/* 810 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
/* 820 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
- /* 830 */ 281, 281, 281, 281, 281, 281, 281,
+ /* 830 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
+ /* 840 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
+ /* 850 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
+ /* 860 */ 281,
};
-#define YY_SHIFT_COUNT (289)
+#define YY_SHIFT_COUNT (293)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (613)
+#define YY_SHIFT_MAX (637)
static const unsigned short int yy_shift_ofst[] = {
- /* 0 */ 140, 24, 137, 9, 194, 213, 3, 3, 3, 3,
- /* 10 */ 3, 3, 3, 3, 3, 0, 22, 213, 233, 233,
- /* 20 */ 233, 233, 96, 3, 3, 3, 3, 259, 3, 3,
- /* 30 */ 23, 9, 69, 69, 627, 213, 213, 213, 213, 213,
- /* 40 */ 213, 213, 213, 213, 213, 213, 213, 213, 213, 213,
- /* 50 */ 213, 213, 213, 233, 233, 2, 2, 2, 2, 2,
- /* 60 */ 2, 2, 204, 3, 3, 53, 3, 3, 3, 225,
- /* 70 */ 225, 246, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0 */ 179, 91, 174, 12, 231, 252, 3, 3, 3, 3,
+ /* 10 */ 3, 3, 3, 3, 3, 0, 89, 252, 274, 274,
+ /* 20 */ 274, 274, 140, 3, 3, 3, 3, 295, 3, 3,
+ /* 30 */ 97, 12, 44, 44, 651, 252, 252, 252, 252, 252,
+ /* 40 */ 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
+ /* 50 */ 252, 252, 252, 252, 252, 274, 274, 188, 188, 188,
+ /* 60 */ 188, 188, 188, 188, 261, 3, 3, 198, 3, 3,
+ /* 70 */ 3, 227, 227, 176, 3, 3, 3, 3, 3, 3,
/* 80 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
/* 90 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
/* 100 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- /* 110 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 325,
- /* 120 */ 385, 385, 385, 332, 332, 332, 385, 343, 346, 352,
- /* 130 */ 356, 354, 360, 362, 369, 357, 325, 385, 385, 385,
- /* 140 */ 9, 385, 385, 402, 405, 441, 410, 409, 442, 415,
- /* 150 */ 418, 385, 428, 385, 428, 385, 428, 385, 627, 627,
- /* 160 */ 27, 67, 67, 67, 130, 155, 247, 247, 247, 221,
- /* 170 */ 231, 242, 282, 282, 282, 282, 120, 35, 292, 292,
- /* 180 */ 138, 136, 285, 251, 216, 254, 256, 261, 262, 263,
- /* 190 */ 350, 365, 337, 274, 351, 265, 271, 276, 277, 278,
- /* 200 */ 279, 284, 272, 255, 260, 264, 298, 269, 399, 400,
- /* 210 */ 334, 349, 504, 372, 510, 511, 377, 513, 514, 435,
- /* 220 */ 393, 417, 427, 419, 429, 431, 430, 437, 448, 550,
- /* 230 */ 452, 454, 453, 455, 439, 461, 440, 458, 467, 462,
- /* 240 */ 468, 429, 470, 469, 471, 472, 500, 566, 571, 573,
- /* 250 */ 574, 575, 576, 505, 567, 512, 450, 479, 479, 572,
- /* 260 */ 456, 457, 479, 582, 584, 490, 479, 588, 589, 590,
- /* 270 */ 591, 592, 593, 594, 595, 596, 597, 598, 599, 600,
- /* 280 */ 601, 602, 603, 503, 532, 604, 605, 552, 554, 613,
+ /* 110 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 120 */ 3, 356, 414, 414, 414, 368, 368, 368, 414, 362,
+ /* 130 */ 364, 370, 373, 375, 381, 388, 391, 380, 356, 414,
+ /* 140 */ 414, 414, 12, 414, 414, 426, 429, 465, 441, 448,
+ /* 150 */ 483, 455, 458, 414, 468, 414, 468, 414, 468, 414,
+ /* 160 */ 651, 651, 27, 54, 88, 54, 54, 136, 192, 286,
+ /* 170 */ 286, 286, 286, 260, 272, 282, 209, 209, 209, 209,
+ /* 180 */ 92, 153, 80, 80, 230, 171, 31, 259, 269, 271,
+ /* 190 */ 277, 278, 283, 284, 358, 371, 337, 309, 372, 285,
+ /* 200 */ 172, 290, 291, 292, 293, 298, 301, 77, 273, 275,
+ /* 210 */ 300, 280, 410, 411, 345, 360, 537, 407, 543, 545,
+ /* 220 */ 412, 546, 549, 482, 440, 462, 471, 463, 473, 474,
+ /* 230 */ 472, 477, 479, 582, 484, 485, 487, 478, 467, 486,
+ /* 240 */ 470, 488, 490, 489, 492, 473, 493, 491, 495, 494,
+ /* 250 */ 524, 591, 596, 597, 598, 599, 600, 528, 592, 534,
+ /* 260 */ 475, 504, 504, 595, 480, 481, 504, 607, 608, 514,
+ /* 270 */ 504, 612, 613, 614, 615, 616, 617, 618, 619, 620,
+ /* 280 */ 621, 622, 623, 624, 625, 626, 627, 527, 556, 628,
+ /* 290 */ 629, 576, 578, 637,
};
-#define YY_REDUCE_COUNT (159)
-#define YY_REDUCE_MIN (-260)
-#define YY_REDUCE_MAX (339)
+#define YY_REDUCE_COUNT (161)
+#define YY_REDUCE_MIN (-265)
+#define YY_REDUCE_MAX (354)
static const short yy_reduce_ofst[] = {
- /* 0 */ -208, -101, 21, -40, -181, -118, -205, -151, 50, -64,
- /* 10 */ 74, 81, 83, 89, 93, -213, -207, -251, -198, -85,
- /* 20 */ -70, -39, -211, -163, -191, -150, -54, 1, 94, 63,
- /* 30 */ -202, 30, 4, 134, -160, -260, -259, -240, -167, -117,
- /* 40 */ -93, -66, -53, -41, -26, -13, -1, 117, 121, 125,
- /* 50 */ 142, 143, 144, 145, 160, 33, 170, 171, 172, 173,
- /* 60 */ 174, 175, 169, 210, 211, 156, 214, 215, 217, 149,
- /* 70 */ 152, 176, 218, 219, 222, 223, 224, 226, 227, 228,
- /* 80 */ 229, 230, 232, 234, 235, 236, 237, 238, 239, 240,
- /* 90 */ 241, 243, 244, 245, 248, 249, 250, 252, 253, 257,
- /* 100 */ 258, 266, 267, 268, 270, 273, 275, 280, 281, 283,
- /* 110 */ 286, 287, 288, 289, 290, 291, 293, 294, 295, 185,
- /* 120 */ 296, 297, 299, 182, 187, 191, 300, 202, 208, 205,
- /* 130 */ 301, 220, 302, 305, 303, 308, 304, 306, 307, 309,
- /* 140 */ 310, 311, 312, 313, 315, 314, 316, 318, 321, 322,
- /* 150 */ 324, 317, 319, 323, 320, 333, 335, 338, 329, 339,
+ /* 0 */ -208, -37, 61, -218, -207, -194, -205, -110, -177, -103,
+ /* 10 */ 21, 45, 64, 104, 113, -202, -206, -265, -220, -172,
+ /* 20 */ -124, -100, -211, 74, -156, -129, -85, -118, -80, 1,
+ /* 30 */ -198, -94, 137, 139, -12, -252, -98, -4, 118, 141,
+ /* 40 */ 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+ /* 50 */ 164, 165, 166, 167, 168, -193, 184, 177, 194, 195,
+ /* 60 */ 196, 197, 199, 200, 191, 234, 235, 180, 237, 238,
+ /* 70 */ 239, 178, 181, 202, 240, 241, 243, 244, 247, 248,
+ /* 80 */ 249, 250, 251, 253, 254, 255, 256, 257, 258, 262,
+ /* 90 */ 263, 264, 265, 266, 268, 270, 276, 279, 281, 287,
+ /* 100 */ 288, 289, 294, 296, 297, 299, 302, 303, 304, 305,
+ /* 110 */ 306, 307, 308, 310, 311, 312, 313, 314, 315, 316,
+ /* 120 */ 317, 219, 318, 319, 320, 201, 211, 212, 321, 221,
+ /* 130 */ 223, 232, 236, 233, 242, 245, 322, 324, 323, 325,
+ /* 140 */ 326, 327, 328, 329, 330, 331, 333, 332, 334, 336,
+ /* 150 */ 339, 340, 342, 335, 338, 341, 350, 343, 351, 353,
+ /* 160 */ 349, 354,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 726, 781, 770, 778, 966, 966, 726, 726, 726, 726,
- /* 10 */ 726, 726, 726, 726, 726, 889, 744, 966, 726, 726,
- /* 20 */ 726, 726, 726, 726, 726, 726, 726, 778, 726, 726,
- /* 30 */ 783, 778, 783, 783, 884, 726, 726, 726, 726, 726,
- /* 40 */ 726, 726, 726, 726, 726, 726, 726, 726, 726, 726,
- /* 50 */ 726, 726, 726, 726, 726, 726, 726, 726, 726, 726,
- /* 60 */ 726, 726, 726, 726, 726, 891, 893, 895, 726, 913,
- /* 70 */ 913, 882, 726, 726, 726, 726, 726, 726, 726, 726,
- /* 80 */ 726, 726, 726, 726, 726, 726, 726, 726, 726, 726,
- /* 90 */ 726, 726, 726, 726, 726, 726, 726, 768, 726, 766,
- /* 100 */ 726, 726, 726, 726, 726, 726, 726, 726, 726, 726,
- /* 110 */ 726, 726, 726, 754, 726, 726, 726, 726, 726, 726,
- /* 120 */ 746, 746, 746, 726, 726, 726, 746, 920, 924, 918,
- /* 130 */ 906, 914, 905, 901, 900, 928, 726, 746, 746, 746,
- /* 140 */ 778, 746, 746, 799, 797, 795, 787, 793, 789, 791,
- /* 150 */ 785, 746, 776, 746, 776, 746, 776, 746, 817, 833,
- /* 160 */ 726, 929, 965, 919, 955, 954, 961, 953, 952, 726,
- /* 170 */ 726, 726, 948, 949, 951, 950, 726, 726, 957, 956,
- /* 180 */ 726, 726, 726, 726, 726, 726, 726, 726, 726, 726,
- /* 190 */ 726, 726, 726, 931, 726, 925, 921, 726, 726, 726,
- /* 200 */ 726, 726, 726, 726, 726, 726, 843, 726, 726, 726,
- /* 210 */ 726, 726, 726, 726, 726, 726, 726, 726, 726, 726,
- /* 220 */ 726, 881, 726, 726, 726, 726, 892, 726, 726, 726,
- /* 230 */ 726, 726, 726, 915, 726, 907, 726, 726, 726, 726,
- /* 240 */ 726, 855, 726, 726, 726, 726, 726, 726, 726, 726,
- /* 250 */ 726, 726, 726, 726, 726, 726, 726, 976, 974, 726,
- /* 260 */ 726, 726, 970, 726, 726, 726, 968, 726, 726, 726,
- /* 270 */ 726, 726, 726, 726, 726, 726, 726, 726, 726, 726,
- /* 280 */ 726, 726, 726, 802, 726, 752, 750, 726, 742, 726,
+ /* 0 */ 731, 786, 775, 783, 972, 972, 731, 731, 731, 731,
+ /* 10 */ 731, 731, 731, 731, 731, 894, 749, 972, 731, 731,
+ /* 20 */ 731, 731, 731, 731, 731, 731, 731, 783, 731, 731,
+ /* 30 */ 788, 783, 788, 788, 889, 731, 731, 731, 731, 731,
+ /* 40 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 731,
+ /* 50 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 731,
+ /* 60 */ 731, 731, 731, 731, 731, 731, 731, 896, 898, 900,
+ /* 70 */ 731, 918, 918, 887, 731, 731, 731, 731, 731, 731,
+ /* 80 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 731,
+ /* 90 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 773,
+ /* 100 */ 731, 771, 731, 731, 731, 731, 731, 731, 731, 731,
+ /* 110 */ 731, 731, 731, 731, 731, 759, 731, 731, 731, 731,
+ /* 120 */ 731, 731, 751, 751, 751, 731, 731, 731, 751, 925,
+ /* 130 */ 929, 923, 911, 919, 910, 906, 905, 933, 731, 751,
+ /* 140 */ 751, 751, 783, 751, 751, 804, 802, 800, 792, 798,
+ /* 150 */ 794, 796, 790, 751, 781, 751, 781, 751, 781, 751,
+ /* 160 */ 822, 838, 731, 934, 731, 971, 924, 961, 960, 967,
+ /* 170 */ 959, 958, 957, 731, 731, 731, 953, 954, 956, 955,
+ /* 180 */ 731, 731, 963, 962, 731, 731, 731, 731, 731, 731,
+ /* 190 */ 731, 731, 731, 731, 731, 731, 731, 936, 731, 930,
+ /* 200 */ 926, 731, 731, 731, 731, 731, 731, 731, 731, 731,
+ /* 210 */ 848, 731, 731, 731, 731, 731, 731, 731, 731, 731,
+ /* 220 */ 731, 731, 731, 731, 731, 886, 731, 731, 731, 731,
+ /* 230 */ 897, 731, 731, 731, 731, 731, 731, 920, 731, 912,
+ /* 240 */ 731, 731, 731, 731, 731, 860, 731, 731, 731, 731,
+ /* 250 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 731,
+ /* 260 */ 731, 982, 980, 731, 731, 731, 976, 731, 731, 731,
+ /* 270 */ 974, 731, 731, 731, 731, 731, 731, 731, 731, 731,
+ /* 280 */ 731, 731, 731, 731, 731, 731, 731, 807, 731, 757,
+ /* 290 */ 755, 731, 747, 731,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -1280,34 +1289,35 @@ static const char *const yyRuleName[] = {
/* 222 */ "expr ::= expr GE expr",
/* 223 */ "expr ::= expr NE expr",
/* 224 */ "expr ::= expr EQ expr",
- /* 225 */ "expr ::= expr AND expr",
- /* 226 */ "expr ::= expr OR expr",
- /* 227 */ "expr ::= expr PLUS expr",
- /* 228 */ "expr ::= expr MINUS expr",
- /* 229 */ "expr ::= expr STAR expr",
- /* 230 */ "expr ::= expr SLASH expr",
- /* 231 */ "expr ::= expr REM expr",
- /* 232 */ "expr ::= expr LIKE expr",
- /* 233 */ "expr ::= expr IN LP exprlist RP",
- /* 234 */ "exprlist ::= exprlist COMMA expritem",
- /* 235 */ "exprlist ::= expritem",
- /* 236 */ "expritem ::= expr",
- /* 237 */ "expritem ::=",
- /* 238 */ "cmd ::= RESET QUERY CACHE",
- /* 239 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist",
- /* 240 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids",
- /* 241 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist",
- /* 242 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids",
- /* 243 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids",
- /* 244 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem",
- /* 245 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist",
- /* 246 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids",
- /* 247 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist",
- /* 248 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids",
- /* 249 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids",
- /* 250 */ "cmd ::= KILL CONNECTION INTEGER",
- /* 251 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER",
- /* 252 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER",
+ /* 225 */ "expr ::= expr BETWEEN expr AND expr",
+ /* 226 */ "expr ::= expr AND expr",
+ /* 227 */ "expr ::= expr OR expr",
+ /* 228 */ "expr ::= expr PLUS expr",
+ /* 229 */ "expr ::= expr MINUS expr",
+ /* 230 */ "expr ::= expr STAR expr",
+ /* 231 */ "expr ::= expr SLASH expr",
+ /* 232 */ "expr ::= expr REM expr",
+ /* 233 */ "expr ::= expr LIKE expr",
+ /* 234 */ "expr ::= expr IN LP exprlist RP",
+ /* 235 */ "exprlist ::= exprlist COMMA expritem",
+ /* 236 */ "exprlist ::= expritem",
+ /* 237 */ "expritem ::= expr",
+ /* 238 */ "expritem ::=",
+ /* 239 */ "cmd ::= RESET QUERY CACHE",
+ /* 240 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist",
+ /* 241 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids",
+ /* 242 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist",
+ /* 243 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids",
+ /* 244 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids",
+ /* 245 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem",
+ /* 246 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist",
+ /* 247 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids",
+ /* 248 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist",
+ /* 249 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids",
+ /* 250 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids",
+ /* 251 */ "cmd ::= KILL CONNECTION INTEGER",
+ /* 252 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER",
+ /* 253 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER",
};
#endif /* NDEBUG */
@@ -1992,34 +2002,35 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
270, /* (222) expr ::= expr GE expr */
270, /* (223) expr ::= expr NE expr */
270, /* (224) expr ::= expr EQ expr */
- 270, /* (225) expr ::= expr AND expr */
- 270, /* (226) expr ::= expr OR expr */
- 270, /* (227) expr ::= expr PLUS expr */
- 270, /* (228) expr ::= expr MINUS expr */
- 270, /* (229) expr ::= expr STAR expr */
- 270, /* (230) expr ::= expr SLASH expr */
- 270, /* (231) expr ::= expr REM expr */
- 270, /* (232) expr ::= expr LIKE expr */
- 270, /* (233) expr ::= expr IN LP exprlist RP */
- 279, /* (234) exprlist ::= exprlist COMMA expritem */
- 279, /* (235) exprlist ::= expritem */
- 280, /* (236) expritem ::= expr */
- 280, /* (237) expritem ::= */
- 211, /* (238) cmd ::= RESET QUERY CACHE */
- 211, /* (239) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
- 211, /* (240) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
- 211, /* (241) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
- 211, /* (242) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
- 211, /* (243) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
- 211, /* (244) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
- 211, /* (245) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
- 211, /* (246) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
- 211, /* (247) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
- 211, /* (248) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
- 211, /* (249) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
- 211, /* (250) cmd ::= KILL CONNECTION INTEGER */
- 211, /* (251) cmd ::= KILL STREAM INTEGER COLON INTEGER */
- 211, /* (252) cmd ::= KILL QUERY INTEGER COLON INTEGER */
+ 270, /* (225) expr ::= expr BETWEEN expr AND expr */
+ 270, /* (226) expr ::= expr AND expr */
+ 270, /* (227) expr ::= expr OR expr */
+ 270, /* (228) expr ::= expr PLUS expr */
+ 270, /* (229) expr ::= expr MINUS expr */
+ 270, /* (230) expr ::= expr STAR expr */
+ 270, /* (231) expr ::= expr SLASH expr */
+ 270, /* (232) expr ::= expr REM expr */
+ 270, /* (233) expr ::= expr LIKE expr */
+ 270, /* (234) expr ::= expr IN LP exprlist RP */
+ 279, /* (235) exprlist ::= exprlist COMMA expritem */
+ 279, /* (236) exprlist ::= expritem */
+ 280, /* (237) expritem ::= expr */
+ 280, /* (238) expritem ::= */
+ 211, /* (239) cmd ::= RESET QUERY CACHE */
+ 211, /* (240) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
+ 211, /* (241) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
+ 211, /* (242) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+ 211, /* (243) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+ 211, /* (244) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+ 211, /* (245) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
+ 211, /* (246) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
+ 211, /* (247) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
+ 211, /* (248) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
+ 211, /* (249) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
+ 211, /* (250) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
+ 211, /* (251) cmd ::= KILL CONNECTION INTEGER */
+ 211, /* (252) cmd ::= KILL STREAM INTEGER COLON INTEGER */
+ 211, /* (253) cmd ::= KILL QUERY INTEGER COLON INTEGER */
};
/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number
@@ -2250,34 +2261,35 @@ static const signed char yyRuleInfoNRhs[] = {
-3, /* (222) expr ::= expr GE expr */
-3, /* (223) expr ::= expr NE expr */
-3, /* (224) expr ::= expr EQ expr */
- -3, /* (225) expr ::= expr AND expr */
- -3, /* (226) expr ::= expr OR expr */
- -3, /* (227) expr ::= expr PLUS expr */
- -3, /* (228) expr ::= expr MINUS expr */
- -3, /* (229) expr ::= expr STAR expr */
- -3, /* (230) expr ::= expr SLASH expr */
- -3, /* (231) expr ::= expr REM expr */
- -3, /* (232) expr ::= expr LIKE expr */
- -5, /* (233) expr ::= expr IN LP exprlist RP */
- -3, /* (234) exprlist ::= exprlist COMMA expritem */
- -1, /* (235) exprlist ::= expritem */
- -1, /* (236) expritem ::= expr */
- 0, /* (237) expritem ::= */
- -3, /* (238) cmd ::= RESET QUERY CACHE */
- -7, /* (239) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
- -7, /* (240) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
- -7, /* (241) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
- -7, /* (242) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
- -8, /* (243) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
- -9, /* (244) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
- -7, /* (245) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
- -7, /* (246) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
- -7, /* (247) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
- -7, /* (248) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
- -8, /* (249) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
- -3, /* (250) cmd ::= KILL CONNECTION INTEGER */
- -5, /* (251) cmd ::= KILL STREAM INTEGER COLON INTEGER */
- -5, /* (252) cmd ::= KILL QUERY INTEGER COLON INTEGER */
+ -5, /* (225) expr ::= expr BETWEEN expr AND expr */
+ -3, /* (226) expr ::= expr AND expr */
+ -3, /* (227) expr ::= expr OR expr */
+ -3, /* (228) expr ::= expr PLUS expr */
+ -3, /* (229) expr ::= expr MINUS expr */
+ -3, /* (230) expr ::= expr STAR expr */
+ -3, /* (231) expr ::= expr SLASH expr */
+ -3, /* (232) expr ::= expr REM expr */
+ -3, /* (233) expr ::= expr LIKE expr */
+ -5, /* (234) expr ::= expr IN LP exprlist RP */
+ -3, /* (235) exprlist ::= exprlist COMMA expritem */
+ -1, /* (236) exprlist ::= expritem */
+ -1, /* (237) expritem ::= expr */
+ 0, /* (238) expritem ::= */
+ -3, /* (239) cmd ::= RESET QUERY CACHE */
+ -7, /* (240) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
+ -7, /* (241) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
+ -7, /* (242) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+ -7, /* (243) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+ -8, /* (244) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+ -9, /* (245) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
+ -7, /* (246) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
+ -7, /* (247) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
+ -7, /* (248) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
+ -7, /* (249) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
+ -8, /* (250) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
+ -3, /* (251) cmd ::= KILL CONNECTION INTEGER */
+ -5, /* (252) cmd ::= KILL STREAM INTEGER COLON INTEGER */
+ -5, /* (253) cmd ::= KILL QUERY INTEGER COLON INTEGER */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -3049,7 +3061,7 @@ static YYACTIONTYPE yy_reduce(
break;
case 189: /* having_opt ::= */
case 199: /* where_opt ::= */ yytestcase(yyruleno==199);
- case 237: /* expritem ::= */ yytestcase(yyruleno==237);
+ case 238: /* expritem ::= */ yytestcase(yyruleno==238);
{yymsp[1].minor.yy326 = 0;}
break;
case 190: /* having_opt ::= HAVING expr */
@@ -3166,65 +3178,69 @@ static YYACTIONTYPE yy_reduce(
{yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_EQ);}
yymsp[-2].minor.yy326 = yylhsminor.yy326;
break;
- case 225: /* expr ::= expr AND expr */
+ case 225: /* expr ::= expr BETWEEN expr AND expr */
+{ tSQLExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy326); yylhsminor.yy326 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy326, yymsp[-2].minor.yy326, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy326, TK_LE), TK_AND);}
+ yymsp[-4].minor.yy326 = yylhsminor.yy326;
+ break;
+ case 226: /* expr ::= expr AND expr */
{yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_AND);}
yymsp[-2].minor.yy326 = yylhsminor.yy326;
break;
- case 226: /* expr ::= expr OR expr */
+ case 227: /* expr ::= expr OR expr */
{yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_OR); }
yymsp[-2].minor.yy326 = yylhsminor.yy326;
break;
- case 227: /* expr ::= expr PLUS expr */
+ case 228: /* expr ::= expr PLUS expr */
{yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_PLUS); }
yymsp[-2].minor.yy326 = yylhsminor.yy326;
break;
- case 228: /* expr ::= expr MINUS expr */
+ case 229: /* expr ::= expr MINUS expr */
{yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_MINUS); }
yymsp[-2].minor.yy326 = yylhsminor.yy326;
break;
- case 229: /* expr ::= expr STAR expr */
+ case 230: /* expr ::= expr STAR expr */
{yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_STAR); }
yymsp[-2].minor.yy326 = yylhsminor.yy326;
break;
- case 230: /* expr ::= expr SLASH expr */
+ case 231: /* expr ::= expr SLASH expr */
{yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_DIVIDE);}
yymsp[-2].minor.yy326 = yylhsminor.yy326;
break;
- case 231: /* expr ::= expr REM expr */
+ case 232: /* expr ::= expr REM expr */
{yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_REM); }
yymsp[-2].minor.yy326 = yylhsminor.yy326;
break;
- case 232: /* expr ::= expr LIKE expr */
+ case 233: /* expr ::= expr LIKE expr */
{yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_LIKE); }
yymsp[-2].minor.yy326 = yylhsminor.yy326;
break;
- case 233: /* expr ::= expr IN LP exprlist RP */
+ case 234: /* expr ::= expr IN LP exprlist RP */
{yylhsminor.yy326 = tSqlExprCreate(yymsp[-4].minor.yy326, (tSQLExpr*)yymsp[-1].minor.yy522, TK_IN); }
yymsp[-4].minor.yy326 = yylhsminor.yy326;
break;
- case 234: /* exprlist ::= exprlist COMMA expritem */
+ case 235: /* exprlist ::= exprlist COMMA expritem */
{yylhsminor.yy522 = tSqlExprListAppend(yymsp[-2].minor.yy522,yymsp[0].minor.yy326,0, 0);}
yymsp[-2].minor.yy522 = yylhsminor.yy522;
break;
- case 235: /* exprlist ::= expritem */
+ case 236: /* exprlist ::= expritem */
{yylhsminor.yy522 = tSqlExprListAppend(0,yymsp[0].minor.yy326,0, 0);}
yymsp[0].minor.yy522 = yylhsminor.yy522;
break;
- case 236: /* expritem ::= expr */
+ case 237: /* expritem ::= expr */
{yylhsminor.yy326 = yymsp[0].minor.yy326;}
yymsp[0].minor.yy326 = yylhsminor.yy326;
break;
- case 238: /* cmd ::= RESET QUERY CACHE */
+ case 239: /* cmd ::= RESET QUERY CACHE */
{ setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
break;
- case 239: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
+ case 240: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 240: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
+ case 241: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -3235,14 +3251,14 @@ static YYACTIONTYPE yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 241: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+ case 242: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 242: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+ case 243: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -3253,7 +3269,7 @@ static YYACTIONTYPE yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 243: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+ case 244: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
{
yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
@@ -3267,7 +3283,7 @@ static YYACTIONTYPE yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 244: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
+ case 245: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
{
yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n;
@@ -3279,14 +3295,14 @@ static YYACTIONTYPE yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 245: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
+ case 246: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 246: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
+ case 247: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -3297,14 +3313,14 @@ static YYACTIONTYPE yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 247: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
+ case 248: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 248: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
+ case 249: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -3315,7 +3331,7 @@ static YYACTIONTYPE yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 249: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
+ case 250: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
{
yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
@@ -3329,13 +3345,13 @@ static YYACTIONTYPE yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 250: /* cmd ::= KILL CONNECTION INTEGER */
+ case 251: /* cmd ::= KILL CONNECTION INTEGER */
{setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);}
break;
- case 251: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */
+ case 252: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */
{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);}
break;
- case 252: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */
+ case 253: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */
{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);}
break;
default:
diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c
index ec4bbb33a5..be4073760d 100644
--- a/src/sync/src/syncRetrieve.c
+++ b/src/sync/src/syncRetrieve.c
@@ -170,14 +170,14 @@ static int32_t syncReadOneWalRecord(int32_t sfd, SWalHead *pHead) {
return sizeof(SWalHead) + pHead->len;
}
-static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversion, int64_t offset) {
+static int64_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversion, int64_t offset) {
int32_t sfd = open(name, O_RDONLY | O_BINARY);
if (sfd < 0) {
sError("%s, failed to open wal:%s for retrieve since:%s", pPeer->id, name, tstrerror(errno));
return -1;
}
- int32_t code = (int32_t)taosLSeek(sfd, offset, SEEK_SET);
+ int64_t code = taosLSeek(sfd, offset, SEEK_SET);
if (code < 0) {
sError("%s, failed to seek %" PRId64 " in wal:%s for retrieve since:%s", pPeer->id, offset, name, tstrerror(errno));
close(sfd);
@@ -187,7 +187,7 @@ static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversi
sDebug("%s, retrieve last wal:%s, offset:%" PRId64 " fver:%" PRIu64, pPeer->id, name, offset, fversion);
SWalHead *pHead = malloc(SYNC_MAX_SIZE);
- int32_t bytes = 0;
+ int64_t bytes = 0;
while (1) {
code = syncReadOneWalRecord(sfd, pHead);
@@ -198,13 +198,13 @@ static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversi
if (code == 0) {
code = bytes;
- sDebug("%s, read to the end of wal, bytes:%d", pPeer->id, bytes);
+ sDebug("%s, read to the end of wal, bytes:%" PRId64, pPeer->id, bytes);
break;
}
- sDebug("%s, last wal is forwarded, hver:%" PRIu64, pPeer->id, pHead->version);
+ sTrace("%s, last wal is forwarded, hver:%" PRIu64, pPeer->id, pHead->version);
- int32_t wsize = code;
+ int32_t wsize = (int32_t)code;
int32_t ret = taosWriteMsg(pPeer->syncFd, pHead, wsize);
if (ret != wsize) {
code = -1;
@@ -228,7 +228,7 @@ static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversi
return code;
}
-static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) {
+static int64_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) {
SSyncNode *pNode = pPeer->pSyncNode;
int32_t once = 0; // last WAL has once ever been processed
int64_t offset = 0;
@@ -243,9 +243,9 @@ static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index)
if (syncAreFilesModified(pNode, pPeer)) return -1;
if (syncGetWalVersion(pNode, pPeer) < 0) return -1;
- int32_t bytes = syncRetrieveLastWal(pPeer, fname, fversion, offset);
+ int64_t bytes = syncRetrieveLastWal(pPeer, fname, fversion, offset);
if (bytes < 0) {
- sDebug("%s, failed to retrieve last wal", pPeer->id);
+ sDebug("%s, failed to retrieve last wal, bytes:%" PRId64, pPeer->id, bytes);
return bytes;
}
@@ -263,7 +263,7 @@ static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index)
// if all data up to fversion is read out, it is over
if (pPeer->sversion >= fversion && fversion > 0) {
- sDebug("%s, data up to fver:%" PRIu64 " has been read out, bytes:%d sver:%" PRIu64, pPeer->id, fversion, bytes,
+ sDebug("%s, data up to fver:%" PRIu64 " has been read out, bytes:%" PRId64 " sver:%" PRIu64, pPeer->id, fversion, bytes,
pPeer->sversion);
return 0;
}
@@ -277,19 +277,19 @@ static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index)
// if bytes > 0, file is updated, or fversion is not reached but file still open, read again
once = 1;
offset += bytes;
- sDebug("%s, continue retrieve last wal, bytes:%d offset:%" PRId64 " sver:%" PRIu64 " fver:%" PRIu64, pPeer->id,
+ sDebug("%s, continue retrieve last wal, bytes:%" PRId64 " offset:%" PRId64 " sver:%" PRIu64 " fver:%" PRIu64, pPeer->id,
bytes, offset, pPeer->sversion, fversion);
}
return -1;
}
-static int32_t syncRetrieveWal(SSyncPeer *pPeer) {
+static int64_t syncRetrieveWal(SSyncPeer *pPeer) {
SSyncNode * pNode = pPeer->pSyncNode;
char fname[TSDB_FILENAME_LEN * 3];
char wname[TSDB_FILENAME_LEN * 2];
int32_t size;
- int32_t code = -1;
+ int64_t code = -1;
int64_t index = 0;
while (1) {
@@ -297,7 +297,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) {
wname[0] = 0;
code = (*pNode->getWalInfoFp)(pNode->vgId, wname, &index);
if (code < 0) {
- sError("%s, failed to get wal info since:%s, code:0x%x", pPeer->id, strerror(errno), code);
+ sError("%s, failed to get wal info since:%s, code:0x%" PRIx64, pPeer->id, strerror(errno), code);
break;
}
@@ -309,6 +309,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) {
if (code == 0) { // last wal
code = syncProcessLastWal(pPeer, wname, index);
+ sInfo("%s, last wal processed, code:%" PRId64, pPeer->id, code);
break;
}
@@ -319,7 +320,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) {
struct stat fstat;
if (stat(fname, &fstat) < 0) {
code = -1;
- sDebug("%s, failed to stat wal:%s for retrieve since %s, code:0x%x", pPeer->id, fname, strerror(errno), code);
+ sDebug("%s, failed to stat wal:%s for retrieve since %s, code:0x%" PRIx64, pPeer->id, fname, strerror(errno), code);
break;
}
@@ -329,14 +330,14 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) {
int32_t sfd = open(fname, O_RDONLY | O_BINARY);
if (sfd < 0) {
code = -1;
- sError("%s, failed to open wal:%s for retrieve since %s, code:0x%x", pPeer->id, fname, strerror(errno), code);
+ sError("%s, failed to open wal:%s for retrieve since %s, code:0x%" PRIx64, pPeer->id, fname, strerror(errno), code);
break;
}
code = (int32_t)taosSendFile(pPeer->syncFd, sfd, NULL, size);
close(sfd);
if (code < 0) {
- sError("%s, failed to send wal:%s for retrieve since %s, code:0x%x", pPeer->id, fname, strerror(errno), code);
+ sError("%s, failed to send wal:%s for retrieve since %s, code:0x%" PRIx64, pPeer->id, fname, strerror(errno), code);
break;
}
@@ -357,7 +358,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) {
code = -1;
}
} else {
- sError("%s, failed to send wal since %s, code:0x%x", pPeer->id, strerror(errno), code);
+ sError("%s, failed to send wal since %s, code:0x%" PRIx64, pPeer->id, strerror(errno), code);
}
return code;
@@ -404,9 +405,9 @@ static int32_t syncRetrieveDataStepByStep(SSyncPeer *pPeer) {
if (pPeer->sversion == 0) pPeer->sversion = 1;
sInfo("%s, start to retrieve wals", pPeer->id);
- int32_t code = syncRetrieveWal(pPeer);
- if (code != 0) {
- sError("%s, failed to retrieve wals, code:0x%x", pPeer->id, code);
+ int64_t code = syncRetrieveWal(pPeer);
+ if (code < 0) {
+ sError("%s, failed to retrieve wals, code:0x%" PRIx64, pPeer->id, code);
return -1;
}
diff --git a/src/tfs/src/tdisk.c b/src/tfs/src/tdisk.c
index 7cdaf7fd09..37798d3a88 100644
--- a/src/tfs/src/tdisk.c
+++ b/src/tfs/src/tdisk.c
@@ -52,7 +52,7 @@ int tfsUpdateDiskInfo(SDisk *pDisk) {
}
pDisk->dmeta.size = diskSize.tsize;
- pDisk->dmeta.free = diskSize.tsize - diskSize.avail;
+ pDisk->dmeta.free = diskSize.avail;
return code;
-}
\ No newline at end of file
+}
diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c
index e0fe51e22a..ae12e25d56 100644
--- a/src/util/src/tlog.c
+++ b/src/util/src/tlog.c
@@ -171,7 +171,9 @@ static void *taosThreadToOpenNewFile(void *param) {
int32_t fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO);
if (fd < 0) {
- uError("open new log file fail! fd:%d reason:%s", fd, strerror(errno));
+ tsLogObj.openInProgress = 0;
+ tsLogObj.lines = tsLogObj.maxLines - 1000;
+ uError("open new log file fail! fd:%d reason:%s, reuse lastlog", fd, strerror(errno));
return NULL;
}
diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile
index aa1815fc63..2f8b0de09d 100644
--- a/tests/Jenkinsfile
+++ b/tests/Jenkinsfile
@@ -55,6 +55,8 @@ pipeline {
sh '''
cd ${WKC}/tests
./test-all.sh b1
+ cd ${WKC}/tests
+ ./test-all.sh full jdbc
date'''
}
}
diff --git a/tests/pytest/cluster/clusterEnvSetup/taosdemoxWrapper.py b/tests/pytest/cluster/clusterEnvSetup/taosdemoWrapper.py
similarity index 92%
rename from tests/pytest/cluster/clusterEnvSetup/taosdemoxWrapper.py
rename to tests/pytest/cluster/clusterEnvSetup/taosdemoWrapper.py
index ad0696669a..457dd4ee5a 100644
--- a/tests/pytest/cluster/clusterEnvSetup/taosdemoxWrapper.py
+++ b/tests/pytest/cluster/clusterEnvSetup/taosdemoWrapper.py
@@ -15,7 +15,7 @@ import os
import random
import argparse
-class taosdemoxWrapper:
+class taosdemoWrapper:
def __init__(self, host, metadata, database, tables, threads, configDir, replica,
columnType, columnsPerTable, rowsPerTable, disorderRatio, disorderRange, charTypeLen):
@@ -35,11 +35,11 @@ class taosdemoxWrapper:
def run(self):
if self.metadata is None:
- os.system("taosdemox -h %s -d %s -t %d -T %d -c %s -a %d -b %s -n %d -t %d -O %d -R %d -w %d -x -y"
+ os.system("taosdemo -h %s -d %s -t %d -T %d -c %s -a %d -b %s -n %d -t %d -O %d -R %d -w %d -x -y"
% (self.host, self.database, self.tables, self.threads, self.configDir, self.replica, self.columnType,
self.rowsPerTable, self.disorderRatio, self.disorderRange, self.charTypeLen))
else:
- os.system("taosdemox -f %s" % self.metadata)
+ os.system("taosdemo -f %s" % self.metadata)
parser = argparse.ArgumentParser()
@@ -136,7 +136,7 @@ parser.add_argument(
help='Out of order datas range, ms (default: 16)')
args = parser.parse_args()
-taosdemox = taosdemoxWrapper(args.host_name, args.metadata, args.db_name, args.num_of_tables,
+taosdemo = taosdemoWrapper(args.host_name, args.metadata, args.db_name, args.num_of_tables,
args.num_of_threads, args.config_dir, args.replica, args.column_type, args.num_of_cols,
args.num_of_rows, args.disorder_ratio, args.disorder_range, args.char_type_length)
-taosdemox.run()
\ No newline at end of file
+taosdemo.run()
diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py
index e832c9a74e..d1f180373b 100644
--- a/tests/pytest/concurrent_inquiry.py
+++ b/tests/pytest/concurrent_inquiry.py
@@ -40,7 +40,7 @@ class ConcurrentInquiry:
# stableNum = 2,subtableNum = 1000,insertRows = 100):
def __init__(self,ts,host,user,password,dbname,
stb_prefix,subtb_prefix,n_Therads,r_Therads,probabilities,loop,
- stableNum ,subtableNum ,insertRows ,mix_table):
+ stableNum ,subtableNum ,insertRows ,mix_table, replay):
self.n_numOfTherads = n_Therads
self.r_numOfTherads = r_Therads
self.ts=ts
@@ -65,6 +65,7 @@ class ConcurrentInquiry:
self.mix_table = mix_table
self.max_ts = datetime.datetime.now()
self.min_ts = datetime.datetime.now() - datetime.timedelta(days=5)
+ self.replay = replay
def SetThreadsNum(self,num):
self.numOfTherads=num
@@ -412,7 +413,7 @@ class ConcurrentInquiry:
)
cl = conn.cursor()
cl.execute("use %s;" % self.dbname)
-
+ fo = open('bak_sql_n_%d'%threadID,'w+')
print("Thread %d: starting" % threadID)
loop = self.loop
while loop:
@@ -423,6 +424,7 @@ class ConcurrentInquiry:
else:
sql=self.gen_query_join()
print("sql is ",sql)
+ fo.write(sql+'\n')
start = time.time()
cl.execute(sql)
cl.fetchall()
@@ -438,13 +440,49 @@ class ConcurrentInquiry:
exit(-1)
loop -= 1
if loop == 0: break
-
+ fo.close()
cl.close()
conn.close()
print("Thread %d: finishing" % threadID)
+
+ def query_thread_nr(self,threadID): #使用原生python接口进行重放
+ host = self.host
+ user = self.user
+ password = self.password
+ conn = taos.connect(
+ host,
+ user,
+ password,
+ )
+ cl = conn.cursor()
+ cl.execute("use %s;" % self.dbname)
+ replay_sql = []
+ with open('bak_sql_n_%d'%threadID,'r') as f:
+ replay_sql = f.readlines()
+ print("Replay Thread %d: starting" % threadID)
+ for sql in replay_sql:
+ try:
+ print("sql is ",sql)
+ start = time.time()
+ cl.execute(sql)
+ cl.fetchall()
+ end = time.time()
+ print("time cost :",end-start)
+ except Exception as e:
+ print('-'*40)
+ print(
+ "Failure thread%d, sql: %s \nexception: %s" %
+ (threadID, str(sql),str(e)))
+ err_uec='Unable to establish connection'
+ if err_uec in str(e) and loop >0:
+ exit(-1)
+ cl.close()
+ conn.close()
+ print("Replay Thread %d: finishing" % threadID)
def query_thread_r(self,threadID): #使用rest接口查询
print("Thread %d: starting" % threadID)
+ fo = open('bak_sql_r_%d'%threadID,'w+')
loop = self.loop
while loop:
try:
@@ -453,6 +491,7 @@ class ConcurrentInquiry:
else:
sql=self.gen_query_join()
print("sql is ",sql)
+ fo.write(sql+'\n')
start = time.time()
self.rest_query(sql)
end = time.time()
@@ -467,20 +506,53 @@ class ConcurrentInquiry:
exit(-1)
loop -= 1
if loop == 0: break
-
- print("Thread %d: finishing" % threadID)
+ fo.close()
+ print("Thread %d: finishing" % threadID)
+
+ def query_thread_rr(self,threadID): #使用rest接口重放
+ print("Replay Thread %d: starting" % threadID)
+ replay_sql = []
+ with open('bak_sql_r_%d'%threadID,'r') as f:
+ replay_sql = f.readlines()
+
+ for sql in replay_sql:
+ try:
+ print("sql is ",sql)
+ start = time.time()
+ self.rest_query(sql)
+ end = time.time()
+ print("time cost :",end-start)
+ except Exception as e:
+ print('-'*40)
+ print(
+ "Failure thread%d, sql: %s \nexception: %s" %
+ (threadID, str(sql),str(e)))
+ err_uec='Unable to establish connection'
+ if err_uec in str(e) and loop >0:
+ exit(-1)
+ print("Replay Thread %d: finishing" % threadID)
def run(self):
print(self.n_numOfTherads,self.r_numOfTherads)
threads = []
- for i in range(self.n_numOfTherads):
- thread = threading.Thread(target=self.query_thread_n, args=(i,))
- threads.append(thread)
- thread.start()
- for i in range(self.r_numOfTherads):
- thread = threading.Thread(target=self.query_thread_r, args=(i,))
- threads.append(thread)
- thread.start()
+ if self.replay: #whether replay
+ for i in range(self.n_numOfTherads):
+ thread = threading.Thread(target=self.query_thread_nr, args=(i,))
+ threads.append(thread)
+ thread.start()
+ for i in range(self.r_numOfTherads):
+ thread = threading.Thread(target=self.query_thread_rr, args=(i,))
+ threads.append(thread)
+ thread.start()
+ else:
+ for i in range(self.n_numOfTherads):
+ thread = threading.Thread(target=self.query_thread_n, args=(i,))
+ threads.append(thread)
+ thread.start()
+ for i in range(self.r_numOfTherads):
+ thread = threading.Thread(target=self.query_thread_r, args=(i,))
+ threads.append(thread)
+ thread.start()
parser = argparse.ArgumentParser()
parser.add_argument(
@@ -595,13 +667,20 @@ parser.add_argument(
default=0,
type=int,
help='0:stable & substable ,1:subtable ,2:stable (default: 0)')
+parser.add_argument(
+ '-R',
+ '--replay',
+ action='store',
+ default=0,
+ type=int,
+ help='0:not replay ,1:replay (default: 0)')
args = parser.parse_args()
q = ConcurrentInquiry(
args.ts,args.host_name,args.user,args.password,args.db_name,
args.stb_name_prefix,args.subtb_name_prefix,args.number_of_native_threads,args.number_of_rest_threads,
args.probabilities,args.loop_per_thread,args.number_of_stables,args.number_of_tables ,args.number_of_records,
- args.mix_stable_subtable )
+ args.mix_stable_subtable, args.replay )
if args.create_table:
q.gen_data()
diff --git a/tests/pytest/handle_crash_gen_val_log.sh b/tests/pytest/handle_crash_gen_val_log.sh
index 528316700d..502c859dad 100755
--- a/tests/pytest/handle_crash_gen_val_log.sh
+++ b/tests/pytest/handle_crash_gen_val_log.sh
@@ -16,7 +16,7 @@ TOP_DIR=`pwd`
TAOSD_DIR=`find . -name "taosd"|grep -v community|head -n1`
nohup $TAOSD_DIR >/dev/null &
cd -
-./crash_gen.sh --valgrind -p -t 10 -s 350 -b 4
+./crash_gen.sh --valgrind -p -t 10 -s 500 -b 4
pidof taosd|xargs kill -9
grep 'start to execute\|ERROR SUMMARY' valgrind.err|grep -v 'grep'|uniq|tee crash_gen_mem_err.log
@@ -36,11 +36,13 @@ for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log |
do
defiMemError=(${defiMemError//,/})
if [ -n "$defiMemError" ]; then
- if [ "$defiMemError" -gt 3 -a "$defiMemError" -lt 1013 ]; then
- echo -e "${RED} ## Memory errors number valgrind reports \
- Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
+ if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then
+ cat valgrind.err
+ echo -e "${RED} ## Memory errors number valgrind reports \
+ Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
exit 8
elif [ "$defiMemError" -gt 1013 ];then #add for azure
+ cat valgrind.err
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
exit 8
diff --git a/tests/pytest/tools/lowaTest.py b/tests/pytest/tools/lowaTest.py
index 2b65dcf3ef..ad8b5925bd 100644
--- a/tests/pytest/tools/lowaTest.py
+++ b/tests/pytest/tools/lowaTest.py
@@ -51,7 +51,7 @@ class TDTestCase:
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
- os.system("yes | %staosdemox -f tools/insert.json" % binPath)
+ os.system("yes | %staosdemo -f tools/insert.json" % binPath)
tdSql.execute("use db01")
tdSql.query("select count(*) from stb01")
diff --git a/tests/pytest/tools/taosdemoTest.py b/tests/pytest/tools/taosdemoTest.py
index 2a4a552c8f..1cb2f71d8f 100644
--- a/tests/pytest/tools/taosdemoTest.py
+++ b/tests/pytest/tools/taosdemoTest.py
@@ -23,9 +23,10 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
-
+
self.numberOfTables = 10000
self.numberOfRecords = 100
+
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -38,9 +39,10 @@ class TDTestCase:
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
- buildPath = root[:len(root)-len("/build/bin")]
+ buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
+
def run(self):
tdSql.prepare()
buildPath = self.getBuildPath()
@@ -48,18 +50,21 @@ class TDTestCase:
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
- binPath = buildPath+ "/build/bin/"
- os.system("yes | %staosdemo -t %d -n %d -x" % (binPath,self.numberOfTables, self.numberOfRecords))
+ binPath = buildPath + "/build/bin/"
+ os.system("%staosdemo -y -M -t %d -n %d -x" %
+ (binPath, self.numberOfTables, self.numberOfRecords))
tdSql.execute("use test")
tdSql.query("select count(*) from meters")
tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords)
- tdSql.query("select sum(f1) from test.meters interval(1h) sliding(30m)")
+ tdSql.query(
+ "select sum(col1) from test.meters interval(1h) sliding(30m)")
tdSql.checkRows(2)
- tdSql.query("select apercentile(f1, 1) from test.meters interval(10s)")
- tdSql.checkRows(11)
+ tdSql.query(
+ "select apercentile(col1, 1) from test.meters interval(10s)")
+ tdSql.checkRows(1)
tdSql.error("select loc, count(loc) from test.meters")
diff --git a/tests/pytest/tools/taosdemoTest2.py b/tests/pytest/tools/taosdemoTest2.py
index 1e492aa8fc..75a79d0585 100644
--- a/tests/pytest/tools/taosdemoTest2.py
+++ b/tests/pytest/tools/taosdemoTest2.py
@@ -31,11 +31,12 @@ class TDTestCase:
def insertDataAndAlterTable(self, threadID):
if(threadID == 0):
- os.system("yes | taosdemo -t %d -n %d -x" % (self.numberOfTables, self.numberOfRecords))
+ os.system("taosdemo -M -y -t %d -n %d -x" %
+ (self.numberOfTables, self.numberOfRecords))
if(threadID == 1):
time.sleep(2)
print("use test")
- tdSql.execute("use test")
+ tdSql.execute("use test")
# check if all the tables have heen created
while True:
tdSql.query("show tables")
@@ -52,19 +53,19 @@ class TDTestCase:
print("number of records: %d" % rows)
if(rows > 0):
break
- time.sleep(1)
- print("alter table test.meters add column f4 int")
- tdSql.execute("alter table test.meters add column f4 int")
- print("insert into test.t0 values (now, 1, 2, 3, 4)")
- tdSql.execute("insert into test.t0 values (now, 1, 2, 3, 4)")
+ time.sleep(1)
+ print("alter table test.meters add column col10 int")
+ tdSql.execute("alter table test.meters add column col10 int")
+ print("insert into test.t0 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)")
+ tdSql.execute("insert into test.t0 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)")
- def run(self):
+ def run(self):
tdSql.prepare()
t1 = threading.Thread(target=self.insertDataAndAlterTable, args=(0, ))
t2 = threading.Thread(target=self.insertDataAndAlterTable, args=(1, ))
- t1.start()
+ t1.start()
t2.start()
t1.join()
t2.join()
@@ -78,4 +79,4 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/script/general/parser/between_and.sim b/tests/script/general/parser/between_and.sim
new file mode 100644
index 0000000000..2e031c4917
--- /dev/null
+++ b/tests/script/general/parser/between_and.sim
@@ -0,0 +1,165 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
+system sh/exec.sh -n dnode1 -s start
+
+sleep 100
+sql connect
+print ======================== dnode1 start
+
+$db = testdb
+
+sql create database $db
+sql use $db
+
+sql create stable st2 (ts timestamp, f1 int, f2 float, f3 double, f4 bigint, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10)) tags (id1 int, id2 float, id3 nchar(10), id4 double, id5 smallint, id6 bigint, id7 binary(10))
+
+sql create table tb1 using st2 tags (1,1.0,"1",1.0,1,1,"1");
+sql create table tb2 using st2 tags (2,2.0,"2",2.0,2,2,"2");
+sql create table tb3 using st2 tags (3,3.0,"3",3.0,3,3,"3");
+sql create table tb4 using st2 tags (4,4.0,"4",4.0,4,4,"4");
+
+sql insert into tb1 values (now-200s,1,1.0,1.0,1,1,1,true,"1","1")
+sql insert into tb1 values (now-100s,2,2.0,2.0,2,2,2,true,"2","2")
+sql insert into tb1 values (now,3,3.0,3.0,3,3,3,true,"3","3")
+sql insert into tb1 values (now+100s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+200s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+300s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+400s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+500s,4,4.0,4.0,4,4,4,true,"4","4")
+
+sql select tbname,id1 from st2;
+
+if $rows != 4 then
+ return -1
+endi
+
+
+sql select * from st2;
+
+if $rows != 8 then
+ return -1
+endi
+
+sql select * from st2 where ts between now-50s and now+450s
+
+if $rows != 5 then
+ return -1
+endi
+
+sql select tbname,id1 from st2 where id1 between 2 and 3;
+
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != tb2 then
+ return -1
+endi
+if $data01 != 2 then
+ return -1
+endi
+if $data10 != tb3 then
+ return -1
+endi
+if $data11 != 3 then
+ return -1
+endi
+
+sql select tbname,id2 from st2 where id2 between 2.0 and 3.0;
+
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != tb2 then
+ return -1
+endi
+if $data01 != 2.00000 then
+ return -1
+endi
+if $data10 != tb3 then
+ return -1
+endi
+if $data11 != 3.00000 then
+ return -1
+endi
+
+
+sql select tbname,id4 from st2 where id4 between 2.0 and 3.0;
+
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != tb2 then
+ return -1
+endi
+if $data01 != 2.000000000 then
+ return -1
+endi
+if $data10 != tb3 then
+ return -1
+endi
+if $data11 != 3.000000000 then
+ return -1
+endi
+
+
+sql select tbname,id5 from st2 where id5 between 2.0 and 3.0;
+
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != tb2 then
+ return -1
+endi
+if $data01 != 2 then
+ return -1
+endi
+if $data10 != tb3 then
+ return -1
+endi
+if $data11 != 3 then
+ return -1
+endi
+
+sql select tbname,id6 from st2 where id6 between 2.0 and 3.0;
+
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != tb2 then
+ return -1
+endi
+if $data01 != 2 then
+ return -1
+endi
+if $data10 != tb3 then
+ return -1
+endi
+if $data11 != 3 then
+ return -1
+endi
+
+sql select * from st2 where f1 between 2 and 3 and f2 between 2.0 and 3.0 and f3 between 2.0 and 3.0 and f4 between 2.0 and 3.0 and f5 between 2.0 and 3.0 and f6 between 2.0 and 3.0;
+
+if $rows != 2 then
+ return -1
+endi
+
+if $data01 != 2 then
+ return -1
+endi
+if $data11 != 3 then
+ return -1
+endi
+
+sql_error select * from st2 where f7 between 2.0 and 3.0;
+sql_error select * from st2 where f8 between 2.0 and 3.0;
+sql_error select * from st2 where f9 between 2.0 and 3.0;
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim
index ca020c4063..af16bfd4f1 100644
--- a/tests/script/general/parser/function.sim
+++ b/tests/script/general/parser/function.sim
@@ -763,3 +763,20 @@ endi
if $data01 != 1.414213562 then
return -1
endi
+
+sql create stable st1 (ts timestamp, f1 int, f2 int) tags (id int);
+sql create table tb1 using st1 tags(1);
+
+sql insert into tb1 values (now, 1, 1);
+
+sql select stddev(f1) from st1 group by f1;
+
+if $rows != 1 then
+ return -1
+endi
+
+
+if $data00 != 0.000000000 then
+ return -1
+endi
+
diff --git a/tests/test-all.sh b/tests/test-all.sh
index f03e3f88c3..3177da745f 100755
--- a/tests/test-all.sh
+++ b/tests/test-all.sh
@@ -7,6 +7,21 @@ GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
+tests_dir=`pwd`
+IN_TDINTERNAL="community"
+
+function stopTaosd {
+ echo "Stop taosd"
+ systemctl stop taosd
+ PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
+ while [ -n "$PID" ]
+ do
+ pkill -TERM -x taosd
+ sleep 1
+ PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
+ done
+}
+
function dohavecore(){
corefile=`find $corepath -mmin 1`
if [ -n "$corefile" ];then
@@ -19,8 +34,7 @@ function dohavecore(){
function runSimCaseOneByOne {
while read -r line; do
if [[ $line =~ ^./test.sh* ]] || [[ $line =~ ^run* ]]; then
- case=`echo $line | grep sim$ |awk '{print $NF}'`
- IN_TDINTERNAL="community"
+ case=`echo $line | grep sim$ |awk '{print $NF}'`
start_time=`date +%s`
date +%F\ %T | tee -a out.log
if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
@@ -51,8 +65,7 @@ function runSimCaseOneByOnefq {
if [[ $line =~ ^./test.sh* ]] || [[ $line =~ ^run* ]]; then
case=`echo $line | grep sim$ |awk '{print $NF}'`
- start_time=`date +%s`
- IN_TDINTERNAL="community"
+ start_time=`date +%s`
date +%F\ %T | tee -a out.log
if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
echo -n $case
@@ -143,12 +156,13 @@ function runPyCaseOneByOnefq {
fi
done < $1
}
+
totalFailed=0
totalPyFailed=0
+totalJDBCFailed=0
-tests_dir=`pwd`
corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern`
-if [ "$2" != "python" ]; then
+if [ "$2" != "jdbc" ] && [ "$2" != "python" ]; then
echo "### run TSIM test case ###"
cd $tests_dir/script
@@ -217,11 +231,10 @@ if [ "$2" != "python" ]; then
fi
fi
-if [ "$2" != "sim" ]; then
+if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] ; then
echo "### run Python test case ###"
cd $tests_dir
- IN_TDINTERNAL="community"
if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
cd ../..
@@ -286,4 +299,48 @@ if [ "$2" != "sim" ]; then
fi
fi
-exit $(($totalFailed + $totalPyFailed))
+
+if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$1" == "full" ]; then
+ echo "### run JDBC test case ###"
+
+ cd $tests_dir
+
+ if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
+ cd ../../
+ else
+ cd ../
+ fi
+
+ pwd
+ cd debug/
+
+ stopTaosd
+ nohup build/bin/taosd -c /etc/taos/ > /dev/null 2>&1 &
+ sleep 30
+
+ cd $tests_dir/../src/connector/jdbc
+
+ mvn test > jdbc-out.log 2>&1
+ tail -n 20 jdbc-out.log
+
+ cases=`grep 'Tests run' jdbc-out.log | awk 'END{print $3}'`
+ totalJDBCCases=`echo ${cases/%,}`
+ failed=`grep 'Tests run' jdbc-out.log | awk 'END{print $5}'`
+ JDBCFailed=`echo ${failed/%,}`
+ error=`grep 'Tests run' jdbc-out.log | awk 'END{print $7}'`
+ JDBCError=`echo ${error/%,}`
+
+ totalJDBCFailed=`expr $JDBCFailed + $JDBCError`
+ totalJDBCSuccess=`expr $totalJDBCCases - $totalJDBCFailed`
+
+ if [ "$totalJDBCSuccess" -gt "0" ]; then
+ echo -e "\n${GREEN} ### Total $totalJDBCSuccess JDBC case(s) succeed! ### ${NC}"
+ fi
+
+ if [ "$totalJDBCFailed" -ne "0" ]; then
+ echo -e "\n${RED} ### Total $totalJDBCFailed JDBC case(s) failed! ### ${NC}"
+ fi
+ dohavecore 1
+fi
+
+exit $(($totalFailed + $totalPyFailed + $totalJDBCFailed))