Merge branch 'develop' of https://github.com/taosdata/TDengine into develop
commit 4b3eafad4e
@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.15-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.16-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.13.0")
SET(TD_VER_NUMBER "2.0.14.0")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
@ -68,7 +68,9 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
2) UPDATE 标志数据库支持更新相同时间戳数据;
|
||||
|
||||
3) 数据库名最大长度为33;
|
||||
|
||||
4) 一条SQL 语句的最大长度为65480个字符;
|
||||
|
||||
5) 数据库还有更多与存储相关的配置参数,请参见系统管理。
|
||||
|
||||
- **显示系统当前参数**
|
||||
|
@ -119,6 +121,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
**Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。
|
||||
|
||||
- **显示系统所有数据库**
|
||||
|
||||
```mysql
|
||||
SHOW DATABASES;
|
||||
```
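As an editorial illustration of the database options discussed above, a minimal sketch that creates a database with the UPDATE flag enabled and then confirms the settings, as the Tips line recommends; the database name `power` and the KEEP/DAYS values are illustrative and not taken from the original text:

```mysql
CREATE DATABASE IF NOT EXISTS power KEEP 365 DAYS 10 UPDATE 1;
SHOW DATABASES;
```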
|
||||
|
@ -130,10 +133,15 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]);
|
||||
```
|
||||
说明:
|
||||
|
||||
1) 表的第一个字段必须是TIMESTAMP,并且系统自动将其设为主键;
|
||||
|
||||
2) 表名最大长度为192;
|
||||
|
||||
3) 表的每行长度不能超过16k个字符;
|
||||
|
||||
4) 子表名只能由字母、数字和下划线组成,且不能以数字开头
|
||||
|
||||
5) 使用数据类型binary或nchar,需指定其最长的字节数,如binary(20),表示20字节;
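A minimal sketch of a table definition that satisfies rules 1) to 5) above (TIMESTAMP as the first column, name within the length limit, binary length declared explicitly); the table name `sensor_data` is illustrative:

```mysql
CREATE TABLE IF NOT EXISTS sensor_data (ts TIMESTAMP, current FLOAT, voltage INT, location BINARY(20));
```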
|
||||
|
||||
- **以超级表为模板创建数据表**
|
||||
|
@ -149,7 +157,12 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
|
||||
```
|
||||
以更快的速度批量创建大量数据表。(服务器端 2.0.14 及以上版本)
|
||||
说明:批量建表方式要求数据表必须以超级表为模板。
|
||||
|
||||
说明:
|
||||
|
||||
1)批量建表方式要求数据表必须以超级表为模板。
|
||||
|
||||
2)在不超出 SQL 语句长度限制的前提下,单条语句中的建表数量建议控制在 1000~3000 之间,将会获得比较理想的建表速度。
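A minimal sketch of the batch-create form described above, assuming the super table `meters` (defined in a later example of this document) already exists and the server is 2.0.14 or later; the subtable names and tag values are illustrative:

```mysql
CREATE TABLE IF NOT EXISTS d1001 USING meters TAGS ('Beijing.Chaoyang', 2)
                   d1002 USING meters TAGS ('Beijing.Haidian', 2)
                   d1003 USING meters TAGS ('Beijing.Haidian', 3);
```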
|
||||
|
||||
- **删除数据表**
|
||||
|
||||
|
@ -164,7 +177,9 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
```
|
||||
|
||||
显示当前数据库下的所有数据表信息。
|
||||
|
||||
说明:可在like中使用通配符进行名称的匹配,这一通配符字符串最长不能超过24字节。
|
||||
|
||||
通配符匹配:1)’%’ (百分号)匹配0到任意个字符;2)’\_’下划线匹配一个字符。
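A minimal sketch of the wildcard matching described above, assuming subtables named d1001, d1002 and so on exist; '_' matches exactly one character and '%' matches any number of characters:

```mysql
SHOW TABLES LIKE 'd100_';
SHOW TABLES LIKE 'd%';
```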
|
||||
|
||||
- **在线修改显示字符宽度**
|
||||
|
@ -185,7 +200,9 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
ALTER TABLE tb_name ADD COLUMN field_name data_type;
|
||||
```
|
||||
说明:
|
||||
|
||||
1) 列的最大个数为1024,最小个数为2;
|
||||
|
||||
2) 列名最大长度为64;
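A minimal sketch of adding a column within the limits noted above (at most 1024 columns, column names up to 64 characters); the table `sensor_data` and the column `temperature` are illustrative:

```mysql
ALTER TABLE sensor_data ADD COLUMN temperature FLOAT;
```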
|
||||
|
||||
- **表删除列**
|
||||
|
@ -204,9 +221,13 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
创建STable, 与创建表的SQL语法相似,但需指定TAGS字段的名称和类型
|
||||
|
||||
说明:
|
||||
|
||||
1) TAGS 列的数据类型不能是timestamp类型;
|
||||
|
||||
2) TAGS 列名不能与其他列名相同;
|
||||
|
||||
3) TAGS 列名不能为预留关键字;
|
||||
|
||||
4) TAGS 最多允许128个,至少1个,总长度不超过16k个字符。
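The super table definition used in a later example of this document already satisfies tag rules 1) to 4) above (no TIMESTAMP tag, distinct tag names, well under 128 tags) and can serve as a reference:

```mysql
CREATE TABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(30), groupId INT);
```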
|
||||
|
||||
- **删除超级表**
|
||||
|
@ -333,7 +354,9 @@ SELECT select_expr [, select_expr ...]
|
|||
[LIMIT limit_val [, OFFSET offset_val]]
|
||||
[>> export_file]
|
||||
```
|
||||
|
||||
说明:针对 insert 类型的 SQL 语句,我们采用的流式解析策略,在发现后面的错误之前,前面正确的部分SQL仍会执行。下面的sql中,insert语句是无效的,但是d1001仍会被创建。
|
||||
|
||||
```mysql
|
||||
taos> CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);
|
||||
Query OK, 0 row(s) affected (0.008245s)
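The example above is cut off at the hunk boundary; a sketch of the kind of statement the note describes, in which the VALUES part is invalid (not a timestamp) so the statement as a whole fails, yet the subtable d1001 is still created because the part before it parses cleanly; the tag values are illustrative:

```mysql
INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
```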
|
||||
|
@ -614,10 +637,20 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中记录行数或某列的非空值个数。
|
||||
|
||||
返回结果数据类型:长整型INT64。
|
||||
|
||||
应用字段:应用全部字段。
|
||||
|
||||
适用于:表、超级表。
|
||||
说明:1)可以使用星号*来替代具体的字段,使用星号(*)返回全部记录数量。2)针对同一表的(不包含NULL值)字段查询结果均相同。3)如果统计对象是具体的列,则返回该列中非NULL值的记录数量。
|
||||
|
||||
说明:
|
||||
|
||||
1)可以使用星号*来替代具体的字段,使用星号(*)返回全部记录数量。
|
||||
|
||||
2)针对同一表的(不包含NULL值)字段查询结果均相同。
|
||||
|
||||
3)如果统计对象是具体的列,则返回该列中非NULL值的记录数量。
|
||||
|
||||
示例:
|
||||
```mysql
|
||||
|
@ -639,8 +672,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT AVG(field_name) FROM tb_name [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的平均值。
|
||||
|
||||
返回结果数据类型:双精度浮点数Double。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool字段。
|
||||
|
||||
适用于:表、超级表。
|
||||
|
||||
示例:
|
||||
|
@ -663,8 +699,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT TWA(field_name) FROM tb_name WHERE clause;
|
||||
```
|
||||
功能说明:时间加权平均函数。统计表/超级表中某列在一段时间内的时间加权平均。
|
||||
|
||||
返回结果数据类型:双精度浮点数Double。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
|
||||
适用于:表、超级表。
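A minimal sketch of a time-weighted average query over an explicit time range, which the TWA syntax above requires in its WHERE clause; the table d1001 and the timestamps are illustrative:

```mysql
SELECT TWA(current) FROM d1001 WHERE ts >= '2018-10-03 14:38:05.000' AND ts <= '2018-10-03 14:38:16.800';
```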
|
||||
|
||||
- **SUM**
|
||||
|
@ -672,8 +711,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT SUM(field_name) FROM tb_name [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的和。
|
||||
|
||||
返回结果数据类型:双精度浮点数Double和长整型INT64。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
|
||||
适用于:表、超级表。
|
||||
|
||||
示例:
|
||||
|
@ -696,9 +738,12 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
|
||||
```
|
||||
功能说明:统计表中某列的均方差。
|
||||
|
||||
返回结果数据类型:双精度浮点数Double。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
适用于:表。
|
||||
|
||||
适用于:表。(从 2.0.15 版本开始,本函数也支持超级表)
|
||||
|
||||
示例:
|
||||
```mysql
|
||||
|
@ -714,9 +759,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
|
||||
```
|
||||
功能说明:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val是自变量初始值,step_val是自变量的步长值。
|
||||
|
||||
返回结果数据类型:字符串表达式(斜率, 截距)。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
|
||||
说明:自变量是时间戳,因变量是该列的值。
|
||||
|
||||
适用于:表。
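A minimal sketch of a least-squares fit against the primary timestamp, with start_val and step_val both set to 1; the table d1001 is illustrative and the result is the string expression (slope, intercept) described above:

```mysql
SELECT LEASTSQUARES(current, 1, 1) FROM d1001;
```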
|
||||
|
||||
示例:
|
||||
|
@ -735,7 +784,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的值最小值。
|
||||
|
||||
返回结果数据类型:同应用的字段。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
|
||||
示例:
|
||||
|
@ -758,7 +809,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的值最大值。
|
||||
|
||||
返回结果数据类型:同应用的字段。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
|
||||
示例:
|
||||
|
@ -781,9 +834,18 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的值最先写入的非NULL值。
|
||||
|
||||
返回结果数据类型:同应用的字段。
|
||||
|
||||
应用字段:所有字段。
|
||||
说明:1)如果要返回各个列的首个(时间戳最小)非NULL值,可以使用FIRST(\*);2) 如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;3) 如果结果集中所有列全部为NULL值,则不返回结果。
|
||||
|
||||
说明:
|
||||
|
||||
1)如果要返回各个列的首个(时间戳最小)非NULL值,可以使用FIRST(\*);
|
||||
|
||||
2) 如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;
|
||||
|
||||
3) 如果结果集中所有列全部为NULL值,则不返回结果。
|
||||
|
||||
示例:
|
||||
```mysql
|
||||
|
@ -805,9 +867,16 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的值最后写入的非NULL值。
|
||||
|
||||
返回结果数据类型:同应用的字段。
|
||||
|
||||
应用字段:所有字段。
|
||||
说明:1)如果要返回各个列的最后(时间戳最大)一个非NULL值,可以使用LAST(\*);2)如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;如果结果集中所有列全部为NULL值,则不返回结果。
|
||||
|
||||
说明:
|
||||
|
||||
1)如果要返回各个列的最后(时间戳最大)一个非NULL值,可以使用LAST(\*);
|
||||
|
||||
2)如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;如果结果集中所有列全部为NULL值,则不返回结果。
|
||||
|
||||
示例:
|
||||
```mysql
|
||||
|
@ -829,9 +898,16 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明: 统计表/超级表中某列的值最大*k*个非NULL值。若多于k个列值并列最大,则返回时间戳小的。
|
||||
|
||||
返回结果数据类型:同应用的字段。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:1)*k*值取值范围1≤*k*≤100;2)系统同时返回该记录关联的时间戳列。
|
||||
|
||||
说明:
|
||||
|
||||
1)*k*值取值范围1≤*k*≤100;
|
||||
|
||||
2)系统同时返回该记录关联的时间戳列。
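A minimal sketch of a TOP query with k inside the allowed range of 1 to 100; the super table meters is the one used elsewhere in this document, and the associated timestamp column is returned alongside the values as noted above:

```mysql
SELECT TOP(current, 3) FROM meters;
```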
|
||||
|
||||
示例:
|
||||
```mysql
|
||||
|
@ -856,9 +932,16 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的值最小*k*个非NULL值。若多于k个列值并列最小,则返回时间戳小的。
|
||||
|
||||
返回结果数据类型:同应用的字段。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:1)*k*值取值范围1≤*k*≤100;2)系统同时返回该记录关联的时间戳列。
|
||||
|
||||
说明:
|
||||
|
||||
1)*k*值取值范围1≤*k*≤100;
|
||||
|
||||
2)系统同时返回该记录关联的时间戳列。
|
||||
|
||||
示例:
|
||||
```mysql
|
||||
|
@ -882,8 +965,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表中某列的值百分比分位数。
|
||||
|
||||
返回结果数据类型: 双精度浮点数Double。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
|
||||
说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。
|
||||
|
||||
示例:
|
||||
|
@ -900,9 +986,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表中某列的值百分比分位数,与PERCENTILE函数相似,但是返回近似结果。
|
||||
|
||||
返回结果数据类型: 双精度浮点数Double。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
|
||||
说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数
|
||||
|
||||
```mysql
|
||||
taos> SELECT APERCENTILE(current, 20) FROM d1001;
|
||||
apercentile(current, 20) |
|
||||
|
@ -916,8 +1006,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
|
||||
```
|
||||
功能说明:返回表(超级表)的最后一条记录。
|
||||
|
||||
返回结果数据类型:同应用的字段。
|
||||
|
||||
应用字段:所有字段。
|
||||
|
||||
说明:与last函数不同,last_row不支持时间范围限制,强制返回最后一条记录。
|
||||
|
||||
示例:
|
||||
|
@ -941,8 +1034,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT DIFF(field_name) FROM tb_name [WHERE clause];
|
||||
```
|
||||
功能说明:统计表中某列的值与前一行对应值的差。
|
||||
|
||||
返回结果数据类型: 同应用字段。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
|
||||
说明:输出结果行数是范围内总行数减一,第一行没有结果输出。
|
||||
|
||||
示例:
|
||||
|
@ -960,8 +1056,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的最大值和最小值之差。
|
||||
|
||||
返回结果数据类型: 双精度浮点数。
|
||||
|
||||
应用字段:不能应用在binary、nchar、bool类型字段。
|
||||
|
||||
说明:可用于TIMESTAMP字段,此时表示记录的时间覆盖范围。
|
||||
|
||||
示例:
|
||||
|
@ -985,9 +1084,16 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列或多列间的值加、减、乘、除、取余计算结果。
|
||||
|
||||
返回结果数据类型:双精度浮点数。
|
||||
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:1)支持两列或多列之间进行计算,可使用括号控制计算优先级;2)NULL字段不参与计算,如果参与计算的某行中包含NULL,该行的计算结果为NULL。
|
||||
|
||||
说明:
|
||||
|
||||
1)支持两列或多列之间进行计算,可使用括号控制计算优先级;
|
||||
|
||||
2)NULL字段不参与计算,如果参与计算的某行中包含NULL,该行的计算结果为NULL。
|
||||
|
||||
```mysql
|
||||
taos> SELECT current + voltage * phase FROM d1001;
|
||||
|
@ -1051,7 +1157,7 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
|
|||
- 数据库名最大长度为32
|
||||
- 表名最大长度为192,每行数据最大长度16k个字符
|
||||
- 列名最大长度为64,最多允许1024列,最少需要2列,第一列必须是时间戳
|
||||
- 标签最多允许128个,可以0个,标签总长度不超过16k个字符
|
||||
- 标签最多允许128个,可以1个,标签总长度不超过16k个字符
|
||||
- SQL语句最大长度65480个字符,但可通过系统配置参数maxSQLLength修改,最长可配置为1M
|
||||
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
|
||||
|
||||
|
|
|
@ -255,7 +255,7 @@ taos -C 或 taos --dump-config
|
|||
CREATE USER <user_name> PASS <'password'>;
|
||||
```
|
||||
|
||||
创建用户,并指定用户名和密码,密码需要用单引号引起来,单引号为英文半角
|
||||
创建用户,并指定用户名和密码,密码需要用单引号引起来,单引号为英文半角
|
||||
|
||||
```sql
|
||||
DROP USER <user_name>;
|
||||
|
@ -267,13 +267,15 @@ DROP USER <user_name>;
|
|||
ALTER USER <user_name> PASS <'password'>;
|
||||
```
|
||||
|
||||
修改用户密码, 为避免被转换为小写,密码需要用单引号引用,单引号为英文半角
|
||||
修改用户密码,为避免被转换为小写,密码需要用单引号引用,单引号为英文半角
|
||||
|
||||
```sql
|
||||
ALTER USER <user_name> PRIVILEGE <super|write|read>;
|
||||
ALTER USER <user_name> PRIVILEGE <write|read>;
|
||||
```
|
||||
|
||||
修改用户权限为:super/write/read,不需要添加单引号
|
||||
修改用户权限为:write 或 read,不需要添加单引号
|
||||
|
||||
说明:系统内共有 super/write/read 三种权限级别,但目前不允许通过 alter 指令把 super 权限赋予用户。
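A minimal sketch of the user management statements described above; the user name monitor and the passwords are illustrative, the passwords are quoted with half-width single quotes, and the privilege keyword is written without quotes:

```mysql
CREATE USER monitor PASS 'Taos_2020';
ALTER USER monitor PASS 'Taos_2021';
ALTER USER monitor PRIVILEGE read;
```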
|
||||
|
||||
```mysql
|
||||
SHOW USERS;
|
||||
|
@ -432,11 +434,12 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
|
|||
- 数据库名:不能包含“.”以及特殊字符,不能超过32个字符
|
||||
- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过192个字符
|
||||
- 表的列名:不能包含特殊字符,不能超过64个字符
|
||||
- 数据库名、表名、列名,都不能以数字开头
|
||||
- 表的列数:不能超过1024列
|
||||
- 记录的最大长度:包括时间戳8 byte,不能超过16KB
|
||||
- 单条SQL语句默认最大字符串长度:65480 byte
|
||||
- 数据库副本数:不能超过3
|
||||
- 用户名:不能超过20个byte
|
||||
- 用户名:不能超过23个byte
|
||||
- 用户密码:不能超过15个byte
|
||||
- 标签(Tags)数量:不能超过128个
|
||||
- 标签的总长度:不能超过16Kbyte
|
||||
|
|
|
@ -248,7 +248,7 @@ Master Vnode遵循下面的写入流程:
|
|||
1. Master vnode收到应用的数据插入请求,验证OK,进入下一步;
|
||||
2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失;
|
||||
3. 如果有多个副本,vnode将把数据包转发给同一虚拟节点组内slave vnodes, 该转发包带有数据的版本号(version);
|
||||
4. 写入内存,并加记录加入到skip list;
|
||||
4. 写入内存,并将记录加入到skip list;
|
||||
5. Master vnode返回确认信息给应用,表示写入成功。
|
||||
6. 如果第2,3,4步中任何一步失败,将直接返回错误给应用。
|
||||
|
||||
|
@ -372,7 +372,7 @@ select count(*) from d1001 interval(1h);
|
|||
select count(*) from d1001 interval(1h) fill(prev);
|
||||
```
|
||||
|
||||
针对d1001设备采集数据统计每小时记录数,如果某一个小时不存在数据,这返回之前一个小时的统计数据。TDengine提供前向插值(prev)、线性插值(linear)、NULL值填充(NULL)、特定值填充(value)。
|
||||
针对d1001设备采集数据统计每小时记录数,如果某一个小时不存在数据,则返回之前一个小时的统计数据。TDengine提供前向插值(prev)、线性插值(linear)、NULL值填充(NULL)、特定值填充(value)。
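A minimal sketch of the other fill modes listed above (linear interpolation and filling with a fixed value), using the same device table d1001 as the example:

```mysql
select count(*) from d1001 interval(1h) fill(linear);
select count(*) from d1001 interval(1h) fill(value, 0);
```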
|
||||
|
||||
### 多表聚合查询
|
||||
TDengine对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作,TDengine引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以多个,可以随时增加、删除和修改。 应用可通过指定标签的过滤条件,对一个STable下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示:
|
||||
|
|
|
@ -218,7 +218,7 @@ SHOW MNODES;
|
|||
|
||||
如果一个数据节点离线,TDengine集群将自动检测到。有如下两种情况:
|
||||
|
||||
- 该数据节点离线超过一定时间(taos.cfg里配置参数offlineThreshold控制时长),系统将自动把该数据节点删除,产生系统报警信息,触发负载均衡流程。如果该被删除的数据节点重现上线时,它将无法加入集群,需要系统管理员重新将其添加进集群才会开始工作。
|
||||
- 该数据节点离线超过一定时间(taos.cfg里配置参数offlineThreshold控制时长),系统将自动把该数据节点删除,产生系统报警信息,触发负载均衡流程。如果该被删除的数据节点重新上线时,它将无法加入集群,需要系统管理员重新将其添加进集群才会开始工作。
|
||||
- 离线后,在offlineThreshold的时长内重新上线,系统将自动启动数据恢复流程,等数据完全恢复后,该节点将开始正常工作。
|
||||
|
||||
**注意:**如果一个虚拟节点组(包括mnode组)里所归属的每个数据节点都处于离线或unsynced状态,必须等该虚拟节点组里的所有数据节点都上线、都能交换状态信息后,才能选出Master,该虚拟节点组才能对外提供服务。比如整个集群有3个数据节点,副本数为3,如果3个数据节点都宕机,然后2个数据节点重启,是无法工作的,只有等3个数据节点都重启成功,才能对外服务。
|
||||
|
@ -227,5 +227,5 @@ SHOW MNODES;
|
|||
|
||||
如果副本数为偶数,当一个vnode group里一半vnode不工作时,是无法从中选出master的。同理,一半mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator, 那么节点B就能正常工作。
|
||||
|
||||
TDengine提供一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。请点击[安装包下载](https://www.taosdata.com/cn/all-downloads/),在TDengine Arbitrator Linux一节中,选择适合的版本下载并安装。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6042。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了,当副本数为偶数数,系统将自动连接配置的arbitrator。如果副本数为奇数,即使配置了arbitrator, 系统也不会去建立连接。
|
||||
TDengine提供一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。请点击[安装包下载](https://www.taosdata.com/cn/all-downloads/),在TDengine Arbitrator Linux一节中,选择适合的版本下载并安装。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6042。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了,当副本数为偶数时,系统将自动连接配置的arbitrator。如果副本数为奇数,即使配置了arbitrator, 系统也不会去建立连接。
|
||||
|
||||
|
|
|
@ -89,6 +89,8 @@ SHOW DNODES;
|
|||
```
|
||||
它将列出集群中所有的dnode,每个dnode的fqdn:port, 状态(ready, offline等),vnode数目,还未使用的vnode数目等信息。在添加或删除一个节点后,可以使用该命令查看。
|
||||
|
||||
如果集群配置了Arbitrator,那么它也会在这个节点列表中显示出来,其role列的值会是“arb”。
|
||||
|
||||
###查看虚拟节点组
|
||||
|
||||
为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况,自动进行分配的,无需任何人工干预。
|
||||
|
@ -139,4 +141,6 @@ SHOW MNODES;
|
|||
|
||||
如果副本数为偶数,当一个vnode group里一半vnode不工作时,是无法从中选出master的。同理,一半mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator, 那么节点B就能正常工作。
|
||||
|
||||
TDengine安装包里带有一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6030。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了,当副本数为偶数数,系统将自动连接配置的arbitrator。
|
||||
TDengine安装包里带有一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6030。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为Arbitrator的End Point。如果该参数配置了,当副本数为偶数时,系统将自动连接配置的Arbitrator。
|
||||
|
||||
在配置了Arbitrator的情况下,它也会显示在“show dnodes;”指令给出的节点列表中。
|
||||
|
|
|
@ -252,7 +252,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
|
|||
|
||||
- `void taos_free_result(TAOS_RES *res)`
|
||||
|
||||
释放查询结果集以及相关的资源。查询完成后,务必调用该API释放资源,否则可能导致应用内存泄露。
|
||||
释放查询结果集以及相关的资源。查询完成后,务必调用该API释放资源,否则可能导致应用内存泄露。但也需注意,释放资源后,如果再调用`taos_consume`等获取查询结果的函数,将导致应用Crash。
|
||||
|
||||
- `char *taos_errstr(TAOS_RES *res)`
|
||||
|
||||
|
@ -262,11 +262,11 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
|
|||
|
||||
获取最近一次API调用失败的原因,返回值为错误代码。
|
||||
|
||||
**注意**:对于每个数据库应用,2.0及以上版本 TDengine 推荐只建立一个连接。同时在应用中将该连接 (TAOS*) 结构体传递到不同的线程共享使用。基于 TAOS 结构体发出的查询、写入等操作具有多线程安全性。C 语言的连接器可以按照需求动态建立面向数据库的新连接(该过程对用户不可见),同时建议只有在程序最后退出的时候才调用 taos_close 关闭连接。
|
||||
**注意**:2.0及以上版本 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池。而不推荐在应用中将该连接 (TAOS\*) 结构体传递到不同的线程共享使用。基于 TAOS 结构体发出的查询、写入等操作具有多线程安全性,但 “USE statement” 等状态量有可能在线程之间相互干扰。此外,C 语言的连接器可以按照需求动态建立面向数据库的新连接(该过程对用户不可见),同时建议只有在程序最后退出的时候才调用 taos_close 关闭连接。
|
||||
|
||||
### 异步查询API
|
||||
|
||||
同步API之外,TDengine还提供性能更高的异步调用API处理数据插入、查询操作。在软硬件环境相同的情况下,异步API处理数据插入的速度比同步API快2~4倍。异步API采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步API在网络延迟严重的情况下,优点尤为突出。
|
||||
同步API之外,TDengine还提供性能更高的异步调用API处理数据插入、查询操作。在软硬件环境相同的情况下,异步API处理数据插入的速度比同步API快2\~4倍。异步API采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步API在网络延迟严重的情况下,优点尤为突出。
|
||||
|
||||
异步API都需要应用提供相应的回调函数,回调函数参数设置如下:前两个参数都是一致的,第三个参数依不同的API而定。第一个参数param是应用调用异步API时提供给系统的,用于回调时,应用能够找回具体操作的上下文,依具体实现而定。第二个参数是SQL操作的结果集,如果为空,比如insert操作,表示没有记录返回,如果不为空,比如select操作,表示有记录返回。
|
||||
|
||||
|
@ -288,13 +288,6 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
|
|||
* res:`taos_query_a`回调时返回的结果集
|
||||
* fp:回调函数。其参数`param`是用户可定义的传递给回调函数的参数结构体;`numOfRows`是获取到的数据的行数(不是整个查询结果集的函数)。 在回调函数中,应用可以通过调用`taos_fetch_row`前向迭代获取批量记录中每一行记录。读完一块内的所有记录后,应用需要在回调函数中继续调用`taos_fetch_rows_a`获取下一批记录进行处理,直到返回的记录数(numOfRows)为零(结果返回完成)或记录数为负值(查询出错)。
|
||||
|
||||
- `void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);`
|
||||
|
||||
异步获取一条记录。其中:
|
||||
|
||||
* res:`taos_query_a`回调时返回的结果集
|
||||
* fp:回调函数。其参数`param`是应用提供的一个用于回调的参数。回调时,第三个参数`row`指向一行记录。不同于`taos_fetch_rows_a`,应用无需调用`taos_fetch_row`来获取一行数据,更加简单,但数据提取性能不及批量获取的API。
|
||||
|
||||
TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线程同时打开多张表,并可以同时对每张打开的表进行查询或者插入操作。需要指出的是,**客户端应用必须确保对同一张表的操作完全串行化**,即对同一个表的插入或查询操作未完成时(未返回时),不能够执行第二个插入或查询操作。
|
||||
|
||||
### 参数绑定API
|
||||
|
@ -425,7 +418,7 @@ cd C:\TDengine\connector\python\windows
|
|||
python -m pip install python3\
|
||||
```
|
||||
|
||||
*如果机器上没有pip命令,用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。
|
||||
* 如果机器上没有pip命令,用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。
|
||||
对于windows 客户端,安装TDengine windows 客户端后,将C:\TDengine\driver\taos.dll拷贝到C:\windows\system32目录下即可。
|
||||
|
||||
### 使用
|
||||
|
@ -442,7 +435,7 @@ import taos
|
|||
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
|
||||
c1 = conn.cursor()
|
||||
```
|
||||
*<em>host</em> 是TDengine 服务端所有IP, <em>config</em> 为客户端配置文件所在目录
|
||||
* <em>host</em> 是TDengine 服务端所有IP, <em>config</em> 为客户端配置文件所在目录
|
||||
|
||||
* 写入数据
|
||||
```python
|
||||
|
@ -510,17 +503,17 @@ conn.close()
|
|||
|
||||
用户可通过python的帮助信息直接查看模块的使用信息,或者参考tests/examples/python中的示例程序。以下为部分常用类和方法:
|
||||
|
||||
- _TDengineConnection_类
|
||||
- _TDengineConnection_ 类
|
||||
|
||||
参考python中help(taos.TDengineConnection)。
|
||||
这个类对应客户端和TDengine建立的一个连接。在客户端多线程的场景下,这个连接实例可以是每个线程申请一个,也可以多线程共享一个连接。
|
||||
这个类对应客户端和TDengine建立的一个连接。在客户端多线程的场景下,推荐每个线程申请一个独立的连接实例,而不建议多线程共享一个连接。
|
||||
|
||||
- _TDengineCursor_类
|
||||
- _TDengineCursor_ 类
|
||||
|
||||
参考python中help(taos.TDengineCursor)。
|
||||
这个类对应客户端进行的写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能夸线程共享使用,否则会导致返回结果出现错误。
|
||||
|
||||
- _connect_方法
|
||||
- _connect_ 方法
|
||||
|
||||
用于生成taos.TDengineConnection的实例。
|
||||
|
||||
|
@ -800,7 +793,7 @@ go env -w GOPROXY=https://goproxy.io,direct
|
|||
|
||||
- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
|
||||
|
||||
该API用来打开DB,返回一个类型为*DB的对象,一般情况下,DRIVER_NAME设置为字符串`taosSql`, dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`,如果客户想要用多个goroutine并发访问TDengine, 那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine
|
||||
该API用来打开DB,返回一个类型为\*DB的对象,一般情况下,DRIVER_NAME设置为字符串`taosSql`, dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`,如果客户想要用多个goroutine并发访问TDengine, 那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine
|
||||
|
||||
**注意**: 该API成功创建的时候,并没有做权限等检查,只有在真正执行Query或者Exec的时候才能真正的去创建连接,并同时检查user/password/host/port是不是合法。 另外,由于整个驱动程序大部分实现都下沉到taosSql所依赖的libtaos中。所以,sql.Open本身特别轻量。
|
||||
|
||||
|
@ -822,7 +815,7 @@ go env -w GOPROXY=https://goproxy.io,direct
|
|||
|
||||
- `func (s *Stmt) Query(args ...interface{}) (*Rows, error)`
|
||||
|
||||
sql.Open内置的方法,Query executes a prepared query statement with the given arguments and returns the query results as a *Rows.
|
||||
sql.Open内置的方法,Query executes a prepared query statement with the given arguments and returns the query results as a \*Rows.
|
||||
|
||||
- `func (s *Stmt) Close() error`
|
||||
|
||||
|
@ -894,7 +887,7 @@ Node-example-raw.js
|
|||
|
||||
验证方法:
|
||||
|
||||
1. 新建安装验证目录,例如:~/tdengine-test,拷贝github上nodejsChecker.js源程序。下载地址:(https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js)。
|
||||
1. 新建安装验证目录,例如:\~/tdengine-test,拷贝github上nodejsChecker.js源程序。下载地址:(https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js)。
|
||||
|
||||
2. 在命令中执行以下命令:
|
||||
|
||||
|
|
|
@ -1,5 +1,19 @@
|
|||
# 常见问题
|
||||
|
||||
## 0. 怎么报告问题?
|
||||
|
||||
如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包:
|
||||
1. /var/log/taos (如果没有修改过默认路径)
|
||||
2. /etc/taos
|
||||
|
||||
附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在<a href='https://github.com/taosdata/TDengine'> GitHub</a>提交Issue。
|
||||
|
||||
为了保证有足够的debug信息,如果问题能够重复,请修改/etc/taos/taos.cfg文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启taosd, 重复问题,然后再递交。也可以通过如下SQL语句,临时设置taosd的日志级别。
|
||||
```
|
||||
alter dnode <dnode_id> debugFlag 135;
|
||||
```
|
||||
但系统正常运行时,请一定将debugFlag设置为131,否则会产生大量的日志信息,降低系统效率。
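A sketch of restoring the normal log level after debugging, using the same statement form shown above; the dnode id 1 is illustrative:

```mysql
alter dnode 1 debugFlag 131;
```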
|
||||
|
||||
## 1. TDengine2.0之前的版本升级到2.0及以上的版本应该注意什么?☆☆☆
|
||||
|
||||
2.0版本在之前版本的基础上,进行了完全的重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作:
|
||||
|
@ -118,16 +132,8 @@ TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A
|
|||
- 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下,修复dnodeEps.json的dnodeId对应的FQDN,重启。确保机器内所有机器的此文件是完全相同的。
|
||||
- 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。
|
||||
|
||||
## 17. 怎么报告问题?
|
||||
## 17. TDengine 是否支持删除或更新已经写入的数据?
|
||||
|
||||
如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包:
|
||||
1. /var/log/taos
|
||||
2. /etc/taos
|
||||
TDengine 目前尚不支持删除功能,未来根据用户需求可能会支持。
|
||||
|
||||
附上必要的问题描述,以及发生该问题的执行操作,出现问题的表征及大概的时间,在<a href='https://github.com/taosdata/TDengine'> GitHub</a>提交Issue。
|
||||
|
||||
为了保证有足够的debug信息,如果问题能够重复,请修改/etc/taos/taos.cfg文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启taosd, 重复问题,然后再递交。也可以通过如下SQL语句,临时设置taosd的日志级别。
|
||||
```
|
||||
alter dnode <dnode_id> debugFlag 135;
|
||||
```
|
||||
但系统正常运行时,请一定将debugFlag设置为131,否则会产生大量的日志信息,降低系统效率。
|
||||
从 2.0.8.0 开始,TDengine 支持更新已经写入数据的功能。使用更新功能需要在创建数据库时使用 UPDATE 1 参数,之后可以使用 INSERT INTO 命令更新已经写入的相同时间戳数据。UPDATE 参数不支持 ALTER DATABASE 命令修改。没有使用 UPDATE 1 参数创建的数据库,写入相同时间戳的数据不会修改之前的数据,也不会报错。
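A minimal sketch of the behaviour described above, assuming TDengine 2.0.8.0 or later; the database demo and table t1 are illustrative. Because the database is created with UPDATE 1, the second insert with the same timestamp replaces the earlier value instead of being silently ignored:

```mysql
CREATE DATABASE demo UPDATE 1;
USE demo;
CREATE TABLE t1 (ts TIMESTAMP, speed INT);
INSERT INTO t1 VALUES ('2020-10-01 00:00:00.000', 10);
INSERT INTO t1 VALUES ('2020-10-01 00:00:00.000', 12);
```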
|
|
@ -36,6 +36,9 @@
# 0.0: only one core available.
# tsRatioOfQueryCores 1.0

# the last_row/first/last aggregator will not change the original column name in the result fields
# keepColumnName 0

# number of management nodes in the system
# numOfMnodes 3

@ -159,10 +162,10 @@
# stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 0.1

# stop writing temporary files when the disk size of the log folder is less than this value
# stop writing temporary files when the disk size of the tmp folder is less than this value
# minimalTmpDirGB 0.1

# stop writing data when the disk size of the log folder is less than this value
# if disk free space is less than this value, taosd service exit directly within startup process
# minimalDataDirGB 0.1

# One mnode is equal to the number of vnode consumed
@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.0.13.0'
version: '2.0.14.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |

@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.13.0
- usr/lib/libtaos.so.2.0.14.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef TDENGINE_TSC_LOG_H
#define TDENGINE_TSC_LOG_H
#ifndef TDENGINE_TSCLOG_H
#define TDENGINE_TSCLOG_H

#ifdef __cplusplus
extern "C" {

@ -22,7 +22,7 @@ extern "C" {

#include "tlog.h"

extern int32_t cDebugFlag;
extern uint32_t cDebugFlag;
extern int8_t tscEmbedded;

#define tscFatal(...) do { if (cDebugFlag & DEBUG_FATAL) { taosPrintLog("TSC FATAL ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} while(0)
@ -13,8 +13,8 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef TDENGINE_TSCJOINPROCESS_H
|
||||
#define TDENGINE_TSCJOINPROCESS_H
|
||||
#ifndef TDENGINE_TSCSUBQUERY_H
|
||||
#define TDENGINE_TSCSUBQUERY_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@ -47,9 +47,8 @@ void tscLockByThread(int64_t *lockedBy);
|
|||
|
||||
void tscUnlockByThread(int64_t *lockedBy);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // TDENGINE_TSCJOINPROCESS_H
|
||||
#endif // TDENGINE_TSCSUBQUERY_H
|
||||
|
|
|
@ -110,7 +110,7 @@ void* tscDestroyBlockArrayList(SArray* pDataBlockList);
|
|||
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable);
|
||||
|
||||
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
|
||||
int32_t tscMergeTableDataBlocks(SSqlObj* pSql);
|
||||
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap);
|
||||
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta,
|
||||
STableDataBlocks** dataBlocks, SArray* pBlockList);
|
||||
|
||||
|
@ -256,11 +256,11 @@ void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src);
|
|||
* @param pPrevSql
|
||||
* @return
|
||||
*/
|
||||
SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cmd);
|
||||
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd);
|
||||
|
||||
void registerSqlObj(SSqlObj* pSql);
|
||||
|
||||
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, int32_t cmd, SSqlObj* pPrevSql);
|
||||
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t fp, void* param, int32_t cmd, SSqlObj* pPrevSql);
|
||||
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex);
|
||||
|
||||
void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex);
|
||||
|
|
|
@ -24,10 +24,6 @@ extern "C" {
|
|||
#include "tstoken.h"
|
||||
#include "tsclient.h"
|
||||
|
||||
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
|
||||
|
||||
#define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS)
|
||||
|
||||
/**
|
||||
* get the number of tags of this table
|
||||
* @param pTableMeta
|
||||
|
@ -79,26 +75,6 @@ SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
|
|||
*/
|
||||
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
|
||||
|
||||
/**
|
||||
* check if the schema is valid or not, including following aspects:
|
||||
* 1. number of columns
|
||||
* 2. column types
|
||||
* 3. column length
|
||||
* 4. column names
|
||||
* 5. total length
|
||||
*
|
||||
* @param pSchema
|
||||
* @param numOfCols
|
||||
* @return
|
||||
*/
|
||||
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
|
||||
|
||||
/**
|
||||
* get the schema for the "tbname" column. it is a built column
|
||||
* @return
|
||||
*/
|
||||
SSchema tscGetTbnameColumnSchema();
|
||||
|
||||
/**
|
||||
* create the table meta from the msg
|
||||
* @param pTableMetaMsg
|
||||
|
|
|
@ -235,7 +235,7 @@ typedef struct {
|
|||
int32_t numOfTablesInSubmit;
|
||||
};
|
||||
|
||||
uint32_t insertType;
|
||||
uint32_t insertType; // TODO remove it
|
||||
int32_t clauseIndex; // index of multiple subclause query
|
||||
|
||||
char * curSql; // current sql, resume position of sql after parsing paused
|
||||
|
@ -317,7 +317,8 @@ typedef struct STscObj {
|
|||
} STscObj;
|
||||
|
||||
typedef struct SSubqueryState {
|
||||
int32_t numOfRemain; // the number of remain unfinished subquery
|
||||
pthread_mutex_t mutex;
|
||||
int8_t *states;
|
||||
int32_t numOfSub; // the number of total sub-queries
|
||||
uint64_t numOfRetrievedRows; // total number of points in this query
|
||||
} SSubqueryState;
|
||||
|
@ -327,8 +328,8 @@ typedef struct SSqlObj {
|
|||
pthread_t owner; // owner of sql object, by which it is executed
|
||||
STscObj *pTscObj;
|
||||
int64_t rpcRid;
|
||||
void (*fp)();
|
||||
void (*fetchFp)();
|
||||
__async_cb_func_t fp;
|
||||
__async_cb_func_t fetchFp;
|
||||
void *param;
|
||||
int64_t stime;
|
||||
uint32_t queryId;
|
||||
|
@ -463,7 +464,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
|
|||
pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
|
||||
pRes->tsrow[columnIndex] = (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) ? NULL : (unsigned char*)pData;
|
||||
} else {
|
||||
assert(bytes == tDataTypeDesc[type].nSize);
|
||||
assert(bytes == tDataTypes[type].bytes);
|
||||
|
||||
pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)&pInfo->pSqlExpr->param[1].i64;
|
||||
pRes->length[columnIndex] = bytes;
|
||||
|
@ -480,7 +481,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
|
|||
|
||||
pRes->length[columnIndex] = realLen;
|
||||
} else {
|
||||
assert(bytes == tDataTypeDesc[type].nSize);
|
||||
assert(bytes == tDataTypes[type].bytes);
|
||||
|
||||
pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)pData;
|
||||
pRes->length[columnIndex] = bytes;
|
||||
|
|
|
@ -32,7 +32,6 @@ taos_errstr
|
|||
taos_errno
|
||||
taos_query_a
|
||||
taos_fetch_rows_a
|
||||
taos_fetch_row_a
|
||||
taos_subscribe
|
||||
taos_consume
|
||||
taos_unsubscribe
|
||||
|
|
|
@ -20,24 +20,19 @@
|
|||
#include "trpc.h"
|
||||
#include "tscLog.h"
|
||||
#include "tscSubquery.h"
|
||||
#include "tscLocalMerge.h"
|
||||
#include "tscUtil.h"
|
||||
#include "tsched.h"
|
||||
#include "tschemautil.h"
|
||||
#include "tsclient.h"
|
||||
|
||||
static void tscProcessFetchRow(SSchedMsg *pMsg);
|
||||
static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
|
||||
|
||||
static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, void (*fp)());
|
||||
|
||||
/*
|
||||
* Proxy function to perform sequentially query&retrieve operation.
|
||||
* If sql queries upon a super table and two-stage merge procedure is not involved (when employ the projection
|
||||
* query), it will sequentially query&retrieve data for all vnodes
|
||||
*/
|
||||
static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows);
|
||||
static void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows);
|
||||
|
||||
void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* param, const char* sqlstr, size_t sqlLen) {
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
@ -148,7 +143,7 @@ static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) {
|
|||
}
|
||||
|
||||
// actual continue retrieve function with user-specified callback function
|
||||
static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, void (*fp)()) {
|
||||
static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, __async_cb_func_t fp) {
|
||||
SSqlObj *pSql = (SSqlObj *)tres;
|
||||
if (pSql == NULL) { // error
|
||||
tscError("sql object is NULL");
|
||||
|
@ -191,11 +186,6 @@ static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOf
|
|||
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchRowsProxy);
|
||||
}
|
||||
|
||||
void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows) {
|
||||
// query completed, continue to retrieve
|
||||
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchSingleRowProxy);
|
||||
}
|
||||
|
||||
void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
|
||||
SSqlObj *pSql = (SSqlObj *)taosa;
|
||||
if (pSql == NULL || pSql->signature != pSql) {
|
||||
|
@ -263,103 +253,6 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
|
|||
}
|
||||
}
|
||||
|
||||
void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW), void *param) {
|
||||
SSqlObj *pSql = (SSqlObj *)taosa;
|
||||
if (pSql == NULL || pSql->signature != pSql) {
|
||||
tscError("sql object is NULL");
|
||||
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
|
||||
return;
|
||||
}
|
||||
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
|
||||
if (pRes->qhandle == 0) {
|
||||
tscError("qhandle is NULL");
|
||||
pSql->param = param;
|
||||
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
|
||||
|
||||
tscAsyncResultOnError(pSql);
|
||||
return;
|
||||
}
|
||||
|
||||
pSql->fetchFp = fp;
|
||||
pSql->param = param;
|
||||
|
||||
if (pRes->row >= pRes->numOfRows) {
|
||||
tscResetForNextRetrieve(pRes);
|
||||
pSql->fp = tscAsyncFetchSingleRowProxy;
|
||||
|
||||
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
|
||||
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
|
||||
}
|
||||
|
||||
tscProcessSql(pSql);
|
||||
} else {
|
||||
SSchedMsg schedMsg = { 0 };
|
||||
schedMsg.fp = tscProcessFetchRow;
|
||||
schedMsg.ahandle = pSql;
|
||||
schedMsg.thandle = pRes->tsrow;
|
||||
schedMsg.msg = NULL;
|
||||
taosScheduleTask(tscQhandle, &schedMsg);
|
||||
}
|
||||
}
|
||||
|
||||
void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows) {
|
||||
SSqlObj *pSql = (SSqlObj *)tres;
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
|
||||
if (numOfRows == 0) {
|
||||
if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
|
||||
tscTryQueryNextVnode(pSql, tscAsyncQuerySingleRowForNextVnode);
|
||||
} else {
|
||||
/*
|
||||
* 1. has reach the limitation
|
||||
* 2. no remain virtual nodes to be retrieved anymore
|
||||
*/
|
||||
(*pSql->fetchFp)(pSql->param, pSql, NULL);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
for (int i = 0; i < pCmd->numOfCols; ++i){
|
||||
SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
|
||||
if (pSup->pSqlExpr != NULL) {
|
||||
// pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i) + pSup->pSqlExpr->resBytes * pRes->row;
|
||||
} else {
|
||||
//todo add
|
||||
}
|
||||
}
|
||||
|
||||
pRes->row++;
|
||||
|
||||
(*pSql->fetchFp)(pSql->param, pSql, pSql->res.tsrow);
|
||||
}
|
||||
|
||||
void tscProcessFetchRow(SSchedMsg *pMsg) {
|
||||
SSqlObj *pSql = (SSqlObj *)pMsg->ahandle;
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
|
||||
for (int i = 0; i < pCmd->numOfCols; ++i) {
|
||||
SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
|
||||
|
||||
if (pSup->pSqlExpr != NULL) {
|
||||
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, 0);
|
||||
} else {
|
||||
// todo add
|
||||
}
|
||||
}
|
||||
|
||||
pRes->row++;
|
||||
(*pSql->fetchFp)(pSql->param, pSql, pRes->tsrow);
|
||||
}
|
||||
|
||||
// this function will be executed by queue task threads, so the terrno is not valid
|
||||
static void tscProcessAsyncError(SSchedMsg *pMsg) {
|
||||
void (*fp)() = pMsg->ahandle;
|
||||
|
@ -372,7 +265,7 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) {
|
|||
int32_t* c = malloc(sizeof(int32_t));
|
||||
*c = code;
|
||||
|
||||
SSchedMsg schedMsg = { 0 };
|
||||
SSchedMsg schedMsg = {0};
|
||||
schedMsg.fp = tscProcessAsyncError;
|
||||
schedMsg.ahandle = fp;
|
||||
schedMsg.thandle = param;
|
||||
|
@ -380,7 +273,6 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) {
|
|||
taosScheduleTask(tscQhandle, &schedMsg);
|
||||
}
|
||||
|
||||
|
||||
void tscAsyncResultOnError(SSqlObj *pSql) {
|
||||
if (pSql == NULL || pSql->signature != pSql) {
|
||||
tscDebug("%p SqlObj is freed, not add into queue async res", pSql);
|
||||
|
|
|
@ -79,7 +79,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
|
|||
char* dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i;
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(dst, pSchema[i].name, pField->bytes);
|
||||
|
||||
char *type = tDataTypeDesc[pSchema[i].type].aName;
|
||||
char *type = tDataTypes[pSchema[i].type].name;
|
||||
|
||||
pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 1);
|
||||
dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i;
|
||||
|
@ -119,7 +119,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
|
|||
|
||||
// type name
|
||||
pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 1);
|
||||
char *type = tDataTypeDesc[pSchema[i].type].aName;
|
||||
char *type = tDataTypes[pSchema[i].type].name;
|
||||
|
||||
output = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i;
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(output, type, pField->bytes);
|
||||
|
@ -619,9 +619,9 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName,
|
|||
if (type == TSDB_DATA_TYPE_NCHAR) {
|
||||
bytes = bytes/TSDB_NCHAR_SIZE;
|
||||
}
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName, bytes);
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypes[pSchema[i].type].name, bytes);
|
||||
} else {
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName);
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[pSchema[i].type].name);
|
||||
}
|
||||
}
|
||||
sprintf(result + strlen(result) - 1, "%s", ")");
|
||||
|
@ -646,9 +646,9 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
|
|||
if (type == TSDB_DATA_TYPE_NCHAR) {
|
||||
bytes = bytes/TSDB_NCHAR_SIZE;
|
||||
}
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes);
|
||||
} else {
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[type].name);
|
||||
}
|
||||
}
|
||||
snprintf(result + strlen(result) - 1, TSDB_MAX_BINARY_LEN - strlen(result), "%s %s", ")", "TAGS (");
|
||||
|
@ -660,9 +660,9 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
|
|||
if (type == TSDB_DATA_TYPE_NCHAR) {
|
||||
bytes = bytes/TSDB_NCHAR_SIZE;
|
||||
}
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes);
|
||||
} else {
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[type].name);
|
||||
}
|
||||
}
|
||||
sprintf(result + strlen(result) - 1, "%s", ")");
|
||||
|
|
|
@ -979,14 +979,13 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
|
|||
pQueryInfo->limit.offset -= newRows;
|
||||
pRes->numOfRows = 0;
|
||||
|
||||
int32_t rpoints = taosNumOfRemainRows(pFillInfo);
|
||||
if (rpoints <= 0) {
|
||||
if (!taosFillHasMoreResults(pFillInfo)) {
|
||||
if (!doneOutput) { // reduce procedure has not completed yet, but current results for fill are exhausted
|
||||
break;
|
||||
}
|
||||
|
||||
// all output in current group are completed
|
||||
int32_t totalRemainRows = (int32_t)getNumOfResWithFill(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
|
||||
int32_t totalRemainRows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
|
||||
if (totalRemainRows <= 0) {
|
||||
break;
|
||||
}
|
||||
|
@ -1337,14 +1336,14 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
|
|||
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
|
||||
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
|
||||
|
||||
if (pFillInfo != NULL && taosNumOfRemainRows(pFillInfo) > 0) {
|
||||
if (pFillInfo != NULL && taosFillHasMoreResults(pFillInfo)) {
|
||||
assert(pQueryInfo->fillType != TSDB_FILL_NONE);
|
||||
|
||||
tFilePage *pFinalDataBuf = pLocalReducer->pResultBuf;
|
||||
int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1));
|
||||
|
||||
// the first column must be the timestamp column
|
||||
int32_t rows = (int32_t) getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity);
|
||||
int32_t rows = (int32_t) getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalReducer->resColModel->capacity);
|
||||
if (rows > 0) { // do fill gap
|
||||
doFillResult(pSql, pLocalReducer, false);
|
||||
}
|
||||
|
@ -1373,7 +1372,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
|
|||
((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) {
|
||||
int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey;
|
||||
|
||||
int32_t rows = (int32_t)getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity);
|
||||
int32_t rows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalReducer->resColModel->capacity);
|
||||
if (rows > 0) {
|
||||
doFillResult(pSql, pLocalReducer, true);
|
||||
}
|
||||
|
@ -1428,6 +1427,10 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
|
|||
tscResetForNextRetrieve(pRes);
|
||||
|
||||
if (pSql->signature != pSql || pRes == NULL || pRes->pLocalReducer == NULL) { // all data has been processed
|
||||
if (pRes->code == TSDB_CODE_SUCCESS) {
|
||||
return pRes->code;
|
||||
}
|
||||
|
||||
tscError("%p local merge abort due to error occurs, code:%s", pSql, tstrerror(pRes->code));
|
||||
return pRes->code;
|
||||
}
|
||||
|
|
|
@ -1281,7 +1281,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
}
|
||||
|
||||
if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
|
||||
if ((code = tscMergeTableDataBlocks(pSql)) != TSDB_CODE_SUCCESS) {
|
||||
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
|
||||
goto _clean;
|
||||
}
|
||||
}
|
||||
|
@ -1336,15 +1336,6 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
|
|||
}
|
||||
|
||||
if (tscIsInsertData(pSql->sqlstr)) {
|
||||
/*
|
||||
* Set the fp before parse the sql string, in case of getTableMeta failed, in which
|
||||
* the error handle callback function can rightfully restore the user-defined callback function (fp).
|
||||
*/
|
||||
if (initial && (pSql->cmd.insertType != TSDB_QUERY_TYPE_STMT_INSERT)) {
|
||||
pSql->fetchFp = pSql->fp;
|
||||
pSql->fp = (void(*)())tscHandleMultivnodeInsert;
|
||||
}
|
||||
|
||||
if (initial && ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS)) {
|
||||
return ret;
|
||||
}
|
||||
|
@ -1398,7 +1389,7 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock
|
|||
return tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", NULL);
|
||||
}
|
||||
|
||||
if ((code = tscMergeTableDataBlocks(pSql)) != TSDB_CODE_SUCCESS) {
|
||||
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
|
|
|
@ -255,7 +255,6 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
|
|||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// functions for insertion statement preparation
|
||||
|
||||
static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
|
||||
if (bind->is_null != NULL && *(bind->is_null)) {
|
||||
setNull(data + param->offset, param->type, param->bytes);
|
||||
|
@ -697,71 +696,52 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
|
|||
static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
|
||||
SSqlCmd* pCmd = &stmt->pSql->cmd;
|
||||
|
||||
int32_t alloced = 1, binded = 0;
|
||||
if (pCmd->batchSize > 0) {
|
||||
alloced = (pCmd->batchSize + 1) / 2;
|
||||
binded = pCmd->batchSize / 2;
|
||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
|
||||
|
||||
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
if (pCmd->pTableBlockHashList == NULL) {
|
||||
pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
|
||||
}
|
||||
|
||||
size_t size = taosArrayGetSize(pCmd->pDataBlocks);
|
||||
for (int32_t i = 0; i < size; ++i) {
|
||||
STableDataBlocks* pBlock = taosArrayGetP(pCmd->pDataBlocks, i);
|
||||
uint32_t totalDataSize = pBlock->size - sizeof(SSubmitBlk);
|
||||
uint32_t dataSize = totalDataSize / alloced;
|
||||
assert(dataSize * alloced == totalDataSize);
|
||||
STableDataBlocks* pBlock = NULL;
|
||||
|
||||
if (alloced == binded) {
|
||||
totalDataSize += dataSize + sizeof(SSubmitBlk);
|
||||
if (totalDataSize > pBlock->nAllocSize) {
|
||||
const double factor = 1.5;
|
||||
void* tmp = realloc(pBlock->pData, (uint32_t)(totalDataSize * factor));
|
||||
if (tmp == NULL) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
pBlock->pData = (char*)tmp;
|
||||
pBlock->nAllocSize = (uint32_t)(totalDataSize * factor);
|
||||
}
|
||||
int32_t ret =
|
||||
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
|
||||
pTableMeta->tableInfo.rowSize, pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
|
||||
if (ret != 0) {
|
||||
// todo handle error
|
||||
}
|
||||
|
||||
uint32_t totalDataSize = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
|
||||
if (totalDataSize > pBlock->nAllocSize) {
|
||||
const double factor = 1.5;
|
||||
|
||||
void* tmp = realloc(pBlock->pData, (uint32_t)(totalDataSize * factor));
|
||||
if (tmp == NULL) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
char* data = pBlock->pData + sizeof(SSubmitBlk) + dataSize * binded;
|
||||
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
|
||||
SParamInfo* param = pBlock->params + j;
|
||||
int code = doBindParam(data, param, bind + param->idx);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscDebug("param %d: type mismatch or invalid", param->idx);
|
||||
return code;
|
||||
}
|
||||
pBlock->pData = (char*)tmp;
|
||||
pBlock->nAllocSize = (uint32_t)(totalDataSize * factor);
|
||||
}
|
||||
|
||||
char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * pCmd->batchSize;
|
||||
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
|
||||
SParamInfo* param = &pBlock->params[j];
|
||||
|
||||
int code = doBindParam(data, param, &bind[param->idx]);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscDebug("param %d: type mismatch or invalid", param->idx);
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
// actual work of all data blocks is done, update block size and numOfRows.
|
||||
// note we don't do this block by block during the binding process, because
|
||||
// we cannot recover if something goes wrong.
|
||||
pCmd->batchSize = binded * 2 + 1;
|
||||
|
||||
if (binded < alloced) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
size_t total = taosArrayGetSize(pCmd->pDataBlocks);
|
||||
for (int32_t i = 0; i < total; ++i) {
|
||||
STableDataBlocks* pBlock = taosArrayGetP(pCmd->pDataBlocks, i);
|
||||
|
||||
uint32_t totalDataSize = pBlock->size - sizeof(SSubmitBlk);
|
||||
pBlock->size += totalDataSize / alloced;
|
||||
|
||||
SSubmitBlk* pSubmit = (SSubmitBlk*)pBlock->pData;
|
||||
pSubmit->numOfRows += pSubmit->numOfRows / alloced;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int insertStmtAddBatch(STscStmt* stmt) {
|
||||
SSqlCmd* pCmd = &stmt->pSql->cmd;
|
||||
if ((pCmd->batchSize % 2) == 1) {
|
||||
++pCmd->batchSize;
|
||||
}
|
||||
++pCmd->batchSize;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -793,50 +773,66 @@ static int insertStmtExecute(STscStmt* stmt) {
|
|||
if (pCmd->batchSize == 0) {
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
|
||||
if ((pCmd->batchSize % 2) == 1) {
|
||||
++pCmd->batchSize;
|
||||
}
|
||||
|
||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
|
||||
assert(pCmd->numOfClause == 1);
|
||||
|
||||
if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) {
|
||||
// merge according to vgid
|
||||
int code = tscMergeTableDataBlocks(stmt->pSql);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
STableDataBlocks *pDataBlock = taosArrayGetP(pCmd->pDataBlocks, 0);
|
||||
code = tscCopyDataBlockToPayload(stmt->pSql, pDataBlock);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
// set the next sent data vnode index in data block arraylist
|
||||
pTableMetaInfo->vgroupIndex = 1;
|
||||
} else {
|
||||
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
|
||||
if (taosHashGetSize(pCmd->pTableBlockHashList) == 0) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSqlObj *pSql = stmt->pSql;
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
pRes->numOfRows = 0;
|
||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
|
||||
|
||||
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
if (pCmd->pTableBlockHashList == NULL) {
|
||||
pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
|
||||
}
|
||||
|
||||
STableDataBlocks* pBlock = NULL;
|
||||
|
||||
int32_t ret =
|
||||
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
|
||||
pTableMeta->tableInfo.rowSize, pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
|
||||
assert(ret == 0);
|
||||
pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
|
||||
SSubmitBlk* pBlk = (SSubmitBlk*) pBlock->pData;
|
||||
pBlk->numOfRows = pCmd->batchSize;
|
||||
pBlk->dataLen = 0;
|
||||
pBlk->uid = pTableMeta->id.uid;
|
||||
pBlk->tid = pTableMeta->id.tid;
|
||||
|
||||
int code = tscMergeTableDataBlocks(stmt->pSql, false);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
STableDataBlocks* pDataBlock = taosArrayGetP(pCmd->pDataBlocks, 0);
|
||||
code = tscCopyDataBlockToPayload(stmt->pSql, pDataBlock);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
SSqlObj* pSql = stmt->pSql;
|
||||
SSqlRes* pRes = &pSql->res;
|
||||
pRes->numOfRows = 0;
|
||||
pRes->numOfTotal = 0;
|
||||
pRes->numOfClauseTotal = 0;
|
||||
|
||||
pRes->qhandle = 0;
|
||||
|
||||
pSql->cmd.insertType = 0;
|
||||
pSql->fetchFp = waitForQueryRsp;
|
||||
pSql->fp = (void(*)())tscHandleMultivnodeInsert;
|
||||
|
||||
tscDoQuery(pSql);
|
||||
tscProcessSql(pSql);
|
||||
|
||||
// wait for the callback function to post the semaphore
|
||||
tsem_wait(&pSql->rspSem);
|
||||
return pSql->res.code;
|
||||
|
||||
// data block reset
|
||||
pCmd->batchSize = 0;
|
||||
for(int32_t i = 0; i < pCmd->numOfTables; ++i) {
|
||||
if (pCmd->pTableNameList && pCmd->pTableNameList[i]) {
|
||||
tfree(pCmd->pTableNameList[i]);
|
||||
}
|
||||
}
|
||||
|
||||
pCmd->numOfTables = 0;
|
||||
tfree(pCmd->pTableNameList);
|
||||
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
|
||||
|
||||
return pSql->res.code;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -867,11 +863,11 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
|
|||
}
|
||||
|
||||
tsem_init(&pSql->rspSem, 0, 0);
|
||||
pSql->signature = pSql;
|
||||
pSql->pTscObj = pObj;
|
||||
pSql->maxRetry = TSDB_MAX_REPLICA;
|
||||
pSql->signature = pSql;
|
||||
pSql->pTscObj = pObj;
|
||||
pSql->maxRetry = TSDB_MAX_REPLICA;
|
||||
pStmt->pSql = pSql;
|
||||
|
||||
pStmt->pSql = pSql;
|
||||
return pStmt;
|
||||
}
|
||||
|
||||
|
@ -890,7 +886,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
|
|||
SSqlRes *pRes = &pSql->res;
|
||||
pSql->param = (void*) pSql;
|
||||
pSql->fp = waitForQueryRsp;
|
||||
pSql->cmd.insertType = TSDB_QUERY_TYPE_STMT_INSERT;
|
||||
pSql->fetchFp = waitForQueryRsp;
|
||||
|
||||
pCmd->insertType = TSDB_QUERY_TYPE_STMT_INSERT;
|
||||
|
||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
|
||||
tscError("%p failed to malloc payload buffer", pSql);
|
||||
|
@ -956,8 +954,9 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
|
|||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
if (pStmt->isInsert) {
|
||||
return insertStmtBindParam(pStmt, bind);
|
||||
} else {
|
||||
return normalStmtBindParam(pStmt, bind);
|
||||
}
|
||||
return normalStmtBindParam(pStmt, bind);
|
||||
}
|
||||
|
||||
int taos_stmt_add_batch(TAOS_STMT* stmt) {
|
||||
|
@ -981,7 +980,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) {
|
|||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
if (pStmt->isInsert) {
|
||||
ret = insertStmtExecute(pStmt);
|
||||
} else {
|
||||
} else { // normal stmt query
|
||||
char* sql = normalStmtBuildSql(pStmt);
|
||||
if (sql == NULL) {
|
||||
ret = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
|
@ -995,6 +994,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) {
|
|||
free(sql);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -229,7 +229,7 @@ static int32_t handlePassword(SSqlCmd* pCmd, SStrToken* pPwd) {
|
|||
}
|
||||
|
||||
int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
||||
if (pInfo == NULL || pSql == NULL || pSql->signature != pSql) {
|
||||
if (pInfo == NULL || pSql == NULL) {
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
}
|
||||
|
||||
|
@@ -264,6 +264,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
    case TSDB_SQL_DROP_DB: {
      const char* msg2 = "invalid name";
      const char* msg3 = "param name too long";
      const char* msg4 = "table is not super table";

      SStrToken* pzName = &pInfo->pDCLInfo->a[0];
      if ((pInfo->type != TSDB_SQL_DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) {

@@ -285,6 +286,18 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
        if(code != TSDB_CODE_SUCCESS) {
          return code;
        }

        if (pInfo->pDCLInfo->tableType == TSDB_SUPER_TABLE) {
          code = tscGetTableMeta(pSql, pTableMetaInfo);
          if (code != TSDB_CODE_SUCCESS) {
            return code;
          }

          if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
            return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
          }
        }

      } else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
        pzName->n = strdequote(pzName->z);
        strncpy(pTableMetaInfo->name, pzName->z, pzName->n);

@@ -642,7 +655,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
  }

  pSql->cmd.parseFinished = 1;
  return tscBuildMsg[pCmd->command](pSql, pInfo);
  if (tscBuildMsg[pCmd->command] != NULL) {
    return tscBuildMsg[pCmd->command](pSql, pInfo);
  } else {
    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "not support sql expression");
  }
}

/*

@ -1738,7 +1755,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
|
|||
return -1;
|
||||
} else {
|
||||
type = TSDB_DATA_TYPE_DOUBLE;
|
||||
bytes = tDataTypeDesc[type].nSize;
|
||||
bytes = tDataTypes[type].bytes;
|
||||
}
|
||||
} else {
|
||||
type = pSchema->type;
|
||||
|
@ -1844,7 +1861,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
}
|
||||
|
||||
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
|
||||
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
|
||||
int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false);
|
||||
} else if (sqlOptr == TK_INTEGER) { // select count(1) from table1
|
||||
char buf[8] = {0};
|
||||
|
@ -1856,7 +1873,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
}
|
||||
if (val == 1) {
|
||||
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
|
||||
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
|
||||
int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false);
|
||||
} else {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
|
@ -1876,12 +1893,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
isTag = true;
|
||||
}
|
||||
|
||||
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
|
||||
int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, isTag);
|
||||
}
|
||||
} else { // count(*) is equalled to count(primary_timestamp_key)
|
||||
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
|
||||
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
|
||||
int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false);
|
||||
}
|
||||
|
||||
|
@ -4510,6 +4527,8 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
|
|||
}
|
||||
} else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) {
|
||||
pQueryInfo->fillType = TSDB_FILL_PREV;
|
||||
} else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) {
|
||||
pQueryInfo->fillType = TSDB_FILL_NEXT;
|
||||
} else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) {
|
||||
pQueryInfo->fillType = TSDB_FILL_LINEAR;
|
||||
} else if (strncasecmp(pItem->pVar.pz, "value", 5) == 0 && pItem->pVar.nLen == 5) {
|
||||
|
@ -4788,6 +4807,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
const char* msg17 = "invalid column name";
|
||||
const char* msg18 = "primary timestamp column cannot be dropped";
|
||||
const char* msg19 = "invalid new tag name";
|
||||
const char* msg20 = "table is not super table";
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
|
@ -4813,6 +4833,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
|
||||
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
|
||||
if (pAlterSQL->tableType == TSDB_SUPER_TABLE && !(UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg20);
|
||||
}
|
||||
|
||||
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
|
||||
pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
|
||||
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
|
||||
|
@ -4869,7 +4893,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
char name1[128] = {0};
|
||||
strncpy(name1, pItem->pVar.pz, pItem->pVar.nLen);
|
||||
|
||||
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize);
|
||||
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
|
||||
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
|
||||
SArray* pVarList = pAlterSQL->varList;
|
||||
|
@ -4905,14 +4929,14 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
|
||||
char name[TSDB_COL_NAME_LEN] = {0};
|
||||
strncpy(name, pItem->pVar.pz, pItem->pVar.nLen);
|
||||
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize);
|
||||
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
|
||||
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
|
||||
pItem = taosArrayGet(pVarList, 1);
|
||||
memset(name, 0, tListLen(name));
|
||||
|
||||
strncpy(name, pItem->pVar.pz, pItem->pVar.nLen);
|
||||
f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize);
|
||||
f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
|
||||
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) {
|
||||
// Note: update can only be applied to table not super table.
|
||||
|
@ -4987,7 +5011,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
|
||||
int32_t len = 0;
|
||||
if (pTagsSchema->type != TSDB_DATA_TYPE_BINARY && pTagsSchema->type != TSDB_DATA_TYPE_NCHAR) {
|
||||
len = tDataTypeDesc[pTagsSchema->type].nSize;
|
||||
len = tDataTypes[pTagsSchema->type].bytes;
|
||||
} else {
|
||||
len = varDataTLen(pUpdateMsg->data + schemaLen);
|
||||
}
|
||||
|
@ -5034,7 +5058,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
|
||||
char name1[TSDB_COL_NAME_LEN] = {0};
|
||||
tstrncpy(name1, pItem->pVar.pz, sizeof(name1));
|
||||
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize);
|
||||
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
|
||||
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
}
|
||||
|
||||
|
@ -5997,7 +6021,7 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
|
|||
|
||||
SColumnIndex ind = {0};
|
||||
SSqlExpr* pExpr1 = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG_DUMMY, &ind, TSDB_DATA_TYPE_INT,
|
||||
tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, getNewResColId(pQueryInfo), tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, false);
|
||||
tDataTypes[TSDB_DATA_TYPE_INT].bytes, getNewResColId(pQueryInfo), tDataTypes[TSDB_DATA_TYPE_INT].bytes, false);
|
||||
|
||||
const char* name = (pExprList->a[0].aliasName != NULL)? pExprList->a[0].aliasName:functionsInfo[index].name;
|
||||
tstrncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName));
|
||||
|
@ -6101,8 +6125,10 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) {
|
|||
int32_t tmpLen = 0;
|
||||
tmpLen =
|
||||
sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", aAggs[pExpr->functionId].aName, pExpr->uid, pExpr->colInfo.colId);
|
||||
|
||||
if (tmpLen + offset >= totalBufSize - 1) break;
|
||||
|
||||
|
||||
offset += sprintf(str + offset, "%s", tmpBuf);
|
||||
|
||||
if (i < size - 1) {
|
||||
|
@ -6112,6 +6138,7 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) {
|
|||
|
||||
assert(offset < totalBufSize);
|
||||
str[offset] = ']';
|
||||
assert(offset < totalBufSize);
|
||||
tscDebug("%p select clause:%s", pSql, str);
|
||||
}
|
||||
|
||||
|
|
|
@ -66,68 +66,6 @@ STableComInfo tscGetTableInfo(const STableMeta* pTableMeta) {
|
|||
return pTableMeta->tableInfo;
|
||||
}
|
||||
|
||||
static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen) {
|
||||
int32_t rowLen = 0;
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
// 1. valid types
|
||||
if (!isValidDataType(pSchema[i].type)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// 2. valid length for each type
|
||||
if (pSchema[i].type == TSDB_DATA_TYPE_BINARY) {
|
||||
if (pSchema[i].bytes > TSDB_MAX_BINARY_LEN) {
|
||||
return false;
|
||||
}
|
||||
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
|
||||
if (pSchema[i].bytes > TSDB_MAX_NCHAR_LEN) {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
if (pSchema[i].bytes != tDataTypeDesc[pSchema[i].type].nSize) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// 3. valid column names
|
||||
for (int32_t j = i + 1; j < numOfCols; ++j) {
|
||||
if (strncasecmp(pSchema[i].name, pSchema[j].name, sizeof(pSchema[i].name) - 1) == 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
rowLen += pSchema[i].bytes;
|
||||
}
|
||||
|
||||
return rowLen <= maxLen;
|
||||
}
|
||||
|
||||
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
|
||||
if (!VALIDNUMOFCOLS(numOfCols)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!VALIDNUMOFTAGS(numOfTags)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* first column must be the timestamp, which is a primary key */
|
||||
if (pSchema[0].type != TSDB_DATA_TYPE_TIMESTAMP) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!doValidateSchema(pSchema, numOfCols, TSDB_MAX_BYTES_PER_ROW)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!doValidateSchema(&pSchema[numOfCols], numOfTags, TSDB_MAX_TAGS_LEN)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex) {
|
||||
assert(pTableMeta != NULL);
|
||||
|
||||
|
|
|
@ -25,8 +25,6 @@
|
|||
#include "ttimer.h"
|
||||
#include "tlockfree.h"
|
||||
|
||||
///SRpcCorEpSet tscMgmtEpSet;
|
||||
|
||||
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
|
||||
|
||||
int (*tscProcessMsgRsp[TSDB_SQL_MAX])(SSqlObj *pSql);
|
||||
|
@ -341,7 +339,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
|
|||
if (pSql->retry > pSql->maxRetry) {
|
||||
tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
|
||||
} else {
|
||||
// wait for a little bit moment and then retry, todo do not sleep in rpc callback thread
|
||||
// wait for a little bit moment and then retry
|
||||
// todo do not sleep in rpc callback thread, add this process into queueu to process
|
||||
if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
|
||||
int32_t duration = getWaitingTimeInterval(pSql->retry);
|
||||
taosMsleep(duration);
|
||||
|
@ -484,9 +483,9 @@ int tscProcessSql(SSqlObj *pSql) {
|
|||
pSql->res.code = TSDB_CODE_TSC_APP_ERROR;
|
||||
return pSql->res.code;
|
||||
}
|
||||
} else if (pCmd->command < TSDB_SQL_LOCAL) {
|
||||
} else if (pCmd->command >= TSDB_SQL_LOCAL) {
|
||||
//pSql->epSet = tscMgmtEpSet;
|
||||
} else { // local handler
|
||||
// } else { // local handler
|
||||
return (*tscProcessMsgRsp[pCmd->command])(pSql);
|
||||
}
|
||||
|
||||
|
@ -1157,7 +1156,7 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
|
||||
SCMDropTableMsg *pDropTableMsg = (SCMDropTableMsg*)pCmd->payload;
|
||||
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
|
||||
strcpy(pDropTableMsg->tableId, pTableMetaInfo->name);
|
||||
strcpy(pDropTableMsg->tableFname, pTableMetaInfo->name);
|
||||
pDropTableMsg->igNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0;
|
||||
|
||||
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_TABLE;
|
||||
|
@ -1180,7 +1179,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||
int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo * UNUSED_PARAM(pInfo)) {
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
pCmd->payloadLen = sizeof(SDropUserMsg);
|
||||
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_USER;
|
||||
|
@ -1347,7 +1346,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
pMsg += sizeof(SCreateTableMsg);
|
||||
|
||||
SCreatedTableInfo* p = taosArrayGet(list, i);
|
||||
strcpy(pCreate->tableId, p->fullname);
|
||||
strcpy(pCreate->tableFname, p->fullname);
|
||||
pCreate->igExists = (p->igExist)? 1 : 0;
|
||||
|
||||
// use dbinfo from table id without modifying current db info
|
||||
|
@ -1360,7 +1359,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
} else { // create (super) table
|
||||
pCreateTableMsg->numOfTables = htonl(1); // only one table will be created
|
||||
|
||||
strcpy(pCreateMsg->tableId, pTableMetaInfo->name);
|
||||
strcpy(pCreateMsg->tableFname, pTableMetaInfo->name);
|
||||
|
||||
// use dbinfo from table id without modifying current db info
|
||||
tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pCreateMsg->db);
|
||||
|
@ -1431,7 +1430,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
SAlterTableMsg *pAlterTableMsg = (SAlterTableMsg *)pCmd->payload;
|
||||
tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pAlterTableMsg->db);
|
||||
|
||||
strcpy(pAlterTableMsg->tableId, pTableMetaInfo->name);
|
||||
strcpy(pAlterTableMsg->tableFname, pTableMetaInfo->name);
|
||||
pAlterTableMsg->type = htons(pAlterInfo->type);
|
||||
|
||||
pAlterTableMsg->numOfCols = htons(tscNumOfFields(pQueryInfo));
|
||||
|
@ -1630,7 +1629,7 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
|
||||
STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
|
||||
strcpy(pInfoMsg->tableId, pTableMetaInfo->name);
|
||||
strcpy(pInfoMsg->tableFname, pTableMetaInfo->name);
|
||||
pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0);
|
||||
|
||||
char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
|
||||
|
@ -1799,7 +1798,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
|
|||
if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) &&
|
||||
(pMetaMsg->tid <= 0 || pMetaMsg->vgroup.vgId < 2 || pMetaMsg->vgroup.numOfEps <= 0)) {
|
||||
tscError("invalid value in table numOfEps:%d, vgId:%d tid:%d, name:%s", pMetaMsg->vgroup.numOfEps, pMetaMsg->vgroup.vgId,
|
||||
pMetaMsg->tid, pMetaMsg->tableId);
|
||||
pMetaMsg->tid, pMetaMsg->tableFname);
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
|
||||
|
||||
|
@ -1831,12 +1830,16 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
|
|||
assert(isValidDataType(pSchema->type));
|
||||
pSchema++;
|
||||
}
|
||||
|
||||
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
|
||||
|
||||
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
|
||||
assert(pTableMetaInfo->pTableMeta == NULL);
|
||||
|
||||
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
|
||||
if (!isValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
|
||||
tscError("%p invalid table meta from mnode, name:%s", pSql, pTableMetaInfo->name);
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
|
||||
|
||||
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
|
||||
// check if super table hashmap or not
|
||||
int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
|
||||
|
@ -2097,7 +2100,7 @@ int tscProcessShowRsp(SSqlObj *pSql) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void createHBObj(STscObj* pObj) {
|
||||
static void createHbObj(STscObj* pObj) {
|
||||
if (pObj->hbrid != 0) {
|
||||
return;
|
||||
}
|
||||
|
@ -2160,7 +2163,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
|
|||
pObj->superAuth = pConnect->superAuth;
|
||||
pObj->connId = htonl(pConnect->connId);
|
||||
|
||||
createHBObj(pObj);
|
||||
createHbObj(pObj);
|
||||
|
||||
//launch a timer to send heartbeat to maintain the connection and send status to mnode
|
||||
taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, (void *)pObj->rid, tscTmr, &pObj->pTimer);
|
||||
|
@ -2190,7 +2193,7 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
|
|||
tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, pTableMetaInfo->name,
|
||||
(int32_t) taosHashGetSize(tscTableMetaInfo));
|
||||
|
||||
assert(pTableMetaInfo->pTableMeta == NULL);
|
||||
pTableMetaInfo->pTableMeta = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -781,6 +781,7 @@ bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {

int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
  int len = 0;

  for (int i = 0; i < num_fields; ++i) {
    if (i > 0) {
      str[len++] = ' ';

@@ -838,13 +839,15 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)

      case TSDB_DATA_TYPE_BINARY:
      case TSDB_DATA_TYPE_NCHAR: {
        size_t xlen = 0;
        for (xlen = 0; xlen < fields[i].bytes - VARSTR_HEADER_SIZE; xlen++) {
          char c = ((char *)row[i])[xlen];
          if (c == 0) break;
          str[len++] = c;
        int32_t charLen = varDataLen((char*)row[i] - VARSTR_HEADER_SIZE);
        if (fields[i].type == TSDB_DATA_TYPE_BINARY) {
          assert(charLen <= fields[i].bytes);
        } else {
          assert(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE);
        }
        str[len] = 0;

        memcpy(str + len, row[i], charLen);
        len += charLen;
      } break;

      case TSDB_DATA_TYPE_TIMESTAMP:

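The rewritten BINARY/NCHAR branch above copies `charLen` bytes taken from the value's length prefix rather than scanning for a terminating zero. As a rough illustration of that layout, the following standalone sketch (plain C, not TDengine code) models a cell as a 2-byte length header followed by the payload; the names `DEMO_VARSTR_HEADER_SIZE` and `demoVarDataLen` are stand-ins for the real `VARSTR_HEADER_SIZE`/`varDataLen` macros, whose exact definitions may differ.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the var-string cell layout: a 2-byte length header
 * followed by the payload bytes (no NUL terminator required). */
enum { DEMO_VARSTR_HEADER_SIZE = 2 };

static uint16_t demoVarDataLen(const char *cell) {
  uint16_t len;
  memcpy(&len, cell, sizeof(len));   /* read the length prefix */
  return len;
}

/* Copy a length-prefixed value into out; embedded zero bytes survive,
 * which a scan-for-'\0' loop would have truncated. */
static int copyVarCol(char *out, const char *payload, int maxBytes) {
  int charLen = demoVarDataLen(payload - DEMO_VARSTR_HEADER_SIZE);
  assert(charLen <= maxBytes);
  memcpy(out, payload, charLen);
  out[charLen] = 0;
  return charLen;
}

int main(void) {
  char cell[DEMO_VARSTR_HEADER_SIZE + 8];
  uint16_t hdr = 5;
  memcpy(cell, &hdr, sizeof(hdr));                       /* length header */
  memcpy(cell + DEMO_VARSTR_HEADER_SIZE, "ab\0cd", 5);   /* payload with an embedded zero */

  char out[16];
  int n = copyVarCol(out, cell + DEMO_VARSTR_HEADER_SIZE, 8);
  printf("copied %d bytes\n", n);                        /* prints: copied 5 bytes */
  return 0;
}
```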
@@ -55,6 +55,58 @@ static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) {
  }
}

static void subquerySetState(SSqlObj *pSql, SSubqueryState *subState, int idx, int8_t state) {
  assert(idx < subState->numOfSub);
  assert(subState->states);

  pthread_mutex_lock(&subState->mutex);

  tscDebug("subquery:%p,%d state set to %d", pSql, idx, state);

  subState->states[idx] = state;

  pthread_mutex_unlock(&subState->mutex);
}

static bool allSubqueryDone(SSqlObj *pParentSql) {
  bool done = true;
  SSubqueryState *subState = &pParentSql->subState;

  //lock in caller

  for (int i = 0; i < subState->numOfSub; i++) {
    if (0 == subState->states[i]) {
      tscDebug("%p subquery:%p,%d is NOT finished, total:%d", pParentSql, pParentSql->pSubs[i], i, subState->numOfSub);
      done = false;
      break;
    } else {
      tscDebug("%p subquery:%p,%d is finished, total:%d", pParentSql, pParentSql->pSubs[i], i, subState->numOfSub);
    }
  }

  return done;
}

static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
  SSubqueryState *subState = &pParentSql->subState;

  assert(idx < subState->numOfSub);

  pthread_mutex_lock(&subState->mutex);

  tscDebug("%p subquery:%p,%d state set to 1", pParentSql, pSql, idx);

  subState->states[idx] = 1;

  bool done = allSubqueryDone(pParentSql);

  pthread_mutex_unlock(&subState->mutex);

  return done;
}


static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJoinSupporter* pSupporter2, STimeWindow * win) {
  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);

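The three helpers above replace the old atomic `numOfRemain` countdown with a mutex-protected per-subquery state array: each callback marks its own slot done, and only the caller that completes the last slot proceeds to merge. In isolation the pattern looks like this minimal, self-contained sketch (plain C with pthreads, not TDengine code):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  pthread_mutex_t mutex;
  int             numOfSub;
  int8_t         *states;   /* 0 = running, 1 = done */
} SubState;

/* Mark slot idx done; return true only for the call that completes the last slot. */
static bool subAndCheckDoneDemo(SubState *st, int idx) {
  pthread_mutex_lock(&st->mutex);
  st->states[idx] = 1;
  bool done = true;
  for (int i = 0; i < st->numOfSub; ++i) {
    if (st->states[i] == 0) { done = false; break; }
  }
  pthread_mutex_unlock(&st->mutex);
  return done;
}

static SubState g_state;

static void *worker(void *arg) {
  int idx = (int)(long)arg;
  if (subAndCheckDoneDemo(&g_state, idx)) {
    printf("worker %d finished last, merging results\n", idx);
  }
  return NULL;
}

int main(void) {
  int n = 4;
  g_state.numOfSub = n;
  g_state.states = calloc(n, sizeof(int8_t));
  pthread_mutex_init(&g_state.mutex, NULL);

  pthread_t tid[4];
  for (int i = 0; i < n; ++i) pthread_create(&tid[i], NULL, worker, (void *)(long)i);
  for (int i = 0; i < n; ++i) pthread_join(tid[i], NULL);

  pthread_mutex_destroy(&g_state.mutex);
  free(g_state.states);
  return 0;
}
```

Compared with an atomic remaining-counter, the state array also lets individual subqueries be reset to 0 and re-issued (as the retry paths in this commit do) without losing track of the others.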
@ -367,10 +419,6 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
|
|||
// scan all subquery, if one sub query has only ts, ignore it
|
||||
tscDebug("%p start to launch secondary subqueries, %d out of %d needs to query", pSql, numOfSub, pSql->subState.numOfSub);
|
||||
|
||||
//the subqueries that do not actually launch the secondary query to virtual node is set as completed.
|
||||
SSubqueryState* pState = &pSql->subState;
|
||||
pState->numOfRemain = numOfSub;
|
||||
|
||||
bool success = true;
|
||||
|
||||
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
|
||||
|
@ -403,6 +451,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
|
|||
success = false;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
tscClearSubqueryInfo(&pNew->cmd);
|
||||
pSql->pSubs[i] = pNew;
|
||||
|
@ -480,6 +529,8 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
|
|||
}
|
||||
}
|
||||
|
||||
subquerySetState(pPrevSub, &pSql->subState, i, 0);
|
||||
|
||||
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
|
||||
tscDebug("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s",
|
||||
pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pQueryInfo->type, taosArrayGetSize(pQueryInfo->exprList),
|
||||
|
@ -517,20 +568,25 @@ void freeJoinSubqueryObj(SSqlObj* pSql) {
|
|||
SJoinSupporter* p = pSub->param;
|
||||
tscDestroyJoinSupporter(p);
|
||||
|
||||
if (pSub->res.code == TSDB_CODE_SUCCESS) {
|
||||
taos_free_result(pSub);
|
||||
}
|
||||
taos_free_result(pSub);
|
||||
pSql->pSubs[i] = NULL;
|
||||
}
|
||||
|
||||
if (pSql->subState.states) {
|
||||
pthread_mutex_destroy(&pSql->subState.mutex);
|
||||
}
|
||||
|
||||
tfree(pSql->subState.states);
|
||||
|
||||
|
||||
pSql->subState.numOfSub = 0;
|
||||
}
|
||||
|
||||
static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
|
||||
assert(pSqlObj->subState.numOfRemain > 0);
|
||||
|
||||
if (atomic_sub_fetch_32(&pSqlObj->subState.numOfRemain, 1) <= 0) {
|
||||
tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
|
||||
static void quitAllSubquery(SSqlObj* pSqlSub, SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
|
||||
if (subAndCheckDone(pSqlSub, pSqlObj, pSupporter->subqueryIndex)) {
|
||||
tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
|
||||
freeJoinSubqueryObj(pSqlObj);
|
||||
return;
|
||||
}
|
||||
|
||||
//tscDestroyJoinSupporter(pSupporter);
|
||||
|
@ -777,6 +833,15 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY));
|
||||
|
||||
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
|
||||
quitAllSubquery(pSql, pParentSql, pSupporter);
|
||||
|
||||
tscAsyncResultOnError(pParentSql);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// check for the error code firstly
|
||||
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
|
||||
// todo retry if other subqueries are not failed
|
||||
|
@ -785,7 +850,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
|
||||
|
||||
pParentSql->res.code = numOfRows;
|
||||
quitAllSubquery(pParentSql, pSupporter);
|
||||
quitAllSubquery(pSql, pParentSql, pSupporter);
|
||||
|
||||
tscAsyncResultOnError(pParentSql);
|
||||
return;
|
||||
|
@ -802,7 +867,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
tscError("%p failed to malloc memory", pSql);
|
||||
|
||||
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
||||
quitAllSubquery(pParentSql, pSupporter);
|
||||
quitAllSubquery(pSql, pParentSql, pSupporter);
|
||||
|
||||
tscAsyncResultOnError(pParentSql);
|
||||
return;
|
||||
|
@ -844,9 +909,10 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
|
||||
// no data exists in next vnode, mark the <tid, tags> query completed
|
||||
// only when there is no subquery exits any more, proceeds to get the intersect of the <tid, tags> tuple sets.
|
||||
if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
|
||||
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
|
||||
tscDebug("%p tagRetrieve:%p,%d completed, total:%d", pParentSql, tres, pSupporter->subqueryIndex, pParentSql->subState.numOfSub);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
SArray *s1 = NULL, *s2 = NULL;
|
||||
int32_t code = getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2);
|
||||
|
@ -891,8 +957,10 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoClone(pTableMetaInfo2->pVgroupTables);
|
||||
|
||||
pParentSql->subState.numOfSub = 2;
|
||||
pParentSql->subState.numOfRemain = pParentSql->subState.numOfSub;
|
||||
|
||||
|
||||
memset(pParentSql->subState.states, 0, sizeof(pParentSql->subState.states[0]) * pParentSql->subState.numOfSub);
|
||||
tscDebug("%p reset all sub states to 0", pParentSql);
|
||||
|
||||
for (int32_t m = 0; m < pParentSql->subState.numOfSub; ++m) {
|
||||
SSqlObj* sub = pParentSql->pSubs[m];
|
||||
issueTSCompQuery(sub, sub->param, pParentSql);
|
||||
|
@ -915,6 +983,15 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
assert(!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE));
|
||||
|
||||
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
|
||||
quitAllSubquery(pSql, pParentSql, pSupporter);
|
||||
|
||||
tscAsyncResultOnError(pParentSql);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// check for the error code firstly
|
||||
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
|
||||
// todo retry if other subqueries are not failed yet
|
||||
|
@ -922,7 +999,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
|
||||
|
||||
pParentSql->res.code = numOfRows;
|
||||
quitAllSubquery(pParentSql, pSupporter);
|
||||
quitAllSubquery(pSql, pParentSql, pSupporter);
|
||||
|
||||
tscAsyncResultOnError(pParentSql);
|
||||
return;
|
||||
|
@ -937,7 +1014,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
|
||||
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
||||
|
||||
quitAllSubquery(pParentSql, pSupporter);
|
||||
quitAllSubquery(pSql, pParentSql, pSupporter);
|
||||
|
||||
tscAsyncResultOnError(pParentSql);
|
||||
|
||||
|
@ -955,7 +1032,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
|
||||
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
||||
|
||||
quitAllSubquery(pParentSql, pSupporter);
|
||||
quitAllSubquery(pSql, pParentSql, pSupporter);
|
||||
|
||||
tscAsyncResultOnError(pParentSql);
|
||||
|
||||
|
@ -1009,9 +1086,9 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
return;
|
||||
}
|
||||
|
||||
if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
|
||||
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
tscDebug("%p all subquery retrieve ts complete, do ts block intersect", pParentSql);
|
||||
|
||||
|
@ -1049,6 +1126,17 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
|
|||
SSqlRes* pRes = &pSql->res;
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
|
||||
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
|
||||
quitAllSubquery(pSql, pParentSql, pSupporter);
|
||||
|
||||
tscAsyncResultOnError(pParentSql);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
|
||||
assert(numOfRows == taos_errno(pSql));
|
||||
|
||||
|
@ -1088,9 +1176,8 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
|
|||
}
|
||||
}
|
||||
|
||||
assert(pState->numOfRemain > 0);
|
||||
if (atomic_sub_fetch_32(&pState->numOfRemain, 1) > 0) {
|
||||
tscDebug("%p sub:%p completed, remain:%d, total:%d", pParentSql, tres, pState->numOfRemain, pState->numOfSub);
|
||||
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
|
||||
tscDebug("%p sub:%p,%d completed, total:%d", pParentSql, tres, pSupporter->subqueryIndex, pState->numOfSub);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1205,15 +1292,16 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
|
|||
}
|
||||
}
|
||||
|
||||
// get the number of subquery that need to retrieve the next vnode.
|
||||
|
||||
if (orderedPrjQuery) {
|
||||
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
|
||||
SSqlObj* pSub = pSql->pSubs[i];
|
||||
if (pSub != NULL && pSub->res.row >= pSub->res.numOfRows && pSub->res.completed) {
|
||||
pSql->subState.numOfRemain++;
|
||||
subquerySetState(pSub, &pSql->subState, i, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
|
||||
SSqlObj* pSub = pSql->pSubs[i];
|
||||
|
@ -1270,7 +1358,19 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
|
|||
// retrieve data from current vnode.
|
||||
tscDebug("%p retrieve data from %d subqueries", pSql, numOfFetch);
|
||||
SJoinSupporter* pSupporter = NULL;
|
||||
pSql->subState.numOfRemain = numOfFetch;
|
||||
|
||||
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
|
||||
SSqlObj* pSql1 = pSql->pSubs[i];
|
||||
if (pSql1 == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
SSqlRes* pRes1 = &pSql1->res;
|
||||
|
||||
if (pRes1->row >= pRes1->numOfRows) {
|
||||
subquerySetState(pSql1, &pSql->subState, i, 0);
|
||||
}
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
|
||||
SSqlObj* pSql1 = pSql->pSubs[i];
|
||||
|
@ -1372,14 +1472,8 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
|
|||
// retrieve actual query results from vnode during the second stage join subquery
|
||||
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, code, pParentSql->res.code);
|
||||
quitAllSubquery(pSql, pParentSql, pSupporter);
|
||||
|
||||
if (!(pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0))) {
|
||||
if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
quitAllSubquery(pParentSql, pSupporter);
|
||||
tscAsyncResultOnError(pParentSql);
|
||||
|
||||
return;
|
||||
|
@ -1392,13 +1486,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
|
|||
tscError("%p abort query, code:%s, global code:%s", pSql, tstrerror(code), tstrerror(pParentSql->res.code));
|
||||
pParentSql->res.code = code;
|
||||
|
||||
if (!(pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0))) {
|
||||
if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
quitAllSubquery(pParentSql, pSupporter);
|
||||
quitAllSubquery(pSql, pParentSql, pSupporter);
|
||||
tscAsyncResultOnError(pParentSql);
|
||||
|
||||
return;
|
||||
|
@ -1422,9 +1510,9 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
|
|||
|
||||
// In case of consequence query from other vnode, do not wait for other query response here.
|
||||
if (!(pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0))) {
|
||||
if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
|
||||
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tscSetupOutputColumnIndex(pParentSql);
|
||||
|
@ -1436,6 +1524,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
|
|||
if (pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
|
||||
pSql->fp = joinRetrieveFinalResCallback; // continue retrieve data
|
||||
pSql->cmd.command = TSDB_SQL_FETCH;
|
||||
|
||||
tscProcessSql(pSql);
|
||||
} else { // first retrieve from vnode during the secondary stage sub-query
|
||||
// set the command flag must be after the semaphore been correctly set.
|
||||
|
@ -1471,8 +1560,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
|
|||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
pSql->pSubs[pSql->subState.numOfRemain++] = pNew;
|
||||
assert(pSql->subState.numOfRemain <= pSql->subState.numOfSub);
|
||||
pSql->pSubs[tableIndex] = pNew;
|
||||
|
||||
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
|
||||
addGroupInfoForSubquery(pSql, pNew, 0, tableIndex);
|
||||
|
@ -1604,6 +1692,19 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
|
|||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
pSql->subState.numOfSub = pQueryInfo->numOfTables;
|
||||
|
||||
if (pSql->subState.states == NULL) {
|
||||
pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(*pSql->subState.states));
|
||||
if (pSql->subState.states == NULL) {
|
||||
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
pthread_mutex_init(&pSql->subState.mutex, NULL);
|
||||
}
|
||||
|
||||
memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub);
|
||||
tscDebug("%p reset all sub states to 0", pSql);
|
||||
|
||||
bool hasEmptySub = false;
|
||||
|
||||
tscDebug("%p start subquery, total:%d", pSql, pQueryInfo->numOfTables);
|
||||
|
@ -1636,12 +1737,23 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
|
|||
pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
|
||||
(*pSql->fp)(pSql->param, pSql, 0);
|
||||
} else {
|
||||
int fail = 0;
|
||||
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
|
||||
SSqlObj* pSub = pSql->pSubs[i];
|
||||
if ((code = tscProcessSql(pSub)) != TSDB_CODE_SUCCESS) {
|
||||
pSql->subState.numOfRemain = i - 1; // the already sent request will continue and do not go to the error process routine
|
||||
break;
|
||||
if (fail) {
|
||||
(*pSub->fp)(pSub->param, pSub, 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
if ((code = tscProcessSql(pSub)) != TSDB_CODE_SUCCESS) {
|
||||
pRes->code = code;
|
||||
(*pSub->fp)(pSub->param, pSub, 0);
|
||||
fail = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if(fail) {
|
||||
return;
|
||||
}
|
||||
|
||||
pSql->cmd.command = TSDB_SQL_TABLE_JOIN_RETRIEVE;
|
||||
|
@ -1742,7 +1854,21 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
|
|||
return ret;
|
||||
}
|
||||
|
||||
pState->numOfRemain = pState->numOfSub;
|
||||
if (pState->states == NULL) {
|
||||
pState->states = calloc(pState->numOfSub, sizeof(*pState->states));
|
||||
if (pState->states == NULL) {
|
||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
tscAsyncResultOnError(pSql);
|
||||
tfree(pMemoryBuf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pthread_mutex_init(&pState->mutex, NULL);
|
||||
}
|
||||
|
||||
memset(pState->states, 0, sizeof(*pState->states) * pState->numOfSub);
|
||||
tscDebug("%p reset all sub states to 0", pSql);
|
||||
|
||||
pRes->code = TSDB_CODE_SUCCESS;
|
||||
|
||||
int32_t i = 0;
|
||||
|
@ -1891,7 +2017,6 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
|
|||
assert(pSql != NULL);
|
||||
|
||||
SSubqueryState* pState = &pParentSql->subState;
|
||||
assert(pState->numOfRemain <= pState->numOfSub && pState->numOfRemain >= 0);
|
||||
|
||||
// retrieved in subquery failed. OR query cancelled in retrieve phase.
|
||||
if (taos_errno(pSql) == TSDB_CODE_SUCCESS && pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -1922,14 +2047,12 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
|
|||
}
|
||||
}
|
||||
|
||||
int32_t remain = -1;
|
||||
if ((remain = atomic_sub_fetch_32(&pState->numOfRemain, 1)) > 0) {
|
||||
tscDebug("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pParentSql, pSql, trsupport->subqueryIndex,
|
||||
pState->numOfSub - remain);
|
||||
if (!subAndCheckDone(pSql, pParentSql, subqueryIndex)) {
|
||||
tscDebug("%p sub:%p,%d freed, not finished, total:%d", pParentSql, pSql, trsupport->subqueryIndex, pState->numOfSub);
|
||||
|
||||
tscFreeRetrieveSup(pSql);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// all subqueries are failed
|
||||
tscError("%p retrieve from %d vnode(s) completed,code:%s.FAILED.", pParentSql, pState->numOfSub,
|
||||
|
@ -1994,14 +2117,12 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
|
|||
return;
|
||||
}
|
||||
|
||||
int32_t remain = -1;
|
||||
if ((remain = atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1)) > 0) {
|
||||
tscDebug("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pParentSql, pSql, trsupport->subqueryIndex,
|
||||
pState->numOfSub - remain);
|
||||
if (!subAndCheckDone(pSql, pParentSql, idx)) {
|
||||
tscDebug("%p sub:%p orderOfSub:%d freed, not finished", pParentSql, pSql, trsupport->subqueryIndex);
|
||||
|
||||
tscFreeRetrieveSup(pSql);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// all sub-queries are returned, start to local merge process
|
||||
pDesc->pColumnModel->capacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage;
|
||||
|
@ -2047,7 +2168,6 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
|
|||
SSqlObj * pParentSql = trsupport->pParentSql;
|
||||
|
||||
SSubqueryState* pState = &pParentSql->subState;
|
||||
assert(pState->numOfRemain <= pState->numOfSub && pState->numOfRemain >= 0);
|
||||
|
||||
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
|
||||
SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
|
||||
|
@ -2268,7 +2388,8 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
|
|||
}
|
||||
}
|
||||
|
||||
if (atomic_sub_fetch_32(&pParentObj->subState.numOfRemain, 1) > 0) {
|
||||
if (!subAndCheckDone(tres, pParentObj, pSupporter->index)) {
|
||||
tscDebug("%p insert:%p,%d completed, total:%d", pParentObj, tres, pSupporter->index, pParentObj->subState.numOfSub);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2302,6 +2423,8 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
|
|||
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, pSql->cmd.clauseIndex, 0);
|
||||
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
|
||||
|
||||
subquerySetState(pSql, &pParentObj->subState, i, 0);
|
||||
|
||||
tscDebug("%p, failed sub:%d, %p", pParentObj, i, pSql);
|
||||
}
|
||||
}
|
||||
|
@ -2316,7 +2439,6 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
|
|||
}
|
||||
|
||||
pParentObj->cmd.parseFinished = false;
|
||||
pParentObj->subState.numOfRemain = numOfFailed;
|
||||
|
||||
tscResetSqlCmdObj(&pParentObj->cmd);
|
||||
|
||||
|
@ -2392,7 +2514,19 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
|
|||
// the number of already initialized subqueries
|
||||
int32_t numOfSub = 0;
|
||||
|
||||
pSql->subState.numOfRemain = pSql->subState.numOfSub;
|
||||
if (pSql->subState.states == NULL) {
|
||||
pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(*pSql->subState.states));
|
||||
if (pSql->subState.states == NULL) {
|
||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
pthread_mutex_init(&pSql->subState.mutex, NULL);
|
||||
}
|
||||
|
||||
memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub);
|
||||
tscDebug("%p reset all sub states to 0", pSql);
|
||||
|
||||
pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES);
|
||||
if (pSql->pSubs == NULL) {
|
||||
goto _error;
|
||||
|
|
|
@ -441,6 +441,12 @@ static void tscFreeSubobj(SSqlObj* pSql) {
|
|||
pSql->pSubs[i] = NULL;
|
||||
}
|
||||
|
||||
if (pSql->subState.states) {
|
||||
pthread_mutex_destroy(&pSql->subState.mutex);
|
||||
}
|
||||
|
||||
tfree(pSql->subState.states);
|
||||
|
||||
pSql->subState.numOfSub = 0;
|
||||
}
|
||||
|
||||
|
@ -603,6 +609,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
|
|||
assert(pCmd->numOfClause == 1);
|
||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
|
||||
|
||||
// todo refactor
|
||||
// set the correct table meta object, the table meta has been locked in pDataBlocks, so it must be in the cache
|
||||
if (pTableMetaInfo->pTableMeta != pDataBlock->pTableMeta) {
|
||||
tstrncpy(pTableMetaInfo->name, pDataBlock->tableName, sizeof(pTableMetaInfo->name));
|
||||
|
@ -689,7 +696,6 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
|
|||
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta,
|
||||
STableDataBlocks** dataBlocks, SArray* pBlockList) {
|
||||
*dataBlocks = NULL;
|
||||
|
||||
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pHashList, (const char*)&id, sizeof(id));
|
||||
if (t1 != NULL) {
|
||||
*dataBlocks = *t1;
|
||||
|
@ -785,9 +791,13 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) {
|
|||
return result;
|
||||
}
|
||||
|
||||
static void extractTableNameList(SSqlCmd* pCmd) {
|
||||
static void extractTableNameList(SSqlCmd* pCmd, bool freeBlockMap) {
|
||||
pCmd->numOfTables = (int32_t) taosHashGetSize(pCmd->pTableBlockHashList);
|
||||
pCmd->pTableNameList = calloc(pCmd->numOfTables, POINTER_BYTES);
|
||||
if (pCmd->pTableNameList == NULL) {
|
||||
pCmd->pTableNameList = calloc(pCmd->numOfTables, POINTER_BYTES);
|
||||
} else {
|
||||
memset(pCmd->pTableNameList, 0, pCmd->numOfTables * POINTER_BYTES);
|
||||
}
|
||||
|
||||
STableDataBlocks **p1 = taosHashIterate(pCmd->pTableBlockHashList, NULL);
|
||||
int32_t i = 0;
|
||||
|
@ -797,10 +807,12 @@ static void extractTableNameList(SSqlCmd* pCmd) {
|
|||
p1 = taosHashIterate(pCmd->pTableBlockHashList, p1);
|
||||
}
|
||||
|
||||
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList);
|
||||
if (freeBlockMap) {
|
||||
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
|
||||
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
|
||||
const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
||||
|
@ -880,7 +892,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
|
|||
pOneTableBlock = *p;
|
||||
}
|
||||
|
||||
extractTableNameList(pCmd);
|
||||
extractTableNameList(pCmd, freeBlockMap);
|
||||
|
||||
// free the table data blocks;
|
||||
pCmd->pDataBlocks = pVnodeDataBlockList;
|
||||
|
@ -1915,7 +1927,7 @@ void registerSqlObj(SSqlObj* pSql) {
|
|||
tscDebug("%p new SqlObj from %p, total in tscObj:%d, total:%d", pSql, pSql->pTscObj, num, total);
|
||||
}
|
||||
|
||||
SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cmd) {
|
||||
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd) {
|
||||
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
|
||||
if (pNew == NULL) {
|
||||
tscError("%p new subquery failed, tableIndex:%d", pSql, 0);
|
||||
|
@ -2000,7 +2012,7 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pNewQueryInfo, int64_t ui
|
|||
tscFieldInfoUpdateOffset(pNewQueryInfo);
|
||||
}
|
||||
|
||||
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, int32_t cmd, SSqlObj* pPrevSql) {
|
||||
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t fp, void* param, int32_t cmd, SSqlObj* pPrevSql) {
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
||||
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
|
||||
|
@ -2196,7 +2208,7 @@ void tscDoQuery(SSqlObj* pSql) {
|
|||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
uint16_t type = pQueryInfo->type;
|
||||
|
||||
if (pSql->fp == (void(*)())tscHandleMultivnodeInsert) { // multi-vnodes insertion
|
||||
if (TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_INSERT)) { // multi-vnodes insertion
|
||||
tscHandleMultivnodeInsert(pSql);
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -2,15 +2,117 @@
|
|||
#include <iostream>
|
||||
|
||||
#include "taos.h"
|
||||
#include "tglobal.h"
|
||||
|
||||
namespace {
|
||||
static int64_t start_ts = 1433955661000;
|
||||
}
|
||||
/* test parse time function */
|
||||
TEST(testCase, result_field_test) {
|
||||
taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
|
||||
taos_init();
|
||||
|
||||
void stmtInsertTest() {
|
||||
TAOS* conn = taos_connect("ubuntu", "root", "taosdata", 0, 0);
|
||||
if (conn == NULL) {
|
||||
printf("Failed to connect to DB, reason:%s", taos_errstr(conn));
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
TAOS_RES* res = taos_query(conn, "use test");
|
||||
taos_free_result(res);
|
||||
|
||||
const char* sql = "insert into t1 values(?, ?, ?, ?)";
|
||||
TAOS_STMT* stmt = taos_stmt_init(conn);
|
||||
|
||||
int32_t ret = taos_stmt_prepare(stmt, sql, 0);
|
||||
ASSERT_EQ(ret, 0);
|
||||
|
||||
//ts timestamp, k int, a binary(11), b nchar(4)
|
||||
struct {
|
||||
int64_t ts;
|
||||
int k;
|
||||
char* a;
|
||||
char* b;
|
||||
} v = {0};
|
||||
|
||||
TAOS_BIND params[4];
|
||||
params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
|
||||
params[0].buffer_length = sizeof(v.ts);
|
||||
params[0].buffer = &v.ts;
|
||||
params[0].length = ¶ms[0].buffer_length;
|
||||
params[0].is_null = NULL;
|
||||
|
||||
params[1].buffer_type = TSDB_DATA_TYPE_INT;
|
||||
params[1].buffer_length = sizeof(v.k);
|
||||
params[1].buffer = &v.k;
|
||||
params[1].length = ¶ms[1].buffer_length;
|
||||
params[1].is_null = NULL;
|
||||
|
||||
params[2].buffer_type = TSDB_DATA_TYPE_BINARY;
|
||||
params[2].buffer_length = sizeof(v.a);
|
||||
params[2].buffer = &v.a;
|
||||
params[2].is_null = NULL;
|
||||
|
||||
params[3].buffer_type = TSDB_DATA_TYPE_NCHAR;
|
||||
params[3].buffer_length = sizeof(v.b);
|
||||
params[3].buffer = &v.b;
|
||||
params[3].is_null = NULL;
|
||||
|
||||
v.ts = start_ts + 20;
|
||||
v.k = 123;
|
||||
|
||||
char* str = "abc";
|
||||
uintptr_t len = strlen(str);
|
||||
|
||||
v.a = str;
|
||||
params[2].length = &len;
|
||||
params[2].buffer_length = len;
|
||||
params[2].buffer = str;
|
||||
|
||||
char* nstr = "999";
|
||||
uintptr_t len1 = strlen(nstr);
|
||||
|
||||
v.b = nstr;
|
||||
params[3].buffer_length = len1;
|
||||
params[3].buffer = nstr;
|
||||
params[3].length = &len1;
|
||||
|
||||
taos_stmt_bind_param(stmt, params);
|
||||
taos_stmt_add_batch(stmt);
|
||||
|
||||
if (taos_stmt_execute(stmt) != 0) {
|
||||
printf("\033[31mfailed to execute insert statement.\033[0m\n");
|
||||
return;
|
||||
}
|
||||
|
||||
v.ts = start_ts + 30;
|
||||
v.k = 911;
|
||||
|
||||
str = "92";
|
||||
len = strlen(str);
|
||||
|
||||
params[2].length = &len;
|
||||
params[2].buffer_length = len;
|
||||
params[2].buffer = str;
|
||||
|
||||
nstr = "1920";
|
||||
len1 = strlen(nstr);
|
||||
|
||||
params[3].buffer_length = len1;
|
||||
params[3].buffer = nstr;
|
||||
params[3].length = &len1;
|
||||
|
||||
taos_stmt_bind_param(stmt, params);
|
||||
taos_stmt_add_batch(stmt);
|
||||
|
||||
ret = taos_stmt_execute(stmt);
|
||||
if (ret != 0) {
|
||||
printf("%p\n", ret);
|
||||
printf("\033[31mfailed to execute insert statement.\033[0m\n");
|
||||
return;
|
||||
}
|
||||
|
||||
taos_stmt_close(stmt);
|
||||
taos_close(conn);
|
||||
}
|
||||
|
||||
void validateResultFields() {
|
||||
TAOS* conn = taos_connect("ubuntu", "root", "taosdata", 0, 0);
|
||||
if (conn == NULL) {
|
||||
printf("Failed to connect to DB, reason:%s", taos_errstr(conn));
|
||||
|
@ -134,5 +236,31 @@ TEST(testCase, result_field_test) {
|
|||
ASSERT_STREQ(fields[6].name, "first(ts)");
|
||||
|
||||
taos_free_result(res);
|
||||
|
||||
// update the configure parameter, the result field name will be changed
|
||||
tsKeepOriginalColumnName = 1;
|
||||
res = taos_query(conn, "select first(ts, a, k, k, b, b, ts) from t1");
|
||||
ASSERT_EQ(taos_num_fields(res), 7);
|
||||
|
||||
fields = taos_fetch_fields(res);
|
||||
ASSERT_EQ(fields[0].bytes, 8);
|
||||
ASSERT_EQ(fields[0].type, TSDB_DATA_TYPE_TIMESTAMP);
|
||||
ASSERT_STREQ(fields[0].name, "ts");
|
||||
|
||||
ASSERT_EQ(fields[2].bytes, 4);
|
||||
ASSERT_EQ(fields[2].type, TSDB_DATA_TYPE_INT);
|
||||
ASSERT_STREQ(fields[2].name, "k");
|
||||
|
||||
taos_free_result(res);
|
||||
|
||||
taos_close(conn);
|
||||
}
|
||||
}
|
||||
/* test parse time function */
|
||||
TEST(testCase, result_field_test) {
|
||||
taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
|
||||
taos_init();
|
||||
|
||||
validateResultFields();
|
||||
stmtInsertTest();
|
||||
}
|
|
@@ -20,7 +20,7 @@
#include <string.h>

#include "talgo.h"
#include "taosdef.h"
#include "ttype.h"
#include "tutil.h"

#ifdef __cplusplus

@@ -180,7 +180,7 @@ extern int32_t tsLogKeepDays;
extern int32_t dDebugFlag;
extern int32_t vDebugFlag;
extern int32_t mDebugFlag;
extern int32_t cDebugFlag;
extern uint32_t cDebugFlag;
extern int32_t jniDebugFlag;
extern int32_t tmrDebugFlag;
extern int32_t sdbDebugFlag;

@@ -39,4 +39,18 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO

SSchema tscGetTbnameColumnSchema();

/**
 * check if the schema is valid or not, including following aspects:
 * 1. number of columns
 * 2. column types
 * 3. column length
 * 4. column names
 * 5. total length
 *
 * @param pSchema
 * @param numOfCols
 * @return
 */
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);

#endif  // TDENGINE_NAME_H

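For context on how this new declaration is meant to be used: elsewhere in this commit, `tscProcessTableMetaRsp` calls it on a freshly built table meta before caching it. A caller inside the client library would look roughly like the sketch below; this is only an illustrative sketch against the internal headers (the header names and the `validateTableMeta` helper are assumptions, not part of the commit), while the `isValidSchema` signature and the `STableMeta` fields match what the diff shows.

```c
#include "tname.h"     /* isValidSchema(); header name per this commit */
#include "tsclient.h"  /* assumed location of SSqlObj, STableMeta, tscError() */

/* Reject a table meta whose schema violates the column/tag limits
 * before it is cached and used for query planning. */
static int32_t validateTableMeta(SSqlObj *pSql, STableMeta *pTableMeta, const char *name) {
  if (!isValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns,
                     pTableMeta->tableInfo.numOfTags)) {
    tscError("%p invalid table meta from mnode, name:%s", pSql, name);
    return TSDB_CODE_TSC_INVALID_VALUE;
  }
  return TSDB_CODE_SUCCESS;
}
```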
@@ -212,7 +212,7 @@ int32_t mDebugFlag = 131;
int32_t sdbDebugFlag = 131;
int32_t dDebugFlag = 135;
int32_t vDebugFlag = 135;
int32_t cDebugFlag = 131;
uint32_t cDebugFlag = 131;
int32_t jniDebugFlag = 131;
int32_t odbcDebugFlag = 131;
int32_t httpDebugFlag = 131;

@ -6,6 +6,10 @@
|
|||
#include "ttokendef.h"
|
||||
#include "tvariant.h"
|
||||
|
||||
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
|
||||
|
||||
#define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS)
|
||||
|
||||
// todo refactor
|
||||
UNUSED_FUNC static FORCE_INLINE const char* skipSegments(const char* input, char delim, int32_t num) {
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
|
@ -62,7 +66,7 @@ SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const
|
|||
if (s.type == TSDB_DATA_TYPE_BINARY || s.type == TSDB_DATA_TYPE_NCHAR) {
|
||||
s.bytes = (int16_t)(pVal->nLen + VARSTR_HEADER_SIZE);
|
||||
} else {
|
||||
s.bytes = tDataTypeDesc[pVal->nType].nSize;
|
||||
s.bytes = tDataTypes[pVal->nType].bytes;
|
||||
}
|
||||
|
||||
s.colId = TSDB_UD_COLUMN_INDEX;
|
||||
|
@ -206,3 +210,65 @@ SSchema tscGetTbnameColumnSchema() {
|
|||
strcpy(s.name, TSQL_TBNAME_L);
|
||||
return s;
|
||||
}
|
||||
|
||||
static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen) {
|
||||
int32_t rowLen = 0;
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
// 1. valid types
|
||||
if (!isValidDataType(pSchema[i].type)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// 2. valid length for each type
|
||||
if (pSchema[i].type == TSDB_DATA_TYPE_BINARY) {
|
||||
if (pSchema[i].bytes > TSDB_MAX_BINARY_LEN) {
|
||||
return false;
|
||||
}
|
||||
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
|
||||
if (pSchema[i].bytes > TSDB_MAX_NCHAR_LEN) {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
if (pSchema[i].bytes != tDataTypes[pSchema[i].type].bytes) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// 3. valid column names
|
||||
for (int32_t j = i + 1; j < numOfCols; ++j) {
|
||||
if (strncasecmp(pSchema[i].name, pSchema[j].name, sizeof(pSchema[i].name) - 1) == 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
rowLen += pSchema[i].bytes;
|
||||
}
|
||||
|
||||
return rowLen <= maxLen;
|
||||
}
|
||||
|
||||
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
|
||||
if (!VALIDNUMOFCOLS(numOfCols)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!VALIDNUMOFTAGS(numOfTags)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* first column must be the timestamp, which is a primary key */
|
||||
if (pSchema[0].type != TSDB_DATA_TYPE_TIMESTAMP) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!doValidateSchema(pSchema, numOfCols, TSDB_MAX_BYTES_PER_ROW)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!doValidateSchema(&pSchema[numOfCols], numOfTags, TSDB_MAX_TAGS_LEN)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -14,7 +14,7 @@
 */
#include "os.h"

#include "taosdef.h"
#include "ttype.h"
#include "ttokendef.h"
#include "tscompression.h"

@ -367,7 +367,7 @@ static void getStatics_nchr(const void *pData, int32_t numOfRow, int64_t *min, i
  *maxIndex = 0;
}

tDataTypeDescriptor tDataTypeDesc[15] = {
tDataTypeDescriptor tDataTypes[15] = {
  {TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", NULL, NULL, NULL},
  {TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", tsCompressBool, tsDecompressBool, getStatics_bool},
  {TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", tsCompressTinyint, tsDecompressTinyint, getStatics_i8},

@ -423,58 +423,58 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
  switch (type) {
    case TSDB_DATA_TYPE_BOOL:
      for (int32_t i = 0; i < numOfElems; ++i) {
        *(uint8_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_BOOL_NULL;
        *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BOOL_NULL;
      }
      break;
    case TSDB_DATA_TYPE_TINYINT:
      for (int32_t i = 0; i < numOfElems; ++i) {
        *(uint8_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_TINYINT_NULL;
        *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_TINYINT_NULL;
      }
      break;
    case TSDB_DATA_TYPE_SMALLINT:
      for (int32_t i = 0; i < numOfElems; ++i) {
        *(uint16_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_SMALLINT_NULL;
        *(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_SMALLINT_NULL;
      }
      break;
    case TSDB_DATA_TYPE_INT:
      for (int32_t i = 0; i < numOfElems; ++i) {
        *(uint32_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_INT_NULL;
        *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_INT_NULL;
      }
      break;
    case TSDB_DATA_TYPE_BIGINT:
    case TSDB_DATA_TYPE_TIMESTAMP:
      for (int32_t i = 0; i < numOfElems; ++i) {
        *(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_BIGINT_NULL;
        *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BIGINT_NULL;
      }
      break;
    case TSDB_DATA_TYPE_UTINYINT:
      for (int32_t i = 0; i < numOfElems; ++i) {
        *(uint8_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_UTINYINT_NULL;
        *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UTINYINT_NULL;
      }
      break;
    case TSDB_DATA_TYPE_USMALLINT:
      for (int32_t i = 0; i < numOfElems; ++i) {
        *(uint16_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_USMALLINT_NULL;
        *(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_USMALLINT_NULL;
      }
      break;
    case TSDB_DATA_TYPE_UINT:
      for (int32_t i = 0; i < numOfElems; ++i) {
        *(uint32_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_UINT_NULL;
        *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UINT_NULL;
      }
      break;
    case TSDB_DATA_TYPE_UBIGINT:
      for (int32_t i = 0; i < numOfElems; ++i) {
        *(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_UBIGINT_NULL;
        *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UBIGINT_NULL;
      }
      break;
    case TSDB_DATA_TYPE_FLOAT:
      for (int32_t i = 0; i < numOfElems; ++i) {
        *(uint32_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_FLOAT_NULL;
        *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_FLOAT_NULL;
      }
      break;
    case TSDB_DATA_TYPE_DOUBLE:
      for (int32_t i = 0; i < numOfElems; ++i) {
        *(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_DOUBLE_NULL;
        *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_DOUBLE_NULL;
      }
      break;
    case TSDB_DATA_TYPE_NCHAR:

@ -485,7 +485,7 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
      break;
    default: {
      for (int32_t i = 0; i < numOfElems; ++i) {
        *(uint32_t *)(val + i * tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize) = TSDB_DATA_INT_NULL;
        *(uint32_t *)(val + i * tDataTypes[TSDB_DATA_TYPE_INT].bytes) = TSDB_DATA_INT_NULL;
      }
      break;
    }

@ -524,15 +524,18 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
  switch (type) {
    case TSDB_DATA_TYPE_BOOL:
    case TSDB_DATA_TYPE_TINYINT:
    case TSDB_DATA_TYPE_UTINYINT:
      *((int8_t *)val) = GET_INT8_VAL(src);
      break;
    case TSDB_DATA_TYPE_SMALLINT:
    case TSDB_DATA_TYPE_USMALLINT:
      *((int16_t *)val) = GET_INT16_VAL(src);
      break;
    case TSDB_DATA_TYPE_INT: {
    case TSDB_DATA_TYPE_INT:
    case TSDB_DATA_TYPE_UINT:
      *((int32_t *)val) = GET_INT32_VAL(src);
      break;
    }

    case TSDB_DATA_TYPE_FLOAT:
      SET_FLOAT_VAL(val, GET_FLOAT_VAL(src));
      break;

@ -540,6 +543,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
      SET_DOUBLE_VAL(val, GET_DOUBLE_VAL(src));
      break;
    case TSDB_DATA_TYPE_BIGINT:
    case TSDB_DATA_TYPE_UBIGINT:
    case TSDB_DATA_TYPE_TIMESTAMP:
      *((int64_t *)val) = GET_INT64_VAL(src);
      break;

@ -205,7 +205,7 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
  }

  if (pDst->nType != TSDB_DATA_TYPE_ARRAY) {
    pDst->nLen = tDataTypeDesc[pDst->nType].nSize;
    pDst->nLen = tDataTypes[pDst->nType].bytes;
  }
}

@ -399,6 +399,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
    pVariant->wpz = (wchar_t *)tmp;
  } else {
    int32_t output = 0;

    bool ret = taosMbsToUcs4(pDst, nLen, *pDest, (nLen + 1) * TSDB_NCHAR_SIZE, &output);
    if (!ret) {
      return -1;

@ -424,7 +425,7 @@ static FORCE_INLINE int32_t convertToDouble(char *pStr, int32_t len, double *val

static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result, int32_t type, bool issigned, bool releaseVariantPtr) {
  if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
    setNull((char *)result, type, tDataTypeDesc[type].nSize);
    setNull((char *)result, type, tDataTypes[type].bytes);
    return 0;
  }

@ -445,7 +446,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
      pVariant->nLen = 0;
    }

    setNull((char *)result, type, tDataTypeDesc[type].nSize);
    setNull((char *)result, type, tDataTypes[type].bytes);
    return 0;
  }

@ -495,7 +496,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
        free(pVariant->pz);
        pVariant->nLen = 0;
      }
      setNull((char *)result, type, tDataTypeDesc[type].nSize);
      setNull((char *)result, type, tDataTypes[type].bytes);
      return 0;
    } else {
      int64_t val = wcstoll(pVariant->wpz, &endPtr, 10);

@ -1,32 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
    <classpathentry kind="src" output="target/classes" path="src/main/java">
        <attributes>
            <attribute name="optional" value="true"/>
            <attribute name="maven.pomderived" value="true"/>
        </attributes>
    </classpathentry>
    <classpathentry excluding="**" kind="src" output="target/classes" path="src/main/resources">
        <attributes>
            <attribute name="maven.pomderived" value="true"/>
        </attributes>
    </classpathentry>
    <classpathentry kind="src" output="target/test-classes" path="src/test/java">
        <attributes>
            <attribute name="optional" value="true"/>
            <attribute name="maven.pomderived" value="true"/>
            <attribute name="test" value="true"/>
        </attributes>
    </classpathentry>
    <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8">
        <attributes>
            <attribute name="maven.pomderived" value="true"/>
        </attributes>
    </classpathentry>
    <classpathentry kind="con" path="org.eclipse.m2e.MAVEN2_CLASSPATH_CONTAINER">
        <attributes>
            <attribute name="maven.pomderived" value="true"/>
        </attributes>
    </classpathentry>
    <classpathentry kind="output" path="target/classes"/>
</classpath>

@ -1,23 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
    <name>taos-jdbcdriver</name>
    <comment></comment>
    <projects>
    </projects>
    <buildSpec>
        <buildCommand>
            <name>org.eclipse.jdt.core.javabuilder</name>
            <arguments>
            </arguments>
        </buildCommand>
        <buildCommand>
            <name>org.eclipse.m2e.core.maven2Builder</name>
            <arguments>
            </arguments>
        </buildCommand>
    </buildSpec>
    <natures>
        <nature>org.eclipse.jdt.core.javanature</nature>
        <nature>org.eclipse.m2e.core.maven2Nature</nature>
    </natures>
</projectDescription>

@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
    ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
        POST_BUILD
        COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
        COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.15-dist.jar ${LIBRARY_OUTPUT_PATH}
        COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.16-dist.jar ${LIBRARY_OUTPUT_PATH}
        COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
        COMMENT "build jdbc driver")
    ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})

@ -5,7 +5,7 @@

    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>2.0.15</version>
    <version>2.0.16</version>
    <packaging>jar</packaging>

    <name>JDBCDriver</name>

@ -3,7 +3,7 @@
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>2.0.15</version>
    <version>2.0.16</version>
    <packaging>jar</packaging>
    <name>JDBCDriver</name>
    <url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>

@ -49,6 +49,7 @@
            </exclusion>
        </exclusions>
    </dependency>

    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>

@ -56,12 +57,6 @@
        <scope>test</scope>
    </dependency>

    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>5.1.47</version>
    </dependency>

    <!-- for restful -->
    <dependency>
        <groupId>org.apache.httpcomponents</groupId>

@ -79,12 +74,6 @@
        <version>1.2.58</version>
    </dependency>

    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>5.1.49</version>
    </dependency>

    </dependencies>

    <build>

@ -497,12 +497,12 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {

    public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern)
            throws SQLException {
        throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
        return null;
    }

    public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern,
                                         String columnNamePattern) throws SQLException {
        throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
        return null;
    }

    public abstract ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types)

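With `getProcedures()` and `getProcedureColumns()` now returning `null` instead of throwing, callers have to guard before iterating. A minimal caller-side sketch (the class and method names here are hypothetical, not part of the driver):

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ProcedureLookup {
    public static int countProcedures(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        ResultSet rs = meta.getProcedures(null, null, "%");
        if (rs == null) {
            return 0; // TDengine does not expose stored procedures
        }
        int count = 0;
        while (rs.next()) {
            count++;
        }
        rs.close();
        return count;
    }
}
```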
@ -20,13 +20,11 @@ import java.util.List;

public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {

    private String dbProductName = null;
    private String url = null;
    private String userName = null;
    private Connection conn = null;
    private String url;
    private String userName;
    private Connection conn;

    public TSDBDatabaseMetaData(String dbProductName, String url, String userName) {
        this.dbProductName = dbProductName;
    public TSDBDatabaseMetaData(String url, String userName) {
        this.url = url;
        this.userName = userName;
    }

@ -35,12 +33,17 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
        this.conn = conn;
    }

    @Override
    public <T> T unwrap(Class<T> iface) throws SQLException {
        return null;
        try {
            return iface.cast(this);
        } catch (ClassCastException cce) {
            throw new SQLException("Unable to unwrap to " + iface.toString());
        }
    }

    public boolean isWrapperFor(Class<?> iface) throws SQLException {
        return false;
        return iface.isInstance(this);
    }

    public boolean allProceduresAreCallable() throws SQLException {

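Since `unwrap()`/`isWrapperFor()` are now real implementations rather than stubs, a client can reach the vendor-specific metadata class through the standard JDBC wrapper API. An illustrative sketch (the `UnwrapExample` class is made up for this example):

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;
import com.taosdata.jdbc.TSDBDatabaseMetaData;

public class UnwrapExample {
    public static void inspect(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        // isWrapperFor() now answers based on the actual runtime type
        if (meta.isWrapperFor(TSDBDatabaseMetaData.class)) {
            TSDBDatabaseMetaData tsdbMeta = meta.unwrap(TSDBDatabaseMetaData.class);
            System.out.println(tsdbMeta.getDatabaseProductName()); // prints "TDengine"
        }
    }
}
```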
@ -80,11 +83,11 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
    }

    public String getDatabaseProductName() throws SQLException {
        return this.dbProductName;
        return "TDengine";
    }

    public String getDatabaseProductVersion() throws SQLException {
        return "1.5.1";
        return "2.0.x.x";
    }

    public String getDriverName() throws SQLException {

@ -92,7 +95,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
    }

    public String getDriverVersion() throws SQLException {
        return "1.0.0";
        return "2.0.x";
    }

    public int getDriverMajorVersion() {

@ -111,7 +114,9 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
        return false;
    }


    public boolean supportsMixedCaseIdentifiers() throws SQLException {
        // whether identifiers of objects such as databases and tables are stored in mixed case
        return false;
    }

@ -120,7 +125,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
    }

    public boolean storesLowerCaseIdentifiers() throws SQLException {
        return false;
        return true;
    }

    public boolean storesMixedCaseIdentifiers() throws SQLException {

@ -128,6 +133,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
    }

    public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
        // whether identifiers of objects such as databases and tables are stored in mixed case with quoting
        return false;
    }

@ -188,10 +194,12 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
    }

    public boolean nullPlusNonNullIsNull() throws SQLException {
        // null + non-null != null
        return false;
    }

    public boolean supportsConvert() throws SQLException {
        // whether the convert() conversion function is supported
        return false;
    }

@ -216,7 +224,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
    }

    public boolean supportsGroupBy() throws SQLException {
        return false;
        return true;
    }

    public boolean supportsGroupByUnrelated() throws SQLException {

@ -488,7 +496,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
    }

    public int getDefaultTransactionIsolation() throws SQLException {
        return 0;
        return Connection.TRANSACTION_NONE;
    }

    public boolean supportsTransactions() throws SQLException {

@ -496,6 +504,8 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
    }

    public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
        if (level == Connection.TRANSACTION_NONE)
            return true;
        return false;
    }

@ -517,28 +527,27 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {

    public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern)
            throws SQLException {
        throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
        return null;
    }

    public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern,
                                         String columnNamePattern) throws SQLException {
        throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
        return null;
    }

    public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types)
            throws SQLException {
        Statement stmt = null;
        if (null != conn && !conn.isClosed()) {
            stmt = conn.createStatement();
            if (catalog == null || catalog.length() < 1) {
                catalog = conn.getCatalog();
            }
    public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
        if (conn == null || conn.isClosed()) {
            throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
        }

        try (Statement stmt = conn.createStatement()) {
            if (catalog == null || catalog.isEmpty())
                return null;

            stmt.executeUpdate("use " + catalog);
            ResultSet resultSet0 = stmt.executeQuery("show tables");
            GetTablesResultSet getTablesResultSet = new GetTablesResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, types);
            return getTablesResultSet;
        } else {
            throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
        }
    }

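A minimal sketch of calling the rewritten `getTables()`: the catalog (database name) has to be supplied, because an empty catalog now makes the method return `null`. Reading columns by index is an assumption here, since the returned `GetTablesResultSet` wraps the output of `show tables` rather than the standard JDBC column layout:

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ListTables {
    public static void print(Connection conn, String dbName) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        ResultSet tables = meta.getTables(dbName, null, null, null);
        if (tables == null) {
            return; // no catalog given
        }
        while (tables.next()) {
            System.out.println(tables.getString(1)); // first column of "show tables"
        }
        tables.close();
    }
}
```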
@ -547,14 +556,12 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
    }

    public ResultSet getCatalogs() throws SQLException {
        if (conn == null || conn.isClosed())
            throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));

        if (conn != null && !conn.isClosed()) {
            Statement stmt = conn.createStatement();
            ResultSet resultSet0 = stmt.executeQuery("show databases");
            CatalogResultSet resultSet = new CatalogResultSet(resultSet0);
            return resultSet;
        } else {
            return getEmptyResultSet();
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("show databases");
            return new CatalogResultSet(rs);
        }
    }

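Sketch of enumerating databases through the rewritten `getCatalogs()`, which wraps a `show databases` result in a `CatalogResultSet`. Treating the first column as the database name is an assumption about that result layout:

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ListDatabases {
    public static void print(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        try (ResultSet catalogs = meta.getCatalogs()) {
            while (catalogs.next()) {
                System.out.println(catalogs.getString(1)); // database name column
            }
        }
    }
}
```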
@ -562,7 +569,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
        DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();

        // set up ColumnMetaDataList
        List<ColumnMetaData> columnMetaDataList = new ArrayList<ColumnMetaData>(1);
        List<ColumnMetaData> columnMetaDataList = new ArrayList<>(1);
        ColumnMetaData colMetaData = new ColumnMetaData();
        colMetaData.setColIndex(0);
        colMetaData.setColName("TABLE_TYPE");

@ -571,7 +578,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
        columnMetaDataList.add(colMetaData);

        // set up rowDataList
        List<TSDBResultSetRowData> rowDataList = new ArrayList<TSDBResultSetRowData>(2);
        List<TSDBResultSetRowData> rowDataList = new ArrayList<>(2);
        TSDBResultSetRowData rowData = new TSDBResultSetRowData();
        rowData.setString(0, "TABLE");
        rowDataList.add(rowData);

@ -591,11 +598,10 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
        Statement stmt = null;
        if (null != conn && !conn.isClosed()) {
            stmt = conn.createStatement();
            if (catalog == null || catalog.length() < 1) {
                catalog = conn.getCatalog();
            }
            stmt.executeUpdate("use " + catalog);
        if (catalog == null || catalog.isEmpty())
            return null;

        stmt.executeUpdate("use " + catalog);
        DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
        // set up ColumnMetaDataList
        List<ColumnMetaData> columnMetaDataList = new ArrayList<>(24);

@ -851,7 +857,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
    }

    public Connection getConnection() throws SQLException {
        return null;
        return this.conn;
    }

    public boolean supportsSavepoints() throws SQLException {

@ -884,15 +890,17 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
    }

    public boolean supportsResultSetHoldability(int holdability) throws SQLException {
        if (holdability == ResultSet.HOLD_CURSORS_OVER_COMMIT)
            return true;
        return false;
    }

    public int getResultSetHoldability() throws SQLException {
        return 0;
        return ResultSet.HOLD_CURSORS_OVER_COMMIT;
    }

    public int getDatabaseMajorVersion() throws SQLException {
        return 0;
        return 2;
    }

    public int getDatabaseMinorVersion() throws SQLException {

@ -900,7 +908,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
    }

    public int getJDBCMajorVersion() throws SQLException {
        return 0;
        return 2;
    }

    public int getJDBCMinorVersion() throws SQLException {

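The metadata now reports real values for holdability and version numbers (major version 2 on both the database and JDBC side). A small caller-side sketch of consuming them; the `VersionCheck` wrapper itself is hypothetical:

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class VersionCheck {
    public static boolean isSupported(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        boolean holdable = meta.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT);
        return meta.getDatabaseMajorVersion() >= 2 && holdable;
    }
}
```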
@ -214,7 +214,7 @@ public class TSDBDriver extends AbstractTaosDriver {
            urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url);
        }

        this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty(TSDBDriver.PROPERTY_KEY_USER));
        this.dbMetaData = new TSDBDatabaseMetaData(urlForMeta, urlProps.getProperty(TSDBDriver.PROPERTY_KEY_USER));
        return urlProps;
    }

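The driver now builds its `TSDBDatabaseMetaData` from just the URL and user name. A minimal connection sketch in the same style as the test code in this commit (the `ConnectExample` class is made up for illustration):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class ConnectExample {
    public static Connection open(String host) throws SQLException, ClassNotFoundException {
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        // user and password passed through the URL, as in the new unit tests
        return DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
    }
}
```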
@ -39,7 +39,6 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;

@SuppressWarnings("unused")
public class TSDBResultSet implements ResultSet {
    private TSDBJNIConnector jniConnector = null;

@ -104,6 +103,7 @@ public class TSDBResultSet implements ResultSet {
    }

    public TSDBResultSet() {

    }

    public TSDBResultSet(TSDBJNIConnector connector, long resultSetPointer) throws SQLException {

@ -17,6 +17,7 @@ package com.taosdata.jdbc;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

public class TSDBStatement implements Statement {
    private TSDBJNIConnector connector = null;

@ -68,7 +69,6 @@ public class TSDBStatement implements Statement {
        pSql = this.connector.executeQuery(sql);

        long resultSetPointer = this.connector.getResultSet();

        if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
            this.connector.freeResultSet(pSql);
            throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));

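From the caller's side, a failed JNI connection now surfaces as an ordinary `SQLException` from `executeQuery()`, so standard try-with-resources handling is enough. An illustrative sketch using the `test.t0` table from the test setup in this commit:

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class QueryExample {
    public static void run(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select * from test.t0 limit 10")) {
            while (rs.next()) {
                System.out.println(rs.getTimestamp(1)); // first column is the timestamp
            }
        }
    }
}
```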
@ -8,7 +8,6 @@ import java.util.List;

public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData {


    private final String url;
    private final String userName;
    private final Connection connection;

@ -1,245 +0,0 @@
package com.taosdata.jdbc;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import java.sql.*;
import java.util.Properties;

public class DatabaseMetaDataTest {
    static Connection connection = null;
    static PreparedStatement statement = null;
    static String dbName = "test";
    static String tName = "t0";
    static String host = "localhost";

    @BeforeClass
    public static void createConnection() throws SQLException {
        try {
            Class.forName("com.taosdata.jdbc.TSDBDriver");
        } catch (ClassNotFoundException e) {
            return;
        }
        Properties properties = new Properties();
        properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
        properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
        connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);

        String sql = "drop database if exists " + dbName;
        statement = connection.prepareStatement(sql);
        statement.executeUpdate("create database if not exists " + dbName);
        statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
    }

    @Test
    public void testMetaDataTest() throws SQLException {
        DatabaseMetaData databaseMetaData = connection.getMetaData();
        ResultSet resultSet = databaseMetaData.getTables(dbName, "t*", "t*", new String[]{"t"});
        while (resultSet.next()) {
            for (int i = 1; i <= resultSet.getMetaData().getColumnCount(); i++) {
                System.out.printf("%d: %s\n", i, resultSet.getString(i));
            }
        }
        resultSet.close();
        databaseMetaData.isWrapperFor(null);
        databaseMetaData.allProceduresAreCallable();
        databaseMetaData.allTablesAreSelectable();
        databaseMetaData.getURL();
        databaseMetaData.getUserName();
        databaseMetaData.isReadOnly();
        databaseMetaData.nullsAreSortedHigh();
        databaseMetaData.nullsAreSortedLow();
        databaseMetaData.nullsAreSortedAtStart();
        databaseMetaData.nullsAreSortedAtEnd();
        databaseMetaData.getDatabaseProductName();
        databaseMetaData.getDatabaseProductVersion();
        databaseMetaData.getDriverName();
        databaseMetaData.getDriverVersion();
        databaseMetaData.getDriverMajorVersion();
        databaseMetaData.getDriverMinorVersion();
        databaseMetaData.usesLocalFiles();
        databaseMetaData.usesLocalFilePerTable();
        databaseMetaData.supportsMixedCaseIdentifiers();
        databaseMetaData.storesUpperCaseIdentifiers();
        databaseMetaData.storesLowerCaseIdentifiers();
        databaseMetaData.storesMixedCaseIdentifiers();
        databaseMetaData.supportsMixedCaseQuotedIdentifiers();
        databaseMetaData.storesUpperCaseQuotedIdentifiers();
        databaseMetaData.storesLowerCaseQuotedIdentifiers();
        databaseMetaData.storesMixedCaseQuotedIdentifiers();
        databaseMetaData.getIdentifierQuoteString();
        databaseMetaData.getSQLKeywords();
        databaseMetaData.getNumericFunctions();
        databaseMetaData.getStringFunctions();
        databaseMetaData.getSystemFunctions();
        databaseMetaData.getTimeDateFunctions();
        databaseMetaData.getSearchStringEscape();
        databaseMetaData.getExtraNameCharacters();
        databaseMetaData.supportsAlterTableWithAddColumn();
        databaseMetaData.supportsAlterTableWithDropColumn();
        databaseMetaData.supportsColumnAliasing();
        databaseMetaData.nullPlusNonNullIsNull();
        databaseMetaData.supportsConvert();
        databaseMetaData.supportsConvert(0, 0);
        databaseMetaData.supportsTableCorrelationNames();
        databaseMetaData.supportsDifferentTableCorrelationNames();
        databaseMetaData.supportsExpressionsInOrderBy();
        databaseMetaData.supportsOrderByUnrelated();
        databaseMetaData.supportsGroupBy();
        databaseMetaData.supportsGroupByUnrelated();
        databaseMetaData.supportsGroupByBeyondSelect();
        databaseMetaData.supportsLikeEscapeClause();
        databaseMetaData.supportsMultipleResultSets();
        databaseMetaData.supportsMultipleTransactions();
        databaseMetaData.supportsNonNullableColumns();
        databaseMetaData.supportsMinimumSQLGrammar();
        databaseMetaData.supportsCoreSQLGrammar();
        databaseMetaData.supportsExtendedSQLGrammar();
        databaseMetaData.supportsANSI92EntryLevelSQL();
        databaseMetaData.supportsANSI92IntermediateSQL();
        databaseMetaData.supportsANSI92FullSQL();
        databaseMetaData.supportsIntegrityEnhancementFacility();
        databaseMetaData.supportsOuterJoins();
        databaseMetaData.supportsFullOuterJoins();
        databaseMetaData.supportsLimitedOuterJoins();
        databaseMetaData.getSchemaTerm();
        databaseMetaData.getProcedureTerm();
        databaseMetaData.getCatalogTerm();
        databaseMetaData.isCatalogAtStart();
        databaseMetaData.getCatalogSeparator();
        databaseMetaData.supportsSchemasInDataManipulation();
        databaseMetaData.supportsSchemasInProcedureCalls();
        databaseMetaData.supportsSchemasInTableDefinitions();
        databaseMetaData.supportsSchemasInIndexDefinitions();
        databaseMetaData.supportsSchemasInPrivilegeDefinitions();
        databaseMetaData.supportsCatalogsInDataManipulation();
        databaseMetaData.supportsCatalogsInProcedureCalls();
        databaseMetaData.supportsCatalogsInTableDefinitions();
        databaseMetaData.supportsCatalogsInIndexDefinitions();
        databaseMetaData.supportsCatalogsInPrivilegeDefinitions();
        databaseMetaData.supportsPositionedDelete();
        databaseMetaData.supportsPositionedUpdate();
        databaseMetaData.supportsSelectForUpdate();
        databaseMetaData.supportsStoredProcedures();
        databaseMetaData.supportsSubqueriesInComparisons();
        databaseMetaData.supportsSubqueriesInExists();
        databaseMetaData.supportsSubqueriesInIns();
        databaseMetaData.supportsSubqueriesInQuantifieds();
        databaseMetaData.supportsCorrelatedSubqueries();
        databaseMetaData.supportsUnion();
        databaseMetaData.supportsUnionAll();
        databaseMetaData.supportsOpenCursorsAcrossCommit();
        databaseMetaData.supportsOpenCursorsAcrossRollback();
        databaseMetaData.supportsOpenStatementsAcrossCommit();
        databaseMetaData.supportsOpenStatementsAcrossRollback();
        databaseMetaData.getMaxBinaryLiteralLength();
        databaseMetaData.getMaxCharLiteralLength();
        databaseMetaData.getMaxColumnNameLength();
        databaseMetaData.getMaxColumnsInGroupBy();
        databaseMetaData.getMaxColumnsInIndex();
        databaseMetaData.getMaxColumnsInOrderBy();
        databaseMetaData.getMaxColumnsInSelect();
        databaseMetaData.getMaxColumnsInTable();
        databaseMetaData.getMaxConnections();
        databaseMetaData.getMaxCursorNameLength();
        databaseMetaData.getMaxIndexLength();
        databaseMetaData.getMaxSchemaNameLength();
        databaseMetaData.getMaxProcedureNameLength();
        databaseMetaData.getMaxCatalogNameLength();
        databaseMetaData.getMaxRowSize();
        databaseMetaData.doesMaxRowSizeIncludeBlobs();
        databaseMetaData.getMaxStatementLength();
        databaseMetaData.getMaxStatements();
        databaseMetaData.getMaxTableNameLength();
        databaseMetaData.getMaxTablesInSelect();
        databaseMetaData.getMaxUserNameLength();
        databaseMetaData.getDefaultTransactionIsolation();
        databaseMetaData.supportsTransactions();
        databaseMetaData.supportsTransactionIsolationLevel(0);
        databaseMetaData.supportsDataDefinitionAndDataManipulationTransactions();
        databaseMetaData.supportsDataManipulationTransactionsOnly();
        databaseMetaData.dataDefinitionCausesTransactionCommit();
        databaseMetaData.dataDefinitionIgnoredInTransactions();
        try {
            databaseMetaData.getProcedures("", "", "");
        } catch (Exception e) {
        }
        try {
            databaseMetaData.getProcedureColumns("", "", "", "");
        } catch (Exception e) {
        }
        try {
            databaseMetaData.getTables("", "", "", new String[]{""});
        } catch (Exception e) {
        }
        databaseMetaData.getSchemas();
        databaseMetaData.getCatalogs();
        // databaseMetaData.getTableTypes();

        databaseMetaData.getColumns(dbName, "", tName, "");
        databaseMetaData.getColumnPrivileges("", "", "", "");
        databaseMetaData.getTablePrivileges("", "", "");
        databaseMetaData.getBestRowIdentifier("", "", "", 0, false);
        databaseMetaData.getVersionColumns("", "", "");
        databaseMetaData.getPrimaryKeys("", "", "");
        databaseMetaData.getImportedKeys("", "", "");
        databaseMetaData.getExportedKeys("", "", "");
        databaseMetaData.getCrossReference("", "", "", "", "", "");
        databaseMetaData.getTypeInfo();
        databaseMetaData.getIndexInfo("", "", "", false, false);
        databaseMetaData.supportsResultSetType(0);
        databaseMetaData.supportsResultSetConcurrency(0, 0);
        databaseMetaData.ownUpdatesAreVisible(0);
        databaseMetaData.ownDeletesAreVisible(0);
        databaseMetaData.ownInsertsAreVisible(0);
        databaseMetaData.othersUpdatesAreVisible(0);
        databaseMetaData.othersDeletesAreVisible(0);
        databaseMetaData.othersInsertsAreVisible(0);
        databaseMetaData.updatesAreDetected(0);
        databaseMetaData.deletesAreDetected(0);
        databaseMetaData.insertsAreDetected(0);
        databaseMetaData.supportsBatchUpdates();
        databaseMetaData.getUDTs("", "", "", new int[]{0});
        databaseMetaData.getConnection();
        databaseMetaData.supportsSavepoints();
        databaseMetaData.supportsNamedParameters();
        databaseMetaData.supportsMultipleOpenResults();
        databaseMetaData.supportsGetGeneratedKeys();
        databaseMetaData.getSuperTypes("", "", "");
        databaseMetaData.getSuperTables("", "", "");
        databaseMetaData.getAttributes("", "", "", "");
        databaseMetaData.supportsResultSetHoldability(0);
        databaseMetaData.getResultSetHoldability();
        databaseMetaData.getDatabaseMajorVersion();
        databaseMetaData.getDatabaseMinorVersion();
        databaseMetaData.getJDBCMajorVersion();
        databaseMetaData.getJDBCMinorVersion();
        databaseMetaData.getSQLStateType();
        databaseMetaData.locatorsUpdateCopy();
        databaseMetaData.supportsStatementPooling();
        databaseMetaData.getRowIdLifetime();
        databaseMetaData.getSchemas("", "");
        databaseMetaData.supportsStoredFunctionsUsingCallSyntax();
        databaseMetaData.autoCommitFailureClosesAllResultSets();
        databaseMetaData.getClientInfoProperties();
        databaseMetaData.getFunctions("", "", "");
        databaseMetaData.getFunctionColumns("", "", "", "");
        databaseMetaData.getPseudoColumns("", "", "", "");
        databaseMetaData.generatedKeyAlwaysReturned();

    }

    @AfterClass
    public static void close() throws Exception {
        statement.executeUpdate("drop database " + dbName);
        statement.close();
        connection.close();
        Thread.sleep(10);

    }
}

@ -0,0 +1,913 @@
|
|||
package com.taosdata.jdbc;
|
||||
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.util.Properties;
|
||||
|
||||
public class TSDBDatabaseMetaDataTest {
|
||||
private TSDBDatabaseMetaData metaData;
|
||||
private static final String host = "localhost";
|
||||
|
||||
@Before
|
||||
public void before() throws ClassNotFoundException, SQLException {
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
Properties properties = new Properties();
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
metaData = (TSDBDatabaseMetaData) DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata", properties).getMetaData();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void unwrap() throws SQLException {
|
||||
TSDBDatabaseMetaData unwrap = metaData.unwrap(TSDBDatabaseMetaData.class);
|
||||
Assert.assertNotNull(unwrap);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void isWrapperFor() throws SQLException {
|
||||
Assert.assertTrue(metaData.isWrapperFor(TSDBDatabaseMetaData.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void allProceduresAreCallable() throws SQLException {
|
||||
Assert.assertFalse(metaData.allProceduresAreCallable());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void allTablesAreSelectable() throws SQLException {
|
||||
Assert.assertFalse(metaData.allTablesAreSelectable());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getURL() throws SQLException {
|
||||
Assert.assertEquals("jdbc:TAOS://localhost:6030/?user=root&password=taosdata", metaData.getURL());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getUserName() throws SQLException {
|
||||
Assert.assertEquals("root", metaData.getUserName());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void isReadOnly() throws SQLException {
|
||||
Assert.assertFalse(metaData.isReadOnly());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void nullsAreSortedHigh() throws SQLException {
|
||||
Assert.assertFalse(metaData.nullsAreSortedHigh());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void nullsAreSortedLow() throws SQLException {
|
||||
Assert.assertTrue(metaData.nullsAreSortedLow());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void nullsAreSortedAtStart() throws SQLException {
|
||||
Assert.assertTrue(metaData.nullsAreSortedAtStart());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void nullsAreSortedAtEnd() throws SQLException {
|
||||
Assert.assertFalse(metaData.nullsAreSortedAtEnd());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getDatabaseProductName() throws SQLException {
|
||||
Assert.assertEquals("TDengine", metaData.getDatabaseProductName());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getDatabaseProductVersion() throws SQLException {
|
||||
Assert.assertEquals("2.0.x.x", metaData.getDatabaseProductVersion());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getDriverName() throws SQLException {
|
||||
Assert.assertEquals("com.taosdata.jdbc.TSDBDriver", metaData.getDriverName());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getDriverVersion() throws SQLException {
|
||||
Assert.assertEquals("2.0.x", metaData.getDriverVersion());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getDriverMajorVersion() {
|
||||
Assert.assertEquals(2, metaData.getDriverMajorVersion());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getDriverMinorVersion() {
|
||||
Assert.assertEquals(0, metaData.getDriverMinorVersion());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void usesLocalFiles() throws SQLException {
|
||||
Assert.assertFalse(metaData.usesLocalFiles());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void usesLocalFilePerTable() throws SQLException {
|
||||
Assert.assertFalse(metaData.usesLocalFilePerTable());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsMixedCaseIdentifiers() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsMixedCaseIdentifiers());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void storesUpperCaseIdentifiers() throws SQLException {
|
||||
Assert.assertFalse(metaData.storesUpperCaseIdentifiers());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void storesLowerCaseIdentifiers() throws SQLException {
|
||||
Assert.assertTrue(metaData.storesLowerCaseIdentifiers());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void storesMixedCaseIdentifiers() throws SQLException {
|
||||
Assert.assertFalse(metaData.storesMixedCaseIdentifiers());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsMixedCaseQuotedIdentifiers() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsMixedCaseQuotedIdentifiers());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void storesUpperCaseQuotedIdentifiers() throws SQLException {
|
||||
Assert.assertFalse(metaData.storesUpperCaseQuotedIdentifiers());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void storesLowerCaseQuotedIdentifiers() throws SQLException {
|
||||
Assert.assertFalse(metaData.storesLowerCaseQuotedIdentifiers());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void storesMixedCaseQuotedIdentifiers() throws SQLException {
|
||||
Assert.assertFalse(metaData.storesMixedCaseQuotedIdentifiers());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getIdentifierQuoteString() throws SQLException {
|
||||
Assert.assertEquals(" ", metaData.getIdentifierQuoteString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getSQLKeywords() throws SQLException {
|
||||
Assert.assertEquals(null, metaData.getSQLKeywords());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getNumericFunctions() throws SQLException {
|
||||
Assert.assertEquals(null, metaData.getNumericFunctions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getStringFunctions() throws SQLException {
|
||||
Assert.assertEquals(null, metaData.getStringFunctions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getSystemFunctions() throws SQLException {
|
||||
Assert.assertEquals(null, metaData.getSystemFunctions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getTimeDateFunctions() throws SQLException {
|
||||
Assert.assertEquals(null, metaData.getTimeDateFunctions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getSearchStringEscape() throws SQLException {
|
||||
Assert.assertEquals(null, metaData.getSearchStringEscape());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getExtraNameCharacters() throws SQLException {
|
||||
Assert.assertEquals(null, metaData.getExtraNameCharacters());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsAlterTableWithAddColumn() throws SQLException {
|
||||
Assert.assertTrue(metaData.supportsAlterTableWithAddColumn());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsAlterTableWithDropColumn() throws SQLException {
|
||||
Assert.assertTrue(metaData.supportsAlterTableWithDropColumn());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsColumnAliasing() throws SQLException {
|
||||
Assert.assertTrue(metaData.supportsColumnAliasing());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void nullPlusNonNullIsNull() throws SQLException {
|
||||
Assert.assertFalse(metaData.nullPlusNonNullIsNull());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsConvert() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsConvert());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSupportsConvert() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsConvert(1, 1));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsTableCorrelationNames() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsTableCorrelationNames());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsDifferentTableCorrelationNames() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsDifferentTableCorrelationNames());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsExpressionsInOrderBy() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsExpressionsInOrderBy());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsOrderByUnrelated() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsOrderByUnrelated());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsGroupBy() throws SQLException {
|
||||
Assert.assertTrue(metaData.supportsGroupBy());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsGroupByUnrelated() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsGroupByUnrelated());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsGroupByBeyondSelect() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsGroupByBeyondSelect());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsLikeEscapeClause() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsLikeEscapeClause());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsMultipleResultSets() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsMultipleResultSets());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsMultipleTransactions() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsMultipleTransactions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsNonNullableColumns() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsNonNullableColumns());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsMinimumSQLGrammar() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsMinimumSQLGrammar());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsCoreSQLGrammar() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsCoreSQLGrammar());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsExtendedSQLGrammar() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsExtendedSQLGrammar());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsANSI92EntryLevelSQL() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsANSI92EntryLevelSQL());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsANSI92IntermediateSQL() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsANSI92IntermediateSQL());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsANSI92FullSQL() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsANSI92FullSQL());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsIntegrityEnhancementFacility() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsIntegrityEnhancementFacility());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsOuterJoins() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsOuterJoins());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsFullOuterJoins() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsFullOuterJoins());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsLimitedOuterJoins() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsLimitedOuterJoins());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getSchemaTerm() throws SQLException {
|
||||
Assert.assertNull(metaData.getSchemaTerm());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getProcedureTerm() throws SQLException {
|
||||
Assert.assertNull(metaData.getProcedureTerm());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getCatalogTerm() throws SQLException {
|
||||
Assert.assertEquals("database", metaData.getCatalogTerm());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void isCatalogAtStart() throws SQLException {
|
||||
Assert.assertTrue(metaData.isCatalogAtStart());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getCatalogSeparator() throws SQLException {
|
||||
Assert.assertEquals(".", metaData.getCatalogSeparator());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsSchemasInDataManipulation() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsSchemasInDataManipulation());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsSchemasInProcedureCalls() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsSchemasInProcedureCalls());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsSchemasInTableDefinitions() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsSchemasInTableDefinitions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsSchemasInIndexDefinitions() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsSchemasInIndexDefinitions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsSchemasInPrivilegeDefinitions() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsSchemasInPrivilegeDefinitions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsCatalogsInDataManipulation() throws SQLException {
|
||||
Assert.assertTrue(metaData.supportsCatalogsInDataManipulation());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsCatalogsInProcedureCalls() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsCatalogsInProcedureCalls());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsCatalogsInTableDefinitions() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsCatalogsInTableDefinitions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsCatalogsInIndexDefinitions() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsCatalogsInIndexDefinitions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsCatalogsInPrivilegeDefinitions() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsCatalogsInPrivilegeDefinitions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsPositionedDelete() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsPositionedDelete());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsPositionedUpdate() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsPositionedUpdate());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsSelectForUpdate() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsSelectForUpdate());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsStoredProcedures() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsStoredProcedures());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsSubqueriesInComparisons() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsSubqueriesInComparisons());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsSubqueriesInExists() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsSubqueriesInExists());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsSubqueriesInIns() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsSubqueriesInIns());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsSubqueriesInQuantifieds() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsSubqueriesInQuantifieds());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsCorrelatedSubqueries() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsCorrelatedSubqueries());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsUnion() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsUnion());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsUnionAll() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsUnionAll());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsOpenCursorsAcrossCommit() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsOpenCursorsAcrossCommit());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsOpenCursorsAcrossRollback() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsOpenCursorsAcrossRollback());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsOpenStatementsAcrossCommit() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsOpenStatementsAcrossCommit());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsOpenStatementsAcrossRollback() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsOpenStatementsAcrossRollback());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxBinaryLiteralLength() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxBinaryLiteralLength());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxCharLiteralLength() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxCharLiteralLength());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxColumnNameLength() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxColumnNameLength());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxColumnsInGroupBy() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxColumnsInGroupBy());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxColumnsInIndex() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxColumnsInIndex());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxColumnsInOrderBy() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxColumnsInOrderBy());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxColumnsInSelect() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxColumnsInSelect());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxColumnsInTable() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxColumnsInTable());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxConnections() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxConnections());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxCursorNameLength() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxCursorNameLength());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxIndexLength() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxIndexLength());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxSchemaNameLength() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxSchemaNameLength());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxProcedureNameLength() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxProcedureNameLength());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxCatalogNameLength() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxCatalogNameLength());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxRowSize() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxRowSize());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void doesMaxRowSizeIncludeBlobs() throws SQLException {
|
||||
Assert.assertFalse(metaData.doesMaxRowSizeIncludeBlobs());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxStatementLength() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxStatementLength());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxStatements() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxStatements());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxTableNameLength() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxTableNameLength());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxTablesInSelect() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxTablesInSelect());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getMaxUserNameLength() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getMaxUserNameLength());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getDefaultTransactionIsolation() throws SQLException {
|
||||
Assert.assertEquals(Connection.TRANSACTION_NONE, metaData.getDefaultTransactionIsolation());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsTransactions() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsTransactions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsTransactionIsolationLevel() throws SQLException {
|
||||
Assert.assertTrue(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_NONE));
|
||||
Assert.assertFalse(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED));
|
||||
Assert.assertFalse(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_UNCOMMITTED));
|
||||
Assert.assertFalse(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_REPEATABLE_READ));
|
||||
Assert.assertFalse(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_SERIALIZABLE));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsDataDefinitionAndDataManipulationTransactions() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsDataDefinitionAndDataManipulationTransactions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsDataManipulationTransactionsOnly() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsDataManipulationTransactionsOnly());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void dataDefinitionCausesTransactionCommit() throws SQLException {
|
||||
Assert.assertFalse(metaData.dataDefinitionCausesTransactionCommit());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void dataDefinitionIgnoredInTransactions() throws SQLException {
|
||||
Assert.assertFalse(metaData.dataDefinitionIgnoredInTransactions());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getProcedures() throws SQLException {
|
||||
Assert.assertNull(metaData.getProcedures("*", "*", "*"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getProcedureColumns() throws SQLException {
|
||||
Assert.assertNull(metaData.getProcedureColumns("*", "*", "*", "*"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getTables() throws SQLException {
|
||||
Assert.assertNull(metaData.getTables("", "", "*", null));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getSchemas() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getSchemas());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getCatalogs() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getCatalogs());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getTableTypes() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getTableTypes());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getColumns() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getColumns("", "", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getColumnPrivileges() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getColumnPrivileges("", "", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getTablePrivileges() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getTablePrivileges("", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getBestRowIdentifier() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getBestRowIdentifier("", "", "", 0, false));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getVersionColumns() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getVersionColumns("", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getPrimaryKeys() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getPrimaryKeys("", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getImportedKeys() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getImportedKeys("", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getExportedKeys() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getExportedKeys("", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getCrossReference() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getCrossReference("", "", "", "", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getTypeInfo() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getTypeInfo());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getIndexInfo() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getIndexInfo("", "", "", false, false));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsResultSetType() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsResultSetType(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsResultSetConcurrency() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsResultSetConcurrency(0, 0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void ownUpdatesAreVisible() throws SQLException {
|
||||
Assert.assertFalse(metaData.ownUpdatesAreVisible(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void ownDeletesAreVisible() throws SQLException {
|
||||
Assert.assertFalse(metaData.ownDeletesAreVisible(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void ownInsertsAreVisible() throws SQLException {
|
||||
Assert.assertFalse(metaData.ownInsertsAreVisible(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void othersUpdatesAreVisible() throws SQLException {
|
||||
Assert.assertFalse(metaData.othersUpdatesAreVisible(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void othersDeletesAreVisible() throws SQLException {
|
||||
Assert.assertFalse(metaData.othersDeletesAreVisible(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void othersInsertsAreVisible() throws SQLException {
|
||||
Assert.assertFalse(metaData.othersInsertsAreVisible(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updatesAreDetected() throws SQLException {
|
||||
Assert.assertFalse(metaData.updatesAreDetected(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void deletesAreDetected() throws SQLException {
|
||||
Assert.assertFalse(metaData.deletesAreDetected(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void insertsAreDetected() throws SQLException {
|
||||
Assert.assertFalse(metaData.insertsAreDetected(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsBatchUpdates() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsBatchUpdates());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getUDTs() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getUDTs("", "", "", null));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getConnection() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getConnection());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsSavepoints() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsSavepoints());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsNamedParameters() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsNamedParameters());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsMultipleOpenResults() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsMultipleOpenResults());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsGetGeneratedKeys() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsGetGeneratedKeys());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getSuperTypes() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getSuperTypes("", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getSuperTables() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getSuperTables("", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getAttributes() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getAttributes("", "", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsResultSetHoldability() throws SQLException {
|
||||
Assert.assertTrue(metaData.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT));
|
||||
Assert.assertFalse(metaData.supportsResultSetHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getResultSetHoldability() throws SQLException {
|
||||
Assert.assertEquals(1, metaData.getResultSetHoldability());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getDatabaseMajorVersion() throws SQLException {
|
||||
Assert.assertEquals(2, metaData.getDatabaseMajorVersion());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getDatabaseMinorVersion() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getDatabaseMinorVersion());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getJDBCMajorVersion() throws SQLException {
|
||||
Assert.assertEquals(2, metaData.getJDBCMajorVersion());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getJDBCMinorVersion() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getJDBCMinorVersion());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getSQLStateType() throws SQLException {
|
||||
Assert.assertEquals(0, metaData.getSQLStateType());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void locatorsUpdateCopy() throws SQLException {
|
||||
Assert.assertFalse(metaData.locatorsUpdateCopy());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsStatementPooling() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsStatementPooling());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getRowIdLifetime() throws SQLException {
|
||||
Assert.assertNull(metaData.getRowIdLifetime());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetSchemas() throws SQLException {
|
||||
Assert.assertNull(metaData.getSchemas());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void supportsStoredFunctionsUsingCallSyntax() throws SQLException {
|
||||
Assert.assertFalse(metaData.supportsStoredFunctionsUsingCallSyntax());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void autoCommitFailureClosesAllResultSets() throws SQLException {
|
||||
Assert.assertFalse(metaData.autoCommitFailureClosesAllResultSets());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getClientInfoProperties() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getClientInfoProperties());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getFunctions() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getFunctions("", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getFunctionColumns() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getFunctionColumns("", "", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getPseudoColumns() throws SQLException {
|
||||
Assert.assertNotNull(metaData.getPseudoColumns("", "", "", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void generatedKeyAlwaysReturned() throws SQLException {
|
||||
Assert.assertFalse(metaData.generatedKeyAlwaysReturned());
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,91 @@
package com.taosdata.jdbc.cases;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.sql.*;
import java.util.concurrent.TimeUnit;

public class MultiThreadsWithSameStatmentTest {

    private class Service {
        public Connection conn;
        public Statement stmt;

        public Service() {
            try {
                Class.forName("com.taosdata.jdbc.TSDBDriver");
                conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/?user=root&password=taosdata");
                stmt = conn.createStatement();
                stmt.execute("create database if not exists jdbctest");
                stmt.executeUpdate("create table if not exists jdbctest.weather (ts timestamp, f1 int)");
            } catch (ClassNotFoundException | SQLException e) {
                e.printStackTrace();
            }
        }

        public void release() {
            try {
                stmt.close();
                conn.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }

    @Before
    public void before() {
    }

    @Test
    public void test() {
        Thread t1 = new Thread(() -> {
            try {
                Service service = new Service();
                ResultSet resultSet = service.stmt.executeQuery("select * from jdbctest.weather");
                while (resultSet.next()) {
                    ResultSetMetaData metaData = resultSet.getMetaData();
                    for (int i = 1; i <= metaData.getColumnCount(); i++) {
                        System.out.print(metaData.getColumnLabel(i) + ": " + resultSet.getString(i));
                    }
                    System.out.println();
                }
                resultSet.close();
                service.release();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        });

        Thread t2 = new Thread(() -> {
            while (true) {
                try {
                    Service service = new Service();
                    service.stmt.executeUpdate("insert into jdbctest.weather values(now,1)");
                    service.release();
                } catch (SQLException e) {
                    e.printStackTrace();
                }
            }
        });
        t1.start();
        sleep(1000);
        t2.start();
    }

    private void sleep(long mills) {
        try {
            TimeUnit.MILLISECONDS.sleep(mills);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

    @After
    public void after() {
    }
}
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:

setuptools.setup(
    name="taos",
-    version="2.0.3",
+    version="2.0.4",
    author="Taosdata Inc.",
    author_email="support@taosdata.com",
    description="TDengine python client package",

@@ -184,7 +184,7 @@ class TDengineCursor(object):

        return False

-    def fetchall(self):
+    def fetchall_row(self):
        """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
        """
        if self._result is None or self._fields is None:

@@ -203,7 +203,7 @@ class TDengineCursor(object):
        for i in range(len(self._fields)):
            buffer[i].extend(block[i])
        return list(map(tuple, zip(*buffer)))

-    def fetchall_block(self):
+    def fetchall(self):
        if self._result is None or self._fields is None:
            raise OperationalError("Invalid use of fetchall")
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:

setuptools.setup(
    name="taos",
-    version="2.0.3",
+    version="2.0.4",
    author="Taosdata Inc.",
    author_email="support@taosdata.com",
    description="TDengine python client package",

@@ -192,7 +192,7 @@ class TDengineCursor(object):

        return False

-    def fetchall(self):
+    def fetchall_row(self):
        """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
        """
        if self._result is None or self._fields is None:

@@ -212,7 +212,7 @@ class TDengineCursor(object):
            buffer[i].extend(block[i])
        return list(map(tuple, zip(*buffer)))

-    def fetchall_block(self):
+    def fetchall(self):
        if self._result is None or self._fields is None:
            raise OperationalError("Invalid use of fetchall")
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:

setuptools.setup(
    name="taos",
-    version="2.0.3",
+    version="2.0.4",
    author="Taosdata Inc.",
    author_email="support@taosdata.com",
    description="TDengine python client package",

@@ -138,7 +138,7 @@ class TDengineCursor(object):
    def fetchmany(self):
        pass

-    def fetchall(self):
+    def fetchall_row(self):
        """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
        """
        if self._result is None or self._fields is None:

@@ -158,7 +158,7 @@ class TDengineCursor(object):
            buffer[i].extend(block[i])
        return list(map(tuple, zip(*buffer)))

-    def fetchall_block(self):
+    def fetchall(self):
        if self._result is None or self._fields is None:
            raise OperationalError("Invalid use of fetchall")
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:

setuptools.setup(
    name="taos",
-    version="2.0.3",
+    version="2.0.4",
    author="Taosdata Inc.",
    author_email="support@taosdata.com",
    description="TDengine python client package",

@@ -139,7 +139,7 @@ class TDengineCursor(object):
    def fetchmany(self):
        pass

-    def fetchall(self):
+    def fetchall_row(self):
        """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
        """
        if self._result is None or self._fields is None:

@@ -159,7 +159,7 @@ class TDengineCursor(object):
            buffer[i].extend(block[i])
        return list(map(tuple, zip(*buffer)))

-    def fetchall_block(self):
+    def fetchall(self):
        if self._result is None or self._fields is None:
            raise OperationalError("Invalid use of fetchall")
@@ -216,7 +216,7 @@ void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid) {
  int16_t  numOfTags = htons(pTable->numOfTags);
  int32_t  tableId = htonl(pTable->tid);
  uint64_t uid = htobe64(pTable->uid);
-  dInfo("table:%s, numOfColumns:%d numOfTags:%d tid:%d uid:%" PRIu64, pTable->tableId, numOfColumns, numOfTags, tableId, uid);
+  dInfo("table:%s, numOfColumns:%d numOfTags:%d tid:%d uid:%" PRIu64, pTable->tableFname, numOfColumns, numOfTags, tableId, uid);

  return rpcRsp.pCont;
}
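The dInfo() change above only swaps tableId for tableFname; the surrounding htons/htonl/htobe64 calls are what put the multi-byte fields into network byte order before the message goes out. A minimal sketch of that conversion pattern, assuming a hypothetical SDemoTableWire struct (only the byte-swap calls themselves are taken from the hunk):

```c
#include <stdint.h>
#include <arpa/inet.h>   // htons, htonl
#include <endian.h>      // htobe64 (glibc)

// Hypothetical wire struct: every multi-byte field is stored big-endian.
typedef struct {
  int16_t  numOfTags;
  int32_t  tid;
  uint64_t uid;
} SDemoTableWire;

static void demoEncode(SDemoTableWire *dst, int16_t numOfTags, int32_t tid, uint64_t uid) {
  dst->numOfTags = (int16_t)htons((uint16_t)numOfTags);  // 16-bit swap
  dst->tid       = (int32_t)htonl((uint32_t)tid);        // 32-bit swap
  dst->uid       = htobe64(uid);                         // 64-bit swap
}
```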
@@ -140,7 +140,7 @@ DLL_EXPORT int taos_errno(TAOS_RES *tres);

DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param);
DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
-DLL_EXPORT void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);
+//DLL_EXPORT void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);

typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code);
DLL_EXPORT TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval);
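With taos_fetch_row_a commented out above, taos_query_a plus taos_fetch_rows_a remain as the asynchronous path. A hedged sketch of how that pair is usually chained; the callback names and the reduced error handling are illustrative, not part of the header:

```c
#include <stdio.h>
#include "taos.h"

static void fetch_cb(void *param, TAOS_RES *res, int numOfRows) {
  if (numOfRows <= 0) {               // no more rows, or an error code < 0
    taos_free_result(res);
    return;
  }
  TAOS_ROW row;
  while ((row = taos_fetch_row(res)) != NULL) {
    // ... consume the row ...
  }
  taos_fetch_rows_a(res, fetch_cb, param);  // request the next batch
}

static void query_cb(void *param, TAOS_RES *res, int code) {
  if (code != 0) { taos_free_result(res); return; }
  taos_fetch_rows_a(res, fetch_cb, param);
}

// somewhere after taos_connect():
//   taos_query_a(taos, "select * from jdbctest.weather", query_cb, NULL);
```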
@@ -36,29 +36,6 @@ extern "C" {
#define TSWINDOW_INITIALIZER ((STimeWindow) {INT64_MIN, INT64_MAX})
#define TSKEY_INITIAL_VAL    INT64_MIN

-// ----------------- For variable data types such as TSDB_DATA_TYPE_BINARY and TSDB_DATA_TYPE_NCHAR
-typedef int32_t VarDataOffsetT;
-typedef int16_t VarDataLenT;
-
-typedef struct tstr {
-  VarDataLenT len;
-  char        data[];
-} tstr;
-
-#define VARSTR_HEADER_SIZE  sizeof(VarDataLenT)
-
-#define varDataLen(v)       ((VarDataLenT *)(v))[0]
-#define varDataTLen(v)      (sizeof(VarDataLenT) + varDataLen(v))
-#define varDataVal(v)       ((void *)((char *)v + VARSTR_HEADER_SIZE))
-#define varDataCopy(dst, v) memcpy((dst), (void*) (v), varDataTLen(v))
-#define varDataLenByData(v) (*(VarDataLenT *)(((char*)(v)) - VARSTR_HEADER_SIZE))
-#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT) (_len))
-#define IS_VAR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_BINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))
-
-// this data type is internally used only in 'in' query to hold the values
-#define TSDB_DATA_TYPE_ARRAY (TSDB_DATA_TYPE_NCHAR + 1)
-

// Bytes for each type.
extern const int32_t TYPE_BYTES[15];
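The var-data helpers removed from taosdef.h here are re-added in ttype.h further down in this commit. A small standalone sketch of how a BINARY cell is laid out with them; the macro definitions are copied from the hunk so the snippet compiles on its own, and packDemoTag is an illustrative helper, not part of the code base:

```c
#include <string.h>
#include <stdint.h>

typedef int16_t VarDataLenT;                         // as in the header above
#define VARSTR_HEADER_SIZE     sizeof(VarDataLenT)
#define varDataLen(v)          (((VarDataLenT *)(v))[0])
#define varDataVal(v)          ((void *)((char *)(v) + VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
#define varDataTLen(v)         (sizeof(VarDataLenT) + varDataLen(v))

// Pack an n-byte string into a var-data cell: 2-byte length header + payload.
// The caller must provide a cell buffer of at least VARSTR_HEADER_SIZE + n bytes.
static int packDemoTag(char *cell, const char *s, size_t n) {
  varDataSetLen(cell, (VarDataLenT)n);               // write the length header
  memcpy(varDataVal(cell), s, n);                    // write the payload after it
  return (int)varDataTLen(cell);                     // total bytes occupied
}
```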
|
||||
|
||||
|
@ -164,70 +141,6 @@ do { \
|
|||
#define SET_DOUBLE_PTR(x, y) { (*(double *)(x)) = (*(double *)(y)); }
|
||||
#endif
|
||||
|
||||
typedef struct tDataTypeDescriptor {
|
||||
int16_t nType;
|
||||
int16_t nameLen;
|
||||
int32_t nSize;
|
||||
char * aName;
|
||||
int (*compFunc)(const char *const input, int inputSize, const int nelements, char *const output, int outputSize,
|
||||
char algorithm, char *const buffer, int bufferSize);
|
||||
int (*decompFunc)(const char *const input, int compressedSize, const int nelements, char *const output,
|
||||
int outputSize, char algorithm, char *const buffer, int bufferSize);
|
||||
void (*getStatisFunc)(const void *pData, int32_t numofrow, int64_t *min, int64_t *max, int64_t *sum,
|
||||
int16_t *minindex, int16_t *maxindex, int16_t *numofnull);
|
||||
} tDataTypeDescriptor;
|
||||
|
||||
extern tDataTypeDescriptor tDataTypeDesc[15];
|
||||
|
||||
bool isValidDataType(int32_t type);
|
||||
|
||||
static FORCE_INLINE bool isNull(const char *val, int32_t type) {
|
||||
switch (type) {
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
return *(uint8_t *)val == TSDB_DATA_BOOL_NULL;
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL;
|
||||
case TSDB_DATA_TYPE_SMALLINT:
|
||||
return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL;
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
return *(uint32_t *)val == TSDB_DATA_INT_NULL;
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL;
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL;
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL;
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
return varDataLen(val) == sizeof(int32_t) && *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
return varDataLen(val) == sizeof(int8_t) && *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL;
|
||||
case TSDB_DATA_TYPE_UTINYINT:
|
||||
return *(uint8_t*) val == TSDB_DATA_UTINYINT_NULL;
|
||||
case TSDB_DATA_TYPE_USMALLINT:
|
||||
return *(uint16_t*) val == TSDB_DATA_USMALLINT_NULL;
|
||||
case TSDB_DATA_TYPE_UINT:
|
||||
return *(uint32_t*) val == TSDB_DATA_UINT_NULL;
|
||||
case TSDB_DATA_TYPE_UBIGINT:
|
||||
return *(uint64_t*) val == TSDB_DATA_UBIGINT_NULL;
|
||||
|
||||
default:
|
||||
return false;
|
||||
};
|
||||
}
|
||||
|
||||
void setVardataNull(char* val, int32_t type);
|
||||
void setNull(char *val, int32_t type, int32_t bytes);
|
||||
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
|
||||
void* getNullValue(int32_t type);
|
||||
|
||||
void assignVal(char *val, const char *src, int32_t len, int32_t type);
|
||||
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf);
|
||||
|
||||
int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bool issigned);
|
||||
|
||||
#define SET_DOUBLE_NULL(v) (*(uint64_t *)(v) = TSDB_DATA_DOUBLE_NULL)
|
||||
|
||||
// TODO: check if below is necessary
|
||||
#define TSDB_RELATION_INVALID 0
|
||||
#define TSDB_RELATION_LESS 1
|
||||
|
@ -270,7 +183,7 @@ int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bo
|
|||
#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64
|
||||
#define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE
|
||||
#define TSDB_MAX_SQL_SHOW_LEN 512
|
||||
#define TSDB_MAX_ALLOWED_SQL_LEN (1*1024*1024U) // sql length should be less than 1mb
|
||||
#define TSDB_MAX_ALLOWED_SQL_LEN (1*1024*1024u) // sql length should be less than 1mb
|
||||
|
||||
#define TSDB_APPNAME_LEN TSDB_UNI_LEN
|
||||
|
||||
|
@ -399,8 +312,8 @@ int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bo
|
|||
|
||||
#define TSDB_MAX_RPC_THREADS 5
|
||||
|
||||
#define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type
|
||||
#define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01u // free qhandle at vnode
|
||||
#define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type
|
||||
#define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01u // free qhandle at vnode
|
||||
|
||||
/*
|
||||
* 1. ordinary sub query for select * from super_table
|
||||
|
@ -420,31 +333,31 @@ int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bo
|
|||
#define TSDB_QUERY_TYPE_MULTITABLE_QUERY 0x200u
|
||||
#define TSDB_QUERY_TYPE_STMT_INSERT 0x800u // stmt insert type
|
||||
|
||||
#define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0)
|
||||
#define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type))
|
||||
#define TSDB_QUERY_CLEAR_TYPE(x, _type) ((x) &= (~_type))
|
||||
#define TSDB_QUERY_RESET_TYPE(x) ((x) = TSDB_QUERY_TYPE_NON_TYPE)
|
||||
#define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0)
|
||||
#define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type))
|
||||
#define TSDB_QUERY_CLEAR_TYPE(x, _type) ((x) &= (~_type))
|
||||
#define TSDB_QUERY_RESET_TYPE(x) ((x) = TSDB_QUERY_TYPE_NON_TYPE)
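The query-type block above is only re-indented, but the macros are easy to misread, so here is a minimal sketch of how the bit flags combine; the defines are copied from the hunk so the snippet compiles on its own, and main() is illustrative:

```c
#include <stdint.h>
#include <stdbool.h>

#define TSDB_QUERY_TYPE_NON_TYPE      0x00u
#define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01u
#define TSDB_QUERY_TYPE_STMT_INSERT   0x800u

#define TSDB_QUERY_HAS_TYPE(x, _type)   (((x) & (_type)) != 0)
#define TSDB_QUERY_SET_TYPE(x, _type)   ((x) |= (_type))
#define TSDB_QUERY_CLEAR_TYPE(x, _type) ((x) &= (~_type))
#define TSDB_QUERY_RESET_TYPE(x)        ((x) = TSDB_QUERY_TYPE_NON_TYPE)

int main(void) {
  uint32_t qtype = TSDB_QUERY_TYPE_NON_TYPE;
  TSDB_QUERY_SET_TYPE(qtype, TSDB_QUERY_TYPE_STMT_INSERT);                 // set one flag
  bool isInsert = TSDB_QUERY_HAS_TYPE(qtype, TSDB_QUERY_TYPE_STMT_INSERT); // true
  TSDB_QUERY_CLEAR_TYPE(qtype, TSDB_QUERY_TYPE_STMT_INSERT);               // flag removed
  TSDB_QUERY_RESET_TYPE(qtype);                                            // back to none
  return isInsert ? 0 : 1;
}
```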
|
||||
|
||||
#define TSDB_ORDER_ASC 1
|
||||
#define TSDB_ORDER_DESC 2
|
||||
#define TSDB_ORDER_ASC 1
|
||||
#define TSDB_ORDER_DESC 2
|
||||
|
||||
#define TSDB_DEFAULT_CLUSTER_HASH_SIZE 1
|
||||
#define TSDB_DEFAULT_MNODES_HASH_SIZE 5
|
||||
#define TSDB_DEFAULT_DNODES_HASH_SIZE 10
|
||||
#define TSDB_DEFAULT_ACCOUNTS_HASH_SIZE 10
|
||||
#define TSDB_DEFAULT_USERS_HASH_SIZE 20
|
||||
#define TSDB_DEFAULT_DBS_HASH_SIZE 100
|
||||
#define TSDB_DEFAULT_VGROUPS_HASH_SIZE 100
|
||||
#define TSDB_DEFAULT_STABLES_HASH_SIZE 100
|
||||
#define TSDB_DEFAULT_CTABLES_HASH_SIZE 20000
|
||||
|
||||
#define TSDB_DEFAULT_CLUSTER_HASH_SIZE 1
|
||||
#define TSDB_DEFAULT_MNODES_HASH_SIZE 5
|
||||
#define TSDB_DEFAULT_DNODES_HASH_SIZE 10
|
||||
#define TSDB_DEFAULT_ACCOUNTS_HASH_SIZE 10
|
||||
#define TSDB_DEFAULT_USERS_HASH_SIZE 20
|
||||
#define TSDB_DEFAULT_DBS_HASH_SIZE 100
|
||||
#define TSDB_DEFAULT_VGROUPS_HASH_SIZE 100
|
||||
#define TSDB_DEFAULT_STABLES_HASH_SIZE 100
|
||||
#define TSDB_DEFAULT_CTABLES_HASH_SIZE 20000
|
||||
#define TSDB_PORT_DNODESHELL 0
|
||||
#define TSDB_PORT_DNODEDNODE 5
|
||||
#define TSDB_PORT_SYNC 10
|
||||
#define TSDB_PORT_HTTP 11
|
||||
#define TSDB_PORT_ARBITRATOR 12
|
||||
|
||||
#define TSDB_PORT_DNODESHELL 0
|
||||
#define TSDB_PORT_DNODEDNODE 5
|
||||
#define TSDB_PORT_SYNC 10
|
||||
#define TSDB_PORT_HTTP 11
|
||||
#define TSDB_PORT_ARBITRATOR 12
|
||||
|
||||
#define TSDB_MAX_WAL_SIZE (1024*1024*2)
|
||||
#define TSDB_MAX_WAL_SIZE (1024*1024*3)
|
||||
|
||||
typedef enum {
|
||||
TAOS_QTYPE_RPC = 0,
|
||||
|
|
|
@ -153,30 +153,31 @@ enum _mgmt_table {
|
|||
#define TSDB_ALTER_TABLE_DROP_COLUMN 6
|
||||
#define TSDB_ALTER_TABLE_CHANGE_COLUMN 7
|
||||
|
||||
#define TSDB_FILL_NONE 0
|
||||
#define TSDB_FILL_NULL 1
|
||||
#define TSDB_FILL_SET_VALUE 2
|
||||
#define TSDB_FILL_LINEAR 3
|
||||
#define TSDB_FILL_PREV 4
|
||||
#define TSDB_FILL_NONE 0
|
||||
#define TSDB_FILL_NULL 1
|
||||
#define TSDB_FILL_SET_VALUE 2
|
||||
#define TSDB_FILL_LINEAR 3
|
||||
#define TSDB_FILL_PREV 4
|
||||
#define TSDB_FILL_NEXT 5
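TSDB_FILL_NEXT 5 is the newly added fill mode in this block. A small illustrative mapping from the keyword used in FILL(...) to these constants; demoFillModeFromKeyword is a hypothetical helper, the real keyword handling lives in the SQL parser:

```c
#include <strings.h>   // strcasecmp

#define TSDB_FILL_NONE      0
#define TSDB_FILL_NULL      1
#define TSDB_FILL_SET_VALUE 2
#define TSDB_FILL_LINEAR    3
#define TSDB_FILL_PREV      4
#define TSDB_FILL_NEXT      5

static int demoFillModeFromKeyword(const char *kw) {
  if (strcasecmp(kw, "none")   == 0) return TSDB_FILL_NONE;
  if (strcasecmp(kw, "null")   == 0) return TSDB_FILL_NULL;
  if (strcasecmp(kw, "value")  == 0) return TSDB_FILL_SET_VALUE;
  if (strcasecmp(kw, "linear") == 0) return TSDB_FILL_LINEAR;
  if (strcasecmp(kw, "prev")   == 0) return TSDB_FILL_PREV;
  if (strcasecmp(kw, "next")   == 0) return TSDB_FILL_NEXT;   // newly added mode
  return TSDB_FILL_NONE;
}
```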
|
||||
|
||||
#define TSDB_ALTER_USER_PASSWD 0x1
|
||||
#define TSDB_ALTER_USER_PASSWD 0x1
|
||||
#define TSDB_ALTER_USER_PRIVILEGES 0x2
|
||||
|
||||
#define TSDB_KILL_MSG_LEN 30
|
||||
#define TSDB_KILL_MSG_LEN 30
|
||||
|
||||
#define TSDB_VN_READ_ACCCESS ((char)0x1)
|
||||
#define TSDB_VN_WRITE_ACCCESS ((char)0x2)
|
||||
#define TSDB_VN_READ_ACCCESS ((char)0x1)
|
||||
#define TSDB_VN_WRITE_ACCCESS ((char)0x2)
|
||||
#define TSDB_VN_ALL_ACCCESS (TSDB_VN_READ_ACCCESS | TSDB_VN_WRITE_ACCCESS)
|
||||
|
||||
#define TSDB_COL_NORMAL 0x0u // the normal column of the table
|
||||
#define TSDB_COL_TAG 0x1u // the tag column type
|
||||
#define TSDB_COL_UDC 0x2u // the user specified normal string column, it is a dummy column
|
||||
#define TSDB_COL_NULL 0x4u // the column filter NULL or not
|
||||
#define TSDB_COL_NORMAL 0x0u // the normal column of the table
|
||||
#define TSDB_COL_TAG 0x1u // the tag column type
|
||||
#define TSDB_COL_UDC 0x2u // the user specified normal string column, it is a dummy column
|
||||
#define TSDB_COL_NULL 0x4u // the column filter NULL or not
|
||||
|
||||
#define TSDB_COL_IS_TAG(f) (((f&(~(TSDB_COL_NULL)))&TSDB_COL_TAG) != 0)
|
||||
#define TSDB_COL_IS_NORMAL_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_NORMAL)
|
||||
#define TSDB_COL_IS_UD_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_UDC)
|
||||
#define TSDB_COL_REQ_NULL(f) (((f)&TSDB_COL_NULL) != 0)
|
||||
#define TSDB_COL_IS_TAG(f) (((f&(~(TSDB_COL_NULL)))&TSDB_COL_TAG) != 0)
|
||||
#define TSDB_COL_IS_NORMAL_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_NORMAL)
|
||||
#define TSDB_COL_IS_UD_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_UDC)
|
||||
#define TSDB_COL_REQ_NULL(f) (((f)&TSDB_COL_NULL) != 0)
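The column-flag macros above mask out TSDB_COL_NULL before testing the column kind. A short sketch showing why a tag column that also carries the NULL-filter bit still tests as a tag; the defines are copied from the hunk and demoCheck is illustrative:

```c
#include <stdbool.h>

#define TSDB_COL_NORMAL 0x0u
#define TSDB_COL_TAG    0x1u
#define TSDB_COL_UDC    0x2u
#define TSDB_COL_NULL   0x4u

#define TSDB_COL_IS_TAG(f)        (((f&(~(TSDB_COL_NULL)))&TSDB_COL_TAG) != 0)
#define TSDB_COL_IS_NORMAL_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_NORMAL)
#define TSDB_COL_REQ_NULL(f)      (((f)&TSDB_COL_NULL) != 0)

// A tag column that also carries the "filter NULL" bit: the NULL bit is masked
// out first, so the kind checks still behave as expected.
static bool demoCheck(void) {
  unsigned f = TSDB_COL_TAG | TSDB_COL_NULL;
  return TSDB_COL_IS_TAG(f) && TSDB_COL_REQ_NULL(f) && !TSDB_COL_IS_NORMAL_COL(f);
}
```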
|
||||
|
||||
|
||||
extern char *taosMsg[];
|
||||
|
@ -260,14 +261,14 @@ typedef struct {
|
|||
uint64_t uid;
|
||||
uint64_t superTableUid;
|
||||
uint64_t createdTime;
|
||||
char tableId[TSDB_TABLE_FNAME_LEN];
|
||||
char superTableId[TSDB_TABLE_FNAME_LEN];
|
||||
char tableFname[TSDB_TABLE_FNAME_LEN];
|
||||
char stableFname[TSDB_TABLE_FNAME_LEN];
|
||||
char data[];
|
||||
} SMDCreateTableMsg;
|
||||
|
||||
typedef struct {
|
||||
int32_t len; // one create table message
|
||||
char tableId[TSDB_TABLE_FNAME_LEN];
|
||||
char tableFname[TSDB_TABLE_FNAME_LEN];
|
||||
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
|
||||
int8_t igExists;
|
||||
int8_t getMeta;
|
||||
|
@ -284,12 +285,12 @@ typedef struct {
|
|||
} SCMCreateTableMsg;
|
||||
|
||||
typedef struct {
|
||||
char tableId[TSDB_TABLE_FNAME_LEN];
|
||||
char tableFname[TSDB_TABLE_FNAME_LEN];
|
||||
int8_t igNotExists;
|
||||
} SCMDropTableMsg;
|
||||
|
||||
typedef struct {
|
||||
char tableId[TSDB_TABLE_FNAME_LEN];
|
||||
char tableFname[TSDB_TABLE_FNAME_LEN];
|
||||
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
|
||||
int16_t type; /* operation type */
|
||||
int16_t numOfCols; /* number of schema */
|
||||
|
@ -369,14 +370,14 @@ typedef struct {
|
|||
int32_t vgId;
|
||||
int32_t tid;
|
||||
uint64_t uid;
|
||||
char tableId[TSDB_TABLE_FNAME_LEN];
|
||||
char tableFname[TSDB_TABLE_FNAME_LEN];
|
||||
} SMDDropTableMsg;
|
||||
|
||||
typedef struct {
|
||||
int32_t contLen;
|
||||
int32_t vgId;
|
||||
uint64_t uid;
|
||||
char tableId[TSDB_TABLE_FNAME_LEN];
|
||||
char tableFname[TSDB_TABLE_FNAME_LEN];
|
||||
} SDropSTableMsg;
|
||||
|
||||
typedef struct {
|
||||
|
@ -688,7 +689,7 @@ typedef struct {
|
|||
} SCreateVnodeMsg, SAlterVnodeMsg;
|
||||
|
||||
typedef struct {
|
||||
char tableId[TSDB_TABLE_FNAME_LEN];
|
||||
char tableFname[TSDB_TABLE_FNAME_LEN];
|
||||
int16_t createFlag;
|
||||
char tags[];
|
||||
} STableInfoMsg;
|
||||
|
@ -726,7 +727,7 @@ typedef struct {
|
|||
|
||||
typedef struct STableMetaMsg {
|
||||
int32_t contLen;
|
||||
char tableId[TSDB_TABLE_FNAME_LEN]; // table id
|
||||
char tableFname[TSDB_TABLE_FNAME_LEN]; // table id
|
||||
uint8_t numOfTags;
|
||||
uint8_t precision;
|
||||
uint8_t tableType;
|
||||
|
@ -847,7 +848,7 @@ typedef struct {
|
|||
uint64_t uid;
|
||||
uint64_t stime; // stream starting time
|
||||
int32_t status;
|
||||
char tableId[TSDB_TABLE_FNAME_LEN];
|
||||
char tableFname[TSDB_TABLE_FNAME_LEN];
|
||||
} SAlterStreamMsg;
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -82,151 +82,154 @@
|
|||
#define TK_STABLES 64
|
||||
#define TK_VGROUPS 65
|
||||
#define TK_DROP 66
|
||||
#define TK_DNODE 67
|
||||
#define TK_USER 68
|
||||
#define TK_ACCOUNT 69
|
||||
#define TK_USE 70
|
||||
#define TK_DESCRIBE 71
|
||||
#define TK_ALTER 72
|
||||
#define TK_PASS 73
|
||||
#define TK_PRIVILEGE 74
|
||||
#define TK_LOCAL 75
|
||||
#define TK_IF 76
|
||||
#define TK_EXISTS 77
|
||||
#define TK_PPS 78
|
||||
#define TK_TSERIES 79
|
||||
#define TK_DBS 80
|
||||
#define TK_STORAGE 81
|
||||
#define TK_QTIME 82
|
||||
#define TK_CONNS 83
|
||||
#define TK_STATE 84
|
||||
#define TK_KEEP 85
|
||||
#define TK_CACHE 86
|
||||
#define TK_REPLICA 87
|
||||
#define TK_QUORUM 88
|
||||
#define TK_DAYS 89
|
||||
#define TK_MINROWS 90
|
||||
#define TK_MAXROWS 91
|
||||
#define TK_BLOCKS 92
|
||||
#define TK_CTIME 93
|
||||
#define TK_WAL 94
|
||||
#define TK_FSYNC 95
|
||||
#define TK_COMP 96
|
||||
#define TK_PRECISION 97
|
||||
#define TK_UPDATE 98
|
||||
#define TK_CACHELAST 99
|
||||
#define TK_LP 100
|
||||
#define TK_RP 101
|
||||
#define TK_UNSIGNED 102
|
||||
#define TK_TAGS 103
|
||||
#define TK_USING 104
|
||||
#define TK_AS 105
|
||||
#define TK_COMMA 106
|
||||
#define TK_NULL 107
|
||||
#define TK_SELECT 108
|
||||
#define TK_UNION 109
|
||||
#define TK_ALL 110
|
||||
#define TK_FROM 111
|
||||
#define TK_VARIABLE 112
|
||||
#define TK_INTERVAL 113
|
||||
#define TK_FILL 114
|
||||
#define TK_SLIDING 115
|
||||
#define TK_ORDER 116
|
||||
#define TK_BY 117
|
||||
#define TK_ASC 118
|
||||
#define TK_DESC 119
|
||||
#define TK_GROUP 120
|
||||
#define TK_HAVING 121
|
||||
#define TK_LIMIT 122
|
||||
#define TK_OFFSET 123
|
||||
#define TK_SLIMIT 124
|
||||
#define TK_SOFFSET 125
|
||||
#define TK_WHERE 126
|
||||
#define TK_NOW 127
|
||||
#define TK_RESET 128
|
||||
#define TK_QUERY 129
|
||||
#define TK_ADD 130
|
||||
#define TK_COLUMN 131
|
||||
#define TK_TAG 132
|
||||
#define TK_CHANGE 133
|
||||
#define TK_SET 134
|
||||
#define TK_KILL 135
|
||||
#define TK_CONNECTION 136
|
||||
#define TK_STREAM 137
|
||||
#define TK_COLON 138
|
||||
#define TK_ABORT 139
|
||||
#define TK_AFTER 140
|
||||
#define TK_ATTACH 141
|
||||
#define TK_BEFORE 142
|
||||
#define TK_BEGIN 143
|
||||
#define TK_CASCADE 144
|
||||
#define TK_CLUSTER 145
|
||||
#define TK_CONFLICT 146
|
||||
#define TK_COPY 147
|
||||
#define TK_DEFERRED 148
|
||||
#define TK_DELIMITERS 149
|
||||
#define TK_DETACH 150
|
||||
#define TK_EACH 151
|
||||
#define TK_END 152
|
||||
#define TK_EXPLAIN 153
|
||||
#define TK_FAIL 154
|
||||
#define TK_FOR 155
|
||||
#define TK_IGNORE 156
|
||||
#define TK_IMMEDIATE 157
|
||||
#define TK_INITIALLY 158
|
||||
#define TK_INSTEAD 159
|
||||
#define TK_MATCH 160
|
||||
#define TK_KEY 161
|
||||
#define TK_OF 162
|
||||
#define TK_RAISE 163
|
||||
#define TK_REPLACE 164
|
||||
#define TK_RESTRICT 165
|
||||
#define TK_ROW 166
|
||||
#define TK_STATEMENT 167
|
||||
#define TK_TRIGGER 168
|
||||
#define TK_VIEW 169
|
||||
#define TK_COUNT 170
|
||||
#define TK_SUM 171
|
||||
#define TK_AVG 172
|
||||
#define TK_MIN 173
|
||||
#define TK_MAX 174
|
||||
#define TK_FIRST 175
|
||||
#define TK_LAST 176
|
||||
#define TK_TOP 177
|
||||
#define TK_BOTTOM 178
|
||||
#define TK_STDDEV 179
|
||||
#define TK_PERCENTILE 180
|
||||
#define TK_APERCENTILE 181
|
||||
#define TK_LEASTSQUARES 182
|
||||
#define TK_HISTOGRAM 183
|
||||
#define TK_DIFF 184
|
||||
#define TK_SPREAD 185
|
||||
#define TK_TWA 186
|
||||
#define TK_INTERP 187
|
||||
#define TK_LAST_ROW 188
|
||||
#define TK_RATE 189
|
||||
#define TK_IRATE 190
|
||||
#define TK_SUM_RATE 191
|
||||
#define TK_SUM_IRATE 192
|
||||
#define TK_AVG_RATE 193
|
||||
#define TK_AVG_IRATE 194
|
||||
#define TK_TBID 195
|
||||
#define TK_SEMI 196
|
||||
#define TK_NONE 197
|
||||
#define TK_PREV 198
|
||||
#define TK_LINEAR 199
|
||||
#define TK_IMPORT 200
|
||||
#define TK_METRIC 201
|
||||
#define TK_TBNAME 202
|
||||
#define TK_JOIN 203
|
||||
#define TK_METRICS 204
|
||||
#define TK_STABLE 205
|
||||
#define TK_STABLE 67
|
||||
#define TK_DNODE 68
|
||||
#define TK_USER 69
|
||||
#define TK_ACCOUNT 70
|
||||
#define TK_USE 71
|
||||
#define TK_DESCRIBE 72
|
||||
#define TK_ALTER 73
|
||||
#define TK_PASS 74
|
||||
#define TK_PRIVILEGE 75
|
||||
#define TK_LOCAL 76
|
||||
#define TK_IF 77
|
||||
#define TK_EXISTS 78
|
||||
#define TK_PPS 79
|
||||
#define TK_TSERIES 80
|
||||
#define TK_DBS 81
|
||||
#define TK_STORAGE 82
|
||||
#define TK_QTIME 83
|
||||
#define TK_CONNS 84
|
||||
#define TK_STATE 85
|
||||
#define TK_KEEP 86
|
||||
#define TK_CACHE 87
|
||||
#define TK_REPLICA 88
|
||||
#define TK_QUORUM 89
|
||||
#define TK_DAYS 90
|
||||
#define TK_MINROWS 91
|
||||
#define TK_MAXROWS 92
|
||||
#define TK_BLOCKS 93
|
||||
#define TK_CTIME 94
|
||||
#define TK_WAL 95
|
||||
#define TK_FSYNC 96
|
||||
#define TK_COMP 97
|
||||
#define TK_PRECISION 98
|
||||
#define TK_UPDATE 99
|
||||
#define TK_CACHELAST 100
|
||||
#define TK_LP 101
|
||||
#define TK_RP 102
|
||||
#define TK_UNSIGNED 103
|
||||
#define TK_TAGS 104
|
||||
#define TK_USING 105
|
||||
#define TK_AS 106
|
||||
#define TK_COMMA 107
|
||||
#define TK_NULL 108
|
||||
#define TK_SELECT 109
|
||||
#define TK_UNION 110
|
||||
#define TK_ALL 111
|
||||
#define TK_FROM 112
|
||||
#define TK_VARIABLE 113
|
||||
#define TK_INTERVAL 114
|
||||
#define TK_FILL 115
|
||||
#define TK_SLIDING 116
|
||||
#define TK_ORDER 117
|
||||
#define TK_BY 118
|
||||
#define TK_ASC 119
|
||||
#define TK_DESC 120
|
||||
#define TK_GROUP 121
|
||||
#define TK_HAVING 122
|
||||
#define TK_LIMIT 123
|
||||
#define TK_OFFSET 124
|
||||
#define TK_SLIMIT 125
|
||||
#define TK_SOFFSET 126
|
||||
#define TK_WHERE 127
|
||||
#define TK_NOW 128
|
||||
#define TK_RESET 129
|
||||
#define TK_QUERY 130
|
||||
#define TK_ADD 131
|
||||
#define TK_COLUMN 132
|
||||
#define TK_TAG 133
|
||||
#define TK_CHANGE 134
|
||||
#define TK_SET 135
|
||||
#define TK_KILL 136
|
||||
#define TK_CONNECTION 137
|
||||
#define TK_STREAM 138
|
||||
#define TK_COLON 139
|
||||
#define TK_ABORT 140
|
||||
#define TK_AFTER 141
|
||||
#define TK_ATTACH 142
|
||||
#define TK_BEFORE 143
|
||||
#define TK_BEGIN 144
|
||||
#define TK_CASCADE 145
|
||||
#define TK_CLUSTER 146
|
||||
#define TK_CONFLICT 147
|
||||
#define TK_COPY 148
|
||||
#define TK_DEFERRED 149
|
||||
#define TK_DELIMITERS 150
|
||||
#define TK_DETACH 151
|
||||
#define TK_EACH 152
|
||||
#define TK_END 153
|
||||
#define TK_EXPLAIN 154
|
||||
#define TK_FAIL 155
|
||||
#define TK_FOR 156
|
||||
#define TK_IGNORE 157
|
||||
#define TK_IMMEDIATE 158
|
||||
#define TK_INITIALLY 159
|
||||
#define TK_INSTEAD 160
|
||||
#define TK_MATCH 161
|
||||
#define TK_KEY 162
|
||||
#define TK_OF 163
|
||||
#define TK_RAISE 164
|
||||
#define TK_REPLACE 165
|
||||
#define TK_RESTRICT 166
|
||||
#define TK_ROW 167
|
||||
#define TK_STATEMENT 168
|
||||
#define TK_TRIGGER 169
|
||||
#define TK_VIEW 170
|
||||
#define TK_COUNT 171
|
||||
#define TK_SUM 172
|
||||
#define TK_AVG 173
|
||||
#define TK_MIN 174
|
||||
#define TK_MAX 175
|
||||
#define TK_FIRST 176
|
||||
#define TK_LAST 177
|
||||
#define TK_TOP 178
|
||||
#define TK_BOTTOM 179
|
||||
#define TK_STDDEV 180
|
||||
#define TK_PERCENTILE 181
|
||||
#define TK_APERCENTILE 182
|
||||
#define TK_LEASTSQUARES 183
|
||||
#define TK_HISTOGRAM 184
|
||||
#define TK_DIFF 185
|
||||
#define TK_SPREAD 186
|
||||
#define TK_TWA 187
|
||||
#define TK_INTERP 188
|
||||
#define TK_LAST_ROW 189
|
||||
#define TK_RATE 190
|
||||
#define TK_IRATE 191
|
||||
#define TK_SUM_RATE 192
|
||||
#define TK_SUM_IRATE 193
|
||||
#define TK_AVG_RATE 194
|
||||
#define TK_AVG_IRATE 195
|
||||
#define TK_TBID 196
|
||||
#define TK_SEMI 197
|
||||
#define TK_NONE 198
|
||||
#define TK_PREV 199
|
||||
#define TK_LINEAR 200
|
||||
#define TK_IMPORT 201
|
||||
#define TK_METRIC 202
|
||||
#define TK_TBNAME 203
|
||||
#define TK_JOIN 204
|
||||
#define TK_METRICS 205
|
||||
#define TK_INSERT 206
|
||||
#define TK_INTO 207
|
||||
#define TK_VALUES 208
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#define TK_SPACE 300
|
||||
#define TK_COMMENT 301
|
||||
#define TK_ILLEGAL 302
|
||||
|
|
|
@ -7,6 +7,28 @@ extern "C" {
|
|||
|
||||
#include "taosdef.h"
|
||||
|
||||
// ----------------- For variable data types such as TSDB_DATA_TYPE_BINARY and TSDB_DATA_TYPE_NCHAR
|
||||
typedef int32_t VarDataOffsetT;
|
||||
typedef int16_t VarDataLenT;
|
||||
|
||||
typedef struct tstr {
|
||||
VarDataLenT len;
|
||||
char data[];
|
||||
} tstr;
|
||||
|
||||
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
|
||||
|
||||
#define varDataLen(v) ((VarDataLenT *)(v))[0]
|
||||
#define varDataTLen(v) (sizeof(VarDataLenT) + varDataLen(v))
|
||||
#define varDataVal(v) ((void *)((char *)v + VARSTR_HEADER_SIZE))
|
||||
#define varDataCopy(dst, v) memcpy((dst), (void*) (v), varDataTLen(v))
|
||||
#define varDataLenByData(v) (*(VarDataLenT *)(((char*)(v)) - VARSTR_HEADER_SIZE))
|
||||
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT) (_len))
|
||||
#define IS_VAR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_BINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))
|
||||
|
||||
// this data type is internally used only in 'in' query to hold the values
|
||||
#define TSDB_DATA_TYPE_ARRAY (TSDB_DATA_TYPE_NCHAR + 1)
|
||||
|
||||
#define GET_TYPED_DATA(_v, _finalType, _type, _data) \
|
||||
do { \
|
||||
switch (_type) { \
|
||||
|
@ -59,6 +81,70 @@ extern "C" {
|
|||
#define IS_VALID_UINT(_t) ((_t) >= 0 && (_t) < UINT32_MAX)
|
||||
#define IS_VALID_UBIGINT(_t) ((_t) >= 0 && (_t) < UINT64_MAX)
|
||||
|
||||
static FORCE_INLINE bool isNull(const char *val, int32_t type) {
|
||||
switch (type) {
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
return *(uint8_t *)val == TSDB_DATA_BOOL_NULL;
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL;
|
||||
case TSDB_DATA_TYPE_SMALLINT:
|
||||
return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL;
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
return *(uint32_t *)val == TSDB_DATA_INT_NULL;
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL;
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL;
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL;
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
return varDataLen(val) == sizeof(int32_t) && *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
return varDataLen(val) == sizeof(int8_t) && *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL;
|
||||
case TSDB_DATA_TYPE_UTINYINT:
|
||||
return *(uint8_t*) val == TSDB_DATA_UTINYINT_NULL;
|
||||
case TSDB_DATA_TYPE_USMALLINT:
|
||||
return *(uint16_t*) val == TSDB_DATA_USMALLINT_NULL;
|
||||
case TSDB_DATA_TYPE_UINT:
|
||||
return *(uint32_t*) val == TSDB_DATA_UINT_NULL;
|
||||
case TSDB_DATA_TYPE_UBIGINT:
|
||||
return *(uint64_t*) val == TSDB_DATA_UBIGINT_NULL;
|
||||
|
||||
default:
|
||||
return false;
|
||||
};
|
||||
}
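isNull() above maps each data type to its NULL sentinel. A minimal standalone sketch of the INT case; the sentinel value is an assumption taken from taosdef.h, and demoIsIntNull is an illustrative stand-in for calling isNull(val, TSDB_DATA_TYPE_INT):

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define TSDB_DATA_INT_NULL 0x80000000u   // assumed sentinel, see taosdef.h

static bool demoIsIntNull(const char *val) {
  return *(const uint32_t *)val == TSDB_DATA_INT_NULL;   // same test as the INT case above
}

int main(void) {
  int32_t col[2] = {42, (int32_t)TSDB_DATA_INT_NULL};
  for (int i = 0; i < 2; ++i) {
    if (demoIsIntNull((const char *)&col[i])) printf("row %d: NULL\n", i);
    else                                      printf("row %d: %d\n", i, col[i]);
  }
  return 0;
}
```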
|
||||
|
||||
typedef struct tDataTypeDescriptor {
|
||||
int16_t type;
|
||||
int16_t nameLen;
|
||||
int32_t bytes;
|
||||
char * name;
|
||||
int (*compFunc)(const char *const input, int inputSize, const int nelements, char *const output, int outputSize,
|
||||
char algorithm, char *const buffer, int bufferSize);
|
||||
int (*decompFunc)(const char *const input, int compressedSize, const int nelements, char *const output,
|
||||
int outputSize, char algorithm, char *const buffer, int bufferSize);
|
||||
void (*statisFunc)(const void *pData, int32_t numofrow, int64_t *min, int64_t *max, int64_t *sum,
|
||||
int16_t *minindex, int16_t *maxindex, int16_t *numofnull);
|
||||
} tDataTypeDescriptor;
|
||||
|
||||
extern tDataTypeDescriptor tDataTypes[15];
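tDataTypes[] bundles per-type compression and statistics callbacks behind tDataTypeDescriptor. A stand-in sketch (not the real table) of how the statisFunc slot is meant to be dispatched; the demo descriptor, callback, and values are all hypothetical:

```c
#include <stdint.h>
#include <stdio.h>

typedef void (*demo_statis_fn)(const void *pData, int32_t numofrow,
                               int64_t *min, int64_t *max, int64_t *sum,
                               int16_t *minindex, int16_t *maxindex, int16_t *numofnull);

typedef struct { int16_t type; const char *name; demo_statis_fn statisFunc; } demoTypeDesc;

// Statistics callback for a plain INT column (no NULL handling in this sketch).
static void demoIntStatis(const void *pData, int32_t numofrow, int64_t *min, int64_t *max,
                          int64_t *sum, int16_t *minindex, int16_t *maxindex, int16_t *numofnull) {
  const int32_t *p = (const int32_t *)pData;
  *min = *max = *sum = p[0]; *minindex = *maxindex = 0; *numofnull = 0;
  for (int32_t i = 1; i < numofrow; ++i) {
    *sum += p[i];
    if (p[i] < *min) { *min = p[i]; *minindex = (int16_t)i; }
    if (p[i] > *max) { *max = p[i]; *maxindex = (int16_t)i; }
  }
}

static demoTypeDesc demoTypes[] = { {4 /* INT */, "int", demoIntStatis} };

int main(void) {
  int32_t col[4] = {7, -2, 9, 3};
  int64_t mn, mx, sum; int16_t mi, xi, nn;
  demoTypes[0].statisFunc(col, 4, &mn, &mx, &sum, &mi, &xi, &nn);
  printf("min=%lld max=%lld sum=%lld\n", (long long)mn, (long long)mx, (long long)sum);
  return 0;
}
```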
|
||||
|
||||
bool isValidDataType(int32_t type);
|
||||
|
||||
void setVardataNull(char* val, int32_t type);
|
||||
void setNull(char *val, int32_t type, int32_t bytes);
|
||||
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
|
||||
void* getNullValue(int32_t type);
|
||||
|
||||
void assignVal(char *val, const char *src, int32_t len, int32_t type);
|
||||
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf);
|
||||
|
||||
int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bool issigned);
|
||||
|
||||
#define SET_DOUBLE_NULL(v) (*(uint64_t *)(v) = TSDB_DATA_DOUBLE_NULL)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -1085,8 +1085,8 @@ static void printfQueryMeta() {
|
|||
printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName);
|
||||
|
||||
printf("\n");
|
||||
printf("super table query info: \n");
|
||||
printf("rate: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate);
|
||||
printf("specified table query info: \n");
|
||||
printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate);
|
||||
printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.concurrent);
|
||||
printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount);
|
||||
|
||||
|
@ -1102,11 +1102,11 @@ static void printfQueryMeta() {
|
|||
printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]);
|
||||
}
|
||||
printf("\n");
|
||||
printf("sub table query info: \n");
|
||||
printf("rate: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.rate);
|
||||
printf("super table query info: \n");
|
||||
printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.rate);
|
||||
printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.threadCnt);
|
||||
printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.childTblCount);
|
||||
printf("childTblPrefix: \033[33m%s\033[0m\n", g_queryInfo.subQueryInfo.childTblPrefix);
|
||||
printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.subQueryInfo.sTblName);
|
||||
|
||||
if (SUBSCRIBE_MODE == g_jsonType) {
|
||||
printf("mod: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeMode);
|
||||
|
@ -4020,23 +4020,23 @@ void *superQueryProcess(void *sarg) {
|
|||
}
|
||||
selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], tmpFile);
|
||||
int64_t t2 = taosGetTimestampUs();
|
||||
printf("taosc select sql return, Spent %f s\n", (t2 - t1)/1000000.0);
|
||||
printf("=[taosc] thread[%"PRIu64"] complete one sql, Spent %f s\n", (uint64_t)pthread_self(), (t2 - t1)/1000000.0);
|
||||
} else {
|
||||
#ifdef TD_LOWA_CURL
|
||||
int64_t t1 = taosGetTimestampUs();
|
||||
int retCode = curlProceSql(g_queryInfo.host, g_queryInfo.port, g_queryInfo.superQueryInfo.sql[i], winfo->curl_handle);
|
||||
int64_t t2 = taosGetTimestampUs();
|
||||
printf("http select sql return, Spent %f s \n", (t2 - t1)/1000000.0);
|
||||
printf("=[restful] thread[%"PRIu64"] complete one sql, Spent %f s\n", (uint64_t)pthread_self(), (t2 - t1)/1000000.0);
|
||||
|
||||
if (0 != retCode) {
|
||||
printf("========curl return fail, threadID[%d]\n", winfo->threadID);
|
||||
printf("====curl return fail, threadID[%d]\n", winfo->threadID);
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
et = taosGetTimestampMs();
|
||||
printf("========thread[%"PRIu64"] complete all sqls to super table once queries duration:%.6fs\n\n", (uint64_t)pthread_self(), (double)(et - st)/1000.0);
|
||||
printf("==thread[%"PRIu64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", (uint64_t)pthread_self(), (double)(et - st)/1000.0);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
@ -4065,7 +4065,7 @@ void *subQueryProcess(void *sarg) {
|
|||
char sqlstr[1024];
|
||||
threadInfo *winfo = (threadInfo *)sarg;
|
||||
int64_t st = 0;
|
||||
int64_t et = 0;
|
||||
int64_t et = g_queryInfo.subQueryInfo.rate*1000;
|
||||
while (1) {
|
||||
if (g_queryInfo.subQueryInfo.rate && (et - st) < g_queryInfo.subQueryInfo.rate*1000) {
|
||||
taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms
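The changed initialization above (et starts at rate*1000) lets the first round run immediately while later rounds still sleep off the rest of the interval. A minimal standalone version of that throttling pattern, with msleep/now_ms standing in for taosMsleep/taosGetTimestampMs:

```c
#include <stdint.h>
#include <time.h>

static int64_t now_ms(void) {
  struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts);
  return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}
static void msleep(int64_t ms) {
  struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
  nanosleep(&ts, NULL);
}

static void run_rounds(int rate_sec, int rounds, void (*round_fn)(void)) {
  int64_t st = 0, et = (int64_t)rate_sec * 1000;       // first round starts immediately
  for (int i = 0; i < rounds; ++i) {
    if (rate_sec && (et - st) < (int64_t)rate_sec * 1000) {
      msleep((int64_t)rate_sec * 1000 - (et - st));    // sleep off the remainder
    }
    st = now_ms();
    round_fn();                                        // do one batch of queries
    et = now_ms();
  }
}
```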
|
||||
|
@ -4085,17 +4085,12 @@ void *subQueryProcess(void *sarg) {
|
|||
}
|
||||
}
|
||||
et = taosGetTimestampMs();
|
||||
printf("========thread[%"PRIu64"] complete all sqls to allocate all sub-tables once queries duration:%.4fs\n\n", (uint64_t)pthread_self(), (double)(et - st)/1000.0);
|
||||
printf("####thread[%"PRIu64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", (uint64_t)pthread_self(), winfo->start_table_id, winfo->end_table_id, (double)(et - st)/1000.0);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int queryTestProcess() {
|
||||
printfQueryMeta();
|
||||
|
||||
printf("Press enter key to continue\n\n");
|
||||
(void)getchar();
|
||||
|
||||
TAOS * taos = NULL;
|
||||
taos_init();
|
||||
taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, g_queryInfo.dbName, g_queryInfo.port);
|
||||
|
@ -4108,9 +4103,13 @@ int queryTestProcess() {
|
|||
(void)getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, g_queryInfo.subQueryInfo.sTblName, &g_queryInfo.subQueryInfo.childTblName, &g_queryInfo.subQueryInfo.childTblCount);
|
||||
}
|
||||
|
||||
printfQueryMeta();
|
||||
printf("Press enter key to continue\n\n");
|
||||
(void)getchar();
|
||||
|
||||
pthread_t *pids = NULL;
|
||||
threadInfo *infos = NULL;
|
||||
//==== create sub threads for query from super table
|
||||
//==== create sub threads for query from specify table
|
||||
if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) {
|
||||
|
||||
pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t));
|
||||
|
@ -4146,7 +4145,7 @@ int queryTestProcess() {
|
|||
|
||||
pthread_t *pidsOfSub = NULL;
|
||||
threadInfo *infosOfSub = NULL;
|
||||
//==== create sub threads for query from sub table
|
||||
//==== create sub threads for query from all sub table of the super table
|
||||
if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) {
|
||||
pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t));
|
||||
infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo));
|
||||
|
@ -4177,6 +4176,7 @@ int queryTestProcess() {
|
|||
|
||||
t_info->start_table_id = last;
|
||||
t_info->end_table_id = i < b ? last + a : last + a - 1;
|
||||
last = t_info->end_table_id + 1;
|
||||
t_info->taos = taos;
|
||||
pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info);
|
||||
}
|
||||
|
|
|
@ -195,7 +195,7 @@ static int32_t mnodeGetClusterMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *
|
|||
cols++;
|
||||
|
||||
pMeta->numOfColumns = htons(cols);
|
||||
strcpy(pMeta->tableId, "show cluster");
|
||||
strcpy(pMeta->tableFname, "show cluster");
|
||||
pShow->numOfColumns = cols;
|
||||
|
||||
pShow->offset[0] = 0;
|
||||
|
|
|
@ -385,12 +385,22 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v
|
|||
SConnObj *pConnObj = NULL;
|
||||
int32_t cols = 0;
|
||||
char * pWrite;
|
||||
void * pIter;
|
||||
char str[TSDB_IPv4ADDR_LEN + 6] = {0};
|
||||
|
||||
while (numOfRows < rows) {
|
||||
pShow->pIter = mnodeGetNextConn(pShow->pIter, &pConnObj);
|
||||
if (pConnObj == NULL) break;
|
||||
pIter = mnodeGetNextConn(pShow->pIter, &pConnObj);
|
||||
if (pConnObj == NULL) {
|
||||
pShow->pIter = pIter;
|
||||
break;
|
||||
}
|
||||
|
||||
if (numOfRows + pConnObj->numOfQueries >= rows) {
|
||||
mnodeCancelGetNextConn(pIter);
|
||||
break;
|
||||
}
|
||||
|
||||
pShow->pIter = pIter;
|
||||
for (int32_t i = 0; i < pConnObj->numOfQueries; ++i) {
|
||||
SQueryDesc *pDesc = pConnObj->pQueries + i;
|
||||
cols = 0;
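The rewritten loop above advances the connection iterator into a local pIter, commits it to pShow->pIter only when the connection's rows fit, and calls mnodeCancelGetNextConn(pIter) when the row budget would be exceeded, so the half-advanced iterator is released rather than leaked. A toy sketch of that advance / maybe-cancel / commit pattern; the DemoIter type and functions are hypothetical stand-ins, and stepping the position back plays the role of releasing the uncommitted iterator:

```c
// Toy iterator over elements that each contribute "batch" rows.
typedef struct { const int *batches; int pos, len; } DemoIter;

static DemoIter *demoGetNext(DemoIter *it, int *batch) {
  if (it->pos >= it->len) return NULL;     // iteration finished
  *batch = it->batches[it->pos++];         // rows contributed by this element
  return it;
}
static void demoCancel(DemoIter *it) { it->pos--; }  // give the element back, do not consume it

static int demoCollect(DemoIter *it, int rows) {
  int n = 0, batch;
  for (;;) {
    DemoIter *next = demoGetNext(it, &batch);            // advance into a local first
    if (next == NULL) break;                             // nothing left
    if (n + batch >= rows) { demoCancel(next); break; }  // over budget: release and stop
    n += batch;                                          // commit: this element's rows fit
  }
  return n;
}
```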
|
||||
|
@ -518,12 +528,22 @@ static int32_t mnodeRetrieveStreams(SShowObj *pShow, char *data, int32_t rows, v
|
|||
SConnObj *pConnObj = NULL;
|
||||
int32_t cols = 0;
|
||||
char * pWrite;
|
||||
void * pIter;
|
||||
char ipStr[TSDB_IPv4ADDR_LEN + 6];
|
||||
|
||||
while (numOfRows < rows) {
|
||||
pShow->pIter = mnodeGetNextConn(pShow->pIter, &pConnObj);
|
||||
if (pConnObj == NULL) break;
|
||||
pIter = mnodeGetNextConn(pShow->pIter, &pConnObj);
|
||||
if (pConnObj == NULL) {
|
||||
pShow->pIter = pIter;
|
||||
break;
|
||||
}
|
||||
|
||||
if (numOfRows + pConnObj->numOfStreams >= rows) {
|
||||
mnodeCancelGetNextConn(pIter);
|
||||
break;
|
||||
}
|
||||
|
||||
pShow->pIter = pIter;
|
||||
for (int32_t i = 0; i < pConnObj->numOfStreams; ++i) {
|
||||
SStreamDesc *pDesc = pConnObj->pStreams + i;
|
||||
cols = 0;
|
||||
|
|
|
@ -68,7 +68,7 @@ static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow,
|
|||
static int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, void *pConn);
|
||||
static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
|
||||
static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t rows, void *pConn);
|
||||
|
||||
|
||||
static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg);
|
||||
static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg);
|
||||
static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg);
|
||||
|
@ -164,7 +164,7 @@ static int32_t mnodeChildTableActionDelete(SSdbRow *pRow) {
|
|||
SVgObj *pVgroup = NULL;
|
||||
SDbObj *pDb = NULL;
|
||||
SAcctObj *pAcct = NULL;
|
||||
|
||||
|
||||
pVgroup = mnodeGetVgroup(pTable->vgId);
|
||||
if (pVgroup != NULL) pDb = mnodeGetDb(pVgroup->dbName);
|
||||
if (pDb != NULL) pAcct = mnodeGetAcct(pDb->acct);
|
||||
|
@ -180,14 +180,14 @@ static int32_t mnodeChildTableActionDelete(SSdbRow *pRow) {
|
|||
grantRestore(TSDB_GRANT_TIMESERIES, pTable->numOfColumns - 1);
|
||||
if (pAcct != NULL) pAcct->acctInfo.numOfTimeSeries -= (pTable->numOfColumns - 1);
|
||||
}
|
||||
|
||||
|
||||
if (pDb != NULL) mnodeRemoveTableFromDb(pDb);
|
||||
if (pVgroup != NULL) mnodeRemoveTableFromVgroup(pVgroup, pTable);
|
||||
|
||||
mnodeDecVgroupRef(pVgroup);
|
||||
mnodeDecDbRef(pDb);
|
||||
mnodeDecAcctRef(pAcct);
|
||||
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -195,19 +195,19 @@ static int32_t mnodeChildTableActionUpdate(SSdbRow *pRow) {
|
|||
SCTableObj *pNew = pRow->pObj;
|
||||
SCTableObj *pTable = mnodeGetChildTable(pNew->info.tableId);
|
||||
if (pTable != pNew) {
|
||||
void *oldTableId = pTable->info.tableId;
|
||||
void *oldTableId = pTable->info.tableId;
|
||||
void *oldSql = pTable->sql;
|
||||
void *oldSchema = pTable->schema;
|
||||
void *oldSTable = pTable->superTable;
|
||||
int32_t oldRefCount = pTable->refCount;
|
||||
|
||||
|
||||
memcpy(pTable, pNew, sizeof(SCTableObj));
|
||||
|
||||
|
||||
pTable->refCount = oldRefCount;
|
||||
pTable->sql = pNew->sql;
|
||||
pTable->schema = pNew->schema;
|
||||
pTable->superTable = oldSTable;
|
||||
|
||||
|
||||
free(pNew);
|
||||
free(oldSql);
|
||||
free(oldSchema);
|
||||
|
@ -544,7 +544,7 @@ static int32_t mnodeSuperTableActionDecode(SSdbRow *pRow) {
|
|||
}
|
||||
|
||||
memcpy(pStable->schema, pRow->rowData + len, schemaSize);
|
||||
|
||||
|
||||
pRow->pObj = pStable;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -611,7 +611,7 @@ int32_t mnodeInitTables() {
|
|||
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_ALTER_TABLE, mnodeProcessAlterTableMsg);
|
||||
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_TABLE_META, mnodeProcessTableMetaMsg);
|
||||
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_STABLE_VGROUP, mnodeProcessSuperTableVgroupMsg);
|
||||
|
||||
|
||||
mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_CREATE_TABLE_RSP, mnodeProcessCreateChildTableRsp);
|
||||
mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_DROP_TABLE_RSP, mnodeProcessDropChildTableRsp);
|
||||
mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_DROP_STABLE_RSP, mnodeProcessDropSuperTableRsp);
|
||||
|
@ -750,7 +750,7 @@ void mnodeDestroySubMsg(SMnodeMsg *pSubMsg) {
|
|||
static int32_t mnodeValidateCreateTableMsg(SCreateTableMsg *pCreateTable, SMnodeMsg *pMsg) {
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pCreateTable->db);
|
||||
if (pMsg->pDb == NULL) {
|
||||
mError("msg:%p, app:%p table:%s, failed to create, db not selected", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableId);
|
||||
mError("msg:%p, app:%p table:%s, failed to create, db not selected", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableFname);
|
||||
return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
}
|
||||
|
||||
|
@ -759,28 +759,28 @@ static int32_t mnodeValidateCreateTableMsg(SCreateTableMsg *pCreateTable, SMnode
|
|||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreateTable->tableId);
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreateTable->tableFname);
|
||||
if (pMsg->pTable != NULL && pMsg->retry == 0) {
|
||||
if (pCreateTable->getMeta) {
|
||||
mDebug("msg:%p, app:%p table:%s, continue to get meta", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableId);
|
||||
mDebug("msg:%p, app:%p table:%s, continue to get meta", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableFname);
|
||||
return mnodeGetChildTableMeta(pMsg);
|
||||
} else if (pCreateTable->igExists) {
|
||||
mDebug("msg:%p, app:%p table:%s, is already exist", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableId);
|
||||
mDebug("msg:%p, app:%p table:%s, is already exist", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableFname);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
mError("msg:%p, app:%p table:%s, failed to create, table already exist", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pCreateTable->tableId);
|
||||
pCreateTable->tableFname);
|
||||
return TSDB_CODE_MND_TABLE_ALREADY_EXIST;
|
||||
}
|
||||
}
|
||||
|
||||
if (pCreateTable->numOfTags != 0) {
|
||||
mDebug("msg:%p, app:%p table:%s, create stable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pCreateTable->tableId, pMsg->rpcMsg.handle);
|
||||
pCreateTable->tableFname, pMsg->rpcMsg.handle);
|
||||
return mnodeProcessCreateSuperTableMsg(pMsg);
|
||||
} else {
|
||||
mDebug("msg:%p, app:%p table:%s, create ctable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pCreateTable->tableId, pMsg->rpcMsg.handle);
|
||||
pCreateTable->tableFname, pMsg->rpcMsg.handle);
|
||||
return mnodeProcessCreateChildTableMsg(pMsg);
|
||||
}
|
||||
}
|
||||
|
@ -862,47 +862,46 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) {
|
|||
SCreateTableMsg *p = (SCreateTableMsg*)((char*) pCreate + sizeof(SCMCreateTableMsg));
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(p->db);
|
||||
if (pMsg->pDb == NULL) {
|
||||
mError("msg:%p, app:%p table:%s, failed to create, db not selected", pMsg, pMsg->rpcMsg.ahandle, p->tableId);
|
||||
mError("msg:%p, app:%p table:%s, failed to create, db not selected", pMsg, pMsg->rpcMsg.ahandle, p->tableFname);
|
||||
return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
}
|
||||
|
||||
|
||||
if (pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(p->tableId);
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(p->tableFname);
|
||||
if (pMsg->pTable != NULL && pMsg->retry == 0) {
|
||||
if (p->getMeta) {
|
||||
mDebug("msg:%p, app:%p table:%s, continue to get meta", pMsg, pMsg->rpcMsg.ahandle, p->tableId);
|
||||
mDebug("msg:%p, app:%p table:%s, continue to get meta", pMsg, pMsg->rpcMsg.ahandle, p->tableFname);
|
||||
return mnodeGetChildTableMeta(pMsg);
|
||||
} else if (p->igExists) {
|
||||
mDebug("msg:%p, app:%p table:%s, is already exist", pMsg, pMsg->rpcMsg.ahandle, p->tableId);
|
||||
mDebug("msg:%p, app:%p table:%s, is already exist", pMsg, pMsg->rpcMsg.ahandle, p->tableFname);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
mError("msg:%p, app:%p table:%s, failed to create, table already exist", pMsg, pMsg->rpcMsg.ahandle,
|
||||
p->tableId);
|
||||
mError("msg:%p, app:%p table:%s, failed to create, table already exist", pMsg, pMsg->rpcMsg.ahandle, p->tableFname);
|
||||
return TSDB_CODE_MND_TABLE_ALREADY_EXIST;
|
||||
}
|
||||
}
|
||||
|
||||
if (p->numOfTags != 0) {
|
||||
mDebug("msg:%p, app:%p table:%s, create stable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle,
|
||||
p->tableId, pMsg->rpcMsg.handle);
|
||||
p->tableFname, pMsg->rpcMsg.handle);
|
||||
return mnodeProcessCreateSuperTableMsg(pMsg);
|
||||
} else {
|
||||
mDebug("msg:%p, app:%p table:%s, create ctable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle,
|
||||
p->tableId, pMsg->rpcMsg.handle);
|
||||
p->tableFname, pMsg->rpcMsg.handle);
|
||||
return mnodeProcessCreateChildTableMsg(pMsg);
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
|
||||
SCMDropTableMsg *pDrop = pMsg->rpcMsg.pCont;
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pDrop->tableId);
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pDrop->tableFname);
|
||||
if (pMsg->pDb == NULL) {
|
||||
mError("msg:%p, app:%p table:%s, failed to drop table, db not selected or db in dropping", pMsg,
|
||||
pMsg->rpcMsg.ahandle, pDrop->tableId);
|
||||
pMsg->rpcMsg.ahandle, pDrop->tableFname);
|
||||
return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
}
|
||||
|
||||
|
@ -913,17 +912,17 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
|
|||
|
||||
if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) {
|
||||
mError("msg:%p, app:%p table:%s, failed to drop table, in monitor database", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pDrop->tableId);
|
||||
pDrop->tableFname);
|
||||
return TSDB_CODE_MND_MONITOR_DB_FORBIDDEN;
|
||||
}
|
||||
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pDrop->tableId);
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pDrop->tableFname);
|
||||
if (pMsg->pTable == NULL) {
|
||||
if (pDrop->igNotExists) {
|
||||
mDebug("msg:%p, app:%p table:%s is not exist, treat as success", pMsg, pMsg->rpcMsg.ahandle, pDrop->tableId);
|
||||
mDebug("msg:%p, app:%p table:%s is not exist, treat as success", pMsg, pMsg->rpcMsg.ahandle, pDrop->tableFname);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
mError("msg:%p, app:%p table:%s, failed to drop, table not exist", pMsg, pMsg->rpcMsg.ahandle, pDrop->tableId);
|
||||
mError("msg:%p, app:%p table:%s, failed to drop, table not exist", pMsg, pMsg->rpcMsg.ahandle, pDrop->tableFname);
|
||||
return TSDB_CODE_MND_INVALID_TABLE_NAME;
|
||||
}
|
||||
}
|
||||
|
@ -931,12 +930,12 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
|
|||
if (pMsg->pTable->type == TSDB_SUPER_TABLE) {
|
||||
SSTableObj *pSTable = (SSTableObj *)pMsg->pTable;
|
||||
mInfo("msg:%p, app:%p table:%s, start to drop stable, uid:%" PRIu64 ", numOfChildTables:%d, sizeOfVgList:%d", pMsg,
|
||||
pMsg->rpcMsg.ahandle, pDrop->tableId, pSTable->uid, pSTable->numOfTables, taosHashGetSize(pSTable->vgHash));
|
||||
pMsg->rpcMsg.ahandle, pDrop->tableFname, pSTable->uid, pSTable->numOfTables, taosHashGetSize(pSTable->vgHash));
|
||||
return mnodeProcessDropSuperTableMsg(pMsg);
|
||||
} else {
|
||||
SCTableObj *pCTable = (SCTableObj *)pMsg->pTable;
|
||||
mInfo("msg:%p, app:%p table:%s, start to drop ctable, vgId:%d tid:%d uid:%" PRIu64, pMsg, pMsg->rpcMsg.ahandle,
|
||||
pDrop->tableId, pCTable->vgId, pCTable->tid, pCTable->uid);
|
||||
pDrop->tableFname, pCTable->vgId, pCTable->tid, pCTable->uid);
|
||||
return mnodeProcessDropChildTableMsg(pMsg);
|
||||
}
|
||||
}
|
||||
|
@ -945,29 +944,29 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) {
|
|||
STableInfoMsg *pInfo = pMsg->rpcMsg.pCont;
|
||||
pInfo->createFlag = htons(pInfo->createFlag);
|
||||
mDebug("msg:%p, app:%p table:%s, table meta msg is received from thandle:%p, createFlag:%d", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pInfo->tableId, pMsg->rpcMsg.handle, pInfo->createFlag);
|
||||
pInfo->tableFname, pMsg->rpcMsg.handle, pInfo->createFlag);
|
||||
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pInfo->tableId);
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pInfo->tableFname);
|
||||
if (pMsg->pDb == NULL) {
|
||||
mError("msg:%p, app:%p table:%s, failed to get table meta, db not selected", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pInfo->tableId);
|
||||
pInfo->tableFname);
|
||||
return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
}
|
||||
|
||||
|
||||
if (pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pInfo->tableId);
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pInfo->tableFname);
|
||||
if (pMsg->pTable == NULL) {
|
||||
if (!pInfo->createFlag) {
|
||||
mError("msg:%p, app:%p table:%s, failed to get table meta, table not exist", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pInfo->tableId);
|
||||
pInfo->tableFname);
|
||||
return TSDB_CODE_MND_INVALID_TABLE_NAME;
|
||||
} else {
|
||||
mDebug("msg:%p, app:%p table:%s, failed to get table meta, start auto create table ", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pInfo->tableId);
|
||||
pInfo->tableFname);
|
||||
return mnodeAutoCreateChildTable(pMsg);
|
||||
}
|
||||
} else {
|
||||
|
@ -1007,12 +1006,12 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
|
|||
|
||||
SSTableObj * pStable = calloc(1, sizeof(SSTableObj));
|
||||
if (pStable == NULL) {
|
||||
mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId);
|
||||
mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
|
||||
return TSDB_CODE_MND_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
int64_t us = taosGetTimestampUs();
|
||||
pStable->info.tableId = strdup(pCreate->tableId);
|
||||
pStable->info.tableId = strdup(pCreate->tableFname);
|
||||
pStable->info.type = TSDB_SUPER_TABLE;
|
||||
pStable->createdTime = taosGetTimestampMs();
|
||||
pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
|
||||
|
@@ -1026,41 +1025,27 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
   pStable->schema = (SSchema *)calloc(1, schemaSize);
   if (pStable->schema == NULL) {
     free(pStable);
-    mError("msg:%p, app:%p table:%s, failed to create, no schema input", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId);
+    mError("msg:%p, app:%p table:%s, failed to create, no schema input", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
     return TSDB_CODE_MND_INVALID_TABLE_NAME;
   }

   memcpy(pStable->schema, pCreate->schema, numOfCols * sizeof(SSchema));

   if (pStable->numOfColumns > TSDB_MAX_COLUMNS || pStable->numOfTags > TSDB_MAX_TAGS) {
-    mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId);
+    mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
     return TSDB_CODE_MND_INVALID_TABLE_NAME;
   }

   pStable->nextColId = 0;

-  // TODO extract method to valid the schema
-  int32_t schemaLen = 0;
-  int32_t tagLen = 0;
   for (int32_t col = 0; col < numOfCols; col++) {
     SSchema *tschema = pStable->schema;
     tschema[col].colId = pStable->nextColId++;
     tschema[col].bytes = htons(tschema[col].bytes);
-
-    if (col < pStable->numOfTables) {
-      schemaLen += tschema[col].bytes;
-    } else {
-      tagLen += tschema[col].bytes;
-    }
-
-    if (!isValidDataType(tschema[col].type)) {
-      mError("msg:%p, app:%p table:%s, failed to create, invalid data type in schema", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId);
-      return TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG;
-    }
   }

-  if (schemaLen > (TSDB_MAX_BYTES_PER_ROW || tagLen > TSDB_MAX_TAGS_LEN)) {
-    mError("msg:%p, app:%p table:%s, failed to create, schema is too long", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId);
+  if (!isValidSchema(pStable->schema, pStable->numOfColumns, pStable->numOfTags)) {
+    mError("msg:%p, app:%p table:%s, failed to create table, invalid schema", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
     return TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG;
   }
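The removed ad-hoc bookkeeping in the hunk above had two weaknesses worth noting: the accumulation branch tested `col < pStable->numOfTables` where the column count was presumably intended, and the guard `schemaLen > (TSDB_MAX_BYTES_PER_ROW || tagLen > TSDB_MAX_TAGS_LEN)` parenthesizes the limits into a boolean constant, so the tag length was never actually checked. The replacement delegates to isValidSchema, whose body is outside this diff; the standalone sketch below only illustrates the kind of checks such a validator performs, using stand-in types and limits rather than the real TDengine definitions.

```c
#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for illustration; the real SSchema, type codes and limits
 * live in the TDengine headers and may differ. */
typedef struct { int8_t type; int16_t bytes; } MiniSchema;
enum { MAX_BYTES_PER_ROW = 16384, MAX_TAGS_LEN = 16384, MAX_TYPE = 10 };

/* Sketch of a schema validator: every column has a known type, data
 * columns fit within one row, tag columns fit within the tag area. */
static bool sketchIsValidSchema(const MiniSchema *s, int32_t numOfCols, int32_t numOfTags) {
  int32_t rowLen = 0, tagLen = 0;
  for (int32_t i = 0; i < numOfCols + numOfTags; ++i) {
    if (s[i].type <= 0 || s[i].type > MAX_TYPE) return false;  /* unknown data type */
    if (i < numOfCols) rowLen += s[i].bytes;                   /* data columns */
    else               tagLen += s[i].bytes;                   /* tag columns */
  }
  return rowLen <= MAX_BYTES_PER_ROW && tagLen <= MAX_TAGS_LEN;
}
```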
@ -1080,7 +1065,7 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
|
|||
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
|
||||
mnodeDestroySuperTable(pStable);
|
||||
pMsg->pTable = NULL;
|
||||
mError("msg:%p, app:%p table:%s, failed to create, sdb error", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId);
|
||||
mError("msg:%p, app:%p table:%s, failed to create, sdb error", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
|
||||
}
|
||||
|
||||
return code;
|
||||
|
@ -1115,7 +1100,7 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) {
|
|||
pDrop->contLen = htonl(sizeof(SDropSTableMsg));
|
||||
pDrop->vgId = htonl(pVgroup->vgId);
|
||||
pDrop->uid = htobe64(pStable->uid);
|
||||
mnodeExtractTableName(pStable->info.tableId, pDrop->tableId);
|
||||
mnodeExtractTableName(pStable->info.tableId, pDrop->tableFname);
|
||||
|
||||
mInfo("msg:%p, app:%p stable:%s, send drop stable msg to vgId:%d, hash:%p sizeOfVgList:%d", pMsg,
|
||||
pMsg->rpcMsg.ahandle, pStable->info.tableId, pVgroup->vgId, pStable->vgHash,
|
||||
|
@ -1129,8 +1114,8 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) {
|
|||
taosHashCancelIterate(pStable->vgHash, pVgId);
|
||||
|
||||
mnodeDropAllChildTablesInStable(pStable);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
SSdbRow row = {
|
||||
.type = SDB_OPER_GLOBAL,
|
||||
.pTable = tsSuperTableSdb,
|
||||
|
@ -1274,7 +1259,7 @@ static int32_t mnodeModifySuperTableTagName(SMnodeMsg *pMsg, char *oldTagName, c
|
|||
if (mnodeFindSuperTableTagIndex(pStable, newTagName) >= 0) {
|
||||
return TSDB_CODE_MND_TAG_ALREAY_EXIST;
|
||||
}
|
||||
|
||||
|
||||
// update
|
||||
SSchema *schema = (SSchema *) (pStable->schema + pStable->numOfColumns + col);
|
||||
tstrncpy(schema->name, newTagName, sizeof(schema->name));
|
||||
|
@ -1437,7 +1422,7 @@ static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg, char *oldName, char
|
|||
if (mnodeFindSuperTableColumnIndex(pStable, newName) >= 0) {
|
||||
return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
|
||||
}
|
||||
|
||||
|
||||
// update
|
||||
SSchema *schema = (SSchema *) (pStable->schema + col);
|
||||
tstrncpy(schema->name, newName, sizeof(schema->name));
|
||||
|
@ -1460,7 +1445,7 @@ static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg, char *oldName, char
|
|||
static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
|
@ -1525,7 +1510,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows,
|
|||
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return 0;
|
||||
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
|
@ -1539,7 +1524,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows,
|
|||
SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;
|
||||
char stableName[TSDB_TABLE_NAME_LEN] = {0};
|
||||
|
||||
while (numOfRows < rows) {
|
||||
while (numOfRows < rows) {
|
||||
pShow->pIter = mnodeGetNextSuperTable(pShow->pIter, &pTable);
|
||||
if (pTable == NULL) break;
|
||||
if (strncmp(pTable->info.tableId, prefix, prefixLen)) {
|
||||
|
@ -1558,11 +1543,11 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows,
|
|||
cols = 0;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
|
||||
|
||||
int16_t len = strnlen(stableName, TSDB_TABLE_NAME_LEN - 1);
|
||||
*(int16_t*) pWrite = len;
|
||||
pWrite += sizeof(int16_t); // todo refactor
|
||||
|
||||
|
||||
strncpy(pWrite, stableName, len);
|
||||
cols++;
|
||||
|
||||
|
@ -1629,7 +1614,7 @@ void mnodeDropAllSuperTables(SDbObj *pDropDb) {
|
|||
static int32_t mnodeSetSchemaFromSuperTable(SSchema *pSchema, SSTableObj *pTable) {
|
||||
int32_t numOfCols = pTable->numOfColumns + pTable->numOfTags;
|
||||
assert(numOfCols <= TSDB_MAX_COLUMNS);
|
||||
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
tstrncpy(pSchema->name, pTable->schema[i].name, sizeof(pSchema->name));
|
||||
pSchema->type = pTable->schema[i].type;
|
||||
|
@ -1655,7 +1640,7 @@ static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) {
|
|||
pMeta->numOfColumns = htons((int16_t)pTable->numOfColumns);
|
||||
pMeta->tableType = pTable->info.type;
|
||||
pMeta->contLen = sizeof(STableMetaMsg) + mnodeSetSchemaFromSuperTable(pMeta->schema, pTable);
|
||||
tstrncpy(pMeta->tableId, pTable->info.tableId, sizeof(pMeta->tableId));
|
||||
tstrncpy(pMeta->tableFname, pTable->info.tableId, sizeof(pMeta->tableFname));
|
||||
|
||||
pMsg->rpcRsp.len = pMeta->contLen;
|
||||
pMeta->contLen = htons(pMeta->contLen);
|
||||
|
@ -1709,7 +1694,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
|
|||
|
||||
SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)msg;
|
||||
pVgroupMsg->numOfVgroups = 0;
|
||||
|
||||
|
||||
msg += sizeof(SVgroupsMsg);
|
||||
} else {
|
||||
SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)msg;
|
||||
|
@ -1798,7 +1783,7 @@ static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pCreateMsg, SCTabl
|
|||
return NULL;
|
||||
}
|
||||
|
||||
mnodeExtractTableName(pTable->info.tableId, pCreate->tableId);
|
||||
mnodeExtractTableName(pTable->info.tableId, pCreate->tableFname);
|
||||
pCreate->contLen = htonl(contLen);
|
||||
pCreate->vgId = htonl(pTable->vgId);
|
||||
pCreate->tableType = pTable->info.type;
|
||||
|
@ -1806,9 +1791,9 @@ static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pCreateMsg, SCTabl
|
|||
pCreate->tid = htonl(pTable->tid);
|
||||
pCreate->sqlDataLen = htonl(pTable->sqlLen);
|
||||
pCreate->uid = htobe64(pTable->uid);
|
||||
|
||||
|
||||
if (pTable->info.type == TSDB_CHILD_TABLE) {
|
||||
mnodeExtractTableName(pTable->superTable->info.tableId, pCreate->superTableId);
|
||||
mnodeExtractTableName(pTable->superTable->info.tableId, pCreate->stableFname);
|
||||
pCreate->numOfColumns = htons(pTable->superTable->numOfColumns);
|
||||
pCreate->numOfTags = htons(pTable->superTable->numOfTags);
|
||||
pCreate->sversion = htonl(pTable->superTable->sversion);
|
||||
|
@ -1823,7 +1808,7 @@ static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pCreateMsg, SCTabl
|
|||
pCreate->tagDataLen = 0;
|
||||
pCreate->superTableUid = 0;
|
||||
}
|
||||
|
||||
|
||||
SSchema *pSchema = (SSchema *) pCreate->data;
|
||||
if (pTable->info.type == TSDB_CHILD_TABLE) {
|
||||
memcpy(pSchema, pTable->superTable->schema, totalCols * sizeof(SSchema));
|
||||
|
@ -1922,12 +1907,12 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
|
|||
|
||||
SCTableObj *pTable = calloc(1, sizeof(SCTableObj));
|
||||
if (pTable == NULL) {
|
||||
mError("msg:%p, app:%p table:%s, failed to alloc memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId);
|
||||
mError("msg:%p, app:%p table:%s, failed to alloc memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
|
||||
return TSDB_CODE_MND_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
pTable->info.type = (pCreate->numOfColumns == 0)? TSDB_CHILD_TABLE:TSDB_NORMAL_TABLE;
|
||||
pTable->info.tableId = strdup(pCreate->tableId);
|
||||
pTable->info.tableId = strdup(pCreate->tableFname);
|
||||
pTable->createdTime = taosGetTimestampMs();
|
||||
pTable->tid = tid;
|
||||
pTable->vgId = pVgroup->vgId;
|
||||
|
@ -1943,7 +1928,7 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
|
|||
size_t prefixLen = tableIdPrefix(pMsg->pDb->name, prefix, 64);
|
||||
if (0 != strncasecmp(prefix, stableName, prefixLen)) {
|
||||
mError("msg:%p, app:%p table:%s, corresponding super table:%s not in this db", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pCreate->tableId, stableName);
|
||||
pCreate->tableFname, stableName);
|
||||
mnodeDestroyChildTable(pTable);
|
||||
return TSDB_CODE_TDB_INVALID_CREATE_TB_MSG;
|
||||
}
|
||||
|
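The same-database check just above relies on table ids being fully qualified, so that both the child table and its super table carry the same "acct.db." prefix and membership in one db reduces to a case-insensitive prefix compare. The id format and the prefix builder are assumptions here: tableIdPrefix is not shown in this diff, and buildDbPrefix below is a hypothetical stand-in used only to illustrate the idea.

```c
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Assumption: the db name is already qualified as "acct.db"; the prefix
 * just appends the separating dot, mirroring what tableIdPrefix appears to do. */
static size_t buildDbPrefix(const char *dbName, char *prefix, size_t cap) {
  return (size_t)snprintf(prefix, cap, "%s.", dbName);
}

int main(void) {
  char prefix[64];
  size_t len = buildDbPrefix("acct1.db1", prefix, sizeof(prefix));
  const char *stableName = "acct1.db1.stb";           /* hypothetical super table id */
  printf("same db: %d\n", strncasecmp(prefix, stableName, len) == 0);  /* prints 1 */
  return 0;
}
```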
@ -1951,7 +1936,7 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
|
|||
if (pMsg->pSTable == NULL) pMsg->pSTable = mnodeGetSuperTable(stableName);
|
||||
if (pMsg->pSTable == NULL) {
|
||||
mError("msg:%p, app:%p table:%s, corresponding super table:%s does not exist", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pCreate->tableId, stableName);
|
||||
pCreate->tableFname, stableName);
|
||||
mnodeDestroyChildTable(pTable);
|
||||
return TSDB_CODE_MND_INVALID_TABLE_NAME;
|
||||
}
|
||||
|
@ -2013,12 +1998,12 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
|
|||
.pMsg = pMsg,
|
||||
.fpReq = mnodeDoCreateChildTableFp
|
||||
};
|
||||
|
||||
|
||||
int32_t code = sdbInsertRow(&desc);
|
||||
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
|
||||
mnodeDestroyChildTable(pTable);
|
||||
pMsg->pTable = NULL;
|
||||
mError("msg:%p, app:%p table:%s, failed to create, reason:%s", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId,
|
||||
mError("msg:%p, app:%p table:%s, failed to create, reason:%s", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname,
|
||||
tstrerror(code));
|
||||
} else {
|
||||
mDebug("msg:%p, app:%p table:%s, allocated in vgroup, vgId:%d sid:%d uid:%" PRIu64, pMsg, pMsg->rpcMsg.ahandle,
|
||||
|
@ -2035,7 +2020,7 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
|
|||
int32_t code = grantCheck(TSDB_GRANT_TIMESERIES);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
mError("msg:%p, app:%p table:%s, failed to create, grant timeseries failed", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pCreate->tableId);
|
||||
pCreate->tableFname);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -2046,7 +2031,7 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
|
|||
code = mnodeGetAvailableVgroup(pMsg, &pVgroup, &tid);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
mDebug("msg:%p, app:%p table:%s, failed to get available vgroup, reason:%s", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pCreate->tableId, tstrerror(code));
|
||||
pCreate->tableFname, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -2060,15 +2045,15 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
|
|||
return mnodeDoCreateChildTable(pMsg, tid);
|
||||
}
|
||||
} else {
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreate->tableId);
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreate->tableFname);
|
||||
}
|
||||
|
||||
if (pMsg->pTable == NULL) {
|
||||
mError("msg:%p, app:%p table:%s, object not found, retry:%d reason:%s", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId, pMsg->retry,
|
||||
mError("msg:%p, app:%p table:%s, object not found, retry:%d reason:%s", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname, pMsg->retry,
|
||||
tstrerror(terrno));
|
||||
return terrno;
|
||||
} else {
|
||||
mDebug("msg:%p, app:%p table:%s, send create msg to vnode again", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId);
|
||||
mDebug("msg:%p, app:%p table:%s, send create msg to vnode again", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
|
||||
return mnodeDoCreateChildTableFp(pMsg);
|
||||
}
|
||||
}
|
||||
|
@ -2084,7 +2069,7 @@ static int32_t mnodeSendDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {
|
|||
return TSDB_CODE_MND_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
tstrncpy(pDrop->tableId, pTable->info.tableId, TSDB_TABLE_FNAME_LEN);
|
||||
tstrncpy(pDrop->tableFname, pTable->info.tableId, TSDB_TABLE_FNAME_LEN);
|
||||
pDrop->vgId = htonl(pTable->vgId);
|
||||
pDrop->contLen = htonl(sizeof(SMDDropTableMsg));
|
||||
pDrop->tid = htonl(pTable->tid);
|
||||
|
@ -2093,7 +2078,7 @@ static int32_t mnodeSendDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {
|
|||
SRpcEpSet epSet = mnodeGetEpSetFromVgroup(pMsg->pVgroup);
|
||||
|
||||
mInfo("msg:%p, app:%p ctable:%s, send drop ctable msg, vgId:%d sid:%d uid:%" PRIu64, pMsg, pMsg->rpcMsg.ahandle,
|
||||
pDrop->tableId, pTable->vgId, pTable->tid, pTable->uid);
|
||||
pDrop->tableFname, pTable->vgId, pTable->tid, pTable->uid);
|
||||
|
||||
SRpcMsg rpcMsg = {
|
||||
.ahandle = pMsg,
|
||||
|
@ -2115,7 +2100,7 @@ static int32_t mnodeDropChildTableCb(SMnodeMsg *pMsg, int32_t code) {
|
|||
SCTableObj *pTable = (SCTableObj *)pMsg->pTable;
|
||||
mError("msg:%p, app:%p ctable:%s, failed to drop, sdb error", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId);
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
return mnodeSendDropChildTableMsg(pMsg, true);
|
||||
}
|
||||
|
@ -2224,7 +2209,7 @@ static int32_t mnodeAddNormalTableColumn(SMnodeMsg *pMsg, SSchema schema[], int3
|
|||
|
||||
pTable->numOfColumns += ncols;
|
||||
pTable->sversion++;
|
||||
|
||||
|
||||
SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
|
||||
if (pAcct != NULL) {
|
||||
pAcct->acctInfo.numOfTimeSeries += ncols;
|
||||
|
@ -2295,7 +2280,7 @@ static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg, char *oldName, char
|
|||
if (mnodeFindNormalTableColumnIndex(pTable, newName) >= 0) {
|
||||
return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
|
||||
}
|
||||
|
||||
|
||||
// update
|
||||
SSchema *schema = (SSchema *) (pTable->schema + col);
|
||||
tstrncpy(schema->name, newName, sizeof(schema->name));
|
||||
|
@ -2335,7 +2320,7 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) {
|
|||
pMeta->tid = htonl(pTable->tid);
|
||||
pMeta->precision = pDb->cfg.precision;
|
||||
pMeta->tableType = pTable->info.type;
|
||||
tstrncpy(pMeta->tableId, pTable->info.tableId, TSDB_TABLE_FNAME_LEN);
|
||||
tstrncpy(pMeta->tableFname, pTable->info.tableId, TSDB_TABLE_FNAME_LEN);
|
||||
|
||||
if (pTable->info.type == TSDB_CHILD_TABLE) {
|
||||
assert(pTable->superTable != NULL);
|
||||
|
@ -2352,7 +2337,7 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) {
|
|||
pMeta->tversion = 0;
|
||||
pMeta->numOfTags = 0;
|
||||
pMeta->numOfColumns = htons((int16_t)pTable->numOfColumns);
|
||||
pMeta->contLen = sizeof(STableMetaMsg) + mnodeSetSchemaFromNormalTable(pMeta->schema, pTable);
|
||||
pMeta->contLen = sizeof(STableMetaMsg) + mnodeSetSchemaFromNormalTable(pMeta->schema, pTable);
|
||||
}
|
||||
|
||||
if (pMsg->pVgroup == NULL) pMsg->pVgroup = mnodeGetVgroup(pTable->vgId);
|
||||
|
@ -2383,7 +2368,7 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
|
|||
|
||||
if (pMsg->rpcMsg.contLen <= sizeof(*pInfo)) {
|
||||
mError("msg:%p, app:%p table:%s, failed to auto create child table, tags not exist", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pInfo->tableId);
|
||||
pInfo->tableFname);
|
||||
return TSDB_CODE_MND_TAG_NOT_EXIST;
|
||||
}
|
||||
|
||||
|
@ -2398,7 +2383,7 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
|
|||
int32_t totalLen = nameLen + tagLen + sizeof(int32_t)*2;
|
||||
if (tagLen == 0 || nameLen == 0) {
|
||||
mError("msg:%p, app:%p table:%s, failed to create table on demand for super table is empty, tagLen:%d", pMsg,
|
||||
pMsg->rpcMsg.ahandle, pInfo->tableId, tagLen);
|
||||
pMsg->rpcMsg.ahandle, pInfo->tableFname, tagLen);
|
||||
return TSDB_CODE_MND_INVALID_STABLE_NAME;
|
||||
}
|
||||
|
||||
|
@ -2406,14 +2391,14 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
|
|||
SCMCreateTableMsg *pCreateMsg = calloc(1, contLen);
|
||||
if (pCreateMsg == NULL) {
|
||||
mError("msg:%p, app:%p table:%s, failed to create table while get meta info, no enough memory", pMsg,
|
||||
pMsg->rpcMsg.ahandle, pInfo->tableId);
|
||||
pMsg->rpcMsg.ahandle, pInfo->tableFname);
|
||||
return TSDB_CODE_MND_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
SCreateTableMsg* pCreate = (SCreateTableMsg*) ((char*) pCreateMsg + sizeof(SCMCreateTableMsg));
|
||||
|
||||
size_t size = tListLen(pInfo->tableId);
|
||||
tstrncpy(pCreate->tableId, pInfo->tableId, size);
|
||||
size_t size = tListLen(pInfo->tableFname);
|
||||
tstrncpy(pCreate->tableFname, pInfo->tableFname, size);
|
||||
tstrncpy(pCreate->db, pMsg->pDb->name, sizeof(pCreate->db));
|
||||
pCreate->igExists = 1;
|
||||
pCreate->getMeta = 1;
|
||||
|
@ -2427,7 +2412,7 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
|
|||
memcpy(name, pInfo->tags + sizeof(int32_t), nameLen);
|
||||
|
||||
mDebug("msg:%p, app:%p table:%s, start to create on demand, tagLen:%d stable:%s", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pInfo->tableId, tagLen, name);
|
||||
pInfo->tableFname, tagLen, name);
|
||||
|
||||
if (pMsg->rpcMsg.pCont != pMsg->pCont) {
|
||||
tfree(pMsg->rpcMsg.pCont);
|
||||
|
@ -2557,7 +2542,7 @@ static SCTableObj* mnodeGetTableByPos(int32_t vnode, int32_t tid) {
|
|||
|
||||
static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) {
|
||||
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
|
||||
#if 0
|
||||
#if 0
|
||||
SConfigTableMsg *pCfg = pMsg->rpcMsg.pCont;
|
||||
pCfg->dnodeId = htonl(pCfg->dnodeId);
|
||||
pCfg->vgId = htonl(pCfg->vgId);
|
||||
|
@ -2575,13 +2560,13 @@ static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) {
|
|||
SMDCreateTableMsg *pCreate = NULL;
|
||||
pCreate = mnodeBuildCreateChildTableMsg(NULL, (SCTableObj *)pTable);
|
||||
mnodeDecTableRef(pTable);
|
||||
|
||||
|
||||
if (pCreate == NULL) return terrno;
|
||||
|
||||
|
||||
pMsg->rpcRsp.rsp = pCreate;
|
||||
pMsg->rpcRsp.len = htonl(pCreate->contLen);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
// handle drop child response
|
||||
|
@ -2674,7 +2659,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {
|
|||
.pMsg = pMsg,
|
||||
.fpRsp = mnodeDoCreateChildTableCb
|
||||
};
|
||||
|
||||
|
||||
int32_t code = sdbInsertRowToQueue(&desc);
|
||||
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
|
||||
pMsg->pTable = NULL;
|
||||
|
@ -2821,7 +2806,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
|
|||
static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
|
@ -2894,7 +2879,7 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
|
|||
static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return 0;
|
||||
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
|
@ -2931,7 +2916,7 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
|
|||
}
|
||||
|
||||
char tableName[TSDB_TABLE_NAME_LEN] = {0};
|
||||
|
||||
|
||||
// pattern compare for table name
|
||||
mnodeExtractTableName(pTable->info.tableId, tableName);
|
||||
|
||||
|
@ -2960,13 +2945,13 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
|
|||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
|
||||
|
||||
memset(tableName, 0, sizeof(tableName));
|
||||
if (pTable->info.type == TSDB_CHILD_TABLE) {
|
||||
mnodeExtractTableName(pTable->superTable->info.tableId, tableName);
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, tableName, pShow->bytes[cols]);
|
||||
}
|
||||
|
||||
|
||||
cols++;
|
||||
|
||||
// uid
|
||||
|
@ -3001,27 +2986,27 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
|
|||
static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
|
||||
SAlterTableMsg *pAlter = pMsg->rpcMsg.pCont;
|
||||
mDebug("msg:%p, app:%p table:%s, alter table msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pAlter->tableId, pMsg->rpcMsg.handle);
|
||||
pAlter->tableFname, pMsg->rpcMsg.handle);
|
||||
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pAlter->tableId);
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pAlter->tableFname);
|
||||
if (pMsg->pDb == NULL) {
|
||||
mError("msg:%p, app:%p table:%s, failed to alter table, db not selected", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId);
|
||||
mError("msg:%p, app:%p table:%s, failed to alter table, db not selected", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableFname);
|
||||
return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
}
|
||||
|
||||
|
||||
if (pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) {
|
||||
mError("msg:%p, app:%p table:%s, failed to alter table, its log db", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId);
|
||||
mError("msg:%p, app:%p table:%s, failed to alter table, its log db", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableFname);
|
||||
return TSDB_CODE_MND_MONITOR_DB_FORBIDDEN;
|
||||
}
|
||||
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pAlter->tableId);
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pAlter->tableFname);
|
||||
if (pMsg->pTable == NULL) {
|
||||
mError("msg:%p, app:%p table:%s, failed to alter table, table not exist", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId);
|
||||
mError("msg:%p, app:%p table:%s, failed to alter table, table not exist", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableFname);
|
||||
return TSDB_CODE_MND_INVALID_TABLE_NAME;
|
||||
}
|
||||
|
||||
|
@ -3030,7 +3015,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
|
|||
pAlter->tagValLen = htonl(pAlter->tagValLen);
|
||||
|
||||
if (pAlter->numOfCols > 2) {
|
||||
mError("msg:%p, app:%p table:%s, error numOfCols:%d in alter table", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId,
|
||||
mError("msg:%p, app:%p table:%s, error numOfCols:%d in alter table", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableFname,
|
||||
pAlter->numOfCols);
|
||||
return TSDB_CODE_MND_APP_ERROR;
|
||||
}
|
||||
|
@ -3041,7 +3026,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
|
|||
|
||||
int32_t code = TSDB_CODE_COM_OPS_NOT_SUPPORT;
|
||||
if (pMsg->pTable->type == TSDB_SUPER_TABLE) {
|
||||
mDebug("msg:%p, app:%p table:%s, start to alter stable", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId);
|
||||
mDebug("msg:%p, app:%p table:%s, start to alter stable", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableFname);
|
||||
if (pAlter->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) {
|
||||
code = mnodeAddSuperTableTag(pMsg, pAlter->schema, 1);
|
||||
} else if (pAlter->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) {
|
||||
|
@ -3057,7 +3042,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
|
|||
} else {
|
||||
}
|
||||
} else {
|
||||
mDebug("msg:%p, app:%p table:%s, start to alter ctable", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId);
|
||||
mDebug("msg:%p, app:%p table:%s, start to alter ctable", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableFname);
|
||||
if (pAlter->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) {
|
||||
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
|
||||
} else if (pAlter->type == TSDB_ALTER_TABLE_ADD_COLUMN) {
|
||||
|
@ -3076,7 +3061,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
|
|||
static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
|
@ -3129,13 +3114,13 @@ static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, vo
|
|||
static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return 0;
|
||||
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int32_t numOfRows = 0;
|
||||
SCTableObj *pTable = NULL;
|
||||
SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;
|
||||
|
@ -3148,7 +3133,7 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro
|
|||
while (numOfRows < rows) {
|
||||
pShow->pIter = mnodeGetNextChildTable(pShow->pIter, &pTable);
|
||||
if (pTable == NULL) break;
|
||||
|
||||
|
||||
// not belong to current db
|
||||
if (strncmp(pTable->info.tableId, prefix, prefixLen) || pTable->info.type != TSDB_STREAM_TABLE) {
|
||||
mnodeDecTableRef(pTable);
|
||||
|
@ -3156,7 +3141,7 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro
|
|||
}
|
||||
|
||||
char tableName[TSDB_TABLE_NAME_LEN] = {0};
|
||||
|
||||
|
||||
// pattern compare for table name
|
||||
mnodeExtractTableName(pTable->info.tableId, tableName);
|
||||
|
||||
|
|
|
@ -337,7 +337,7 @@ static int32_t mnodeGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
|
|||
cols++;
|
||||
|
||||
pMeta->numOfColumns = htons(cols);
|
||||
strcpy(pMeta->tableId, "show users");
|
||||
strcpy(pMeta->tableFname, "show users");
|
||||
pShow->numOfColumns = cols;
|
||||
|
||||
pShow->offset[0] = 0;
|
||||
|
|
|
@@ -82,9 +82,9 @@ void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, const tFilePage** p

 void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage* pInput);

-int64_t getNumOfResWithFill(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows);
+bool taosFillHasMoreResults(SFillInfo* pFillInfo);

-int32_t taosNumOfRemainRows(SFillInfo *pFillInfo);
+int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows);

 int32_t taosGetLinearInterpolationVal(int32_t type, SPoint *point1, SPoint *point2, SPoint *point);

@ -98,6 +98,7 @@ typedef struct SCreateTableSQL {
|
|||
|
||||
typedef struct SAlterTableSQL {
|
||||
SStrToken name;
|
||||
int16_t tableType;
|
||||
int16_t type;
|
||||
STagData tagData;
|
||||
SArray *pAddColumns; // SArray<TAOS_FIELD>
|
||||
|
@ -156,6 +157,7 @@ typedef struct tDCLSQL {
|
|||
int32_t nAlloc; /* Number of entries allocated below */
|
||||
SStrToken *a; /* one entry for element */
|
||||
bool existsCheck;
|
||||
int16_t tableType;
|
||||
|
||||
union {
|
||||
SCreateDBInfo dbOpt;
|
||||
|
@ -250,7 +252,7 @@ SCreateTableSQL *tSetCreateSqlElems(SArray *pCols, SArray *pTags, SQuerySQL *pSe
|
|||
|
||||
void tSqlExprNodeDestroy(tSQLExpr *pExpr);
|
||||
|
||||
SAlterTableSQL * tAlterTableSqlElems(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type);
|
||||
SAlterTableSQL * tAlterTableSqlElems(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type, int16_t tableTable);
|
||||
SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists);
|
||||
|
||||
void destroyAllSelectClause(SSubclauseInfo *pSql);
|
||||
|
@ -267,7 +269,7 @@ void setCreatedTableName(SSqlInfo *pInfo, SStrToken *pTableNameToken, SStrToken
|
|||
void SqlInfoDestroy(SSqlInfo *pInfo);
|
||||
|
||||
void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParams, ...);
|
||||
void setDropDbTableInfo(SSqlInfo *pInfo, int32_t type, SStrToken* pToken, SStrToken* existsCheck);
|
||||
void setDropDbTableInfo(SSqlInfo *pInfo, int32_t type, SStrToken* pToken, SStrToken* existsCheck,int16_t tableType);
|
||||
void setShowOptions(SSqlInfo *pInfo, int32_t type, SStrToken* prefix, SStrToken* pPatterns);
|
||||
|
||||
tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SStrToken *pToken);
|
||||
|
|
|
@@ -131,10 +131,16 @@ cmd ::= SHOW dbPrefix(X) VGROUPS ids(Y). {
 //drop configure for tables
 cmd ::= DROP TABLE ifexists(Y) ids(X) cpxName(Z). {
     X.n += Z.n;
-    setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &X, &Y);
+    setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &X, &Y, -1);
 }

-cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &X, &Y); }
+//drop stable
+cmd ::= DROP STABLE ifexists(Y) ids(X) cpxName(Z). {
+    X.n += Z.n;
+    setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &X, &Y, TSDB_SUPER_TABLE);
+}
+
+cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &X, &Y, -1); }
 cmd ::= DROP DNODE ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &X); }
 cmd ::= DROP USER ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_USER, 1, &X); }
 cmd ::= DROP ACCOUNT ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &X); }
@ -305,6 +311,8 @@ signed(A) ::= MINUS INTEGER(X). { A = -strtol(X.z, NULL, 10);}
|
|||
|
||||
////////////////////////////////// The CREATE TABLE statement ///////////////////////////////
|
||||
cmd ::= CREATE TABLE create_table_args. {}
|
||||
cmd ::= CREATE TABLE create_stable_args. {}
|
||||
cmd ::= CREATE STABLE create_stable_args. {}
|
||||
cmd ::= CREATE TABLE create_table_list(Z). { pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = Z;}
|
||||
|
||||
%type create_table_list{SCreateTableSQL*}
|
||||
|
@ -333,7 +341,8 @@ create_table_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) LP columnlist(X) RP. {
|
|||
}
|
||||
|
||||
// create super table
|
||||
create_table_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) LP columnlist(X) RP TAGS LP columnlist(Y) RP. {
|
||||
%type create_stable_args{SCreateTableSQL*}
|
||||
create_stable_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) LP columnlist(X) RP TAGS LP columnlist(Y) RP. {
|
||||
A = tSetCreateSqlElems(X, Y, NULL, TSQL_CREATE_STABLE);
|
||||
setSqlInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE);
|
||||
|
||||
|
@ -683,7 +692,7 @@ cmd ::= RESET QUERY CACHE. { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
|
|||
///////////////////////////////////ALTER TABLE statement//////////////////////////////////
|
||||
cmd ::= ALTER TABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). {
|
||||
X.n += F.n;
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_COLUMN);
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1);
|
||||
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
}
|
||||
|
||||
|
@ -693,14 +702,14 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
|
|||
toTSDBType(A.type);
|
||||
SArray* K = tVariantListAppendToken(NULL, &A, -1);
|
||||
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN);
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, -1);
|
||||
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
}
|
||||
|
||||
//////////////////////////////////ALTER TAGS statement/////////////////////////////////////
|
||||
cmd ::= ALTER TABLE ids(X) cpxName(Y) ADD TAG columnlist(A). {
|
||||
X.n += Y.n;
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN);
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1);
|
||||
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
}
|
||||
cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). {
|
||||
|
@ -709,7 +718,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). {
|
|||
toTSDBType(Y.type);
|
||||
SArray* A = tVariantListAppendToken(NULL, &Y, -1);
|
||||
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN);
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, -1);
|
||||
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
}
|
||||
|
||||
|
@ -722,7 +731,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). {
|
|||
toTSDBType(Z.type);
|
||||
A = tVariantListAppendToken(A, &Z, -1);
|
||||
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN);
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, -1);
|
||||
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
}
|
||||
|
||||
|
@@ -733,7 +742,54 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
     SArray* A = tVariantListAppendToken(NULL, &Y, -1);
     A = tVariantListAppend(A, &Z, -1);

-    SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL);
+    SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1);
     setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
 }

+
+///////////////////////////////////ALTER STABLE statement//////////////////////////////////
+cmd ::= ALTER STABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). {
+    X.n += F.n;
+    SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE);
+    setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
+cmd ::= ALTER STABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
+    X.n += F.n;
+
+    toTSDBType(A.type);
+    SArray* K = tVariantListAppendToken(NULL, &A, -1);
+
+    SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, TSDB_SUPER_TABLE);
+    setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
+//////////////////////////////////ALTER TAGS statement/////////////////////////////////////
+cmd ::= ALTER STABLE ids(X) cpxName(Y) ADD TAG columnlist(A). {
+    X.n += Y.n;
+    SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE);
+    setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+cmd ::= ALTER STABLE ids(X) cpxName(Z) DROP TAG ids(Y). {
+    X.n += Z.n;
+
+    toTSDBType(Y.type);
+    SArray* A = tVariantListAppendToken(NULL, &Y, -1);
+
+    SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, TSDB_SUPER_TABLE);
+    setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
+cmd ::= ALTER STABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). {
+    X.n += F.n;
+
+    toTSDBType(Y.type);
+    SArray* A = tVariantListAppendToken(NULL, &Y, -1);
+
+    toTSDBType(Z.type);
+    A = tVariantListAppendToken(A, &Z, -1);
+
+    SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, TSDB_SUPER_TABLE);
+    setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
@ -1901,7 +1901,7 @@ static void valuePairAssign(tValuePair *dst, int16_t type, const char *val, int6
|
|||
static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, int64_t ts, uint16_t type,
|
||||
SExtTagsInfo *pTagInfo, char *pTags, int16_t stage) {
|
||||
tVariant val = {0};
|
||||
tVariantCreateFromBinary(&val, pData, tDataTypeDesc[type].nSize, type);
|
||||
tVariantCreateFromBinary(&val, pData, tDataTypes[type].bytes, type);
|
||||
|
||||
tValuePair **pList = pInfo->res;
|
||||
assert(pList != NULL);
|
||||
|
@ -1958,7 +1958,7 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData,
|
|||
static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, int64_t ts, uint16_t type,
|
||||
SExtTagsInfo *pTagInfo, char *pTags, int16_t stage) {
|
||||
tVariant val = {0};
|
||||
tVariantCreateFromBinary(&val, pData, tDataTypeDesc[type].nSize, type);
|
||||
tVariantCreateFromBinary(&val, pData, tDataTypes[type].bytes, type);
|
||||
|
||||
tValuePair **pList = pInfo->res;
|
||||
assert(pList != NULL);
|
||||
|
@ -3169,6 +3169,10 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
|
|||
// all results in current group have been returned to client, try next group
|
||||
if (pGroupResInfo->index >= taosArrayGetSize(pGroupResInfo->pRows)) {
|
||||
// current results of group has been sent to client, try next group
|
||||
pGroupResInfo->index = 0;
|
||||
pGroupResInfo->rowId = 0;
|
||||
taosArrayClear(pGroupResInfo->pRows);
|
||||
|
||||
if (mergeGroupResult(pQInfo) != TSDB_CODE_SUCCESS) {
|
||||
return; // failed to save data in the disk
|
||||
}
|
||||
|
@ -4172,7 +4176,7 @@ static void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBloc
|
|||
}
|
||||
}
|
||||
|
||||
bool queryHasRemainResForTableQuery(SQueryRuntimeEnv* pRuntimeEnv) {
|
||||
bool hasNotReturnedResults(SQueryRuntimeEnv* pRuntimeEnv) {
|
||||
SQuery *pQuery = pRuntimeEnv->pQuery;
|
||||
SFillInfo *pFillInfo = pRuntimeEnv->pFillInfo;
|
||||
|
||||
|
@ -4182,8 +4186,7 @@ bool queryHasRemainResForTableQuery(SQueryRuntimeEnv* pRuntimeEnv) {
|
|||
|
||||
if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) {
|
||||
// There are results not returned to client yet, so filling applied to the remain result is required firstly.
|
||||
int32_t remain = taosNumOfRemainRows(pFillInfo);
|
||||
if (remain > 0) {
|
||||
if (taosFillHasMoreResults(pFillInfo)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -4197,7 +4200,7 @@ bool queryHasRemainResForTableQuery(SQueryRuntimeEnv* pRuntimeEnv) {
|
|||
* first result row in the actual result set will fill nothing.
|
||||
*/
|
||||
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
|
||||
int32_t numOfTotal = (int32_t)getNumOfResWithFill(pFillInfo, pQuery->window.ekey, (int32_t)pQuery->rec.capacity);
|
||||
int32_t numOfTotal = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, pQuery->window.ekey, (int32_t)pQuery->rec.capacity);
|
||||
return numOfTotal > 0;
|
||||
}
|
||||
|
||||
|
@ -4265,7 +4268,7 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data
|
|||
setQueryStatus(pQuery, QUERY_OVER);
|
||||
}
|
||||
} else {
|
||||
if (!queryHasRemainResForTableQuery(&pQInfo->runtimeEnv)) {
|
||||
if (!hasNotReturnedResults(&pQInfo->runtimeEnv)) {
|
||||
setQueryStatus(pQuery, QUERY_OVER);
|
||||
}
|
||||
}
|
||||
|
@ -4309,7 +4312,7 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int
|
|||
ret = 0;
|
||||
}
|
||||
|
||||
if (!queryHasRemainResForTableQuery(pRuntimeEnv)) {
|
||||
if (!hasNotReturnedResults(pRuntimeEnv)) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
@ -5767,7 +5770,7 @@ static void tableQueryImpl(SQInfo *pQInfo) {
|
|||
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
SQuery * pQuery = pRuntimeEnv->pQuery;
|
||||
|
||||
if (queryHasRemainResForTableQuery(pRuntimeEnv)) {
|
||||
if (hasNotReturnedResults(pRuntimeEnv)) {
|
||||
if (pQuery->fillType != TSDB_FILL_NONE) {
|
||||
/*
|
||||
* There are remain results that are not returned due to result interpolation
|
||||
|
@ -6306,7 +6309,7 @@ static int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t num
|
|||
}
|
||||
|
||||
type = TSDB_DATA_TYPE_DOUBLE;
|
||||
bytes = tDataTypeDesc[type].nSize;
|
||||
bytes = tDataTypes[type].bytes;
|
||||
} else if (pExprs[i].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX && pExprs[i].base.functionId == TSDB_FUNC_TAGPRJ) { // parse the normal column
|
||||
SSchema s = tGetTableNameColumnSchema();
|
||||
type = s.type;
|
||||
|
@@ -6920,11 +6923,12 @@ static size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows) {
   */
  if (isTSCompQuery(pQuery) && (*numOfRows) > 0) {
    struct stat fStat;
-    if (fstat(fileno(*(FILE **)pQuery->sdata[0]->data), &fStat) == 0) {
+    FILE *f = *(FILE **)pQuery->sdata[0]->data;
+    if ((f != NULL) && (fstat(fileno(f), &fStat) == 0)) {
      *numOfRows = fStat.st_size;
      return fStat.st_size;
    } else {
-      qError("QInfo:%p failed to get file info, path:%s, reason:%s", pQInfo, pQuery->sdata[0]->data, strerror(errno));
+      qError("QInfo:%p failed to get file info, file:%p, reason:%s", pQInfo, f, strerror(errno));
      return 0;
    }
  } else {
@@ -6939,7 +6943,7 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
  // load data from file to msg buffer
  if (isTSCompQuery(pQuery)) {

-    FILE *f = *(FILE **)pQuery->sdata[0]->data;
+    FILE *f = *(FILE **)pQuery->sdata[0]->data; // TODO refactor

    // make sure file exist
    if (f) {
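The fix above guards the handle before calling fileno()/fstat(), since passing a null FILE* to fileno() is undefined behavior, and it logs the pointer instead of printing the buffer as if it were a path. A small standalone illustration of the same defensive pattern (function and error text here are illustrative, not the project's):

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

/* Returns the file size in bytes, or -1 if the handle is missing or fstat fails. */
static long safeFileSize(FILE *f) {
  struct stat fStat;
  if (f != NULL && fstat(fileno(f), &fStat) == 0) {
    return (long)fStat.st_size;
  }
  fprintf(stderr, "failed to get file info, file:%p, reason:%s\n", (void *)f, strerror(errno));
  return -1;
}

int main(void) {
  printf("%ld\n", safeFileSize(NULL));  /* -1, logged instead of crashing */
  FILE *f = tmpfile();
  printf("%ld\n", safeFileSize(f));     /* 0 for an empty temporary file */
  if (f != NULL) fclose(f);
  return 0;
}
```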
@ -25,6 +25,251 @@
|
|||
#include "queryLog.h"
|
||||
|
||||
#define FILL_IS_ASC_FILL(_f) ((_f)->order == TSDB_ORDER_ASC)
|
||||
#define DO_INTERPOLATION(_v1, _v2, _k1, _k2, _k) ((_v1) + ((_v2) - (_v1)) * (((double)(_k)) - ((double)(_k1))) / (((double)(_k2)) - ((double)(_k1))))
|
||||
|
||||
static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t genRows) {
|
||||
for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[j];
|
||||
if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
char* val1 = elePtrAt(data[j]->data, pCol->col.bytes, genRows);
|
||||
|
||||
assert(pCol->tagIndex >= 0 && pCol->tagIndex < pFillInfo->numOfTags);
|
||||
SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex];
|
||||
|
||||
assert (pTag->col.colId == pCol->col.colId);
|
||||
assignVal(val1, pTag->tagVal, pCol->col.bytes, pCol->col.type);
|
||||
}
|
||||
}
|
||||
|
||||
static void setNullValueForRow(SFillInfo* pFillInfo, tFilePage** data, int32_t numOfCol, int32_t rowIndex) {
|
||||
// the first are always the timestamp column, so start from the second column.
|
||||
for (int32_t i = 1; i < numOfCol; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
|
||||
char* output = elePtrAt(data[i]->data, pCol->col.bytes, rowIndex);
|
||||
setNull(output, pCol->col.type, pCol->col.bytes);
|
||||
}
|
||||
}
|
||||
|
||||
static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** srcData, int64_t ts, bool outOfBound) {
|
||||
char* prev = pFillInfo->prevValues;
|
||||
char* next = pFillInfo->nextValues;
|
||||
|
||||
SPoint point1, point2, point;
|
||||
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order);
|
||||
|
||||
// set the primary timestamp column value
|
||||
int32_t index = pFillInfo->numOfCurrent;
|
||||
char* val = elePtrAt(data[0]->data, TSDB_KEYSIZE, index);
|
||||
*(TSKEY*) val = pFillInfo->currentKey;
|
||||
|
||||
// set the other values
|
||||
if (pFillInfo->type == TSDB_FILL_PREV) {
|
||||
char* p = FILL_IS_ASC_FILL(pFillInfo) ? prev : next;
|
||||
|
||||
if (p != NULL) {
|
||||
for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
char* output = elePtrAt(data[i]->data, pCol->col.bytes, index);
|
||||
assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type);
|
||||
}
|
||||
} else { // no prev value yet, set the value for NULL
|
||||
setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index);
|
||||
}
|
||||
} else if (pFillInfo->type == TSDB_FILL_NEXT) {
|
||||
char* p = FILL_IS_ASC_FILL(pFillInfo)? next : prev;
|
||||
|
||||
if (p != NULL) {
|
||||
for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
char* output = elePtrAt(data[i]->data, pCol->col.bytes, index);
|
||||
assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type);
|
||||
}
|
||||
} else { // no prev value yet, set the value for NULL
|
||||
setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index);
|
||||
}
|
||||
} else if (pFillInfo->type == TSDB_FILL_LINEAR) {
|
||||
// TODO : linear interpolation supports NULL value
|
||||
if (prev != NULL && !outOfBound) {
|
||||
for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
int16_t type = pCol->col.type;
|
||||
int16_t bytes = pCol->col.bytes;
|
||||
|
||||
char *val1 = elePtrAt(data[i]->data, pCol->col.bytes, index);
|
||||
if (type == TSDB_DATA_TYPE_BINARY|| type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BOOL) {
|
||||
setNull(val1, pCol->col.type, bytes);
|
||||
continue;
|
||||
}
|
||||
|
||||
point1 = (SPoint){.key = *(TSKEY*)(prev), .val = prev + pCol->col.offset};
|
||||
point2 = (SPoint){.key = ts, .val = srcData[i] + pFillInfo->index * bytes};
|
||||
point = (SPoint){.key = pFillInfo->currentKey, .val = val1};
|
||||
taosGetLinearInterpolationVal(type, &point1, &point2, &point);
|
||||
}
|
||||
} else {
|
||||
setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index);
|
||||
}
|
||||
} else { /* fill the default value */
|
||||
for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, index);
|
||||
assignVal(val1, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
|
||||
}
|
||||
}
|
||||
|
||||
setTagsValue(pFillInfo, data, index);
|
||||
pFillInfo->currentKey = taosTimeAdd(pFillInfo->currentKey, pFillInfo->interval.sliding * step, pFillInfo->interval.slidingUnit, pFillInfo->precision);
|
||||
pFillInfo->numOfCurrent++;
|
||||
}
|
||||
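For the TSDB_FILL_LINEAR branch above, the generated value lies on the straight line through the previous and next real samples; the DO_INTERPOLATION macro defined at the top of this file expresses exactly that ratio. A tiny self-contained example of the arithmetic, with values chosen only for illustration:

```c
#include <stdio.h>

/* Same formula as the DO_INTERPOLATION macro earlier in this file:
 * v = v1 + (v2 - v1) * (k - k1) / (k2 - k1) */
#define DO_INTERPOLATION(_v1, _v2, _k1, _k2, _k) \
  ((_v1) + ((_v2) - (_v1)) * (((double)(_k)) - ((double)(_k1))) / (((double)(_k2)) - ((double)(_k1))))

int main(void) {
  /* Two real samples at timestamps 1000 and 2000; fill the missing row at 1500. */
  double v = DO_INTERPOLATION(10.0, 30.0, 1000, 2000, 1500);
  printf("%f\n", v);  /* 20.000000, halfway between the two sample values */
  return 0;
}
```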
|
||||
static void initBeforeAfterDataBuf(SFillInfo* pFillInfo, char** next) {
|
||||
if (*next != NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
*next = calloc(1, pFillInfo->rowSize);
|
||||
for (int i = 1; i < pFillInfo->numOfCols; i++) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
setNull(*next + pCol->col.offset, pCol->col.type, pCol->col.bytes);
|
||||
}
|
||||
}
|
||||
|
||||
static void copyCurrentRowIntoBuf(SFillInfo* pFillInfo, char** srcData, char* buf) {
|
||||
int32_t rowIndex = pFillInfo->index;
|
||||
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
memcpy(buf + pCol->col.offset, srcData[i] + rowIndex * pCol->col.bytes, pCol->col.bytes);
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t fillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t outputRows) {
|
||||
pFillInfo->numOfCurrent = 0;
|
||||
|
  char** srcData = pFillInfo->pData;
  char** prev = &pFillInfo->prevValues;
  char** next = &pFillInfo->nextValues;

  int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order);

  if (FILL_IS_ASC_FILL(pFillInfo)) {
    assert(pFillInfo->currentKey >= pFillInfo->start);
  } else {
    assert(pFillInfo->currentKey <= pFillInfo->start);
  }

  while (pFillInfo->numOfCurrent < outputRows) {
    int64_t ts = ((int64_t*)pFillInfo->pData[0])[pFillInfo->index];

    // set the next value for interpolation
    if ((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) ||
        (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) {
      initBeforeAfterDataBuf(pFillInfo, next);
      copyCurrentRowIntoBuf(pFillInfo, srcData, *next);
    }

    if (((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) || (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) &&
        pFillInfo->numOfCurrent < outputRows) {

      // fill the gap between two actual input rows
      while (((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) ||
              (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) &&
             pFillInfo->numOfCurrent < outputRows) {
        doFillOneRowResult(pFillInfo, data, srcData, ts, false);
      }

      // output buffer is full, abort
      if (pFillInfo->numOfCurrent == outputRows) {
        pFillInfo->numOfTotal += pFillInfo->numOfCurrent;
        return outputRows;
      }
    } else {
      assert(pFillInfo->currentKey == ts);
      initBeforeAfterDataBuf(pFillInfo, prev);

      // assign rows to dst buffer
      for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
        SFillColInfo* pCol = &pFillInfo->pFillCol[i];
        if (TSDB_COL_IS_TAG(pCol->flag)) {
          continue;
        }

        char* output = elePtrAt(data[i]->data, pCol->col.bytes, pFillInfo->numOfCurrent);
        char* src = elePtrAt(srcData[i], pCol->col.bytes, pFillInfo->index);

        if (i == 0 || (pCol->functionId != TSDB_FUNC_COUNT && !isNull(src, pCol->col.type)) ||
            (pCol->functionId == TSDB_FUNC_COUNT && GET_INT64_VAL(src) != 0)) {
          assignVal(output, src, pCol->col.bytes, pCol->col.type);
          memcpy(*prev + pCol->col.offset, src, pCol->col.bytes);
        } else {  // i > 0 and data is null, do interpolation
          if (pFillInfo->type == TSDB_FILL_PREV) {
            assignVal(output, *prev + pCol->col.offset, pCol->col.bytes, pCol->col.type);
          } else if (pFillInfo->type == TSDB_FILL_LINEAR) {
            assignVal(output, src, pCol->col.bytes, pCol->col.type);
            memcpy(*prev + pCol->col.offset, src, pCol->col.bytes);
          } else {
            assignVal(output, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
          }
        }
      }

      // set the tag value for final result
      setTagsValue(pFillInfo, data, pFillInfo->numOfCurrent);

      pFillInfo->currentKey = taosTimeAdd(pFillInfo->currentKey, pFillInfo->interval.sliding * step,
                                          pFillInfo->interval.slidingUnit, pFillInfo->precision);
      pFillInfo->index += 1;
      pFillInfo->numOfCurrent += 1;
    }

    if (pFillInfo->index >= pFillInfo->numOfRows || pFillInfo->numOfCurrent >= outputRows) {
      /* the raw data block is exhausted, the next value does not exist */
      if (pFillInfo->index >= pFillInfo->numOfRows) {
        tfree(*next);
      }

      pFillInfo->numOfTotal += pFillInfo->numOfCurrent;
      return pFillInfo->numOfCurrent;
    }
  }

  return pFillInfo->numOfCurrent;
}
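The loop above advances pFillInfo->currentKey by one sliding interval per generated row until it catches up with the timestamp of the next real input row. A minimal standalone sketch of that gap-filling step, assuming a plain ascending millisecond timeline; the fillGap name and the fixed-size output buffer are illustrative and not part of TDengine:

```c
#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for one fill pass: emit every missing slot between
 * the current key and the timestamp of the next real row, ascending order. */
static int fillGap(int64_t currentKey, int64_t nextRowTs, int64_t sliding,
                   int64_t* out, int capacity) {
  int n = 0;
  while (currentKey < nextRowTs && n < capacity) {  // same guard shape as the loop above
    out[n++] = currentKey;                          // one filled row per missing slot
    currentKey += sliding;                          // plain addition; the real code uses taosTimeAdd()
  }
  return n;
}

int main(void) {
  int64_t filled[16];
  // real rows exist at t=1000 and t=5000, sliding window of 1000 ms
  int n = fillGap(1000 + 1000, 5000, 1000, filled, 16);
  for (int i = 0; i < n; ++i) {
    printf("filled row at ts=%lld\n", (long long)filled[i]);  // 2000, 3000, 4000
  }
  return 0;
}
```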
static int64_t appendFilledResult(SFillInfo* pFillInfo, tFilePage** output, int64_t resultCapacity) {
  /*
   * These data are generated according to the fill strategy, since the current timestamp is out of the time window of
   * the real result set. Note that we need to keep the direct previous result rows, to generate the filled data.
   */
  pFillInfo->numOfCurrent = 0;
  while (pFillInfo->numOfCurrent < resultCapacity) {
    doFillOneRowResult(pFillInfo, output, pFillInfo->pData, pFillInfo->start, true);
  }

  pFillInfo->numOfTotal += pFillInfo->numOfCurrent;

  assert(pFillInfo->numOfCurrent == resultCapacity);
  return resultCapacity;
}

// there are no duplicated tags in the SFillTagColInfo list
static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t capacity) {

@@ -68,6 +313,14 @@ static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t
  return rowsize;
}

static int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) {
  if (pFillInfo->numOfRows == 0 || (pFillInfo->numOfRows > 0 && pFillInfo->index >= pFillInfo->numOfRows)) {
    return 0;
  }

  return pFillInfo->numOfRows - pFillInfo->index;
}

SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
                            int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType,
                            SFillColInfo* pCol, void* handle) {

@@ -76,22 +329,21 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_
  }

  SFillInfo* pFillInfo = calloc(1, sizeof(SFillInfo));

  taosResetFillInfo(pFillInfo, skey);

  pFillInfo->order     = order;
  pFillInfo->type      = fillType;
  pFillInfo->pFillCol  = pCol;
  pFillInfo->numOfTags = numOfTags;
  pFillInfo->numOfCols = numOfCols;
  pFillInfo->precision = precision;
  pFillInfo->alloc     = capacity;
  pFillInfo->handle    = handle;

  pFillInfo->interval.interval     = slidingTime;
  pFillInfo->interval.intervalUnit = slidingUnit;
  pFillInfo->interval.sliding      = slidingTime;
  pFillInfo->interval.slidingUnit  = slidingUnit;

  pFillInfo->pData = malloc(POINTER_BYTES * numOfCols);
  if (numOfTags > 0) {

@@ -185,7 +437,11 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage*
  }
}

int64_t getNumOfResWithFill(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) {
bool taosFillHasMoreResults(SFillInfo* pFillInfo) {
  return taosNumOfRemainRows(pFillInfo) > 0;
}

int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) {
  int64_t* tsList = (int64_t*) pFillInfo->pData[0];

  int32_t numOfRows = taosNumOfRemainRows(pFillInfo);

@@ -223,16 +479,6 @@ int64_t getNumOfResWithFill(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRo
  return (numOfRes > maxNumOfRows) ? maxNumOfRows : numOfRes;
}

int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) {
  if (pFillInfo->numOfRows == 0 || (pFillInfo->numOfRows > 0 && pFillInfo->index >= pFillInfo->numOfRows)) {
    return 0;
  }

  return pFillInfo->numOfRows - pFillInfo->index;
}

#define DO_INTERPOLATION(_v1, _v2, _k1, _k2, _k) ((_v1) + ((_v2) - (_v1)) * (((double)(_k)) - ((double)(_k1))) / (((double)(_k2)) - ((double)(_k1))))

int32_t taosGetLinearInterpolationVal(int32_t type, SPoint* point1, SPoint* point2, SPoint* point) {
  double v1 = -1;
  double v2 = -1;

@@ -256,243 +502,15 @@ int32_t taosGetLinearInterpolationVal(int32_t type, SPoint* point1, SPoint* poin
  return TSDB_CODE_SUCCESS;
}
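DO_INTERPOLATION above is ordinary two-point linear interpolation on the timestamp axis. A self-contained check of the formula; only the macro is taken from the diff, the driver code around it is illustrative:

```c
#include <assert.h>
#include <stdio.h>

// copied from the diff above: two-point linear interpolation on a key axis
#define DO_INTERPOLATION(_v1, _v2, _k1, _k2, _k) \
  ((_v1) + ((_v2) - (_v1)) * (((double)(_k)) - ((double)(_k1))) / (((double)(_k2)) - ((double)(_k1))))

int main(void) {
  // two known points: value 10.0 at key 1000, value 30.0 at key 3000
  double v1 = 10.0, v2 = 30.0;
  long long k1 = 1000, k2 = 3000;

  // interpolate the value at key 2000 -> halfway between the two values
  double v = DO_INTERPOLATION(v1, v2, k1, k2, 2000);
  printf("interpolated value: %f\n", v);  // 20.000000
  assert(v == 20.0);
  return 0;
}
```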
static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t genRows) {
  for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) {
    SFillColInfo* pCol = &pFillInfo->pFillCol[j];
    if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) {
      continue;
    }

    char* val1 = elePtrAt(data[j]->data, pCol->col.bytes, genRows);

    assert(pCol->tagIndex >= 0 && pCol->tagIndex < pFillInfo->numOfTags);
    SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex];

    assert (pTag->col.colId == pCol->col.colId);
    assignVal(val1, pTag->tagVal, pCol->col.bytes, pCol->col.type);
  }
}

static void setNullValueForRow(SFillInfo* pFillInfo, tFilePage** data, int32_t numOfCol, int32_t rowIndex) {
  // the first column is always the timestamp, so start from the second column.
  for (int32_t i = 1; i < numOfCol; ++i) {
    SFillColInfo* pCol = &pFillInfo->pFillCol[i];

    char* output = elePtrAt(data[i]->data, pCol->col.bytes, rowIndex);
    setNull(output, pCol->col.type, pCol->col.bytes);
  }
}

static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** srcData, int64_t ts, bool outOfBound) {
  char* prev = pFillInfo->prevValues;
  char* next = pFillInfo->nextValues;

  SPoint point1, point2, point;
  int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order);

  // set the primary timestamp column value
  int32_t index = pFillInfo->numOfCurrent;
  char* val = elePtrAt(data[0]->data, TSDB_KEYSIZE, index);
  *(TSKEY*) val = pFillInfo->currentKey;

  // set the other values
  if (pFillInfo->type == TSDB_FILL_PREV) {
    char* p = FILL_IS_ASC_FILL(pFillInfo) ? prev : next;

    if (p != NULL) {
      for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) {
        SFillColInfo* pCol = &pFillInfo->pFillCol[i];
        if (TSDB_COL_IS_TAG(pCol->flag)) {
          continue;
        }

        char* output = elePtrAt(data[i]->data, pCol->col.bytes, index);
        assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type);
      }
    } else { // no prev value yet, set the value to NULL
      setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index);
    }
  } else if (pFillInfo->type == TSDB_FILL_LINEAR) {
    // TODO : linear interpolation supports NULL value
    if (prev != NULL && !outOfBound) {
      for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) {
        SFillColInfo* pCol = &pFillInfo->pFillCol[i];
        if (TSDB_COL_IS_TAG(pCol->flag)) {
          continue;
        }

        int16_t type = pCol->col.type;
        int16_t bytes = pCol->col.bytes;

        char *val1 = elePtrAt(data[i]->data, pCol->col.bytes, index);
        if (type == TSDB_DATA_TYPE_BINARY|| type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BOOL) {
          setNull(val1, pCol->col.type, bytes);
          continue;
        }

        point1 = (SPoint){.key = *(TSKEY*)(prev), .val = prev + pCol->col.offset};
        point2 = (SPoint){.key = ts, .val = srcData[i] + pFillInfo->index * bytes};
        point = (SPoint){.key = pFillInfo->currentKey, .val = val1};
        taosGetLinearInterpolationVal(type, &point1, &point2, &point);
      }
    } else {
      setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index);
    }
  } else { /* fill the default value */
    for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) {
      SFillColInfo* pCol = &pFillInfo->pFillCol[i];
      if (TSDB_COL_IS_TAG(pCol->flag)) {
        continue;
      }

      char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, index);
      assignVal(val1, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
    }
  }

  setTagsValue(pFillInfo, data, index);
  pFillInfo->currentKey = taosTimeAdd(pFillInfo->currentKey, pFillInfo->interval.sliding * step, pFillInfo->interval.slidingUnit, pFillInfo->precision);
  pFillInfo->numOfCurrent++;
}

static void initBeforeAfterDataBuf(SFillInfo* pFillInfo, char** next) {
  if (*next != NULL) {
    return;
  }

  *next = calloc(1, pFillInfo->rowSize);
  for (int i = 1; i < pFillInfo->numOfCols; i++) {
    SFillColInfo* pCol = &pFillInfo->pFillCol[i];
    setNull(*next + pCol->col.offset, pCol->col.type, pCol->col.bytes);
  }
}

static void copyCurrentRowIntoBuf(SFillInfo* pFillInfo, char** srcData, char* buf) {
  int32_t rowIndex = pFillInfo->index;
  for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
    SFillColInfo* pCol = &pFillInfo->pFillCol[i];
    memcpy(buf + pCol->col.offset, srcData[i] + rowIndex * pCol->col.bytes, pCol->col.bytes);
  }
}

static int32_t fillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t outputRows) {
  pFillInfo->numOfCurrent = 0;

  char** srcData = pFillInfo->pData;
  char** prev = &pFillInfo->prevValues;
  char** next = &pFillInfo->nextValues;

  int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order);

  if (FILL_IS_ASC_FILL(pFillInfo)) {
    assert(pFillInfo->currentKey >= pFillInfo->start);
  } else {
    assert(pFillInfo->currentKey <= pFillInfo->start);
  }

  while (pFillInfo->numOfCurrent < outputRows) {
    int64_t ts = ((int64_t*)pFillInfo->pData[0])[pFillInfo->index];

    if ((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) ||
        (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) {
      /* set the next value for interpolation */
      initBeforeAfterDataBuf(pFillInfo, next);
      copyCurrentRowIntoBuf(pFillInfo, srcData, *next);
    }

    if (((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) || (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) &&
        pFillInfo->numOfCurrent < outputRows) {

      // fill the gap between two actual input rows
      while (((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) ||
              (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) &&
             pFillInfo->numOfCurrent < outputRows) {
        doFillOneRowResult(pFillInfo, data, srcData, ts, false);
      }

      // output buffer is full, abort
      if (pFillInfo->numOfCurrent == outputRows) {
        pFillInfo->numOfTotal += pFillInfo->numOfCurrent;
        return outputRows;
      }
    } else {
      assert(pFillInfo->currentKey == ts);
      initBeforeAfterDataBuf(pFillInfo, prev);

      // assign rows to dst buffer
      for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
        SFillColInfo* pCol = &pFillInfo->pFillCol[i];
        if (TSDB_COL_IS_TAG(pCol->flag)) {
          continue;
        }

        char* output = elePtrAt(data[i]->data, pCol->col.bytes, pFillInfo->numOfCurrent);
        char* src = elePtrAt(srcData[i], pCol->col.bytes, pFillInfo->index);

        if (i == 0 || (pCol->functionId != TSDB_FUNC_COUNT && !isNull(src, pCol->col.type)) ||
            (pCol->functionId == TSDB_FUNC_COUNT && GET_INT64_VAL(src) != 0)) {
          assignVal(output, src, pCol->col.bytes, pCol->col.type);
          memcpy(*prev + pCol->col.offset, src, pCol->col.bytes);
        } else {  // i > 0 and data is null, do interpolation
          if (pFillInfo->type == TSDB_FILL_PREV) {
            assignVal(output, *prev + pCol->col.offset, pCol->col.bytes, pCol->col.type);
          } else if (pFillInfo->type == TSDB_FILL_LINEAR) {
            assignVal(output, src, pCol->col.bytes, pCol->col.type);
            memcpy(*prev + pCol->col.offset, src, pCol->col.bytes);
          } else {
            assignVal(output, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
          }
        }
      }

      // set the tag value for final result
      setTagsValue(pFillInfo, data, pFillInfo->numOfCurrent);

      pFillInfo->currentKey = taosTimeAdd(pFillInfo->currentKey, pFillInfo->interval.sliding * step,
                                          pFillInfo->interval.slidingUnit, pFillInfo->precision);
      pFillInfo->index += 1;
      pFillInfo->numOfCurrent += 1;
    }

    if (pFillInfo->index >= pFillInfo->numOfRows || pFillInfo->numOfCurrent >= outputRows) {
      /* the raw data block is exhausted, the next value does not exist */
      if (pFillInfo->index >= pFillInfo->numOfRows) {
        tfree(*next);
      }

      pFillInfo->numOfTotal += pFillInfo->numOfCurrent;
      return pFillInfo->numOfCurrent;
    }
  }

  return pFillInfo->numOfCurrent;
}

static int64_t fillExternalResults(SFillInfo* pFillInfo, tFilePage** output, int64_t resultCapacity) {
  /*
   * These data are generated according to the fill strategy, since the current timestamp is out of the time window of
   * the real result set. Note that we need to keep the direct previous result rows, to generate the filled data.
   */
  pFillInfo->numOfCurrent = 0;
  while (pFillInfo->numOfCurrent < resultCapacity) {
    doFillOneRowResult(pFillInfo, output, pFillInfo->pData, pFillInfo->start, true);
  }

  pFillInfo->numOfTotal += pFillInfo->numOfCurrent;

  assert(pFillInfo->numOfCurrent == resultCapacity);
  return resultCapacity;
}

int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity) {
  int32_t remain = taosNumOfRemainRows(pFillInfo);

  int64_t numOfRes = getNumOfResWithFill(pFillInfo, pFillInfo->end, capacity);
  int64_t numOfRes = getNumOfResultsAfterFillGap(pFillInfo, pFillInfo->end, capacity);
  assert(numOfRes <= capacity);

  // no data exists for the fill operation now, append results according to the fill strategy
  if (remain == 0) {
    fillExternalResults(pFillInfo, output, numOfRes);
    appendFilledResult(pFillInfo, output, numOfRes);
  } else {
    fillResultImpl(pFillInfo, output, (int32_t) numOfRes);
    assert(numOfRes == pFillInfo->numOfCurrent);
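For FILL(PREV), doFillOneRowResult above copies the cached previous-row buffer into the output and falls back to NULL when no previous value exists yet. A standalone sketch of that carry-forward idea, simplified to a single double column with NAN standing in for NULL; the names and the single-column simplification are mine, not TDengine's:

```c
#include <math.h>
#include <stdio.h>

// carry the last seen value forward; NAN marks a missing value here
static void fillPrev(double* col, int n) {
  double prev = NAN;                 // no previous value cached yet
  for (int i = 0; i < n; ++i) {
    if (!isnan(col[i])) {
      prev = col[i];                 // remember the latest real value
    } else if (!isnan(prev)) {
      col[i] = prev;                 // reuse the cached previous value
    }                                // else: leave it missing, like setNullValueForRow()
  }
}

int main(void) {
  double col[] = {1.5, NAN, NAN, 4.0, NAN};
  fillPrev(col, 5);
  for (int i = 0; i < 5; ++i) printf("%g ", col[i]);  // 1.5 1.5 1.5 4 4
  printf("\n");
  return 0;
}
```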
@@ -184,7 +184,7 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) {
    histogramCreateBin(*pHisto, idx, val);
  }
#else
  tSkipListKey key = tSkipListCreateKey(TSDB_DATA_TYPE_DOUBLE, &val, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
  tSkipListKey key = tSkipListCreateKey(TSDB_DATA_TYPE_DOUBLE, &val, tDataTypes[TSDB_DATA_TYPE_DOUBLE].nSize);
  SHistBin* entry = calloc(1, sizeof(SHistBin));
  entry->val = val;

@@ -217,7 +217,7 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) {
      }

      tSkipListKey kx =
          tSkipListCreateKey(TSDB_DATA_TYPE_DOUBLE, &(*pHisto)->max, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
          tSkipListCreateKey(TSDB_DATA_TYPE_DOUBLE, &(*pHisto)->max, tDataTypes[TSDB_DATA_TYPE_DOUBLE].nSize);
      pLast = tSkipListGetOne((*pHisto)->pList, &kx);
    }
  } else {

@@ -398,21 +398,21 @@ void tSqlSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
  pField->name[0] = 0;

  int32_t i = 0;
  while (i < tListLen(tDataTypeDesc)) {
    if ((type->n == tDataTypeDesc[i].nameLen) &&
        (strncasecmp(type->z, tDataTypeDesc[i].aName, tDataTypeDesc[i].nameLen) == 0)) {
  while (i < tListLen(tDataTypes)) {
    if ((type->n == tDataTypes[i].nameLen) &&
        (strncasecmp(type->z, tDataTypes[i].name, tDataTypes[i].nameLen) == 0)) {
      break;
    }

    i += 1;
  }

  if (i == tListLen(tDataTypeDesc)) {
  if (i == tListLen(tDataTypes)) {
    return;
  }

  pField->type = i;
  pField->bytes = tDataTypeDesc[i].nSize;
  pField->bytes = tDataTypes[i].bytes;

  if (i == TSDB_DATA_TYPE_NCHAR) {
    /*
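The hunks above rename the global type-descriptor table from tDataTypeDesc to tDataTypes and its fields from nSize/aName to bytes/name. A standalone sketch of the lookup-by-name pattern used in tSqlSetColumnType; the struct layout and the table contents here are illustrative and only approximate the real definitions:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

// illustrative descriptor table; the real tDataTypes carries more fields
typedef struct {
  const char* name;
  int32_t     nameLen;
  int32_t     bytes;
} SDataTypeDesc;

static const SDataTypeDesc dataTypes[] = {
    {"BOOL", 4, 1}, {"INT", 3, 4}, {"BIGINT", 6, 8}, {"DOUBLE", 6, 8},
};
#define LIST_LEN(x) ((int32_t)(sizeof(x) / sizeof((x)[0])))

// case-insensitive name lookup, same shape as the loop in tSqlSetColumnType
static int32_t lookupType(const char* z, int32_t n) {
  int32_t i = 0;
  while (i < LIST_LEN(dataTypes)) {
    if (n == dataTypes[i].nameLen && strncasecmp(z, dataTypes[i].name, dataTypes[i].nameLen) == 0) {
      return i;
    }
    i += 1;
  }
  return -1;  // unknown type name
}

int main(void) {
  int32_t i = lookupType("bigint", 6);
  if (i >= 0) {
    printf("type index %d, %d bytes\n", i, dataTypes[i].bytes);  // type index 2, 8 bytes
  }
  return 0;
}
```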
@@ -585,11 +585,12 @@ SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagVal
  return info;
}

SAlterTableSQL *tAlterTableSqlElems(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type) {
SAlterTableSQL *tAlterTableSqlElems(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type, int16_t tableType) {
  SAlterTableSQL *pAlterTable = calloc(1, sizeof(SAlterTableSQL));

  pAlterTable->name = *pTableName;
  pAlterTable->type = type;
  pAlterTable->tableType = tableType;

  if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) {
    pAlterTable->pAddColumns = pCols;

@@ -733,9 +734,10 @@ void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) {
  va_end(va);
}

void setDropDbTableInfo(SSqlInfo *pInfo, int32_t type, SStrToken* pToken, SStrToken* existsCheck) {
void setDropDbTableInfo(SSqlInfo *pInfo, int32_t type, SStrToken* pToken, SStrToken* existsCheck, int16_t tableType) {
  pInfo->type = type;
  pInfo->pDCLInfo = tTokenListAppend(pInfo->pDCLInfo, pToken);
  pInfo->pDCLInfo->tableType = tableType;
  pInfo->pDCLInfo->existsCheck = (existsCheck->n == 1);
}
src/query/src/sql.c (2679 changed lines): file diff suppressed because it is too large.
@@ -43,7 +43,7 @@ tMemBucket *createDoubleDataBucket(int32_t start, int32_t end) {
}

tMemBucket *createUnsignedDataBucket(int32_t start, int32_t end, int32_t type) {
  tMemBucket *pBucket = tMemBucketCreate(tDataTypeDesc[type].nSize, type, start, end);
  tMemBucket *pBucket = tMemBucketCreate(tDataTypes[type].bytes, type, start, end);
  for (int32_t i = start; i <= end; ++i) {
    uint64_t k = i;
    int32_t ret = tMemBucketPut(pBucket, &k, 1);

@@ -568,7 +568,7 @@ static void syncStartCheckPeerConn(SSyncPeer *pPeer) {
  int32_t ret = strcmp(pPeer->fqdn, tsNodeFqdn);
  if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) {
    int32_t checkMs = 100 + (pNode->vgId * 10) % 100;
    if (pNode->vgId > 1) checkMs = tsStatusInterval * 1000 + checkMs;

    sDebug("%s, check peer connection after %d ms", pPeer->id, checkMs);
    taosTmrReset(syncCheckPeerConnection, checkMs, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer);
  }

@@ -475,7 +475,8 @@ void *syncRetrieveData(void *param) {
  SSyncNode *pNode = pPeer->pSyncNode;

  taosBlockSIGPIPE();
  sInfo("%s, start to retrieve data, sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]);
  sInfo("%s, start to retrieve data, sstatus:%s, numOfRetrieves:%d", pPeer->id, syncStatus[pPeer->sstatus],
        pPeer->numOfRetrieves);

  if (pNode->notifyFlowCtrl) (*pNode->notifyFlowCtrl)(pNode->vgId, pPeer->numOfRetrieves);

@@ -497,9 +498,11 @@ void *syncRetrieveData(void *param) {
    pPeer->numOfRetrieves++;
  } else {
    pPeer->numOfRetrieves = 0;
    if (pNode->notifyFlowCtrl) (*pNode->notifyFlowCtrl)(pNode->vgId, 0);
    // if (pNode->notifyFlowCtrl) (*pNode->notifyFlowCtrl)(pNode->vgId, 0);
  }

  if (pNode->notifyFlowCtrl) (*pNode->notifyFlowCtrl)(pNode->vgId, 0);

  pPeer->fileChanged = 0;
  taosClose(pPeer->syncFd);
@@ -253,7 +253,7 @@ STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg) {
    }
  }
  if (tsdbTableSetSchema(pCfg, tdGetSchemaFromBuilder(&schemaBuilder), false) < 0) goto _err;
  if (tsdbTableSetName(pCfg, pMsg->tableId, true) < 0) goto _err;
  if (tsdbTableSetName(pCfg, pMsg->tableFname, true) < 0) goto _err;

  if (numOfTags > 0) {
    // Decode tag schema

@@ -265,7 +265,7 @@ STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg) {
    }
  }
  if (tsdbTableSetTagSchema(pCfg, tdGetSchemaFromBuilder(&schemaBuilder), false) < 0) goto _err;
  if (tsdbTableSetSName(pCfg, pMsg->superTableId, true) < 0) goto _err;
  if (tsdbTableSetSName(pCfg, pMsg->stableFname, true) < 0) goto _err;
  if (tsdbTableSetSuperUid(pCfg, htobe64(pMsg->superTableUid)) < 0) goto _err;

  int32_t tagDataLen = htonl(pMsg->tagDataLen);

@@ -751,8 +751,8 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa

    pCompCol->colId = pDataCol->colId;
    pCompCol->type = pDataCol->type;
    if (tDataTypeDesc[pDataCol->type].getStatisFunc) {
      (*tDataTypeDesc[pDataCol->type].getStatisFunc)(
    if (tDataTypes[pDataCol->type].statisFunc) {
      (*tDataTypes[pDataCol->type].statisFunc)(
          pDataCol->pData, rowsToWrite, &(pCompCol->min), &(pCompCol->max), &(pCompCol->sum), &(pCompCol->minIndex),
          &(pCompCol->maxIndex), &(pCompCol->numOfNull));
    }

@@ -788,7 +788,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
      }
    }

    flen = (*(tDataTypeDesc[pDataCol->type].compFunc))((char *)pDataCol->pData, tlen, rowsToWrite, tptr,
    flen = (*(tDataTypes[pDataCol->type].compFunc))((char *)pDataCol->pData, tlen, rowsToWrite, tptr,
                                                       (int32_t)taosTSizeof(pHelper->pBuffer) - lsize, pCfg->compression,
                                                       pHelper->compBuffer, (int32_t)taosTSizeof(pHelper->compBuffer));
  } else {

@@ -1208,7 +1208,7 @@ static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, char *content, int32
  // Decode the data
  if (comp) {
    // Need to decompress
    int tlen = (*(tDataTypeDesc[pDataCol->type].decompFunc))(content, len - sizeof(TSCKSUM), numOfRows, pDataCol->pData,
    int tlen = (*(tDataTypes[pDataCol->type].decompFunc))(content, len - sizeof(TSCKSUM), numOfRows, pDataCol->pData,
                                                          pDataCol->spaceSize, comp, buffer, bufferSize);
    if (tlen <= 0) {
      tsdbError("Failed to decompress column, file corrupted, len:%d comp:%d numOfRows:%d maxPoints:%d bufferSize:%d",

@@ -1,4 +1,4 @@
#include "taosdef.h"
#include "ttype.h"
#include "tcompare.h"
#include "tarray.h"
@@ -174,7 +174,7 @@ static int32_t vnodeProcessDropTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet
  SMDDropTableMsg *pTable = pCont;
  int32_t code = TSDB_CODE_SUCCESS;

  vDebug("vgId:%d, table:%s, start to drop", pVnode->vgId, pTable->tableId);
  vDebug("vgId:%d, table:%s, start to drop", pVnode->vgId, pTable->tableFname);
  STableId tableId = {.uid = htobe64(pTable->uid), .tid = htonl(pTable->tid)};

  if (tsdbDropTable(pVnode->tsdb, tableId) < 0) code = terrno;

@@ -197,13 +197,13 @@ static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet
  SDropSTableMsg *pTable = pCont;
  int32_t code = TSDB_CODE_SUCCESS;

  vDebug("vgId:%d, stable:%s, start to drop", pVnode->vgId, pTable->tableId);
  vDebug("vgId:%d, stable:%s, start to drop", pVnode->vgId, pTable->tableFname);

  STableId stableId = {.uid = htobe64(pTable->uid), .tid = -1};

  if (tsdbDropTable(pVnode->tsdb, stableId) < 0) code = terrno;

  vDebug("vgId:%d, stable:%s, drop stable result:%s", pVnode->vgId, pTable->tableId, tstrerror(code));
  vDebug("vgId:%d, stable:%s, drop stable result:%s", pVnode->vgId, pTable->tableFname, tstrerror(code));

  return code;
}

@@ -308,7 +308,7 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {
  if (pVnode->flowctrlLevel <= 0) code = TSDB_CODE_VND_IS_FLOWCTRL;

  pWrite->processedCount++;
  if (pWrite->processedCount > 100) {
  if (pWrite->processedCount >= 100) {
    vError("vgId:%d, msg:%p, failed to process since %s, retry:%d", pVnode->vgId, pWrite, tstrerror(code),
           pWrite->processedCount);
    pWrite->processedCount = 1;
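The last hunk changes the retry cap check from > 100 to >= 100, so the error is logged and the counter reset on the 100th attempt rather than the 101st. A small standalone sketch of that bookkeeping; the struct and helper here are hypothetical, not TDengine's:

```c
#include <stdio.h>

#define MAX_RETRY 100

// hypothetical retry bookkeeping, mirroring the processedCount logic above
typedef struct {
  int processedCount;
} SWriteMsg;

// returns 1 when the message has hit the retry cap and the counter is reset
static int bumpAndCheck(SWriteMsg* pWrite) {
  pWrite->processedCount++;
  if (pWrite->processedCount >= MAX_RETRY) {  // '>' here would allow one extra retry (101 instead of 100)
    pWrite->processedCount = 1;
    return 1;
  }
  return 0;
}

int main(void) {
  SWriteMsg msg = {0};
  int attempts = 0;
  while (!bumpAndCheck(&msg)) {
    attempts++;
  }
  printf("cap reached after %d attempts\n", attempts + 1);  // 100
  return 0;
}
```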
@@ -1,3 +1,6 @@

Requirements:
1. Can read the lowa configuration file
2. Support the taos-driver in both JNI and RESTful modes
2. Support JDBC-JNI and JDBC-restful
3. Read the configuration file and keep executing queries

@@ -19,14 +19,13 @@ import java.util.Map;

public class TaosDemoApplication {

    private static Logger logger = Logger.getLogger(TaosDemoApplication.class);
    private static final Logger logger = Logger.getLogger(TaosDemoApplication.class);

    public static void main(String[] args) throws IOException {
        // read the configuration parameters
        JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args);
        boolean isHelp = Arrays.asList(args).contains("--help");
        if (isHelp || config.host == null || config.host.isEmpty()) {
//        if (isHelp) {
            JdbcTaosdemoConfig.printHelp();
            System.exit(0);
        }

@@ -75,7 +74,7 @@ public class TaosDemoApplication {
            }
        }
        end = System.currentTimeMillis();
        logger.error(">>> create table time cost : " + (end - start) + " ms.");
        logger.info(">>> create table time cost : " + (end - start) + " ms.");
        /**********************************************************************************/
        // insert
        long tableSize = config.numOfTables;

@@ -90,7 +89,7 @@ public class TaosDemoApplication {
        // multi threads to insert
        int affectedRows = subTableService.insertMultiThreads(superTableMeta, threadSize, tableSize, startTime, gap, config);
        end = System.currentTimeMillis();
        logger.error("insert " + affectedRows + " rows, time cost: " + (end - start) + " ms");
        logger.info("insert " + affectedRows + " rows, time cost: " + (end - start) + " ms");
        /**********************************************************************************/
        // drop tables
        if (config.dropTable) {

@@ -108,5 +107,4 @@ public class TaosDemoApplication {
        return startTime;
    }

}
@@ -21,27 +21,27 @@ public class DatabaseMapperImpl implements DatabaseMapper {
    public void createDatabase(String dbname) {
        String sql = "create database if not exists " + dbname;
        jdbcTemplate.execute(sql);
        logger.info("SQL >>> " + sql);
        logger.debug("SQL >>> " + sql);
    }

    @Override
    public void dropDatabase(String dbname) {
        String sql = "drop database if exists " + dbname;
        jdbcTemplate.update(sql);
        logger.info("SQL >>> " + sql);
        logger.debug("SQL >>> " + sql);
    }

    @Override
    public void createDatabaseWithParameters(Map<String, String> map) {
        String sql = SqlSpeller.createDatabase(map);
        jdbcTemplate.execute(sql);
        logger.info("SQL >>> " + sql);
        logger.debug("SQL >>> " + sql);
    }

    @Override
    public void useDatabase(String dbname) {
        String sql = "use " + dbname;
        jdbcTemplate.execute(sql);
        logger.info("SQL >>> " + sql);
        logger.debug("SQL >>> " + sql);
    }
}

@@ -21,14 +21,14 @@ public class SubTableMapperImpl implements SubTableMapper {
    @Override
    public void createUsingSuperTable(SubTableMeta subTableMeta) {
        String sql = SqlSpeller.createTableUsingSuperTable(subTableMeta);
        logger.info("SQL >>> " + sql);
        logger.debug("SQL >>> " + sql);
        jdbcTemplate.execute(sql);
    }

    @Override
    public int insertOneTableMultiValues(SubTableValue subTableValue) {
        String sql = SqlSpeller.insertOneTableMultiValues(subTableValue);
        logger.info("SQL >>> " + sql);
        logger.debug("SQL >>> " + sql);

        int affectRows = 0;
        try {

@@ -42,7 +42,7 @@ public class SubTableMapperImpl implements SubTableMapper {
    @Override
    public int insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue) {
        String sql = SqlSpeller.insertOneTableMultiValuesUsingSuperTable(subTableValue);
        logger.info("SQL >>> " + sql);
        logger.debug("SQL >>> " + sql);

        int affectRows = 0;
        try {

@@ -56,7 +56,7 @@ public class SubTableMapperImpl implements SubTableMapper {
    @Override
    public int insertMultiTableMultiValues(List<SubTableValue> tables) {
        String sql = SqlSpeller.insertMultiSubTableMultiValues(tables);
        logger.info("SQL >>> " + sql);
        logger.debug("SQL >>> " + sql);
        int affectRows = 0;
        try {
            affectRows = jdbcTemplate.update(sql);

@@ -69,7 +69,7 @@ public class SubTableMapperImpl implements SubTableMapper {
    @Override
    public int insertMultiTableMultiValuesUsingSuperTable(List<SubTableValue> tables) {
        String sql = SqlSpeller.insertMultiTableMultiValuesUsingSuperTable(tables);
        logger.info("SQL >>> " + sql);
        logger.debug("SQL >>> " + sql);
        int affectRows = 0;
        try {
            affectRows = jdbcTemplate.update(sql);

@@ -18,14 +18,14 @@ public class SuperTableMapperImpl implements SuperTableMapper {
    @Override
    public void createSuperTable(SuperTableMeta tableMetadata) {
        String sql = SqlSpeller.createSuperTable(tableMetadata);
        logger.info("SQL >>> " + sql);
        logger.debug("SQL >>> " + sql);
        jdbcTemplate.execute(sql);
    }

    @Override
    public void dropSuperTable(String database, String name) {
        String sql = "drop table if exists " + database + "." + name;
        logger.info("SQL >>> " + sql);
        logger.debug("SQL >>> " + sql);
        jdbcTemplate.execute(sql);
    }
}

@@ -1,43 +1,49 @@
package com.taosdata.taosdemo.dao;

import com.taosdata.taosdemo.dao.TableMapper;
import com.taosdata.taosdemo.domain.TableMeta;
import com.taosdata.taosdemo.domain.TableValue;
import com.taosdata.taosdemo.utils.SqlSpeller;
import org.apache.log4j.Logger;
import org.springframework.jdbc.core.JdbcTemplate;

import java.util.List;

public class TableMapperImpl implements TableMapper {
    private static final Logger logger = Logger.getLogger(TableMapperImpl.class);
    private JdbcTemplate template;

    @Override
    public void create(TableMeta tableMeta) {
        String sql = SqlSpeller.createTable(tableMeta);
        logger.debug("SQL >>> " + sql);
        template.execute(sql);
    }

    @Override
    public int insertOneTableMultiValues(TableValue values) {
        String sql = SqlSpeller.insertOneTableMultiValues(values);
        logger.debug("SQL >>> " + sql);
        return template.update(sql);
    }

    @Override
    public int insertOneTableMultiValuesWithColumns(TableValue values) {
        String sql = SqlSpeller.insertOneTableMultiValuesWithColumns(values);
        logger.debug("SQL >>> " + sql);
        return template.update(sql);
    }

    @Override
    public int insertMultiTableMultiValues(List<TableValue> tables) {
        String sql = SqlSpeller.insertMultiTableMultiValues(tables);
        logger.debug("SQL >>> " + sql);
        return template.update(sql);
    }

    @Override
    public int insertMultiTableMultiValuesWithColumns(List<TableValue> tables) {
        String sql = SqlSpeller.insertMultiTableMultiValuesWithColumns(tables);
        logger.debug("SQL >>> " + sql);
        return template.update(sql);
    }
}

@@ -1,5 +1,5 @@
### settings ###
log4j.rootLogger=error,stdout
log4j.rootLogger=info,stdout
### output messages to the console ###
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target=System.out
@@ -261,9 +261,6 @@ void taos_select_call_back(void *param, TAOS_RES *tres, int code)
  if (code == 0 && tres) {
    // asynchronous API to fetch a batch of records
    taos_fetch_rows_a(tres, taos_retrieve_call_back, pTable);

    // taos_fetch_row_a is a less efficient way to retrieve records since it calls back the app for every row
    // taos_fetch_row_a(tres, taos_fetch_row_call_back, pTable);
  }
  else {
    printf("%s select failed, code:%d\n", pTable->name, code);

@@ -93,15 +93,15 @@ void Test(TAOS *taos, char *qstr, int index) {
    // if (taos_query(taos, qstr)) {
    //   printf("insert row: %i, reason:%s\n", i, taos_errstr(taos));
    // }
    TAOS_RES *result = taos_query(taos, qstr);
    if (result) {
    TAOS_RES *result1 = taos_query(taos, qstr);
    if (result1) {
      printf("insert row: %i\n", i);
    } else {
      printf("failed to insert row: %i, reason:%s\n", i, "null result"/*taos_errstr(result)*/);
      taos_free_result(result);
      taos_free_result(result1);
      exit(1);
    }
    taos_free_result(result);
    taos_free_result(result1);

  }
  printf("success to insert rows, total %d rows\n", i);

@@ -16,6 +16,9 @@ fi
logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`

[ -z "$logDir" ] && logDir="/var/log/taos"
[ -z "$dataDir" ] && dataDir="/var/lib/taos"

# Coloured Echoes
function red_echo { echo -e "\033[31m$@\033[0m"; }
function green_echo { echo -e "\033[32m$@\033[0m"; }

@@ -16,6 +16,9 @@ fi
logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`

[ -z "$logDir" ] && logDir="/var/log/taos"
[ -z "$dataDir" ] && dataDir="/var/lib/taos"

# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #

@@ -16,6 +16,9 @@ fi
logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`

[ -z "$logDir" ] && logDir="/var/log/taos"
[ -z "$dataDir" ] && dataDir="/var/lib/taos"

# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #

@@ -16,6 +16,9 @@ fi
logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`

[ -z "$logDir" ] && logDir="/var/log/taos"
[ -z "$dataDir" ] && dataDir="/var/lib/taos"

# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #

@@ -16,6 +16,9 @@ fi
logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`

[ -z "$logDir" ] && logDir="/var/log/taos"
[ -z "$dataDir" ] && dataDir="/var/lib/taos"

# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
Some files were not shown because too many files have changed in this diff.