Merge branch 'develop' into feature/TD-1925
commit a7c6e73fb6
@ -79,14 +79,25 @@ pipeline {
|
|||
changeRequest()
|
||||
}
|
||||
parallel {
|
||||
stage('python') {
|
||||
agent{label 'pytest'}
|
||||
stage('python_1') {
|
||||
agent{label 'p1'}
|
||||
steps {
|
||||
|
||||
pre_test()
|
||||
sh '''
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh pytest
|
||||
./test-all.sh p1
|
||||
date'''
|
||||
}
|
||||
}
|
||||
stage('python_2') {
|
||||
agent{label 'p2'}
|
||||
steps {
|
||||
|
||||
pre_test()
|
||||
sh '''
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh p2
|
||||
date'''
|
||||
}
|
||||
}
|
||||
|
@ -96,7 +107,7 @@ pipeline {
|
|||
pre_test()
|
||||
sh '''
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh b1
|
||||
./test-all.sh b1fq
|
||||
date'''
|
||||
}
|
||||
}
|
||||
|
@ -120,7 +131,7 @@ pipeline {
|
|||
sh '''
|
||||
date
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh b2
|
||||
./test-all.sh b2fq
|
||||
date
|
||||
'''
|
||||
}
|
||||
|
@ -141,7 +152,7 @@ pipeline {
|
|||
sh '''
|
||||
date
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh b3
|
||||
./test-all.sh b3fq
|
||||
date'''
|
||||
}
|
||||
}
|
||||
|
|
|
@ -5,7 +5,7 @@ go 1.14
|
|||
require (
|
||||
github.com/jmoiron/sqlx v1.2.0
|
||||
github.com/mattn/go-sqlite3 v2.0.3+incompatible
|
||||
github.com/taosdata/driver-go v0.0.0-20200727182616-1a3b1941c206
|
||||
github.com/taosdata/driver-go v0.0.0-20201113094317-050667e5b4d0
|
||||
go.uber.org/zap v1.14.1
|
||||
google.golang.org/appengine v1.6.5 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
|
||||
|
|
|
@ -4,7 +4,7 @@ PROJECT(TDengine)
|
|||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "2.0.11.0")
|
||||
SET(TD_VER_NUMBER "2.0.12.0")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
|
|
|
@ -128,5 +128,18 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
|
|||
|
||||
## [培训和FAQ](https://www.taosdata.com/cn/faq)
|
||||
|
||||
- [FAQ](https://www.taosdata.com/cn/documentation20/faq):常见问题与答案
|
||||
- [应用案列](https://www.taosdata.com/cn/blog/?categories=4):一些使用实例来解释如何使用TDengine
|
||||
<ul>
<li><a href="https://www.taosdata.com/blog/2020/12/25/2126.html">技术公开课:开源、高效的物联网大数据平台,TDengine内核技术剖析</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1941.html">TDengine视频教程-快速上手</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1945.html">TDengine视频教程-数据建模</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1961.html">TDengine视频教程-集群搭建</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1951.html">TDengine视频教程-Go Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1955.html">TDengine视频教程-JDBC Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1957.html">TDengine视频教程-NodeJS Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1963.html">TDengine视频教程-Python Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1965.html">TDengine视频教程-RESTful Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1959.html">TDengine视频教程-“零”代码运维监控</a></li>
<li><a href="https://www.taosdata.com/cn/documentation20/faq">FAQ:常见问题与答案</a></li>
<li><a href="https://www.taosdata.com/cn/blog/?categories=4">应用案例:一些使用实例来解释如何使用TDengine</a></li>
</ul>
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@ TDengine的安装非常简单,从下载到安装成功仅仅只要几秒钟。
|
|||
- TDengine-server-2.0.10.0-Linux-x64.deb (2.7M)
|
||||
- TDengine-server-2.0.10.0-Linux-x64.tar.gz (4.5M)
|
||||
|
||||
具体的安装过程,请参见<a href="https://www.taosdata.com/blog/2019/08/09/566.html">TDengine多种安装包的安装和卸载</a>。
|
||||
具体的安装过程,请参见<a href="https://www.taosdata.com/blog/2019/08/09/566.html">TDengine多种安装包的安装和卸载</a>以及<a href="https://www.taosdata.com/blog/2020/11/11/1941.html">视频教程</a>。
|
||||
|
||||
## 轻松启动
|
||||
|
||||
|
|
|
@ -59,3 +59,5 @@ INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 21
|
|||
TDengine支持多列模型,只要物理量是一个数据采集点同时采集的(时间戳一致),这些量就可以作为不同列放在一张超级表里。但还有一种极限的设计,单列模型,每个采集的物理量都单独建表,因此每种类型的物理量都单独建立一超级表。比如电流、电压、相位,就建三张超级表。
|
||||
|
||||
TDengine建议尽可能采用多列模型,因为插入效率以及存储效率更高。但对于有些场景,一个采集点的采集量的种类经常变化,这个时候,如果采用多列模型,就需要频繁修改超级表的结构定义,让应用变的复杂,这个时候,采用单列模型会显得简单。
|
||||
|
||||
关于数据建模请参考<a href="https://www.taosdata.com/blog/2020/11/11/1945.html">视频教程</a>。
|
||||
|
|
|
@ -161,7 +161,7 @@ SELECT function<field_name>,…
|
|||
|
||||
超级表聚合查询,TDengine目前支持以下聚合\选择函数:sum、count、avg、first、last、min、max、top、bottom,以及针对全部或部分列的投影操作,使用方式与单表查询的计算过程相同。暂不支持其他类型的聚合计算和四则运算。当前所有的函数及计算过程均不支持嵌套的方式进行执行。
|
||||
|
||||
不使用GROUP BY的查询将会对超级表下所有满足筛选条件的表按时间进行聚合,结果输出默认是按照时间戳单调递增输出,用户可以使用ORDER BY _c0 ASC|DESC选择查询结果时间戳的升降排序;使用GROUP BY <tag_name> 的聚合查询会按照tags进行分组,并对每个组内的数据分别进行聚合,输出结果为各个组的聚合结果,组间的排序可以由ORDER BY <tag_name> 语句指定,每个分组内部,时间序列是单调递增的。
|
||||
不使用GROUP BY的查询将会对超级表下所有满足筛选条件的表按时间进行聚合,结果输出默认是按照时间戳单调递增输出,用户可以使用ORDER BY _c0 ASC|DESC选择查询结果时间戳的升降排序;使用GROUP BY <tag_name> 的聚合查询会按照tags进行分组,并对每个组内的数据分别进行聚合,输出结果为各个组的聚合结果,组间的排序可以由ORDER BY <tag_name> 语句指定,每个分组内部,时间序列是单调递增的。
|
||||
|
||||
使用SLIMIT/SOFFSET语句指定组间分页,即指定结果集中输出的最大组数以及对组起始的位置。使用LIMIT/OFFSET语句指定组内分页,即指定结果集中每个组内最多输出多少条记录以及记录起始的位置。
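
The following sketch runs one such super-table aggregation through the Python connector described later in this document; the `power` database, the `meters` super table and its `location` tag are assumptions borrowed from other examples in this document.

```python
import taos  # TDengine Python connector, introduced later in this document

# Assumption: a database `power` containing the super table `meters` with tag `location` already exists.
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", database="power")
c = conn.cursor()

# Aggregate per tag value; ORDER BY the tag sorts the groups,
# SLIMIT pages over groups, LIMIT pages inside each group.
c.execute(
    "SELECT AVG(current), MAX(current) FROM meters "
    "GROUP BY location ORDER BY location SLIMIT 5 LIMIT 100"
)
for row in c.fetchall():
    print(row)

c.close()
conn.close()
```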
|
||||
|
||||
|
@ -178,7 +178,7 @@ CREATE TABLE thermometer (ts timestamp, degree double)
|
|||
TAGS(location binary(20), type int)
|
||||
```
|
||||
|
||||
假设有北京,天津和上海三个地区的采集器共4个,温度采集器有3种类型,我们就可以对每个采集器建表如下:
|
||||
假设有北京,天津和上海三个地区的采集器共4个,温度采集器有3种类型,我们就可以对每个采集器建表如下:
|
||||
|
||||
```mysql
|
||||
CREATE TABLE therm1 USING thermometer TAGS ('beijing', 1);
|
||||
|
|
|
@ -77,15 +77,13 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
SHOW VARIABLES;
|
||||
```
|
||||
|
||||
|
||||
- **使用数据库**
|
||||
|
||||
|
||||
```mysql
|
||||
USE db_name;
|
||||
```
|
||||
使用/切换数据库
|
||||
|
||||
|
||||
- **删除数据库**
|
||||
```mysql
|
||||
DROP DATABASE [IF EXISTS] db_name;
|
||||
|
@ -120,7 +118,6 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
|
||||
**Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。
|
||||
|
||||
|
||||
- **显示系统所有数据库**
|
||||
```mysql
|
||||
SHOW DATABASES;
|
||||
|
@ -128,7 +125,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
|
||||
## 表管理
|
||||
- **创建数据表**
|
||||
|
||||
|
||||
```mysql
|
||||
CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]);
|
||||
```
|
||||
|
@ -153,7 +150,6 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
|
||||
显示当前数据库下的所有数据表信息。说明:可在 like 中使用通配符进行名称的匹配。通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'_'(下划线)匹配一个字符。
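
As a small illustration of the wildcard matching described above, the sketch below lists tables through the Python connector; the database and table-name pattern are assumptions.

```python
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", database="power")
c = conn.cursor()

# '%' matches zero or more characters, '_' matches exactly one character.
c.execute("SHOW TABLES LIKE 'd100_'")   # matches d1001, d1002, ... but not d10010
for row in c.fetchall():
    print(row[0])

conn.close()
```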
|
||||
|
||||
|
||||
- **在线修改显示字符宽度**
|
||||
|
||||
```mysql
|
||||
|
@ -184,18 +180,18 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
|
||||
## 超级表STable管理
|
||||
- **创建超级表**
|
||||
|
||||
|
||||
```mysql
|
||||
CREATE TABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
|
||||
```
|
||||
创建STable, 与创建表的SQL语法相似,但需指定TAGS字段的名称和类型
|
||||
|
||||
|
||||
说明:
|
||||
1) TAGS 列的数据类型不能是timestamp类型;
|
||||
2) TAGS 列名不能与其他列名相同;
|
||||
3) TAGS 列名不能为预留关键字;
|
||||
4) TAGS 最多允许128个,也可以为0个,总长度不超过16k个字符。超级表与子表的完整建表示例见下方代码。
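
Putting the rules above together, here is a minimal sketch using the Python connector; the `meters` super table, the `d1001` child table and their column/tag layout are assumptions borrowed from examples elsewhere in this document.

```python
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", database="power")
c = conn.cursor()

# Create a super table with two tags, then a child table bound to concrete tag values.
c.execute(
    "CREATE TABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
    "TAGS (location BINARY(20), groupid INT)"
)
c.execute("CREATE TABLE IF NOT EXISTS d1001 USING meters TAGS ('Beijing.Chaoyang', 2)")

conn.close()
```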
|
||||
|
||||
|
||||
- **删除超级表**
|
||||
|
||||
```mysql
|
||||
|
@ -215,7 +211,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
```mysql
|
||||
DESCRIBE stb_name;
|
||||
```
|
||||
|
||||
|
||||
- **超级表增加列**
|
||||
|
||||
```mysql
|
||||
|
@ -230,11 +226,11 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
|
||||
## 超级表 STable 中 TAG 管理
|
||||
- **添加标签**
|
||||
|
||||
|
||||
```mysql
|
||||
ALTER TABLE stb_name ADD TAG new_tag_name tag_type;
|
||||
```
|
||||
为STable增加一个新的标签,并指定新标签的类型。标签总数不能超过128个,总长度不超过16k个字符.
|
||||
为STable增加一个新的标签,并指定新标签的类型。标签总数不能超过128个,总长度不超过16k个字符。
|
||||
|
||||
- **删除标签**
|
||||
|
||||
|
@ -265,36 +261,31 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
```
|
||||
向表tb_name中插入一条记录
|
||||
|
||||
|
||||
- **插入一条记录,数据对应到指定的列**
|
||||
```mysql
|
||||
INSERT INTO tb_name (field1_name, ...) VALUES(field1_value, ...)
|
||||
```
|
||||
向表tb_name中插入一条记录,数据对应到指定的列。SQL语句中没有出现的列,数据库将自动填充为NULL。主键(时间戳)不能为NULL。
|
||||
|
||||
|
||||
- **插入多条记录**
|
||||
```mysql
|
||||
INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...)...;
|
||||
```
|
||||
向表tb_name中插入多条记录
|
||||
|
||||
|
||||
- **按指定的列插入多条记录**
|
||||
```mysql
|
||||
INSERT INTO tb_name (field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)
|
||||
```
|
||||
向表tb_name中按指定的列插入多条记录
|
||||
|
||||
|
||||
- **向多个表插入多条记录**
|
||||
```mysql
|
||||
INSERT INTO tb1_name VALUES (field1_value1, ...)(field1_value2, ...)...
|
||||
INSERT INTO tb1_name VALUES (field1_value1, ...)(field1_value2, ...)...
|
||||
tb2_name VALUES (field1_value1, ...)(field1_value2, ...)...;
|
||||
```
|
||||
同时向表tb1_name和tb2_name中分别插入多条记录
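
A minimal sketch of the multi-table insert form above, issued through the Python connector; the `power` database and the tables `d1001`/`d1002` with their column layout are assumptions.

```python
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", database="power")
c = conn.cursor()

# One statement writes several rows into two different tables (d1001 and d1002 are assumed to exist
# and to have the layout ts, current, voltage, phase).
c.execute(
    "INSERT INTO d1001 VALUES (now, 10.2, 219, 0.32) (now + 1s, 10.3, 218, 0.25) "
    "d1002 VALUES (now, 10.3, 218, 0.25)"
)

c.close()
conn.close()
```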
|
||||
|
||||
|
||||
- **同时向多个表按列插入多条记录**
|
||||
```mysql
|
||||
INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...)
|
||||
|
@ -320,7 +311,7 @@ SELECT select_expr [, select_expr ...]
|
|||
[FILL fill_val]
|
||||
[SLIDING fill_val]
|
||||
[GROUP BY col_list]
|
||||
[ORDER BY col_list { DESC | ASC }]
|
||||
[ORDER BY col_list { DESC | ASC }]
|
||||
[SLIMIT limit_val [, SOFFSET offset_val]]
|
||||
[LIMIT limit_val [, OFFSET offset_val]]
|
||||
[>> export_file]
|
||||
|
@ -382,7 +373,6 @@ taos> SELECT * FROM meters;
|
|||
Query OK, 9 row(s) in set (0.002022s)
|
||||
```
|
||||
|
||||
|
||||
通配符支持表名前缀,以下两个SQL语句均为返回全部的列:
|
||||
```mysql
|
||||
SELECT * FROM d1001;
|
||||
|
@ -592,11 +582,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中记录行数或某列的非空值个数。
|
||||
返回结果数据类型:长整型INT64。
|
||||
应用字段:应用全部字段。
|
||||
适用于:表、超级表。
|
||||
说明:1)可以使用星号*来替代具体的字段,使用星号(*)返回全部记录数量。2)针对同一表的(不包含NULL值)字段查询结果均相同。3)如果统计对象是具体的列,则返回该列中非NULL值的记录数量。
|
||||
功能说明:统计表/超级表中记录行数或某列的非空值个数。
|
||||
返回结果数据类型:长整型INT64。
|
||||
应用字段:应用全部字段。
|
||||
适用于:表、超级表。
|
||||
说明:1)可以使用星号*来替代具体的字段,使用星号(*)返回全部记录数量。2)针对同一表的(不包含NULL值)字段查询结果均相同。3)如果统计对象是具体的列,则返回该列中非NULL值的记录数量。
|
||||
|
||||
示例:
|
||||
```mysql
|
||||
|
@ -613,16 +603,15 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
Query OK, 1 row(s) in set (0.001075s)
|
||||
```
|
||||
|
||||
|
||||
- **AVG**
|
||||
```mysql
|
||||
SELECT AVG(field_name) FROM tb_name [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的平均值。
|
||||
返回结果数据类型:双精度浮点数Double。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool字段。
|
||||
适用于:表、超级表。
|
||||
|
||||
功能说明:统计表/超级表中某列的平均值。
|
||||
返回结果数据类型:双精度浮点数Double。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool字段。
|
||||
适用于:表、超级表。
|
||||
|
||||
示例:
|
||||
```mysql
|
||||
taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM meters;
|
||||
|
@ -642,8 +631,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT TWA(field_name) FROM tb_name WHERE clause;
|
||||
```
|
||||
功能说明:时间加权平均函数。统计表/超级表中某列在一段时间内的时间加权平均。
|
||||
返回结果数据类型:双精度浮点数Double。
|
||||
功能说明:时间加权平均函数。统计表/超级表中某列在一段时间内的时间加权平均。
|
||||
返回结果数据类型:双精度浮点数Double。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:时间加权平均(time weighted average, TWA)查询需要指定查询时间段的 _开始时间_ 和 _结束时间_ 。
|
||||
适用于:表、超级表。
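
A hedged sketch of a TWA query through the Python connector; note the explicit start and end time required by the function. The database, table and time range are assumptions.

```python
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", database="power")
c = conn.cursor()

# TWA requires an explicit start and end time in the WHERE clause, as noted above.
c.execute(
    "SELECT TWA(current) FROM d1001 "
    "WHERE ts >= '2020-11-01 00:00:00' AND ts <= '2020-11-01 23:59:59'"
)
print(c.fetchall())

conn.close()
```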
|
||||
|
@ -652,10 +641,10 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT SUM(field_name) FROM tb_name [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的和。
|
||||
返回结果数据类型:双精度浮点数Double和长整型INT64。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
适用于:表、超级表。
|
||||
功能说明:统计表/超级表中某列的和。
|
||||
返回结果数据类型:双精度浮点数Double和长整型INT64。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
适用于:表、超级表。
|
||||
|
||||
示例:
|
||||
```mysql
|
||||
|
@ -676,9 +665,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
|
||||
```
|
||||
功能说明:统计表中某列的均方差。
|
||||
返回结果数据类型:双精度浮点数Double。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
功能说明:统计表中某列的均方差。
|
||||
返回结果数据类型:双精度浮点数Double。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
适用于:表。
|
||||
|
||||
示例:
|
||||
|
@ -694,10 +683,10 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
|
||||
```
|
||||
功能说明:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val是自变量初始值,step_val是自变量的步长值。
|
||||
返回结果数据类型:字符串表达式(斜率, 截距)。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:自变量是时间戳,因变量是该列的值。
|
||||
功能说明:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val是自变量初始值,step_val是自变量的步长值。
|
||||
返回结果数据类型:字符串表达式(斜率, 截距)。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:自变量是时间戳,因变量是该列的值。
|
||||
适用于:表。
|
||||
|
||||
示例:
|
||||
|
@ -715,8 +704,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的值最小值。
|
||||
返回结果数据类型:同应用的字段。
|
||||
功能说明:统计表/超级表中某列的值最小值。
|
||||
返回结果数据类型:同应用的字段。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
|
||||
示例:
|
||||
|
@ -738,8 +727,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的值最大值。
|
||||
返回结果数据类型:同应用的字段。
|
||||
功能说明:统计表/超级表中某列的值最大值。
|
||||
返回结果数据类型:同应用的字段。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
|
||||
示例:
|
||||
|
@ -757,14 +746,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
Query OK, 1 row(s) in set (0.000987s)
|
||||
```
|
||||
|
||||
|
||||
- **FIRST**
|
||||
```mysql
|
||||
SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的值最先写入的非NULL值。
|
||||
返回结果数据类型:同应用的字段。
|
||||
应用字段:所有字段。
|
||||
功能说明:统计表/超级表中某列的值最先写入的非NULL值。
|
||||
返回结果数据类型:同应用的字段。
|
||||
应用字段:所有字段。
|
||||
说明:1)如果要返回各个列的首个(时间戳最小)非NULL值,可以使用FIRST(*);2) 如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;3) 如果结果集中所有列全部为NULL值,则不返回结果。
|
||||
|
||||
示例:
|
||||
|
@ -786,9 +774,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的值最后写入的非NULL值。
|
||||
返回结果数据类型:同应用的字段。
|
||||
应用字段:所有字段。
|
||||
功能说明:统计表/超级表中某列的值最后写入的非NULL值。
|
||||
返回结果数据类型:同应用的字段。
|
||||
应用字段:所有字段。
|
||||
说明:1)如果要返回各个列的最后(时间戳最大)一个非NULL值,可以使用LAST(*);2)如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;如果结果集中所有列全部为NULL值,则不返回结果。
|
||||
|
||||
示例:
|
||||
|
@ -810,10 +798,10 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明: 统计表/超级表中某列的值最大*k*个非NULL值。若多于k个列值并列最大,则返回时间戳小的。
|
||||
返回结果数据类型:同应用的字段。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:1)*k*值取值范围1≤*k*≤100;2)系统同时返回该记录关联的时间戳列。
|
||||
功能说明: 统计表/超级表中某列的值最大*k*个非NULL值。若多于k个列值并列最大,则返回时间戳小的。
|
||||
返回结果数据类型:同应用的字段。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:1)*k*值取值范围1≤*k*≤100;2)系统同时返回该记录关联的时间戳列。
|
||||
|
||||
示例:
|
||||
```mysql
|
||||
|
@ -837,9 +825,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的值最小*k*个非NULL值。若多于k个列值并列最小,则返回时间戳小的。
|
||||
返回结果数据类型:同应用的字段。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
功能说明:统计表/超级表中某列的值最小*k*个非NULL值。若多于k个列值并列最小,则返回时间戳小的。
|
||||
返回结果数据类型:同应用的字段。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:1)*k*值取值范围1≤*k*≤100;2)系统同时返回该记录关联的时间戳列。
|
||||
|
||||
示例:
|
||||
|
@ -863,9 +851,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表中某列的值百分比分位数。
|
||||
返回结果数据类型: 双精度浮点数Double。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
功能说明:统计表中某列的值百分比分位数。
|
||||
返回结果数据类型: 双精度浮点数Double。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。
|
||||
|
||||
示例:
|
||||
|
@ -881,9 +869,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表中某列的值百分比分位数,与PERCENTILE函数相似,但是返回近似结果。
|
||||
返回结果数据类型: 双精度浮点数Double。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
功能说明:统计表中某列的值百分比分位数,与PERCENTILE函数相似,但是返回近似结果。
|
||||
返回结果数据类型: 双精度浮点数Double。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数
|
||||
```mysql
|
||||
taos> SELECT APERCENTILE(current, 20) FROM d1001;
|
||||
|
@ -897,9 +885,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
|
||||
```
|
||||
功能说明:返回表(超级表)的最后一条记录。
|
||||
返回结果数据类型:同应用的字段。
|
||||
应用字段:所有字段。
|
||||
功能说明:返回表(超级表)的最后一条记录。
|
||||
返回结果数据类型:同应用的字段。
|
||||
应用字段:所有字段。
|
||||
说明:与last函数不同,last_row不支持时间范围限制,强制返回最后一条记录。
|
||||
|
||||
示例:
|
||||
|
@ -922,11 +910,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
```mysql
|
||||
SELECT DIFF(field_name) FROM tb_name [WHERE clause];
|
||||
```
|
||||
功能说明:统计表中某列的值与前一行对应值的差。
|
||||
返回结果数据类型: 同应用字段。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
功能说明:统计表中某列的值与前一行对应值的差。
|
||||
返回结果数据类型: 同应用字段。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:输出结果行数是范围内总行数减一,第一行没有结果输出。
|
||||
|
||||
|
||||
示例:
|
||||
```mysql
|
||||
taos> SELECT DIFF(current) FROM d1001;
|
||||
|
@ -937,14 +925,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
Query OK, 2 row(s) in set (0.001162s)
|
||||
```
|
||||
|
||||
|
||||
- **SPREAD**
|
||||
```mysql
|
||||
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列的最大值和最小值之差。
|
||||
返回结果数据类型: 双精度浮点数。
|
||||
应用字段:不能应用在binary、nchar、bool类型字段。
|
||||
功能说明:统计表/超级表中某列的最大值和最小值之差。
|
||||
返回结果数据类型: 双精度浮点数。
|
||||
应用字段:不能应用在binary、nchar、bool类型字段。
|
||||
说明:可用于TIMESTAMP字段,此时表示记录的时间覆盖范围。
|
||||
|
||||
示例:
|
||||
|
@ -962,15 +949,14 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
Query OK, 1 row(s) in set (0.000836s)
|
||||
```
|
||||
|
||||
|
||||
- **四则运算**
|
||||
|
||||
```mysql
|
||||
SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
功能说明:统计表/超级表中某列或多列间的值加、减、乘、除、取余计算结果。
|
||||
返回结果数据类型:双精度浮点数。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
功能说明:统计表/超级表中某列或多列间的值加、减、乘、除、取余计算结果。
|
||||
返回结果数据类型:双精度浮点数。
|
||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||
说明:1)支持两列或多列之间进行计算,可使用括号控制计算优先级;2)NULL字段不参与计算,如果参与计算的某行中包含NULL,该行的计算结果为NULL。
|
||||
|
||||
```mysql
|
||||
|
@ -987,12 +973,12 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
TDengine支持按时间段进行聚合,可以将表中数据按照时间段进行切割后聚合生成结果,比如温度传感器每秒采集一次数据,但需查询每隔10分钟的温度平均值。这个聚合适合于降维(down sample)操作, 语法如下:
|
||||
|
||||
```mysql
|
||||
SELECT function_list FROM tb_name
|
||||
SELECT function_list FROM tb_name
|
||||
[WHERE where_condition]
|
||||
INTERVAL (interval [, offset])
|
||||
[FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
|
||||
|
||||
SELECT function_list FROM stb_name
|
||||
SELECT function_list FROM stb_name
|
||||
[WHERE where_condition]
|
||||
INTERVAL (interval [, offset])
|
||||
[FILL ({ VALUE | PREV | NULL | LINEAR})]
|
||||
|
@ -1000,18 +986,17 @@ SELECT function_list FROM stb_name
|
|||
```
|
||||
|
||||
- 聚合时间段的长度由关键词INTERVAL指定,最短时间间隔10毫秒(10a),并且支持偏移(偏移必须小于间隔)。聚合查询中,能够同时执行的聚合和选择函数仅限于单个输出的函数:count、avg、sum 、stddev、leastsquares、percentile、min、max、first、last,不能使用具有多行输出结果的函数(例如:top、bottom、diff以及四则运算)。
|
||||
- WHERE语句可以指定查询的起止时间和其他过滤条件
|
||||
- WHERE语句可以指定查询的起止时间和其他过滤条件
|
||||
- FILL语句指定某一时间区间数据缺失的情况下的填充模式。填充模式包括以下几种:
|
||||
1. 不进行填充:NONE(默认填充模式)。
|
||||
|
||||
2. VALUE填充:固定值填充,此时需要指定填充的数值。例如:fill(value, 1.23)。
|
||||
|
||||
3. NULL填充:使用NULL填充数据。例如:fill(null)。
|
||||
|
||||
4. PREV填充:使用前一个非NULL值填充数据。例如:fill(prev)。
|
||||
|
||||
|
||||
说明:
|
||||
2. VALUE填充:固定值填充,此时需要指定填充的数值。例如:fill(value, 1.23)。
|
||||
|
||||
3. NULL填充:使用NULL填充数据。例如:fill(null)。
|
||||
|
||||
4. PREV填充:使用前一个非NULL值填充数据。例如:fill(prev)。
|
||||
|
||||
说明:
|
||||
1. 使用FILL语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过1千万条具有插值的结果。
|
||||
2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。
|
||||
3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用group by语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了group by语句分组,则返回结果中每个group内不按照时间序列严格单调递增。
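
A hedged sketch of the down-sampling query described above, issued through the Python connector; the database, table and time range are assumptions.

```python
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", database="power")
c = conn.cursor()

# Down-sample one table to 10-minute averages; empty windows are filled with the previous value.
# The time range is bounded explicitly, as note 1 above recommends when FILL is used.
c.execute(
    "SELECT AVG(current) FROM d1001 "
    "WHERE ts >= '2020-11-01 00:00:00' AND ts < '2020-11-02 00:00:00' "
    "INTERVAL(10m) FILL(PREV)"
)
for row in c.fetchall():
    print(row)

conn.close()
```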
|
||||
|
@ -1040,8 +1025,6 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER
|
|||
- SQL语句最大长度65480个字符,但可通过系统配置参数maxSQLLength修改,最长可配置为1M
|
||||
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
|
||||
|
||||
|
||||
|
||||
## TAOS SQL其他约定
|
||||
|
||||
**group by的限制**
|
||||
|
@ -1054,6 +1037,4 @@ TAOS SQL支持表之间按主键时间戳来join两张表的列,暂不支持
|
|||
|
||||
**is not null与不为空的表达式适用范围**
|
||||
|
||||
is not null支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。
|
||||
|
||||
|
||||
is not null支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。
|
|
@ -1,6 +1,5 @@
|
|||
# TDengine 2.0 错误码以及对应的十进制码
|
||||
|
||||
|
||||
| 状态码 | 模 | 错误码(十六进制) | 错误描述 | 错误码(十进制) |
|
||||
|-----------------------| :---: | :---------: | :------------------------ | ---------------- |
|
||||
|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647|
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
Memory Size = maxVgroupsPerDb * (blocks * cache + 10Mb) + numOfTables * (tagSizePerTable + 0.5Kb)
|
||||
```
|
||||
|
||||
示例:假设是4核机器,cache是缺省大小16M, blocks是缺省值6,假设有10万张表,标签总长度是256字节,则总的内存需求为:4\*(16\*6+10) + 100000*(0.25+0.5)/1000 = 499M。
|
||||
示例:假设是4核机器,cache是缺省大小16M, blocks是缺省值6,假设有10万张表,标签总长度是256字节,则总的内存需求为:4\*(16\*6+10) + 100000*(0.25+0.5)/1000 = 499M。
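
The arithmetic in the example above can be reproduced with a few lines of Python; the variable names are only for readability.

```python
# Reproducing the memory estimate above: 4 vgroups, cache = 16 MB, blocks = 6,
# 100,000 tables, 256-byte tags (~0.25 KB) plus ~0.5 KB of per-table metadata.
max_vgroups_per_db = 4
cache_mb, blocks = 16, 6
num_tables = 100_000
tag_size_kb, meta_size_kb = 0.25, 0.5

memory_mb = max_vgroups_per_db * (blocks * cache_mb + 10) + num_tables * (tag_size_kb + meta_size_kb) / 1000
print(memory_mb)  # 499.0, matching the figure in the text
```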
|
||||
|
||||
实际运行的系统往往会根据数据特点的不同,将数据存放在不同的DB里。因此做规划时,也需要考虑。
|
||||
|
||||
|
@ -35,7 +35,7 @@ TDengine相对于通用数据库,有超高的压缩比,在绝大多数场景
|
|||
Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
|
||||
```
|
||||
|
||||
示例:1000万台智能电表,每台电表每15分钟采集一次数据,每次采集的数据128字节,那么一年的原始数据量是:10000000\*128\*24\*60/15*365 = 44.8512T。TDengine大概需要消耗44.851/5=8.97024T空间。
|
||||
示例:1000万台智能电表,每台电表每15分钟采集一次数据,每次采集的数据128字节,那么一年的原始数据量是:10000000\*128\*24\*60/15*365 = 44.8512T。TDengine大概需要消耗44.851/5=8.97024T空间。
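
The storage estimate above can likewise be checked with a short Python calculation; the roughly 5x compression ratio is the figure quoted in the text.

```python
# Reproducing the storage estimate above: 10 million meters, one 128-byte row every 15 minutes.
num_tables = 10_000_000
row_size_bytes = 128
rows_per_year = 24 * 60 // 15 * 365   # 35,040 rows per table per year

raw_tb = num_tables * row_size_bytes * rows_per_year / 1e12
print(raw_tb)        # ~44.85 TB of raw data per year, as stated in the text
print(raw_tb / 5)    # with the ~5x compression quoted above, roughly 8.97 TB on disk
```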
|
||||
|
||||
用户可以通过参数keep,设置数据在磁盘中的最大保存时长。为进一步减少存储成本,TDengine还提供多级存储,最冷的数据可以存放在最廉价的存储介质上,应用的访问不用做任何调整,只是读取速度降低了。
|
||||
|
||||
|
@ -121,7 +121,7 @@ taosd -C
|
|||
- replica:副本个数,取值范围:1-3。单位为个,默认值:1
|
||||
- precision:时间戳精度标识,ms表示毫秒,us表示微秒。默认值:ms
|
||||
|
||||
对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL:
|
||||
对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL:
|
||||
|
||||
```
|
||||
create database demo days 10 cache 32 blocks 8 replica 3;
|
||||
|
@ -148,8 +148,8 @@ ALTER DNODE <dnode_id> <config>
|
|||
```
|
||||
|
||||
- dnode_id: 可以通过SQL语句"SHOW DNODES"命令获取
|
||||
- config: 要调整的日志参数,在如下列表中取值
|
||||
> resetlog 截断旧日志文件,创建一个新日志文件
|
||||
- config: 要调整的日志参数,在如下列表中取值
|
||||
> resetlog 截断旧日志文件,创建一个新日志文件
|
||||
> debugFlag < 131 | 135 | 143 > 设置debugFlag为131、135或者143
|
||||
|
||||
例如:
|
||||
|
@ -157,9 +157,9 @@ ALTER DNODE <dnode_id> <config>
|
|||
alter dnode 1 debugFlag 135;
|
||||
```
|
||||
|
||||
## 客户端配置
|
||||
## 客户端配置
|
||||
|
||||
TDengine系统的前台交互客户端应用程序为taos,以及应用驱动,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见[Shell命令行程序](https://www.taosdata.com/cn/documentation/administrator/#_TDengine_Shell命令行程序)。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。
|
||||
TDengine系统的前台交互客户端应用程序为taos,以及应用驱动,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见<a href="https://www.taosdata.com/cn/documentation/administrator/#_TDengine_Shell命令行程序">Shell命令行程序</a>。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。
|
||||
|
||||
**2.0.10.0 之后版本支持命令行以下参数显示当前客户端参数的配置**
|
||||
|
||||
|
@ -176,7 +176,7 @@ taos -C 或 taos --dump-config
|
|||
- locale
|
||||
|
||||
默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置
|
||||
|
||||
|
||||
TDengine为存储中文、日文、韩文等非ASCII编码的宽字符,提供一种专门的字段类型nchar。写入nchar字段的数据将统一采用UCS4-LE格式进行编码并发送到服务器。需要注意的是,编码正确性是客户端来保证。因此,如果用户想要正常使用nchar字段来存储诸如中文、日文、韩文等非ASCII字符,需要正确设置客户端的编码格式。
|
||||
|
||||
客户端的输入的字符均采用操作系统当前默认的编码格式,在Linux系统上多为UTF-8,部分中文系统编码则可能是GB18030或GBK等。在docker环境中默认的编码是POSIX。在中文版Windows系统中,编码则是CP936。客户端需要确保正确设置自己所使用的字符集,即客户端运行的操作系统当前编码字符集,才能保证nchar中的数据正确转换为UCS4-LE编码格式。
|
||||
|
@ -186,7 +186,7 @@ taos -C 或 taos --dump-config
|
|||
- charset
|
||||
|
||||
默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置
|
||||
|
||||
|
||||
如果配置文件中不设置charset,在Linux系统中,taos在启动时候,自动读取系统当前的locale信息,并从locale信息中解析提取charset编码格式。如果自动读取locale信息失败,则尝试读取charset配置,如果读取charset配置也失败,则中断启动过程。
|
||||
|
||||
在Linux系统中,locale信息包含了字符编码信息,因此正确设置了Linux系统locale以后可以不用再单独设置charset。例如:
|
||||
|
@ -242,12 +242,11 @@ taos -C 或 taos --dump-config
|
|||
为了避免使用字符串时间格式带来的不确定性,也可以直接使用Unix时间戳。此外,还可以在SQL语句中使用带有时区的时间戳字符串,例如:RFC3339格式的时间戳字符串,2013-04-12T15:52:01.123+08:00或者ISO-8601格式时间戳字符串2013-04-12T15:52:01.123+0800。上述两个字符串转化为Unix时间戳不受系统所在时区的影响。
|
||||
|
||||
启动taos时,也可以从命令行指定一个taosd实例的end point,否则就从taos.cfg读取。
|
||||
|
||||
|
||||
- maxBinaryDisplayWidth
|
||||
|
||||
Shell中binary 和 nchar字段的显示宽度上限,超过此限制的部分将被隐藏。默认值:30。可在 shell 中通过命令 set max_binary_display_width nn 动态修改此选项。
|
||||
|
||||
|
||||
## 用户管理
|
||||
|
||||
系统管理员可以在CLI界面里添加、删除用户,也可以修改密码。CLI里SQL语法如下:
|
||||
|
@ -280,7 +279,7 @@ ALTER USER <user_name> PRIVILEGE <super|write|read>;
|
|||
SHOW USERS;
|
||||
```
|
||||
|
||||
显示所有用户
|
||||
显示所有用户
|
||||
|
||||
**注意:**SQL 语法中,< >表示需要用户输入的部分,但请不要输入< >本身
|
||||
|
||||
|
@ -428,8 +427,6 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
|
|||
|
||||
您可以通过修改系统配置文件taos.cfg来配置不同的数据目录和日志目录。
|
||||
|
||||
|
||||
|
||||
## TDengine参数限制与保留关键字
|
||||
|
||||
- 数据库名:不能包含“.”以及特殊字符,不能超过32个字符
|
||||
|
@ -448,8 +445,6 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
|
|||
- 库的个数:仅受节点个数限制
|
||||
- 单个库上虚拟节点个数:不能超过64个
|
||||
|
||||
|
||||
|
||||
目前TDengine有将近200个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable名、数据列名及标签列名等。这些关键字列表如下:
|
||||
|
||||
| 关键字列表 | | | | |
|
||||
|
@ -489,5 +484,4 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
|
|||
| COMP | GE | METRIC | SELECT | VIEW |
|
||||
| CONCAT | GLOB | METRICS | SEMI | WAVG |
|
||||
| CONFIGS | GRANTS | MIN | SET | WHERE |
|
||||
| CONFLICT | GROUP | | | |
|
||||
|
||||
| CONFLICT | GROUP | | | |
|
|
@ -1,4 +1,4 @@
|
|||
#数据模型和整体架构
|
||||
# 数据模型和整体架构
|
||||
|
||||
## 数据模型
|
||||
### 物联网典型场景
|
||||
|
@ -102,7 +102,7 @@
|
|||
|
||||
每一条记录都有设备ID,时间戳,采集的物理量(如上图中的电流、电压、相位),还有与每个设备相关的静态标签(如上述表一中的位置Location和分组groupId)。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。
|
||||
|
||||
### 数据特征
|
||||
### 数据特征
|
||||
除时序特征外,仔细研究发现,物联网、车联网、运维监测类数据还具有很多其他明显的特征:
|
||||
|
||||
1. 数据高度结构化;
|
||||
|
@ -121,7 +121,7 @@
|
|||
### 关系型数据库模型
|
||||
因为采集的数据一般是结构化数据,同时为降低学习门槛,TDengine采用传统的关系型数据库模型管理数据。因此用户需要先创建库,然后创建表,之后才能插入或查询数据。TDengine采用的是结构化存储,而不是NoSQL的key-value存储。
|
||||
|
||||
### 一个数据采集点一张表
|
||||
### 一个数据采集点一张表
|
||||
为充分利用其数据的时序性和其他数据特点,TDengine要求**对每个数据采集点单独建表**(比如有一千万个智能电表,就需创建一千万张表,上述表格中的d1001, d1002, d1003, d1004都需单独建表),用来存储这个采集点所采集的时序数据。这种设计有几大优点:
|
||||
|
||||
1. 能保证一个采集点的数据在存储介质上是以块为单位连续存储的。如果读取一个时间段的数据,它能大幅减少随机读取操作,成数量级的提升读取和查询速度。
|
||||
|
@ -150,7 +150,7 @@ TDengine 分布式架构的逻辑结构图如下:
|
|||
<center> 图 1 TDengine架构示意图 </center>
|
||||
一个完整的 TDengine 系统是运行在一到多个物理节点上的,逻辑上,它包含数据节点(dnode)、TDengine应用驱动(taosc)以及应用(app)。系统中存在一到多个数据节点,这些数据节点组成一个集群(cluster)。应用通过taosc的API与TDengine集群进行互动。下面对每个逻辑单元进行简要介绍。
|
||||
|
||||
**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯,如果不了解FQDN,请看博文《[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)》。
|
||||
**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯,如果不了解FQDN,请看博文<a href="https://www.taosdata.com/blog/2020/09/11/1824.html">《一篇文章说清楚TDengine的FQDN》</a>。
|
||||
|
||||
**数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode包含零到多个逻辑的虚拟节点(VNODE),零或者至多一个逻辑的管理节点(mnode)。dnode在系统中的唯一标识由实例的End Point (EP )决定。EP是dnode所在物理节点的FQDN (Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。
|
||||
|
||||
|
@ -356,7 +356,7 @@ SQL语句的解析和校验工作在客户端完成。解析SQL语句并生成
|
|||
|
||||
客户端在获取查询结果的时候,dnode的查询执行队列中的工作线程会等待vnode执行线程执行完成,才能将查询结果返回到请求的客户端。
|
||||
|
||||
### 按时间轴聚合、降采样、插值
|
||||
### 按时间轴聚合、降采样、插值
|
||||
|
||||
时序数据有别于普通数据的显著特征是每条记录均具有时间戳,因此针对具有时间戳数据在时间轴上进行聚合是不同于普通数据库的重要功能。从这点上来看,与流计算引擎的窗口查询有相似的地方。
|
||||
|
||||
|
|
|
@ -226,3 +226,5 @@ SHOW MNODES;
|
|||
如果副本数为偶数,当一个vnode group里一半vnode不工作时,是无法从中选出master的。同理,一半mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator, 那么节点B就能正常工作。
|
||||
|
||||
TDengine提供一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。请点击[安装包下载](https://www.taosdata.com/cn/all-downloads/),在TDengine Arbitrator Linux一节中,选择适合的版本下载并安装。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6042。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了,当副本数为偶数时,系统将自动连接配置的arbitrator。如果副本数为奇数,即使配置了arbitrator, 系统也不会去建立连接。
|
||||
|
||||
关于集群搭建请参考<a href="https://www.taosdata.com/blog/2020/11/11/1961.html">视频教程</a>。
|
||||
|
|
|
@ -65,8 +65,6 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
|
|||
|
||||
**提示: 如本机没有部署TDengine服务,仅安装了应用驱动,则taos.cfg中仅需配置firstEP,无需配置FQDN。**
|
||||
|
||||
|
||||
|
||||
**Windows x64/x86**
|
||||
|
||||
**1. 从涛思官网(https://www.taosdata.com/cn/all-downloads/)下载 :**
|
||||
|
@ -100,8 +98,6 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
|
|||
|
||||
**2.卸载:运行unins000.exe可卸载TDengine应用驱动。**
|
||||
|
||||
|
||||
|
||||
**安装验证**
|
||||
|
||||
以上安装和配置完成后,并确认TDengine服务已经正常启动运行,此时可以执行taos客户端进行登录。
|
||||
|
@ -140,8 +136,6 @@ taos>
|
|||
taos>
|
||||
```
|
||||
|
||||
|
||||
|
||||
## C/C++ Connector
|
||||
|
||||
**C/C++连接器支持的系统有**:
|
||||
|
@ -151,8 +145,6 @@ taos>
|
|||
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
|
||||
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **开发中** |
|
||||
|
||||
|
||||
|
||||
C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine头文件 _taos.h_(安装后,位于 _/usr/local/taos/include_):
|
||||
|
||||
```C
|
||||
|
@ -166,7 +158,6 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
|
|||
|
||||
使用C/C++连接器的示例代码请参见 https://github.com/taosdata/TDengine/tree/develop/tests/examples/c。
|
||||
|
||||
|
||||
### 基础API
|
||||
|
||||
基础API用于完成创建数据库连接等工作,为其它API的执行提供运行时环境。
|
||||
|
@ -175,22 +166,18 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
|
|||
|
||||
初始化运行环境。如果应用没有主动调用该API,那么应用在调用`taos_connect`时将自动调用,故应用程序一般无需手动调用该API。
|
||||
|
||||
|
||||
- `void taos_cleanup()`
|
||||
|
||||
清理运行环境,应用退出前应调用此API。
|
||||
|
||||
|
||||
- `int taos_options(TSDB_OPTION option, const void * arg, ...)`
|
||||
|
||||
设置客户端选项,目前只支持时区设置(_TSDB_OPTION_TIMEZONE_)和编码设置(_TSDB_OPTION_LOCALE_)。时区和编码默认为操作系统当前设置。
|
||||
|
||||
|
||||
- `char *taos_get_client_info()`
|
||||
|
||||
获取客户端版本信息。
|
||||
|
||||
|
||||
- `TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, int port)`
|
||||
|
||||
创建数据库连接,初始化连接上下文。其中需要用户提供的参数包含:
|
||||
|
@ -200,26 +187,21 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
|
|||
- pass:密码
|
||||
- db:数据库名字,如果用户没有提供,也可以正常连接,用户可以通过该连接创建新的数据库,如果用户提供了数据库名字,则说明该数据库用户已经创建好,缺省使用该数据库
|
||||
- port:端口号
|
||||
|
||||
返回值为空表示失败。应用程序需要保存返回的参数,以便后续API调用。
|
||||
|
||||
返回值为空表示失败。应用程序需要保存返回的参数,以便后续API调用。
|
||||
|
||||
- `char *taos_get_server_info(TAOS *taos)`
|
||||
|
||||
获取服务端版本信息。
|
||||
|
||||
|
||||
- `int taos_select_db(TAOS *taos, const char *db)`
|
||||
|
||||
将当前的缺省数据库设置为`db`。
|
||||
|
||||
|
||||
- `void taos_close(TAOS *taos)`
|
||||
|
||||
关闭连接, 其中`taos`是`taos_connect`函数返回的指针。
|
||||
|
||||
|
||||
|
||||
### 同步查询API
|
||||
|
||||
传统的数据库操作API,都属于同步操作。应用调用API后,一直处于阻塞状态,直到服务器返回结果。TDengine支持如下API:
|
||||
|
@ -228,37 +210,30 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
|
|||
|
||||
该API用来执行SQL语句,可以是DQL、DML或DDL语句。 其中的`taos`参数是通过`taos_connect`获得的指针。返回值 NULL 表示失败。
|
||||
|
||||
|
||||
- `int taos_result_precision(TAOS_RES *res)`
|
||||
|
||||
返回结果集时间戳字段的精度,`0` 代表毫秒,`1` 代表微秒,`2` 代表纳秒。
|
||||
|
||||
|
||||
- `TAOS_ROW taos_fetch_row(TAOS_RES *res)`
|
||||
|
||||
按行获取查询结果集中的数据。
|
||||
|
||||
|
||||
- `int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)`
|
||||
|
||||
批量获取查询结果集中的数据,返回值为获取到的数据的行数。
|
||||
|
||||
|
||||
- `int taos_num_fields(TAOS_RES *res)` 和 `int taos_field_count(TAOS_RES *res)`
|
||||
|
||||
这两个API等价,用于获取查询结果集中的列数。
|
||||
|
||||
|
||||
- `int* taos_fetch_lengths(TAOS_RES *res)`
|
||||
|
||||
获取结果集中每个字段的长度。 返回值是一个数组,其长度为结果集的列数。
|
||||
|
||||
|
||||
- `int taos_affected_rows(TAOS_RES *res)`
|
||||
|
||||
获取被所执行的 SQL 语句影响的行数。
|
||||
|
||||
|
||||
- `TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)`
|
||||
|
||||
获取查询结果集每列数据的属性(数据类型、名字、字节数),与taos_num_fileds配合使用,可用来解析`taos_fetch_row`返回的一个元组(一行)的数据。 `TAOS_FIELD` 的结构如下:
|
||||
|
@ -271,30 +246,24 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
|
|||
} TAOS_FIELD;
|
||||
```
|
||||
|
||||
|
||||
- `void taos_stop_query(TAOS_RES *res)`
|
||||
|
||||
停止一个查询的执行。
|
||||
|
||||
|
||||
- `void taos_free_result(TAOS_RES *res)`
|
||||
|
||||
释放查询结果集以及相关的资源。查询完成后,务必调用该API释放资源,否则可能导致应用内存泄露。
|
||||
|
||||
|
||||
- `char *taos_errstr(TAOS_RES *res)`
|
||||
|
||||
获取最近一次API调用失败的原因,返回值为字符串。
|
||||
|
||||
|
||||
- `char *taos_errno(TAOS_RES *res)`
|
||||
|
||||
获取最近一次API调用失败的原因,返回值为错误代码。
|
||||
|
||||
|
||||
**注意**:对于每个数据库应用,2.0及以上版本 TDengine 推荐只建立一个连接。同时在应用中将该连接 (TAOS*) 结构体传递到不同的线程共享使用。基于 TAOS 结构体发出的查询、写入等操作具有多线程安全性。C 语言的连接器可以按照需求动态建立面向数据库的新连接(该过程对用户不可见),同时建议只有在程序最后退出的时候才调用 taos_close 关闭连接。
|
||||
|
||||
|
||||
### 异步查询API
|
||||
|
||||
同步API之外,TDengine还提供性能更高的异步调用API处理数据插入、查询操作。在软硬件环境相同的情况下,异步API处理数据插入的速度比同步API快2~4倍。异步API采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步API在网络延迟严重的情况下,优点尤为突出。
|
||||
|
@ -319,7 +288,6 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
|
|||
* res:`taos_query_a`回调时返回的结果集
|
||||
* fp:回调函数。其参数`param`是用户可定义的传递给回调函数的参数结构体;`numOfRows`是获取到的数据的行数(不是整个查询结果集的函数)。 在回调函数中,应用可以通过调用`taos_fetch_row`前向迭代获取批量记录中每一行记录。读完一块内的所有记录后,应用需要在回调函数中继续调用`taos_fetch_rows_a`获取下一批记录进行处理,直到返回的记录数(numOfRows)为零(结果返回完成)或记录数为负值(查询出错)。
|
||||
|
||||
|
||||
- `void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);`
|
||||
|
||||
异步获取一条记录。其中:
|
||||
|
@ -329,7 +297,6 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
|
|||
|
||||
TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线程同时打开多张表,并可以同时对每张打开的表进行查询或者插入操作。需要指出的是,**客户端应用必须确保对同一张表的操作完全串行化**,即对同一个表的插入或查询操作未完成时(未返回时),不能够执行第二个插入或查询操作。
|
||||
|
||||
|
||||
### 参数绑定API
|
||||
|
||||
除了直接调用 `taos_query` 进行查询,TDengine也提供了支持参数绑定的Prepare API,与 MySQL 一样,这些API目前也仅支持用问号`?`来代表待绑定的参数,具体如下:
|
||||
|
@ -369,12 +336,11 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线
|
|||
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
|
||||
|
||||
获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result`以释放资源。
|
||||
|
||||
|
||||
- `int taos_stmt_close(TAOS_STMT *stmt)`
|
||||
|
||||
执行完毕,释放所有资源。
|
||||
|
||||
|
||||
### 连续查询接口
|
||||
|
||||
TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时间段,对一张或多张数据库的表(数据流)进行各种实时聚合计算操作。操作简单,仅有打开、关闭流的API。具体如下:
|
||||
|
@ -388,14 +354,12 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
|
|||
* stime:是流式计算开始的时间,如果是0,表示从现在开始,如果不为零,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数)
|
||||
* param:是应用提供的用于回调的一个参数,回调时,提供给应用
|
||||
* callback: 第二个回调函数,会在连续查询自动停止时被调用。
|
||||
|
||||
返回值为NULL,表示创建失败;返回值不为空,表示创建成功。
|
||||
|
||||
返回值为NULL,表示创建失败;返回值不为空,表示创建成功。
|
||||
|
||||
- `void taos_close_stream (TAOS_STREAM *tstr)`
|
||||
|
||||
关闭数据流,其中提供的参数是taos_open_stream的返回值。用户停止流式计算的时候,务必关闭该数据流。
|
||||
|
||||
|
||||
### 数据订阅接口
|
||||
|
||||
|
@ -420,7 +384,6 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
|
|||
* param:调用 `taos_subscribe`时客户程序提供的附加参数
|
||||
* code:错误码
|
||||
|
||||
|
||||
* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
|
||||
|
||||
同步模式下,该函数用来获取订阅的结果。 用户应用程序将其置于一个循环之中。 如两次调用`taos_consume`的间隔小于订阅的轮询周期,API将会阻塞,直到时间间隔超过此周期。 如果数据库有新记录到达,该API将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 `NULL`,说明系统出错。 异步模式下,用户程序不应调用此API。
|
||||
|
@ -429,11 +392,12 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
|
|||
|
||||
取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。
|
||||
|
||||
|
||||
## Python Connector
|
||||
|
||||
Python连接器的使用参见<a href="https://www.taosdata.com/blog/2020/11/11/1963.html">视频教程</a>
|
||||
|
||||
### 安装准备
|
||||
* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector/#安装连接器驱动步骤)。
|
||||
* 应用驱动安装请参考<a href="https://www.taosdata.com/cn/documentation/connector/#安装连接器驱动步骤">安装连接器驱动步骤</a>。
|
||||
* 已安装python 2.7 or >= 3.4
|
||||
* 已安装pip 或 pip3
|
||||
|
||||
|
@ -471,7 +435,7 @@ python -m pip install python3\
|
|||
* 导入TDengine客户端模块
|
||||
|
||||
```python
|
||||
import taos
|
||||
import taos
|
||||
```
|
||||
* 获取连接并获取游标对象
|
||||
```python
|
||||
|
@ -483,7 +447,7 @@ c1 = conn.cursor()
|
|||
* 写入数据
|
||||
```python
|
||||
import datetime
|
||||
|
||||
|
||||
# 创建数据库
|
||||
c1.execute('create database db')
|
||||
c1.execute('use db')
|
||||
|
@ -511,7 +475,7 @@ numOfRows = c1.rowcount
|
|||
numOfCols = len(c1.description)
|
||||
for irow in range(numOfRows):
|
||||
print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
|
||||
|
||||
|
||||
# 直接使用cursor 循环拉取查询结果
|
||||
c1.execute('select * from tb')
|
||||
for data in c1:
|
||||
|
@ -537,7 +501,6 @@ for d in data:
|
|||
sub.close()
|
||||
```
|
||||
|
||||
|
||||
* 关闭连接
|
||||
```python
|
||||
c1.close()
|
||||
|
@ -561,7 +524,6 @@ conn.close()
|
|||
|
||||
用于生成taos.TDengineConnection的实例。
|
||||
|
||||
|
||||
### Python客户端使用示例代码
|
||||
在tests/examples/python中,我们提供了一个示例Python程序read_example.py,可以参考这个程序来设计用户自己的写入、查询程序。在安装了对应的客户端后,通过import taos引入taos类。主要步骤如下
|
||||
|
||||
|
@ -574,7 +536,7 @@ conn.close()
|
|||
|
||||
## RESTful Connector
|
||||
|
||||
为支持各种不同类型平台的开发,TDengine提供符合REST设计标准的API,即RESTful API。为最大程度降低学习成本,不同于其他数据库RESTful API的设计方法,TDengine直接通过HTTP POST 请求BODY中包含的SQL语句来操作数据库,仅需要一个URL。
|
||||
为支持各种不同类型平台的开发,TDengine提供符合REST设计标准的API,即RESTful API。为最大程度降低学习成本,不同于其他数据库RESTful API的设计方法,TDengine直接通过HTTP POST 请求BODY中包含的SQL语句来操作数据库,仅需要一个URL。RESTful连接器的使用参见<a href="https://www.taosdata.com/blog/2020/11/11/1965.html">视频教程</a>。
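
As a hedged illustration of this single-URL style, the sketch below sends one SQL statement over HTTP with the `requests` library; the endpoint path `/rest/sql`, port 6041 and the Basic-auth credentials are assumptions, and the exact request format is specified in the sections that follow.

```python
import requests

# Assumptions: default REST port 6041, endpoint /rest/sql, user root / password taosdata.
resp = requests.post(
    "http://127.0.0.1:6041/rest/sql",
    data="select * from log.dn limit 10",
    auth=("root", "taosdata"),
)
print(resp.json())
```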
|
||||
|
||||
### HTTP请求格式
|
||||
|
||||
|
@ -772,7 +734,7 @@ C#连接器支持的系统有:Linux 64/Windows x64/Windows x86
|
|||
|
||||
### 安装准备
|
||||
|
||||
* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector/#安装连接器驱动步骤)。
|
||||
* 应用驱动安装请参考<a href="https://www.taosdata.com/cn/documentation/connector/#安装连接器驱动步骤">安装连接器驱动步骤</a>。
|
||||
* .NET接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。
|
||||
* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(dapper)框架驱动。
|
||||
|
||||
|
@ -811,16 +773,15 @@ https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos
|
|||
https://www.taosdata.com/blog/2020/11/02/1901.html
|
||||
```
|
||||
|
||||
|
||||
## Go Connector
|
||||
|
||||
### 安装准备
|
||||
|
||||
* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector/#安装连接器驱动步骤)。
|
||||
* 应用驱动安装请参考<a href="https://www.taosdata.com/cn/documentation/connector/#安装连接器驱动步骤">安装连接器驱动步骤</a>。
|
||||
|
||||
TDengine提供了GO驱动程序`taosSql`。 `taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengine, 详见`https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go`。
|
||||
|
||||
使用 Go 连接器的示例代码请参考 https://github.com/taosdata/TDengine/tree/develop/tests/examples/go。
|
||||
使用 Go 连接器的示例代码请参考 https://github.com/taosdata/TDengine/tree/develop/tests/examples/go 以及<a href="https://www.taosdata.com/blog/2020/11/11/1951.html">视频教程</a>。
|
||||
|
||||
```Go
|
||||
import (
|
||||
|
@ -837,7 +798,7 @@ go env -w GOPROXY=https://goproxy.io,direct
|
|||
|
||||
### 常用API
|
||||
|
||||
- sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
|
||||
- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
|
||||
|
||||
该API用来打开DB,返回一个类型为*DB的对象,一般情况下,DRIVER_NAME设置为字符串`taosSql`, dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`,如果客户想要用多个goroutine并发访问TDengine, 那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine
|
||||
|
||||
|
@ -876,45 +837,47 @@ Node.js连接器支持的系统有:
|
|||
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
|
||||
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** |
|
||||
|
||||
Node.js连接器的使用参见<a href="https://www.taosdata.com/blog/2020/11/11/1957.html">视频教程</a>
|
||||
|
||||
### 安装准备
|
||||
|
||||
* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector/#安装连接器驱动步骤)。
|
||||
* 应用驱动安装请参考<a href="https://www.taosdata.com/cn/documentation/connector/#安装连接器驱动步骤">安装连接器驱动步骤</a>。
|
||||
|
||||
### 安装Node.js连接器
|
||||
|
||||
用户可以通过[npm](https://www.npmjs.com/)来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。具体安装步骤如下:
|
||||
用户可以通过<a href="https://www.npmjs.com/">npm</a>来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。具体安装步骤如下:
|
||||
|
||||
首先,通过[npm](https://www.npmjs.com/)安装node.js 连接器.
|
||||
首先,通过<a href="https://www.npmjs.com/">npm</a>安装node.js 连接器.
|
||||
|
||||
```bash
|
||||
npm install td2.0-connector
|
||||
```
|
||||
我们建议用户使用npm 安装node.js连接器。如果您没有安装npm, 可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下
|
||||
|
||||
我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件:
|
||||
我们使用<a href="https://github.com/nodejs/node-gyp">node-gyp</a>和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件:
|
||||
|
||||
### Linux
|
||||
|
||||
- `python` (建议`v2.7` , `v3.x.x` 目前还不支持)
|
||||
- `node` 必须采用v10.x版本,其他版本存在包兼容性的问题。
|
||||
- `make`
|
||||
- c语言编译器比如[GCC](https://gcc.gnu.org)
|
||||
- c语言编译器比如<a href="https://gcc.gnu.org">GCC</a>
|
||||
|
||||
### Windows
|
||||
|
||||
#### 安装方法1
|
||||
|
||||
使用微软的[windows-build-tools](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具
|
||||
使用微软的<a href="https://github.com/felixrieseberg/windows-build-tools">windows-build-tools</a>在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具
|
||||
|
||||
#### 安装方法2
|
||||
|
||||
手动安装以下工具:
|
||||
|
||||
- 安装Visual Studio相关:[Visual Studio Build 工具](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) 或者 [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community)
|
||||
- 安装 [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7`
|
||||
- 安装Visual Studio相关:<a href="https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools">Visual Studio Build 工具</a> 或者 <a href="https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community">Visual Studio 2017 Community</a>
|
||||
- 安装 <a href="https://www.python.org/downloads/">Python</a> 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7`
|
||||
- 进入`cmd`命令行界面, `npm config set msvs_version 2017`
|
||||
|
||||
如果以上步骤不能成功执行, 可以参考微软的node.js用户手册[Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules)
|
||||
如果以上步骤不能成功执行, 可以参考微软的node.js用户手册<a href="https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules">Microsoft's Node.js Guidelines for Windows</a>
|
||||
|
||||
如果在Windows 10 ARM 上使用ARM64 Node.js, 还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64".
|
||||
|
||||
|
@ -925,8 +888,6 @@ npm install td2.0-connector
|
|||
Node-example.js node.js示例源程序
|
||||
Node-example-raw.js
|
||||
|
||||
|
||||
|
||||
### 安装验证
|
||||
|
||||
在安装好TDengine客户端后,使用nodejsChecker.js程序能够验证当前环境是否支持nodejs方式访问Tdengine。
|
||||
|
@ -948,11 +909,11 @@ node nodejsChecker.js host=localhost
|
|||
### Node.js连接器的使用
|
||||
|
||||
(http://docs.taosdata.com/node)
|
||||
以下是node.js 连接器的一些基本使用方法,详细的使用方法可参考[该文档](http://docs.taosdata.com/node)
|
||||
以下是node.js 连接器的一些基本使用方法,详细的使用方法可参考<a href="http://docs.taosdata.com/node">该文档</a>
|
||||
|
||||
#### 建立连接
|
||||
|
||||
使用node.js连接器时,必须先<em>require</em> ```td2.0-connector```,然后使用 ```taos.connect``` 函数。```taos.connect``` 函数必须提供的参数是```host```,其它参数在没有提供的情况下会使用如下的默认值。最后需要初始化```cursor``` 来和TDengine服务端通信
|
||||
使用node.js连接器时,必须先<em>require</em> `td2.0-connector`,然后使用 `taos.connect` 函数。`taos.connect` 函数必须提供的参数是`host`,其它参数在没有提供的情况下会使用如下的默认值。最后需要初始化`cursor` 来和TDengine服务端通信
|
||||
|
||||
```javascript
|
||||
const taos = require('td2.0-connector');
|
||||
|
@ -988,13 +949,13 @@ TDengine目前还不支持update和delete语句。
|
|||
|
||||
#### 查询
|
||||
|
||||
可通过 ```cursor.query``` 函数来查询数据库。
|
||||
可通过 `cursor.query` 函数来查询数据库。
|
||||
|
||||
```javascript
|
||||
var query = cursor.query('show databases;')
|
||||
```
|
||||
|
||||
查询的结果可以通过 ```query.execute()``` 函数获取并打印出来
|
||||
查询的结果可以通过 `query.execute()` 函数获取并打印出来
|
||||
|
||||
```javascript
|
||||
var promise = query.execute();
|
||||
|
@ -1002,7 +963,7 @@ promise.then(function(result) {
|
|||
result.pretty();
|
||||
});
|
||||
```
|
||||
格式化查询语句还可以使用```query```的```bind```方法。如下面的示例:```query```会自动将提供的数值填入查询语句的```?```里。
|
||||
格式化查询语句还可以使用`query`的`bind`方法。如下面的示例:`query`会自动将提供的数值填入查询语句的`?`里。
|
||||
|
||||
```javascript
|
||||
var query = cursor.query('select * from meterinfo.meters where ts <= ? and areaid = ?;').bind(new Date(), 5);
|
||||
|
@ -1010,8 +971,7 @@ query.execute().then(function(result) {
|
|||
result.pretty();
|
||||
})
|
||||
```
|
||||
如果在```query```语句里提供第二个参数并设为```true```也可以立即获取查询结果。如下:
|
||||
|
||||
如果在`query`语句里提供第二个参数并设为`true`也可以立即获取查询结果。如下:
|
||||
|
||||
```javascript
|
||||
var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true)
|
||||
|
@ -1032,9 +992,7 @@ promise2.then(function(result) {
|
|||
})
|
||||
```
|
||||
|
||||
|
||||
### 示例
|
||||
[这里](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js)提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例
|
||||
|
||||
[这里](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`.
|
||||
<a href="https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js">这里</a>提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例
|
||||
|
||||
<a href="https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js">这里</a>同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`.
|
||||
|
|
|
@ -1,22 +1,23 @@
|
|||
# Java Connector
|
||||
|
||||
**Java连接器支持的系统有:**
|
||||
|
||||
| **CPU类型** | x64(64bit) | | | aarch64 | aarch32 |
|
||||
Java连接器支持的系统有:
|
||||
| **CPU类型** | x64(64bit) | | | ARM64 | ARM32 |
|
||||
| ------------ | ------------ | -------- | -------- | -------- | -------- |
|
||||
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
|
||||
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** |
|
||||
|
||||
Java连接器的使用请参见<a href="https://www.taosdata.com/blog/2020/11/11/1955.html">视频教程</a>。
|
||||
|
||||
TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。
|
||||
|
||||
由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
|
||||
|
||||
* libtaos.so
|
||||
* libtaos.so
|
||||
在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
|
||||
|
||||
|
||||
* taos.dll
|
||||
在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
|
||||
|
||||
|
||||
> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
|
||||
|
||||
TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点:
|
||||
|
@ -26,7 +27,6 @@ TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一
|
|||
* 目前不支持表间的 union 操作。
|
||||
* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet。
|
||||
|
||||
|
||||
## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
|
||||
|
||||
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
|
||||
|
@ -75,7 +75,6 @@ maven 项目中使用如下 pom.xml 配置即可:
|
|||
|
||||
下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。
|
||||
|
||||
|
||||
## 使用说明
|
||||
|
||||
### 获取连接
|
||||
|
@ -217,7 +216,6 @@ while(resultSet.next()){
|
|||
```
|
||||
> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
|
||||
|
||||
|
||||
### 订阅
|
||||
|
||||
#### 创建
|
||||
|
@ -232,7 +230,7 @@ TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from met
|
|||
* sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据
|
||||
* restart:如果订阅已经存在,是重新开始,还是继续之前的订阅
|
||||
|
||||
如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic' 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
|
||||
如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
|
||||
|
||||
#### 消费数据
|
||||
|
||||
|
@ -260,7 +258,6 @@ sub.close(true);
|
|||
|
||||
`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。
|
||||
|
||||
|
||||
### 关闭资源
|
||||
|
||||
```java
|
||||
|
@ -268,7 +265,7 @@ resultSet.close();
|
|||
stmt.close();
|
||||
conn.close();
|
||||
```
|
||||
> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
|
||||
> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
|
||||
|
||||
## 与连接池使用
|
||||
|
||||
|
@ -294,18 +291,18 @@ conn.close();
|
|||
config.setMinimumIdle(3); //minimum number of idle connection
|
||||
config.setMaximumPoolSize(10); //maximum number of connection in the pool
|
||||
config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
|
||||
config.setIdleTimeout(60000); // max idle time for recycle idle connection
|
||||
config.setIdleTimeout(60000); // max idle time for recycle idle connection
|
||||
config.setConnectionTestQuery("describe log.dn"); //validation query
|
||||
config.setValidationTimeout(3000); //validation query timeout
|
||||
|
||||
HikariDataSource ds = new HikariDataSource(config); //create datasource
|
||||
|
||||
|
||||
Connection connection = ds.getConnection(); // get connection
|
||||
Statement statement = connection.createStatement(); // get statement
|
||||
|
||||
//query or insert
|
||||
|
||||
//query or insert
|
||||
// ...
|
||||
|
||||
|
||||
connection.close(); // put back to conneciton pool
|
||||
}
|
||||
```
|
||||
|
@ -347,7 +344,7 @@ public static void main(String[] args) throws Exception {
|
|||
properties.put("testWhileIdle","true"); // test connection while idle
|
||||
properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
|
||||
properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
|
||||
|
||||
|
||||
//create druid datasource
|
||||
DataSource ds = DruidDataSourceFactory.createDataSource(properties);
|
||||
Connection connection = ds.getConnection(); // get connection
|
||||
|
@ -381,15 +378,15 @@ Query OK, 1 row(s) in set (0.000141s)
|
|||
## 常见问题
|
||||
|
||||
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
||||
|
||||
|
||||
**原因**:程序没有找到依赖的本地函数库 taos。
|
||||
|
||||
|
||||
**解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
|
||||
|
||||
|
||||
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
|
||||
|
||||
|
||||
**原因**:目前 TDengine 只支持 64 位 JDK。
|
||||
|
||||
|
||||
**解决方法**:重新安装 64 位 JDK。
|
||||
|
||||
* 其它问题请参考 [Issues][7]
|
||||
|
@ -404,7 +401,7 @@ Query OK, 1 row(s) in set (0.000141s)
|
|||
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
|
||||
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
|
||||
[10]: https://maven.aliyun.com/mvn/search
|
||||
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
|
||||
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
|
||||
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
|
||||
[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
|
||||
[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
|
||||
|
|
|
@ -27,43 +27,40 @@
|
|||
* 云服务器:检查云服务器的安全组是否打开TCP/UDP 端口6030-6042的访问权限
|
||||
* 本地虚拟机:检查网络能否ping通,尽量避免使用`localhost` 作为hostname
|
||||
* 公司服务器:如果为NAT网络环境,请务必检查服务器能否将消息返回值客户端
|
||||
|
||||
|
||||
2. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用
|
||||
|
||||
3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
|
||||
|
||||
4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)),FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
|
||||
4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)),FQDN配置参考:<a href="https://www.taosdata.com/blog/2020/09/11/1824.html">一篇文章说清楚TDengine的FQDN</a>。
|
||||
|
||||
5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件
|
||||
|
||||
6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-port),确认TCP/UDP 端口6030-6042 是打开的
|
||||
|
||||
7. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里
|
||||
7. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里
|
||||
|
||||
8. 对于windows上的JDBC, ODBC, Python, Go等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
|
||||
|
||||
9. 如果仍不能排除连接故障
|
||||
|
||||
|
||||
* Linux 系统请使用命令行工具nc来分别判断指定端口的TCP和UDP连接是否通畅
|
||||
检查UDP端口连接是否工作:`nc -vuz {hostIP} {port} `
|
||||
检查服务器侧TCP端口连接是否工作:`nc -l {port}`
|
||||
检查客户端侧TCP端口连接是否工作:`nc {hostIP} {port}`
|
||||
|
||||
|
||||
* Windows 系统请使用 PowerShell 命令 Net-TestConnection -ComputerName {fqdn} -Port {port} 检测服务段端口是否访问
|
||||
|
||||
10. 也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)。
|
||||
|
||||
|
||||
10. 也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):<a href="https://www.taosdata.com/blog/2020/09/08/1816.html">TDengine 内嵌网络检测工具使用指南</a>。
|
||||
|
||||
## 6. 遇到错误“Unexpected generic error in RPC”或者"TDengine Error: Unable to resolve FQDN", 我怎么办?
|
||||
产生这个错误,是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用,请做如下检查:
|
||||
|
||||
1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
|
||||
1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:<a href="https://www.taosdata.com/blog/2020/09/11/1824.html">一篇文章说清楚TDengine的FQDN</a>。
|
||||
2. 如果网络配置有DNS server, 请检查是否正常工作
|
||||
3. 如果网络没有配置DNS server, 请检查客户端所在机器的hosts文件,查看该FQDN是否配置,并是否有正确的IP地址。
|
||||
4. 如果网络配置OK,从客户端所在机器,你需要能Ping该连接的FQDN,否则客户端是无法连接服务器的
|
||||
|
||||
|
||||
## 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误
|
||||
|
||||
如果你确认语法正确,2.0之前版本,请检查SQL语句长度是否超过64K。如果超过,也会返回这个错误。
|
||||
|
@ -86,7 +83,7 @@ TDengine还没有一组专用的validation queries。然而建议你使用系统
|
|||
|
||||
## 11. 最有效的写入数据的方法是什么?windows系统下插入的nchar类数据中的汉字被解析成了乱码如何解决?
|
||||
|
||||
windows下插入nchar类的数据中如果有中文,请先确认系统的地区设置成了中国(在Control Panel里可以设置),这时cmd中的`taos`客户端应该已经可以正常工作了;如果是在IDE里开发Java应用,比如Eclipse, Intellij,请确认IDE里的文件编码为GBK(这是Java默认的编码类型),然后在生成Connection时,初始化客户端的配置,具体语句如下:
|
||||
Windows下插入nchar类的数据中如果有中文,请先确认系统的地区设置成了中国(在Control Panel里可以设置),这时cmd中的`taos`客户端应该已经可以正常工作了;如果是在IDE里开发Java应用,比如Eclipse, Intellij,请确认IDE里的文件编码为GBK(这是Java默认的编码类型),然后在生成Connection时,初始化客户端的配置,具体语句如下:
|
||||
```JAVA
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
Properties properties = new Properties();
|
||||
|
@ -94,7 +91,7 @@ properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
|
|||
Connection = DriverManager.getConnection(url, properties);
|
||||
```
|
||||
## 12.TDengine GO windows驱动的如何编译?
|
||||
请看为此问题撰写的<a href='blog/2020/01/06/tdengine-go-windows驱动的编译/'>技术博客
|
||||
请看为此问题撰写的<a href='blog/2020/01/06/tdengine-go-windows驱动的编译/'>技术博客</a>
|
||||
|
||||
## 13.JDBC报错: the excuted SQL is not a DML or a DDL?
|
||||
请更新至最新的JDBC驱动
|
||||
|
@ -105,18 +102,14 @@ Connection = DriverManager.getConnection(url, properties);
|
|||
<version>2.0.4</version>
|
||||
</dependency>
|
||||
```
|
||||
## 14. taos connect failed, reason: invalid timestamp
|
||||
## 14. taos connect failed, reason: invalid timestamp
|
||||
|
||||
常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。
|
||||
|
||||
|
||||
|
||||
## 15. 表名显示不全
|
||||
|
||||
由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。
|
||||
|
||||
|
||||
|
||||
## 16. 如何进行数据迁移?
|
||||
|
||||
TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A移动机器B时,注意如下两件事:
|
||||
|
@ -125,13 +118,11 @@ TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A
|
|||
- 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下,修复dnodeEps.json的dnodeId对应的FQDN,重启。确保机器内所有机器的此文件是完全相同的。
|
||||
- 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。
|
||||
|
||||
|
||||
|
||||
## 17. How do I report a problem?

If the information in this FAQ does not help and you need support from the TDengine technical team, please package the contents of the following two directories:

1. /var/log/taos
2. /etc/taos

Attach a description of the problem, the operations that triggered it, the symptoms, and roughly when it occurred, and file an issue on <a href='https://github.com/taosdata/TDengine'>GitHub</a>.
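For example, to package both directories into a single archive before attaching it to the issue:

```sh
tar -zcvf tdengine-diag-$(date +%Y%m%d).tar.gz /var/log/taos /etc/taos
```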
|
|
|
@ -28,10 +28,10 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
|
|||
- 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还老的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days配置为2,那么无法写入比当前时间还晚2天的数据。
|
||||
|
||||
## Prometheus直接写入
|
||||
<a href="https://www.prometheus.io/">Prometheus</a>, a graduated project of the Cloud Native Computing Foundation, is widely used for performance monitoring, including Kubernetes monitoring. TDengine provides a small tool, <a href="https://github.com/taosdata/Bailongma">Bailongma</a>, that writes data collected by Prometheus directly into TDengine with only a simple Prometheus configuration and no code, automatically creating the database and tables in TDengine according to its rules. The blog post <a href="https://www.taosdata.com/blog/2020/02/03/1189.html">用Docker容器快速搭建一个Devops监控Demo</a> demonstrates using bailongma to write Prometheus and Telegraf data into TDengine and can be used as a reference.
|
||||
### 从源代码编译blm_prometheus
|
||||
Download the source code of <a href="https://github.com/taosdata/Bailongma">Bailongma</a> from GitHub and build it into an executable with the Go compiler. Before building, prepare the following:

- A server running Linux
- Golang 1.10 or later
- A matching TDengine installation. Bailongma uses the TDengine client dynamic library, so TDengine of the same version as the server must be installed on the Linux server where bailongma runs (which may be the same machine as TDengine or a different one); for example, if the server runs TDengine 2.0.0, install TDengine 2.0.0 there as well.
|
@ -45,10 +45,10 @@ go build
|
|||
一切正常的情况下,就会在对应的目录下生成一个blm_prometheus的可执行程序。
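A minimal build sketch; the blm_prometheus directory name is inferred from the executable name above and may differ from the actual repository layout.

```sh
git clone https://github.com/taosdata/Bailongma.git
cd Bailongma/blm_prometheus
go build
```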
|
||||
|
||||
### 安装Prometheus
|
||||
通过Prometheus的官网下载安装。[下载地址](https://prometheus.io/download/)
|
||||
通过Prometheus的官网下载安装。<a href="https://prometheus.io/download/">下载地址</a>
|
||||
|
||||
### 配置Prometheus
|
||||
Following the Prometheus <a href="https://prometheus.io/docs/prometheus/latest/configuration/configuration/">configuration documentation</a>, add the following to the <remote_write> section of the Prometheus configuration file (a YAML sketch follows the item below):

- url: the URL served by the bailongma API; see the blm_prometheus startup example section below
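A YAML sketch of that remote_write entry; the host, port, and path are placeholders for whatever URL your blm_prometheus instance actually serves.

```yaml
remote_write:
  - url: "http://10.1.2.3:10203/receive"
```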
|
||||
|
||||
|
@ -60,7 +60,7 @@ blm_prometheus程序有以下选项,在启动blm_prometheus程序时可以通
|
|||
--tdengine-name
If TDengine is installed on a server that has a domain name, TDengine can also be reached through that domain name. In a Kubernetes environment, this can be set to the service name under which TDengine runs.

--batch-size
blm_prometheus assembles the data received from Prometheus into TDengine write requests; this parameter controls how many records are carried in a single write request sent to TDengine.

--dbname
|
||||
|
@ -113,10 +113,10 @@ select * from apiserver_request_latencies_bucket;
|
|||
```
|
||||
|
||||
## Telegraf直接写入
|
||||
<a href="https://www.influxdata.com/time-series-platform/telegraf/">Telegraf</a> is a popular open-source tool for collecting IT operations data. TDengine provides a small tool, <a href="https://github.com/taosdata/Bailongma">Bailongma</a>, that writes data collected by Telegraf directly into TDengine with only a simple Telegraf configuration and no code, automatically creating the database and tables in TDengine according to its rules. The blog post <a href="https://www.taosdata.com/blog/2020/02/03/1189.html">用Docker容器快速搭建一个Devops监控Demo</a> demonstrates using bailongma to write Prometheus and Telegraf data into TDengine and can be used as a reference.
|
||||
### 从源代码编译blm_telegraf
|
||||
Download the source code of <a href="https://github.com/taosdata/Bailongma">Bailongma</a> from GitHub and build it into an executable with the Go compiler. Before building, prepare the following:

- A server running Linux
- Golang 1.10 or later
|
||||
|
@ -137,7 +137,7 @@ go build
|
|||
### Configuring Telegraf

Modify the TDengine-related settings in the Telegraf configuration file /etc/telegraf/telegraf.conf.

In the output plugins section, add an [[outputs.http]] block:

- url: the URL served by the bailongma API; see the startup example section below
- data_format: "json"
|
@ -148,16 +148,16 @@ go build
|
|||
- hostname: a machine name that distinguishes different collection devices; it must be unique.
- metric_batch_size: 100, the maximum number of records Telegraf writes per batch; increasing it reduces how often Telegraf sends requests. A TOML sketch covering these settings follows below.
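A TOML sketch of the telegraf.conf fragments described above. The URL is a placeholder for the address served by your blm_telegraf instance, and in standard Telegraf configuration the hostname and metric_batch_size options belong to the [agent] section.

```toml
[agent]
  hostname = "device-001"        # unique name for this collection device
  metric_batch_size = 100        # max records Telegraf writes per batch

[[outputs.http]]
  url = "http://10.1.2.3:10202/telegraf"
  data_format = "json"
```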
For how to collect data with Telegraf and for more information on using it, see the official Telegraf <a href="https://docs.influxdata.com/telegraf/v1.11/">documentation</a>.
|
||||
|
||||
### Starting blm_telegraf

blm_telegraf accepts the following options, which can be set on the command line when it is started:

```sh
--host
The IP address of the TDengine server; empty by default.

--batch-size
blm_telegraf assembles the data received from Telegraf into TDengine write requests; this parameter controls how many records are carried in a single write request sent to TDengine.

--dbname
|
||||
|
@ -218,15 +218,12 @@ use telegraf;
|
|||
select * from cpu;
|
||||
```
|
||||
|
||||
|
||||
|
||||
MQTT is a popular IoT data transport protocol, and TDengine can easily take the data received by an MQTT broker and write it into TDengine.
|
||||
|
||||
## EMQ Broker 直接写入
|
||||
|
||||
<a href="https://github.com/emqx/emqx">EMQ</a> is an open-source MQTT broker. Without writing any code, you can write MQTT data directly into TDengine by setting up simple "rules" in the EMQ Dashboard. EMQ X supports saving data to TDengine by sending it to a web service, and its enterprise edition also provides a native TDengine driver for direct writes. See the <a href="https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine">EMQ official documentation</a> for details.
|
||||
|
||||
## HiveMQ Broker 直接写入
|
||||
|
||||
<a href="https://www.hivemq.com/">HiveMQ</a> is an MQTT broker with free personal and enterprise editions, targeted at enterprise and emerging machine-to-machine (M2M) communication and internal transport, with scalability, manageability, and security features. HiveMQ provides an open-source plugin development kit, and data can be saved to TDengine through the HiveMQ extension - TDengine plugin; see the <a href="https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md">HiveMQ extension - TDengine documentation</a> for details.
|
||||
|
|
|
@ -30,7 +30,7 @@ TDengine里存在vnode, mnode, vnode用来存储时序数据,mnode用来存储
|
|||
|
||||
- master: 具有最新的数据,容许客户端往里写入数据,一个虚拟节点组,至多一个master.
|
||||
- slave:与master是同步的,但不容许客户端往里写入数据,根据配置,可以容许客户端对其进行查询。
|
||||
- unsynced: 节点处于非同步状态,比如虚拟节点刚启动、或与其他虚拟节点的连接出现故障等。处于该状态时,该虚拟节点既不能提供写入,也不能提供查询服务
|
||||
- unsynced: 节点处于非同步状态,比如虚拟节点刚启动、或与其他虚拟节点的连接出现故障等。处于该状态时,该虚拟节点既不能提供写入,也不能提供查询服务。
|
||||
- offline: 由于宕机或网络原因,无法访问到某虚拟节点时,其他虚拟节点将该虚拟节点标为离线。但请注意,该虚拟节点本身的状态可能是unsynced或其他,但不会是离线。
|
||||
|
||||
**Quorum:**
|
||||
|
@ -83,10 +83,10 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
|
|||
如果一个虚拟节点(vnode A)检测到与同一虚拟节点组内另外一虚拟节点(vnode B)的连接中断,vnode A将立即把vnode B的role设置为offline。无论是接收到另外一虚拟节点发来的status消息,还是检测与另外一虚拟节点的连接中断,该虚拟节点都将进入状态处理流程。状态处理流程的规则如下:
|
||||
|
||||
1. 如果检测到在线的节点数没有超过一半,则将自己的状态设置为unsynced.
|
||||
2. 如果在线的虚拟节点数超过一半,会检查master节点是否存在,如果存在,则会决定是否将自己状态改为slave或启动数据恢复流程
|
||||
2. 如果在线的虚拟节点数超过一半,会检查master节点是否存在,如果存在,则会决定是否将自己状态改为slave或启动数据恢复流程。
|
||||
3. 如果master不存在,则会检查自己保存的各虚拟节点的状态信息与从另一节点接收到的是否一致,如果一致,说明节点组里状态已经稳定一致,则会触发选举流程。如果不一致,说明状态还没趋于一致,即使master不存在,也不进行选主。由于要求状态信息一致才进行选举,每个虚拟节点根据同样的信息,会选出同一个虚拟节点做master,无需投票表决。
|
||||
4. 自己的状态是根据规则自己决定并修改的,并不需要其他节点同意,包括成为master。一个节点无权修改其他节点的状态。
|
||||
5. 如果一个虚拟节点检测到自己或其他虚拟节点的role发生改变,该节点会广播它自己保存的各个虚拟节点的状态信息(role和version).
|
||||
5. 如果一个虚拟节点检测到自己或其他虚拟节点的role发生改变,该节点会广播它自己保存的各个虚拟节点的状态信息(role和version)。
|
||||
|
||||
具体的流程图如下:
|
||||
|
||||
|
@ -124,7 +124,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
|
|||
|
||||
如果一虚拟节点(vnode B) 处于unsynced状态,master存在(vnode A),而且其版本号比master的低,它将立即启动数据恢复流程。在理解恢复流程时,需要澄清几个关于文件的概念和处理规则。
|
||||
|
||||
1. 每个文件(无论是archived data的file还是wal)都有一个index, 这需要应用来维护(vnode里,该index就是fileId*3 + 0/1/2, 对应data, head与last三个文件)。如果index为0,表示系统里最老的数据文件。对于mnode里的文件,数量是固定的,对应于acct, user, db, table等文件。
|
||||
2. 任何一个数据文件(file)有名字、大小,还有一个magic number。只有文件名、大小与magic number一致时,两个文件才判断是一样的,无需同步。Magic number可以是checksum, 也可以是简单的文件大小。怎么计算magic,换句话说,如何检测数据文件是否有效,完全由应用决定。
|
||||
3. 文件名的处理有点复杂,因为每台服务器的路径可能不一致。比如node A的TDengine的数据文件存放在 /etc/taos目录下,而node B的数据存放在 /home/jhtao目录下。因此同步模块需要应用在启动一个同步实例时提供一个path,这样两台服务器的绝对路径可以不一样,但仍然可以做对比,做同步。
|
||||
4. 当sync模块调用回调函数getFileInfo获得数据文件信息时,有如下的规则
|
||||
|
@ -212,10 +212,10 @@ Arbitrator的程序tarbitrator.c在复制模块的同一目录, 编译整个系
|
|||
|
||||
相同之处:
|
||||
|
||||
- 三大流程一致:Raft里有Leader election, replication, safety,完全对应TDengine的选举、数据转发、数据恢复三个流程。
- 节点状态定义一致:Raft里每个节点有Leader, Follower, Candidate三个状态,TDengine里是Master, Slave, Unsynced, Offline。多了一个offline, 但本质上是一样的,因为offline是外界看一个节点的状态,但该节点本身是处于master, slave 或unsynced的。
|
||||
- 数据转发流程完全一样,Master(leader)需要等待回复确认。
|
||||
- 数据恢复流程几乎一样,Raft没有涉及历史数据同步问题,只考虑了WAL数据同步
|
||||
- 数据恢复流程几乎一样,Raft没有涉及历史数据同步问题,只考虑了WAL数据同步。
|
||||
|
||||
不同之处:
|
||||
|
||||
|
@ -226,7 +226,7 @@ Arbitrator的程序tarbitrator.c在复制模块的同一目录, 编译整个系
|
|||
|
||||
## Meta Data的数据复制
|
||||
|
||||
TDengine里存在时序数据,也存在Meta Data。Meta Data对数据的可靠性要求更高,那么TDengine设计能否满足要求呢?下面做个仔细分析
|
||||
TDengine里存在时序数据,也存在Meta Data。Meta Data对数据的可靠性要求更高,那么TDengine设计能否满足要求呢?下面做个仔细分析。
|
||||
|
||||
TDengine里Meta Data包括以下:
|
||||
|
||||
|
|
|
@ -39,10 +39,10 @@
|
|||
# number of management nodes in the system
|
||||
# numOfMnodes 3
|
||||
|
||||
# enable/disable backuping vnode directory when removing dnode
|
||||
# enable/disable backuping vnode directory when removing vnode
|
||||
# vnodeBak 1
|
||||
|
||||
# if report installation / use information
|
||||
# enable/disable installation / usage report
|
||||
# telemetryReporting 1
|
||||
|
||||
# enable/disable load balancing
|
||||
|
@ -81,7 +81,7 @@
|
|||
# minimum time window, milli-second
|
||||
# minIntervalTime 10
|
||||
|
||||
# maximum delay before launching a stream compution, milli-second
|
||||
# maximum delay before launching a stream computation, milli-second
|
||||
# maxStreamCompDelay 20000
|
||||
|
||||
# maximum delay before launching a stream computation for the first time, milli-second
|
||||
|
@ -156,7 +156,7 @@
|
|||
# max number of connections allowed in dnode
|
||||
# maxShellConns 5000
|
||||
|
||||
# max numerber of connections allowed in client
|
||||
# max number of connections allowed in client
|
||||
# maxConnections 5000
|
||||
|
||||
# stop writing logs when the disk size of the log folder is less than this value
|
||||
|
@ -187,7 +187,7 @@
|
|||
# restfulRowLimit 10240
|
||||
|
||||
# The following parameter is used to limit the maximum number of lines in log files.
|
||||
# max number of rows per log filters
|
||||
# max number of lines per log filters
|
||||
# numOfLogLines 10000000
|
||||
|
||||
# enable/disable async log
|
||||
|
@ -199,7 +199,9 @@
|
|||
|
||||
# The following parameters are used for debug purpose only.
|
||||
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
|
||||
# 131: output warning and error, 135: output debug, warning and error, 143 : output trace, debug, warning and error to log.
|
||||
# 131: output warning and error
|
||||
# 135: output debug, warning and error
|
||||
# 143: output trace, debug, warning and error to log
|
||||
# 199: output debug, warning and error to both screen and file
|
||||
# 207: output trace, debug, warning and error to both screen and file
|
||||
|
||||
|
@ -231,10 +233,10 @@
|
|||
# cDebugFlag 131
|
||||
|
||||
# debug flag for JNI
|
||||
# jniDebugflag 131
|
||||
# jniDebugFlag 131
|
||||
|
||||
# debug flag for storage
|
||||
# uDebugflag 131
|
||||
# uDebugFlag 131
|
||||
|
||||
# debug flag for http server
|
||||
# httpDebugFlag 131
|
||||
|
@ -243,12 +245,12 @@
|
|||
# monDebugFlag 131
|
||||
|
||||
# debug flag for query
|
||||
# qDebugflag 131
|
||||
# qDebugFlag 131
|
||||
|
||||
# debug flag for vnode
|
||||
# vDebugflag 131
|
||||
# vDebugFlag 131
|
||||
|
||||
# debug flag for http server
|
||||
# debug flag for TSDB
|
||||
# tsdbDebugFlag 131
|
||||
|
||||
# debug flag for continue query
|
||||
|
|
|
@ -265,8 +265,14 @@ function install_config() {
|
|||
[ -f ${cfg_dir}/taos.cfg ] && ${csudo} cp ${cfg_dir}/taos.cfg ${cfg_install_dir}
|
||||
${csudo} chmod 644 ${cfg_install_dir}/*
|
||||
fi
|
||||
|
||||
# Save standard input to 6 and open / dev / TTY on standard input
|
||||
exec 6<&0 0</dev/tty
|
||||
|
||||
local_fqdn_check
|
||||
|
||||
# restore the backup standard input, and turn off 6
|
||||
exec 0<&6 6<&-
|
||||
|
||||
${csudo} mv ${cfg_dir}/taos.cfg ${cfg_dir}/taos.cfg.org
|
||||
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${cfg_dir}
|
||||
|
@ -422,7 +428,7 @@ function install_service() {
|
|||
}
|
||||
|
||||
function install_TDengine() {
|
||||
echo -e "${GREEN}Start to install TDEngine...${NC}"
|
||||
echo -e "${GREEN}Start to install TDengine...${NC}"
|
||||
|
||||
#install log and data dir , then ln to /usr/local/taos
|
||||
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
|
||||
|
|
|
@ -119,4 +119,4 @@ if ((${service_mod}==2)); then
|
|||
kill_taosd
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}TDEngine is removed successfully!${NC}"
|
||||
echo -e "${GREEN}TDengine is removed successfully!${NC}"
|
||||
|
|
|
@ -2,19 +2,39 @@
|
|||
#
|
||||
# This file is used to set config for core when taosd crash
|
||||
|
||||
set -e
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
# set -e
|
||||
# set -x
|
||||
corePath=$1
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo"
|
||||
fi
|
||||
|
||||
#ulimit -c unlimited
|
||||
if [[ ! -n ${corePath} ]]; then
|
||||
echo -e -n "${GREEN}Please enter a file directory to save the coredump file${NC}:"
|
||||
read corePath
|
||||
while true; do
|
||||
if [[ ! -z "$corePath" ]]; then
|
||||
break
|
||||
else
|
||||
read -p "Please enter a file directory to save the coredump file:" corePath
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
ulimit -c unlimited
|
||||
${csudo} sed -i '/ulimit -c unlimited/d' /etc/profile ||:
|
||||
${csudo} sed -i '$a\ulimit -c unlimited' /etc/profile ||:
|
||||
source /etc/profile
|
||||
|
||||
${csudo} mkdir -p /coredump ||:
|
||||
${csudo} sysctl -w kernel.core_pattern='/coredump/core-%e-%p' ||:
|
||||
${csudo} echo '/coredump/core-%e-%p' | ${csudo} tee /proc/sys/kernel/core_pattern ||:
|
||||
${csudo} mkdir -p ${corePath} ||:
|
||||
${csudo} sysctl -w kernel.core_pattern=${corePath}/core-%e-%p ||:
|
||||
${csudo} echo "${corePath}/core-%e-%p" | ${csudo} tee /proc/sys/kernel/core_pattern ||:
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
name: tdengine
|
||||
base: core18
|
||||
version: '2.0.11.0'
|
||||
version: '2.0.12.0'
|
||||
icon: snap/gui/t-dengine.svg
|
||||
summary: an open-source big data platform designed and optimized for IoT.
|
||||
description: |
|
||||
|
@ -72,7 +72,7 @@ parts:
|
|||
- usr/bin/taosd
|
||||
- usr/bin/taos
|
||||
- usr/bin/taosdemo
|
||||
- usr/lib/libtaos.so.2.0.11.0
|
||||
- usr/lib/libtaos.so.2.0.12.0
|
||||
- usr/lib/libtaos.so.1
|
||||
- usr/lib/libtaos.so
|
||||
|
||||
|
|
|
@ -44,7 +44,7 @@ static void bnUnLock() {
|
|||
|
||||
static bool bnCheckFree(SDnodeObj *pDnode) {
|
||||
if (pDnode->status == TAOS_DN_STATUS_DROPPING || pDnode->status == TAOS_DN_STATUS_OFFLINE) {
|
||||
mError("dnode:%d, status:%s not available", pDnode->dnodeId, mnodeGetDnodeStatusStr(pDnode->status));
|
||||
mError("dnode:%d, status:%s not available", pDnode->dnodeId, dnodeStatus[pDnode->status]);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -92,13 +92,12 @@ static void bnDiscardVnode(SVgObj *pVgroup, SVnodeGid *pVnodeGid) {
|
|||
}
|
||||
|
||||
static void bnSwapVnodeGid(SVnodeGid *pVnodeGid1, SVnodeGid *pVnodeGid2) {
|
||||
// SVnodeGid tmp = *pVnodeGid1;
|
||||
// *pVnodeGid1 = *pVnodeGid2;
|
||||
// *pVnodeGid2 = tmp;
|
||||
SVnodeGid tmp = *pVnodeGid1;
|
||||
*pVnodeGid1 = *pVnodeGid2;
|
||||
*pVnodeGid2 = tmp;
|
||||
}
|
||||
|
||||
int32_t bnAllocVnodes(SVgObj *pVgroup) {
|
||||
static int32_t randIndex = 0;
|
||||
int32_t dnode = 0;
|
||||
int32_t vnodes = 0;
|
||||
|
||||
|
@ -120,8 +119,7 @@ int32_t bnAllocVnodes(SVgObj *pVgroup) {
|
|||
break;
|
||||
} else {
|
||||
mDebug("dnode:%d, is not selected, status:%s vnodes:%d disk:%fGB role:%d", pDnode->dnodeId,
|
||||
mnodeGetDnodeStatusStr(pDnode->status), pDnode->openVnodes, pDnode->diskAvailable,
|
||||
pDnode->alternativeRole);
|
||||
dnodeStatus[pDnode->status], pDnode->openVnodes, pDnode->diskAvailable, pDnode->alternativeRole);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -137,7 +135,7 @@ int32_t bnAllocVnodes(SVgObj *pVgroup) {
|
|||
while (1) {
|
||||
pIter = mnodeGetNextDnode(pIter, &pDnode);
|
||||
if (pDnode == NULL) break;
|
||||
mDebug("dnode:%d, status:%s vnodes:%d disk:%fGB role:%d", pDnode->dnodeId, mnodeGetDnodeStatusStr(pDnode->status),
|
||||
mDebug("dnode:%d, status:%s vnodes:%d disk:%fGB role:%d", pDnode->dnodeId, dnodeStatus[pDnode->status],
|
||||
pDnode->openVnodes, pDnode->diskAvailable, pDnode->alternativeRole);
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
}
|
||||
|
@ -149,36 +147,6 @@ int32_t bnAllocVnodes(SVgObj *pVgroup) {
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* make the choice more random.
|
||||
* replica 1: no choice
|
||||
* replica 2: there are 2 combinations
|
||||
* replica 3 or larger: there are 6 combinations
|
||||
*/
|
||||
if (pVgroup->numOfVnodes == 1) {
|
||||
} else if (pVgroup->numOfVnodes == 2) {
|
||||
if (randIndex++ % 2 == 0) {
|
||||
bnSwapVnodeGid(pVgroup->vnodeGid, pVgroup->vnodeGid + 1);
|
||||
}
|
||||
} else {
|
||||
int32_t randVal = randIndex++ % 6;
|
||||
if (randVal == 1) { // 1, 0, 2
|
||||
bnSwapVnodeGid(pVgroup->vnodeGid + 0, pVgroup->vnodeGid + 1);
|
||||
} else if (randVal == 2) { // 1, 2, 0
|
||||
bnSwapVnodeGid(pVgroup->vnodeGid + 0, pVgroup->vnodeGid + 1);
|
||||
bnSwapVnodeGid(pVgroup->vnodeGid + 1, pVgroup->vnodeGid + 2);
|
||||
} else if (randVal == 3) { // 2, 1, 0
|
||||
bnSwapVnodeGid(pVgroup->vnodeGid + 0, pVgroup->vnodeGid + 2);
|
||||
} else if (randVal == 4) { // 2, 0, 1
|
||||
bnSwapVnodeGid(pVgroup->vnodeGid + 0, pVgroup->vnodeGid + 2);
|
||||
bnSwapVnodeGid(pVgroup->vnodeGid + 1, pVgroup->vnodeGid + 2);
|
||||
}
|
||||
if (randVal == 5) { // 0, 2, 1
|
||||
bnSwapVnodeGid(pVgroup->vnodeGid + 1, pVgroup->vnodeGid + 2);
|
||||
} else {
|
||||
} // 0, 1, 2
|
||||
}
|
||||
|
||||
bnReleaseDnodes();
|
||||
bnUnLock();
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -214,44 +182,8 @@ static bool bnCheckVgroupReady(SVgObj *pVgroup, SVnodeGid *pRmVnode) {
|
|||
static int32_t bnRemoveVnode(SVgObj *pVgroup) {
|
||||
if (pVgroup->numOfVnodes <= 1) return -1;
|
||||
|
||||
SVnodeGid *pRmVnode = NULL;
|
||||
SVnodeGid *pSelVnode = NULL;
|
||||
int32_t maxScore = 0;
|
||||
|
||||
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
|
||||
SVnodeGid *pVnode = &(pVgroup->vnodeGid[i]);
|
||||
SDnodeObj *pDnode = mnodeGetDnode(pVnode->dnodeId);
|
||||
|
||||
if (pDnode == NULL) {
|
||||
mError("vgId:%d, dnode:%d not exist, remove it", pVgroup->vgId, pVnode->dnodeId);
|
||||
pRmVnode = pVnode;
|
||||
break;
|
||||
}
|
||||
|
||||
if (pDnode->status == TAOS_DN_STATUS_DROPPING) {
|
||||
mDebug("vgId:%d, dnode:%d in dropping state", pVgroup->vgId, pVnode->dnodeId);
|
||||
pRmVnode = pVnode;
|
||||
} else if (pVnode->dnodeId == pVgroup->lbDnodeId) {
|
||||
mDebug("vgId:%d, dnode:%d in updating state", pVgroup->vgId, pVnode->dnodeId);
|
||||
pRmVnode = pVnode;
|
||||
} else {
|
||||
if (pSelVnode == NULL) {
|
||||
pSelVnode = pVnode;
|
||||
maxScore = pDnode->score;
|
||||
} else {
|
||||
if (maxScore < pDnode->score) {
|
||||
pSelVnode = pVnode;
|
||||
maxScore = pDnode->score;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
}
|
||||
|
||||
if (pRmVnode != NULL) {
|
||||
pSelVnode = pRmVnode;
|
||||
}
|
||||
SVnodeGid *pSelVnode = &pVgroup->vnodeGid[pVgroup->numOfVnodes - 1];
|
||||
mDebug("vgId:%d, vnode in dnode:%d will be dropped", pVgroup->vgId, pSelVnode->dnodeId);
|
||||
|
||||
if (!bnCheckVgroupReady(pVgroup, pSelVnode)) {
|
||||
mDebug("vgId:%d, is not ready", pVgroup->vgId);
|
||||
|
@ -275,36 +207,42 @@ static bool bnCheckDnodeInVgroup(SDnodeObj *pDnode, SVgObj *pVgroup) {
|
|||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* desc: add vnode to vgroup, find a new one if dest dnode is null
|
||||
**/
|
||||
static SDnodeObj *bnGetAvailDnode(SVgObj *pVgroup) {
|
||||
for (int32_t i = 0; i < tsBnDnodes.size; ++i) {
|
||||
SDnodeObj *pDnode = tsBnDnodes.list[i];
|
||||
if (bnCheckDnodeInVgroup(pDnode, pVgroup)) continue;
|
||||
if (!bnCheckFree(pDnode)) continue;
|
||||
|
||||
mDebug("vgId:%d, add vnode to dnode:%d", pVgroup->vgId, pDnode->dnodeId);
|
||||
return pDnode;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int32_t bnAddVnode(SVgObj *pVgroup, SDnodeObj *pSrcDnode, SDnodeObj *pDestDnode) {
|
||||
if (pDestDnode == NULL) {
|
||||
for (int32_t i = 0; i < tsBnDnodes.size; ++i) {
|
||||
SDnodeObj *pDnode = tsBnDnodes.list[i];
|
||||
if (pDnode == pSrcDnode) continue;
|
||||
if (bnCheckDnodeInVgroup(pDnode, pVgroup)) continue;
|
||||
if (!bnCheckFree(pDnode)) continue;
|
||||
|
||||
pDestDnode = pDnode;
|
||||
mDebug("vgId:%d, add vnode to dnode:%d", pVgroup->vgId, pDnode->dnodeId);
|
||||
if (pDestDnode == NULL || pSrcDnode == pDestDnode) {
|
||||
return TSDB_CODE_MND_DNODE_NOT_EXIST;
|
||||
}
|
||||
|
||||
SVnodeGid vnodeGids[TSDB_MAX_REPLICA];
|
||||
memcpy(&vnodeGids, &pVgroup->vnodeGid, sizeof(SVnodeGid) * TSDB_MAX_REPLICA);
|
||||
|
||||
int32_t numOfVnodes = pVgroup->numOfVnodes;
|
||||
vnodeGids[numOfVnodes].dnodeId = pDestDnode->dnodeId;
|
||||
vnodeGids[numOfVnodes].pDnode = pDestDnode;
|
||||
numOfVnodes++;
|
||||
|
||||
for (int32_t v = 0; v < numOfVnodes; ++v) {
|
||||
if (pSrcDnode != NULL && pSrcDnode->dnodeId == vnodeGids[v].dnodeId) {
|
||||
bnSwapVnodeGid(&vnodeGids[v], &vnodeGids[numOfVnodes - 1]);
|
||||
pVgroup->lbDnodeId = pSrcDnode->dnodeId;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pDestDnode == NULL) {
|
||||
return TSDB_CODE_MND_DNODE_NOT_EXIST;
|
||||
}
|
||||
|
||||
SVnodeGid *pVnodeGid = pVgroup->vnodeGid + pVgroup->numOfVnodes;
|
||||
pVnodeGid->dnodeId = pDestDnode->dnodeId;
|
||||
pVnodeGid->pDnode = pDestDnode;
|
||||
pVgroup->numOfVnodes++;
|
||||
|
||||
if (pSrcDnode != NULL) {
|
||||
pVgroup->lbDnodeId = pSrcDnode->dnodeId;
|
||||
}
|
||||
|
||||
memcpy(&pVgroup->vnodeGid, &vnodeGids, sizeof(SVnodeGid) * TSDB_MAX_REPLICA);
|
||||
pVgroup->numOfVnodes = numOfVnodes;
|
||||
atomic_add_fetch_32(&pDestDnode->openVnodes, 1);
|
||||
|
||||
mnodeUpdateVgroup(pVgroup);
|
||||
|
@ -315,16 +253,16 @@ static int32_t bnAddVnode(SVgObj *pVgroup, SDnodeObj *pSrcDnode, SDnodeObj *pDes
|
|||
static bool bnMonitorBalance() {
|
||||
if (tsBnDnodes.size < 2) return false;
|
||||
|
||||
mDebug("monitor dnodes for balance, avail:%d", tsBnDnodes.size);
|
||||
for (int32_t src = tsBnDnodes.size - 1; src >= 0; --src) {
|
||||
SDnodeObj *pDnode = tsBnDnodes.list[src];
|
||||
mDebug("%d-dnode:%d, state:%s, score:%.1f, numOfCores:%d, openVnodes:%d", tsBnDnodes.size - src - 1,
|
||||
pDnode->dnodeId, mnodeGetDnodeStatusStr(pDnode->status), pDnode->score, pDnode->numOfCores,
|
||||
pDnode->openVnodes);
|
||||
mDebug("%d-dnode:%d, state:%s, score:%.1f, cores:%d, vnodes:%d", tsBnDnodes.size - src - 1, pDnode->dnodeId,
|
||||
dnodeStatus[pDnode->status], pDnode->score, pDnode->numOfCores, pDnode->openVnodes);
|
||||
}
|
||||
|
||||
float scoresDiff = tsBnDnodes.list[tsBnDnodes.size - 1]->score - tsBnDnodes.list[0]->score;
|
||||
if (scoresDiff < 0.01) {
|
||||
mDebug("all dnodes:%d is already balanced, scoresDiff:%f", tsBnDnodes.size, scoresDiff);
|
||||
mDebug("all dnodes:%d is already balanced, scoreDiff:%.1f", tsBnDnodes.size, scoresDiff);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -392,7 +330,7 @@ void bnReset() {
|
|||
tsAccessSquence = 0;
|
||||
}
|
||||
|
||||
static int32_t bnMonitorVgroups() {
|
||||
static bool bnMonitorVgroups() {
|
||||
void * pIter = NULL;
|
||||
SVgObj *pVgroup = NULL;
|
||||
bool hasUpdatingVgroup = false;
|
||||
|
@ -412,7 +350,13 @@ static int32_t bnMonitorVgroups() {
|
|||
} else if (vgReplica < dbReplica) {
|
||||
mInfo("vgId:%d, replica:%d numOfVnodes:%d, try add one vnode", pVgroup->vgId, dbReplica, vgReplica);
|
||||
hasUpdatingVgroup = true;
|
||||
code = bnAddVnode(pVgroup, NULL, NULL);
|
||||
|
||||
SDnodeObj *pAvailDnode = bnGetAvailDnode(pVgroup);
|
||||
if (pAvailDnode == NULL) {
|
||||
code = TSDB_CODE_MND_DNODE_NOT_EXIST;
|
||||
} else {
|
||||
code = bnAddVnode(pVgroup, NULL, pAvailDnode);
|
||||
}
|
||||
}
|
||||
|
||||
mnodeDecVgroupRef(pVgroup);
|
||||
|
@ -545,6 +489,7 @@ void bnCheckStatus() {
|
|||
mInfo("dnode:%d, set to offline state, access seq:%d last seq:%d laststat:%d", pDnode->dnodeId, tsAccessSquence,
|
||||
pDnode->lastAccess, pDnode->status);
|
||||
bnSetVgroupOffline(pDnode);
|
||||
bnStartTimer(3000);
|
||||
}
|
||||
}
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
|
|
|
@ -299,7 +299,7 @@ static int32_t bnRetrieveScores(SShowObj *pShow, char *data, int32_t rows, void
|
|||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
STR_TO_VARSTR(pWrite, mnodeGetDnodeStatusStr(pDnode->status));
|
||||
STR_TO_VARSTR(pWrite, dnodeStatus[pDnode->status]);
|
||||
cols++;
|
||||
|
||||
numOfRows++;
|
||||
|
|
|
@ -31,7 +31,10 @@ static void *bnThreadFunc(void *arg) {
|
|||
}
|
||||
|
||||
pthread_cond_wait(&tsBnThread.cond, &tsBnThread.mutex);
|
||||
mDebug("balance thread wakes up to work");
|
||||
bool updateSoon = bnStart();
|
||||
mDebug("balance thread finished this poll, updateSoon:%d", updateSoon);
|
||||
|
||||
bnStartTimer(updateSoon ? 1000 : -1);
|
||||
pthread_mutex_unlock(&(tsBnThread.mutex));
|
||||
}
|
||||
|
@ -101,8 +104,8 @@ static void bnProcessTimer(void *handle, void *tmrId) {
|
|||
tsBnThread.timer = NULL;
|
||||
tsAccessSquence++;
|
||||
|
||||
bnCheckStatus();
|
||||
bnStartTimer(-1);
|
||||
bnCheckStatus();
|
||||
|
||||
if (handle == NULL) {
|
||||
if (tsAccessSquence % tsBalanceInterval == 0) {
|
||||
|
@ -121,6 +124,7 @@ void bnStartTimer(int64_t mseconds) {
|
|||
|
||||
bool updateSoon = (mseconds != -1);
|
||||
if (updateSoon) {
|
||||
mTrace("balance function will be called after %" PRId64 " ms", mseconds);
|
||||
taosTmrReset(bnProcessTimer, mseconds, (void *)mseconds, tsMnodeTmr, &tsBnThread.timer);
|
||||
} else {
|
||||
taosTmrReset(bnProcessTimer, tsStatusInterval * 1000, NULL, tsMnodeTmr, &tsBnThread.timer);
|
||||
|
|
|
@ -38,12 +38,6 @@ typedef struct SLocalDataSource {
|
|||
tFilePage filePage;
|
||||
} SLocalDataSource;
|
||||
|
||||
enum {
|
||||
TSC_LOCALREDUCE_READY = 0x0,
|
||||
TSC_LOCALREDUCE_IN_PROGRESS = 0x1,
|
||||
TSC_LOCALREDUCE_TOBE_FREED = 0x2,
|
||||
};
|
||||
|
||||
typedef struct SLocalReducer {
|
||||
SLocalDataSource ** pLocalDataSrc;
|
||||
int32_t numOfBuffer;
|
||||
|
@ -56,7 +50,6 @@ typedef struct SLocalReducer {
|
|||
tFilePage * pTempBuffer;
|
||||
struct SQLFunctionCtx *pCtx;
|
||||
int32_t rowSize; // size of each intermediate result.
|
||||
int32_t status; // denote it is in reduce process, in reduce process, it
|
||||
bool hasPrevRow; // cannot be released
|
||||
bool hasUnprocessedRow;
|
||||
tOrderDescriptor * pDesc;
|
||||
|
|
|
@ -22,8 +22,8 @@ extern "C" {
|
|||
|
||||
#include "tlog.h"
|
||||
|
||||
extern uint32_t cDebugFlag;
|
||||
extern uint32_t tscEmbedded;
|
||||
extern int32_t cDebugFlag;
|
||||
extern int8_t tscEmbedded;
|
||||
|
||||
#define tscFatal(...) do { if (cDebugFlag & DEBUG_FATAL) { taosPrintLog("TSC FATAL ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscError(...) do { if (cDebugFlag & DEBUG_ERROR) { taosPrintLog("TSC ERROR ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
|
|
|
@ -69,9 +69,10 @@ typedef struct STableMeta {
|
|||
int16_t sversion;
|
||||
int16_t tversion;
|
||||
char sTableId[TSDB_TABLE_FNAME_LEN];
|
||||
SVgroupInfo vgroupInfo;
|
||||
int32_t vgId;
|
||||
SCorVgroupInfo corVgroupInfo;
|
||||
STableId id;
|
||||
// union {int64_t stableUid; SSchema* schema;};
|
||||
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
|
||||
} STableMeta;
|
||||
|
||||
|
@ -307,7 +308,7 @@ typedef struct STscObj {
|
|||
SRpcCorEpSet *tscCorMgmtEpSet;
|
||||
void* pDnodeConn;
|
||||
pthread_mutex_t mutex;
|
||||
T_REF_DECLARE()
|
||||
int32_t numOfObj; // number of sqlObj from this tscObj
|
||||
} STscObj;
|
||||
|
||||
typedef struct SSubqueryState {
|
||||
|
@ -420,7 +421,7 @@ void tscCloseTscObj(void *pObj);
|
|||
// todo move to taos? or create a new file: taos_internal.h
|
||||
TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
|
||||
void *param, TAOS **taos);
|
||||
TAOS_RES* taos_query_h(TAOS* taos, const char *sqlstr, TAOS_RES** res);
|
||||
TAOS_RES* taos_query_h(TAOS* taos, const char *sqlstr, int64_t* res);
|
||||
void waitForQueryRsp(void *param, TAOS_RES *tres, int code);
|
||||
|
||||
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen);
|
||||
|
@ -478,15 +479,14 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
|
|||
}
|
||||
}
|
||||
|
||||
extern SCacheObj* tscMetaCache;
|
||||
extern int tscObjRef;
|
||||
extern void * tscTmr;
|
||||
extern void * tscQhandle;
|
||||
extern int tscKeepConn[];
|
||||
extern int tsInsertHeadSize;
|
||||
extern int tscNumOfThreads;
|
||||
extern int tscRefId;
|
||||
|
||||
extern SCacheObj *tscMetaCache;
|
||||
|
||||
extern int tscObjRef;
|
||||
extern void *tscTmr;
|
||||
extern void *tscQhandle;
|
||||
extern int tscKeepConn[];
|
||||
extern int tscRefId;
|
||||
extern int tscNumOfObj; // number of existed sqlObj in current process.
|
||||
|
||||
extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);
|
||||
|
||||
|
|
|
@ -388,10 +388,10 @@ void tscQueueAsyncRes(SSqlObj *pSql) {
|
|||
return;
|
||||
}
|
||||
|
||||
assert(pSql->res.code != TSDB_CODE_SUCCESS);
|
||||
tscError("%p add into queued async res, code:%s", pSql, tstrerror(pSql->res.code));
|
||||
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
|
||||
if (pSql->fp == NULL || pSql->fetchFp == NULL){
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -2597,14 +2597,23 @@ static void percentile_next_step(SQLFunctionCtx *pCtx) {
|
|||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////
|
||||
static void buildHistogramInfo(SAPercentileInfo* pInfo) {
|
||||
pInfo->pHisto = (SHistogramInfo*) ((char*) pInfo + sizeof(SAPercentileInfo));
|
||||
pInfo->pHisto->elems = (SHistBin*) ((char*)pInfo->pHisto + sizeof(SHistogramInfo));
|
||||
}
|
||||
|
||||
static SAPercentileInfo *getAPerctInfo(SQLFunctionCtx *pCtx) {
|
||||
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
|
||||
SAPercentileInfo* pInfo = NULL;
|
||||
|
||||
if (pCtx->stableQuery && pCtx->currentStage != SECONDARY_STAGE_MERGE) {
|
||||
return (SAPercentileInfo*) pCtx->aOutputBuf;
|
||||
pInfo = (SAPercentileInfo*) pCtx->aOutputBuf;
|
||||
} else {
|
||||
return GET_ROWCELL_INTERBUF(pResInfo);
|
||||
pInfo = GET_ROWCELL_INTERBUF(pResInfo);
|
||||
}
|
||||
|
||||
buildHistogramInfo(pInfo);
|
||||
return pInfo;
|
||||
}
|
||||
|
||||
static bool apercentile_function_setup(SQLFunctionCtx *pCtx) {
|
||||
|
@ -2624,6 +2633,8 @@ static void apercentile_function(SQLFunctionCtx *pCtx) {
|
|||
|
||||
SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx);
|
||||
SAPercentileInfo *pInfo = getAPerctInfo(pCtx);
|
||||
|
||||
assert(pInfo->pHisto->elems != NULL);
|
||||
|
||||
for (int32_t i = 0; i < pCtx->size; ++i) {
|
||||
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
|
||||
|
|
|
@ -93,7 +93,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
|
|||
|
||||
// for top/bottom function, the output of timestamp is the first column
|
||||
int32_t functionId = pExpr->functionId;
|
||||
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
|
||||
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
|
||||
pCtx->ptsOutputBuf = pReducer->pCtx[0].aOutputBuf;
|
||||
pCtx->param[2].i64Key = pQueryInfo->order.order;
|
||||
pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
|
||||
|
@ -493,13 +493,6 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
|
|||
// there is no more result, so we release all allocated resource
|
||||
SLocalReducer *pLocalReducer = (SLocalReducer *)atomic_exchange_ptr(&pRes->pLocalReducer, NULL);
|
||||
if (pLocalReducer != NULL) {
|
||||
int32_t status = 0;
|
||||
while ((status = atomic_val_compare_exchange_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY,
|
||||
TSC_LOCALREDUCE_TOBE_FREED)) == TSC_LOCALREDUCE_IN_PROGRESS) {
|
||||
taosMsleep(100);
|
||||
tscDebug("%p waiting for delete procedure, status: %d", pSql, status);
|
||||
}
|
||||
|
||||
pLocalReducer->pFillInfo = taosDestroyFillInfo(pLocalReducer->pFillInfo);
|
||||
|
||||
if (pLocalReducer->pCtx != NULL) {
|
||||
|
@ -911,6 +904,13 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
|
|||
}
|
||||
}
|
||||
|
||||
if (pRes->numOfRowsGroup >= pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) {
|
||||
pRes->numOfRows = 0;
|
||||
pBeforeFillData->num = 0;
|
||||
pLocalReducer->discard = true;
|
||||
return;
|
||||
}
|
||||
|
||||
pRes->numOfRowsGroup += pRes->numOfRows;
|
||||
|
||||
// impose the limitation of output rows on the final result
|
||||
|
@ -1296,6 +1296,10 @@ void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {// re
|
|||
for (int32_t i = 0; i < t; ++i) {
|
||||
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
|
||||
pLocalReducer->pCtx[i].aOutputBuf = pLocalReducer->pResultBuf->data + pExpr->offset * pLocalReducer->resColModel->capacity;
|
||||
|
||||
if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM || pExpr->functionId == TSDB_FUNC_DIFF) {
|
||||
pLocalReducer->pCtx[i].ptsOutputBuf = pLocalReducer->pCtx[0].aOutputBuf;
|
||||
}
|
||||
}
|
||||
|
||||
memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage));
|
||||
|
@ -1430,24 +1434,13 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
|
|||
|
||||
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
|
||||
// set the data merge in progress
|
||||
int32_t prevStatus =
|
||||
atomic_val_compare_exchange_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY, TSC_LOCALREDUCE_IN_PROGRESS);
|
||||
if (prevStatus != TSC_LOCALREDUCE_READY) {
|
||||
assert(prevStatus == TSC_LOCALREDUCE_TOBE_FREED); // it is in tscDestroyLocalReducer function already
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
tFilePage *tmpBuffer = pLocalReducer->pTempBuffer;
|
||||
tFilePage *tmpBuffer = pLocalReducer->pTempBuffer;
|
||||
|
||||
if (doHandleLastRemainData(pSql)) {
|
||||
pLocalReducer->status = TSC_LOCALREDUCE_READY; // set the flag, taos_free_result can release this result.
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (doBuildFilledResultForGroup(pSql)) {
|
||||
pLocalReducer->status = TSC_LOCALREDUCE_READY; // set the flag, taos_free_result can release this result.
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1503,7 +1496,6 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
|
|||
pLocalReducer->discardData->num = 0;
|
||||
|
||||
if (saveGroupResultInfo(pSql)) {
|
||||
pLocalReducer->status = TSC_LOCALREDUCE_READY;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1549,7 +1541,6 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
|
|||
|
||||
// here we do not check the return value
|
||||
adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
|
||||
assert(pLocalReducer->status == TSC_LOCALREDUCE_IN_PROGRESS);
|
||||
|
||||
if (pRes->numOfRows == 0) {
|
||||
handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
|
||||
|
@ -1560,7 +1551,6 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
|
|||
* If previous group is not skipped, keep it in pRes->numOfGroups
|
||||
*/
|
||||
if (notSkipped && saveGroupResultInfo(pSql)) {
|
||||
pLocalReducer->status = TSC_LOCALREDUCE_READY;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1580,7 +1570,6 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
|
|||
if (pRes->numOfRows == 0) {
|
||||
continue;
|
||||
} else {
|
||||
pLocalReducer->status = TSC_LOCALREDUCE_READY; // set the flag, taos_free_result can release this result.
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
} else { // result buffer is not full
|
||||
|
@ -1605,9 +1594,6 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
|
|||
genFinalResults(pSql, pLocalReducer, true);
|
||||
}
|
||||
|
||||
assert(pLocalReducer->status == TSC_LOCALREDUCE_IN_PROGRESS && pRes->row == 0);
|
||||
pLocalReducer->status = TSC_LOCALREDUCE_READY; // set the flag, taos_free_result can release this result.
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@ -731,7 +731,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColI
|
|||
return code;
|
||||
}
|
||||
|
||||
dataBuf->vgId = pTableMeta->vgroupInfo.vgId;
|
||||
dataBuf->vgId = pTableMeta->vgId;
|
||||
dataBuf->numOfTables = 1;
|
||||
|
||||
*totalNum += numOfRows;
|
||||
|
|
|
@ -666,6 +666,7 @@ int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQ
|
|||
const char* msg1 = "invalid query expression";
|
||||
const char* msg2 = "interval cannot be less than 10 ms";
|
||||
const char* msg3 = "sliding cannot be used without interval";
|
||||
const char* msg4 = "top/bottom query does not support order by value in interval query";
|
||||
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
||||
|
@ -712,6 +713,11 @@ int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQ
|
|||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
int32_t colId = pQueryInfo->order.orderColId;
|
||||
if (pQueryInfo->interval.interval > 0 && colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -996,33 +1002,6 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
|
|||
return false;
|
||||
}
|
||||
|
||||
int32_t nLen = 0;
|
||||
for (int32_t i = 0; i < numOfTags; ++i) {
|
||||
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
|
||||
if (p->bytes == 0) {
|
||||
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
|
||||
return false;
|
||||
}
|
||||
|
||||
nLen += p->bytes;
|
||||
}
|
||||
|
||||
// max tag row length must be less than TSDB_MAX_TAGS_LEN
|
||||
if (nLen > TSDB_MAX_TAGS_LEN) {
|
||||
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
return false;
|
||||
}
|
||||
|
||||
// field name must be unique
|
||||
for (int32_t i = 0; i < numOfTags; ++i) {
|
||||
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
|
||||
|
||||
if (has(pFieldList, 0, p->name) == true) {
|
||||
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/* timestamp in tag is not allowed */
|
||||
for (int32_t i = 0; i < numOfTags; ++i) {
|
||||
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
|
||||
|
@ -1054,6 +1033,33 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
|
|||
}
|
||||
}
|
||||
|
||||
int32_t nLen = 0;
|
||||
for (int32_t i = 0; i < numOfTags; ++i) {
|
||||
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
|
||||
if (p->bytes == 0) {
|
||||
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
|
||||
return false;
|
||||
}
|
||||
|
||||
nLen += p->bytes;
|
||||
}
|
||||
|
||||
// max tag row length must be less than TSDB_MAX_TAGS_LEN
|
||||
if (nLen > TSDB_MAX_TAGS_LEN) {
|
||||
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
return false;
|
||||
}
|
||||
|
||||
// field name must be unique
|
||||
for (int32_t i = 0; i < numOfTags; ++i) {
|
||||
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
|
||||
|
||||
if (has(pFieldList, 0, p->name) == true) {
|
||||
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -4646,7 +4652,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
|
|||
|
||||
if (!(orderByTags || orderByTS) && !isTopBottomQuery(pQueryInfo)) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
} else {
|
||||
} else { // order by top/bottom result value column is not supported in case of interval query.
|
||||
assert(!(orderByTags && orderByTS));
|
||||
}
|
||||
|
||||
|
@ -4936,7 +4942,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
}
|
||||
|
||||
SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) pCmd->payload;
|
||||
pUpdateMsg->head.vgId = htonl(pTableMeta->vgroupInfo.vgId);
|
||||
pUpdateMsg->head.vgId = htonl(pTableMeta->vgId);
|
||||
pUpdateMsg->tid = htonl(pTableMeta->id.tid);
|
||||
pUpdateMsg->uid = htobe64(pTableMeta->id.uid);
|
||||
pUpdateMsg->colId = htons(pTagsSchema->colId);
|
||||
|
@ -5442,6 +5448,7 @@ static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
|
|||
pMsg->quorum = pCreateDb->quorum;
|
||||
pMsg->ignoreExist = pCreateDb->ignoreExists;
|
||||
pMsg->update = pCreateDb->update;
|
||||
pMsg->cacheLastRow = pCreateDb->cachelast;
|
||||
}
|
||||
|
||||
int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql) {
|
||||
|
|
|
@ -130,13 +130,14 @@ SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void tscInitCorVgroupInfo(SCorVgroupInfo *corVgroupInfo, SVgroupInfo *vgroupInfo) {
|
||||
static void tscInitCorVgroupInfo(SCorVgroupInfo *corVgroupInfo, SVgroupMsg *pVgroupMsg) {
|
||||
corVgroupInfo->version = 0;
|
||||
corVgroupInfo->inUse = 0;
|
||||
corVgroupInfo->numOfEps = vgroupInfo->numOfEps;
|
||||
for (int32_t i = 0; i < corVgroupInfo->numOfEps; i++) {
|
||||
corVgroupInfo->epAddr[i].fqdn = strdup(vgroupInfo->epAddr[i].fqdn);
|
||||
corVgroupInfo->epAddr[i].port = vgroupInfo->epAddr[i].port;
|
||||
corVgroupInfo->inUse = 0;
|
||||
corVgroupInfo->numOfEps = pVgroupMsg->numOfEps;
|
||||
|
||||
for (int32_t i = 0; i < pVgroupMsg->numOfEps; i++) {
|
||||
corVgroupInfo->epAddr[i].fqdn = strndup(pVgroupMsg->epAddr[i].fqdn, tListLen(pVgroupMsg->epAddr[0].fqdn));
|
||||
corVgroupInfo->epAddr[i].port = pVgroupMsg->epAddr[i].port;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -145,8 +146,10 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
|
|||
|
||||
int32_t schemaSize = (pTableMetaMsg->numOfColumns + pTableMetaMsg->numOfTags) * sizeof(SSchema);
|
||||
STableMeta* pTableMeta = calloc(1, sizeof(STableMeta) + schemaSize);
|
||||
|
||||
pTableMeta->tableType = pTableMetaMsg->tableType;
|
||||
|
||||
pTableMeta->vgId = pTableMetaMsg->vgroup.vgId;
|
||||
|
||||
pTableMeta->tableInfo = (STableComInfo) {
|
||||
.numOfTags = pTableMetaMsg->numOfTags,
|
||||
.precision = pTableMetaMsg->precision,
|
||||
|
@ -156,18 +159,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
|
|||
pTableMeta->id.tid = pTableMetaMsg->tid;
|
||||
pTableMeta->id.uid = pTableMetaMsg->uid;
|
||||
|
||||
SVgroupInfo* pVgroupInfo = &pTableMeta->vgroupInfo;
|
||||
pVgroupInfo->numOfEps = pTableMetaMsg->vgroup.numOfEps;
|
||||
pVgroupInfo->vgId = pTableMetaMsg->vgroup.vgId;
|
||||
|
||||
for(int32_t i = 0; i < pVgroupInfo->numOfEps; ++i) {
|
||||
SEpAddrMsg* pEpMsg = &pTableMetaMsg->vgroup.epAddr[i];
|
||||
|
||||
pVgroupInfo->epAddr[i].fqdn = strndup(pEpMsg->fqdn, tListLen(pEpMsg->fqdn));
|
||||
pVgroupInfo->epAddr[i].port = pEpMsg->port;
|
||||
}
|
||||
|
||||
tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, pVgroupInfo);
|
||||
tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, &pTableMetaMsg->vgroup);
|
||||
|
||||
pTableMeta->sversion = pTableMetaMsg->sversion;
|
||||
pTableMeta->tversion = pTableMetaMsg->tversion;
|
||||
|
|
|
@ -45,32 +45,30 @@ static int32_t getWaitingTimeInterval(int32_t count) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
return initial * (2<<(count - 2));
|
||||
return initial * ((2u)<<(count - 2));
|
||||
}
|
||||
|
||||
static void tscSetDnodeEpSet(SSqlObj* pSql, SVgroupInfo* pVgroupInfo) {
|
||||
assert(pSql != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0);
|
||||
|
||||
SRpcEpSet* pEpSet = &pSql->epSet;
|
||||
static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupInfo* pVgroupInfo) {
|
||||
assert(pEpSet != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0);
|
||||
|
||||
// Issue the query to one of the vnode among a vgroup randomly.
|
||||
// change the inUse property would not affect the isUse attribute of STableMeta
|
||||
pEpSet->inUse = rand() % pVgroupInfo->numOfEps;
|
||||
|
||||
// apply the FQDN string length check here
|
||||
bool hasFqdn = false;
|
||||
bool existed = false;
|
||||
|
||||
pEpSet->numOfEps = pVgroupInfo->numOfEps;
|
||||
for(int32_t i = 0; i < pVgroupInfo->numOfEps; ++i) {
|
||||
tstrncpy(pEpSet->fqdn[i], pVgroupInfo->epAddr[i].fqdn, tListLen(pEpSet->fqdn[i]));
|
||||
pEpSet->port[i] = pVgroupInfo->epAddr[i].port;
|
||||
|
||||
if (!hasFqdn) {
|
||||
hasFqdn = (strlen(pEpSet->fqdn[i]) > 0);
|
||||
int32_t len = (int32_t) strnlen(pVgroupInfo->epAddr[i].fqdn, TSDB_FQDN_LEN);
|
||||
if (len > 0) {
|
||||
tstrncpy(pEpSet->fqdn[i], pVgroupInfo->epAddr[i].fqdn, tListLen(pEpSet->fqdn[i]));
|
||||
existed = true;
|
||||
}
|
||||
}
|
||||
|
||||
assert(hasFqdn);
|
||||
assert(existed);
|
||||
}
|
||||
|
||||
static void tscDumpMgmtEpSet(SSqlObj *pSql) {
|
||||
|
@ -102,7 +100,8 @@ void tscUpdateMgmtEpSet(SSqlObj *pSql, SRpcEpSet *pEpSet) {
|
|||
pCorEpSet->epSet = *pEpSet;
|
||||
taosCorEndWrite(&pCorEpSet->version);
|
||||
}
|
||||
static void tscDumpEpSetFromVgroupInfo(SCorVgroupInfo *pVgroupInfo, SRpcEpSet *pEpSet) {
|
||||
|
||||
static void tscDumpEpSetFromVgroupInfo(SRpcEpSet *pEpSet, SCorVgroupInfo *pVgroupInfo) {
|
||||
if (pVgroupInfo == NULL) { return;}
|
||||
taosCorBeginRead(&pVgroupInfo->version);
|
||||
int8_t inUse = pVgroupInfo->inUse;
|
||||
|
@ -515,8 +514,8 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
}
|
||||
} else {
|
||||
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
pRetrieveMsg->header.vgId = htonl(pTableMeta->vgroupInfo.vgId);
|
||||
tscDebug("%p build fetch msg from only one vgroup, vgId:%d", pSql, pTableMeta->vgroupInfo.vgId);
|
||||
pRetrieveMsg->header.vgId = htonl(pTableMeta->vgId);
|
||||
tscDebug("%p build fetch msg from only one vgroup, vgId:%d", pSql, pTableMeta->vgId);
|
||||
}
|
||||
|
||||
pSql->cmd.payloadLen = sizeof(SRetrieveTableMsg);
|
||||
|
@ -535,7 +534,6 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
|
||||
// NOTE: shell message size should not include SMsgDesc
|
||||
int32_t size = pSql->cmd.payloadLen - sizeof(SMsgDesc);
|
||||
int32_t vgId = pTableMeta->vgroupInfo.vgId;
|
||||
|
||||
SMsgDesc* pMsgDesc = (SMsgDesc*) pMsg;
|
||||
pMsgDesc->numOfVnodes = htonl(1); // always one vnode
|
||||
|
@ -543,7 +541,7 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
pMsg += sizeof(SMsgDesc);
|
||||
SSubmitMsg *pShellMsg = (SSubmitMsg *)pMsg;
|
||||
|
||||
pShellMsg->header.vgId = htonl(vgId);
|
||||
pShellMsg->header.vgId = htonl(pTableMeta->vgId);
|
||||
pShellMsg->header.contLen = htonl(size); // the length not includes the size of SMsgDesc
|
||||
pShellMsg->length = pShellMsg->header.contLen;
|
||||
|
||||
|
@ -551,9 +549,9 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
|
||||
// pSql->cmd.payloadLen is set during copying data into payload
|
||||
pSql->cmd.msgType = TSDB_MSG_TYPE_SUBMIT;
|
||||
tscDumpEpSetFromVgroupInfo(&pTableMeta->corVgroupInfo, &pSql->epSet);
|
||||
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &pTableMeta->corVgroupInfo);
|
||||
|
||||
tscDebug("%p build submit msg, vgId:%d numOfTables:%d numberOfEP:%d", pSql, vgId, pSql->cmd.numOfTablesInSubmit,
|
||||
tscDebug("%p build submit msg, vgId:%d numOfTables:%d numberOfEP:%d", pSql, pTableMeta->vgId, pSql->cmd.numOfTablesInSubmit,
|
||||
pSql->epSet.numOfEps);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -597,24 +595,28 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
|
|||
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || pTableMetaInfo->pVgroupTables == NULL) {
|
||||
|
||||
SVgroupInfo* pVgroupInfo = NULL;
|
||||
int32_t vgId = -1;
|
||||
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
|
||||
int32_t index = pTableMetaInfo->vgroupIndex;
|
||||
assert(index >= 0);
|
||||
|
||||
|
||||
SVgroupInfo* pVgroupInfo = NULL;
|
||||
if (pTableMetaInfo->vgroupList->numOfVgroups > 0) {
|
||||
assert(index < pTableMetaInfo->vgroupList->numOfVgroups);
|
||||
pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
|
||||
}
|
||||
|
||||
vgId = pVgroupInfo->vgId;
|
||||
tscSetDnodeEpSet(&pSql->epSet, pVgroupInfo);
|
||||
tscDebug("%p query on stable, vgIndex:%d, numOfVgroups:%d", pSql, index, pTableMetaInfo->vgroupList->numOfVgroups);
|
||||
} else {
|
||||
pVgroupInfo = &pTableMeta->vgroupInfo;
|
||||
vgId = pTableMeta->vgId;
|
||||
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &pTableMeta->corVgroupInfo);
|
||||
}
|
||||
|
||||
assert(pVgroupInfo != NULL);
|
||||
pSql->epSet.inUse = rand()%pSql->epSet.numOfEps;
|
||||
|
||||
tscSetDnodeEpSet(pSql, pVgroupInfo);
|
||||
pQueryMsg->head.vgId = htonl(pVgroupInfo->vgId);
|
||||
pQueryMsg->head.vgId = htonl(vgId);
|
||||
|
||||
STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg;
|
||||
pTableIdInfo->tid = htonl(pTableMeta->id.tid);
|
||||
|
@ -633,7 +635,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
|
|||
SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, index);
|
||||
|
||||
// set the vgroup info
|
||||
tscSetDnodeEpSet(pSql, &pTableIdList->vgInfo);
|
||||
tscSetDnodeEpSet(&pSql->epSet, &pTableIdList->vgInfo);
|
||||
pQueryMsg->head.vgId = htonl(pTableIdList->vgInfo.vgId);
|
||||
|
||||
int32_t numOfTables = (int32_t)taosArrayGetSize(pTableIdList->itemList);
|
||||
|
@ -1247,7 +1249,7 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
pShowMsg->payloadLen = htons(pEpAddr->n);
|
||||
}
|
||||
|
||||
pCmd->payloadLen = sizeof(SShowMsg) + pShowMsg->payloadLen;
|
||||
pCmd->payloadLen = sizeof(SShowMsg) + htons(pShowMsg->payloadLen);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1448,48 +1450,11 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
|
|||
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
|
||||
tscDumpEpSetFromVgroupInfo(&pTableMetaInfo->pTableMeta->corVgroupInfo, &pSql->epSet);
|
||||
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &pTableMetaInfo->pTableMeta->corVgroupInfo);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
//int tscBuildCancelQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||
// SCancelQueryMsg *pCancelMsg = (SCancelQueryMsg*) pSql->cmd.payload;
|
||||
// pCancelMsg->qhandle = htobe64(pSql->res.qhandle);
|
||||
//
|
||||
// SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
|
||||
// STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
//
|
||||
// if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
|
||||
// int32_t vgIndex = pTableMetaInfo->vgroupIndex;
|
||||
// if (pTableMetaInfo->pVgroupTables == NULL) {
|
||||
// SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList;
|
||||
// assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups);
|
||||
//
|
||||
// pCancelMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId);
|
||||
// tscDebug("%p build cancel query msg from vgId:%d, vgIndex:%d", pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex);
|
||||
// } else {
|
||||
// int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
|
||||
// assert(vgIndex >= 0 && vgIndex < numOfVgroups);
|
||||
//
|
||||
// SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex);
|
||||
//
|
||||
// pCancelMsg->header.vgId = htonl(pTableIdList->vgInfo.vgId);
|
||||
// tscDebug("%p build cancel query msg from vgId:%d, vgIndex:%d", pSql, pTableIdList->vgInfo.vgId, vgIndex);
|
||||
// }
|
||||
// } else {
|
||||
// STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
// pCancelMsg->header.vgId = htonl(pTableMeta->vgroupInfo.vgId);
|
||||
// tscDebug("%p build cancel query msg from only one vgroup, vgId:%d", pSql, pTableMeta->vgroupInfo.vgId);
|
||||
// }
|
||||
//
|
||||
// pSql->cmd.payloadLen = sizeof(SCancelQueryMsg);
|
||||
// pSql->cmd.msgType = TSDB_MSG_TYPE_CANCEL_QUERY;
|
||||
//
|
||||
// pCancelMsg->header.contLen = htonl(sizeof(SCancelQueryMsg));
|
||||
// return TSDB_CODE_SUCCESS;
|
||||
//}
|
||||
|
||||
int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
pCmd->payloadLen = sizeof(SAlterDbMsg);
|
||||
|
|
|
@ -115,9 +115,7 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa
|
|||
|
||||
pObj->signature = pObj;
|
||||
pObj->pDnodeConn = pDnodeConn;
|
||||
T_REF_INIT_VAL(pObj, 1);
|
||||
|
||||
|
||||
tstrncpy(pObj->user, user, sizeof(pObj->user));
|
||||
secretEncryptLen = MIN(secretEncryptLen, sizeof(pObj->pass));
|
||||
memcpy(pObj->pass, secretEncrypt, secretEncryptLen);
|
||||
|
@ -172,11 +170,9 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa
|
|||
if (taos != NULL) {
|
||||
*taos = pObj;
|
||||
}
|
||||
|
||||
registerSqlObj(pSql);
|
||||
tsInsertHeadSize = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
|
||||
|
||||
pObj->rid = taosAddRef(tscRefId, pObj);
|
||||
registerSqlObj(pSql);
|
||||
|
||||
return pSql;
|
||||
}
|
||||
|
||||
|
@ -288,34 +284,21 @@ void taos_close(TAOS *taos) {
|
|||
return;
|
||||
}
|
||||
|
||||
// make sure that the close connection can only be executed once.
|
||||
pObj->signature = NULL;
|
||||
taosTmrStopA(&(pObj->pTimer));
|
||||
|
||||
if (pObj->hbrid > 0) {
|
||||
if (RID_VALID(pObj->hbrid)) {
|
||||
SSqlObj* pHb = (SSqlObj*)taosAcquireRef(tscObjRef, pObj->hbrid);
|
||||
if (pHb != NULL) {
|
||||
if (pHb->rpcRid > 0) { // wait for rsp from dnode
|
||||
if (RID_VALID(pHb->rpcRid)) { // wait for rsp from dnode
|
||||
rpcCancelRequest(pHb->rpcRid);
|
||||
pHb->rpcRid = -1;
|
||||
}
|
||||
|
||||
tscDebug("%p HB is freed", pHb);
|
||||
taos_free_result(pHb);
|
||||
taosReleaseRef(tscObjRef, pHb->self);
|
||||
taos_free_result(pHb);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t ref = T_REF_DEC(pObj);
|
||||
assert(ref >= 0);
|
||||
|
||||
if (ref > 0) {
|
||||
tscDebug("%p %d remain sqlObjs, not free tscObj and dnodeConn:%p", pObj, ref, pObj->pDnodeConn);
|
||||
return;
|
||||
}
|
||||
|
||||
tscDebug("%p all sqlObj are freed, free tscObj and close dnodeConn:%p", pObj, pObj->pDnodeConn);
|
||||
|
||||
taosRemoveRef(tscRefId, pObj->rid);
|
||||
}
|
||||
|
||||
|
@@ -331,7 +314,7 @@ static void waitForRetrieveRsp(void *param, TAOS_RES *tres, int numOfRows) {
  tsem_post(&pSql->rspSem);
}

TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen, TAOS_RES** res) {
TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen, int64_t* res) {
  STscObj *pObj = (STscObj *)taos;
  if (pObj == NULL || pObj->signature != pObj) {
    terrno = TSDB_CODE_TSC_DISCONNECTED;

@@ -357,7 +340,7 @@ TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen, TAOS_RES
  doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen);

  if (res != NULL) {
    *res = pSql;
    atomic_store_64(res, pSql->self);
  }

  tsem_wait(&pSql->rspSem);

@@ -368,7 +351,7 @@ TAOS_RES* taos_query(TAOS *taos, const char *sqlstr) {
  return taos_query_c(taos, sqlstr, (uint32_t)strlen(sqlstr), NULL);
}

TAOS_RES* taos_query_h(TAOS* taos, const char *sqlstr, TAOS_RES** res) {
TAOS_RES* taos_query_h(TAOS* taos, const char *sqlstr, int64_t* res) {
  return taos_query_c(taos, sqlstr, (uint32_t) strlen(sqlstr), res);
}

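The two hunks above change the result handle that taos_query_c/taos_query_h hand back to callers from a TAOS_RES** pointer to an int64_t reference id written with atomic_store_64. A minimal sketch of how a caller might adapt, mirroring the shell.c change later in this diff; the function name, connection argument, and SQL string below are illustrative, not part of the patch, and it assumes taos_query_h is visible to the caller as it is to the shell:

#include <stdint.h>
#include <stdio.h>
#include "taos.h"   /* TDengine client API: taos_query_h, taos_errno, taos_errstr, taos_free_result */

/* Sketch only: issue one query through the new int64-handle variant.
 * 'conn' is assumed to be an already-opened TAOS* connection. */
static void runOneQuery(TAOS *conn, const char *sql) {
  int64_t handle = 0;                         /* filled via atomic_store_64 inside the client */
  TAOS_RES *pRes = taos_query_h(conn, sql, &handle);
  if (taos_errno(pRes) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(pRes));
  }
  taos_free_result(pRes);
}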
@@ -30,18 +30,16 @@
#include "tlocale.h"

// global, not configurable
SCacheObj* tscMetaCache;
SCacheObj *tscMetaCache;  // table meta cache
SHashObj  *tscHashMap;    // hash map to keep the global vgroup info
int tscObjRef = -1;
void * tscTmr;
void * tscQhandle;
void * tscCheckDiskUsageTmr;
int tsInsertHeadSize;
void *tscTmr;
void *tscQhandle;
void *tscCheckDiskUsageTmr;
int tscRefId = -1;

int tscNumOfThreads;
int tscNumOfObj = 0;  // number of sqlObj in current process.

static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
//void tscUpdateEpSet(void *ahandle, SRpcEpSet *pEpSet);

void tscCheckDiskUsage(void *UNUSED_PARAM(para), void *UNUSED_PARAM(param)) {
  taosGetDisk();
@ -114,7 +112,7 @@ void taos_init_imp(void) {
|
|||
int queueSize = tsMaxConnections*2;
|
||||
|
||||
double factor = (tscEmbedded == 0)? 2.0:4.0;
|
||||
tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / factor);
|
||||
int32_t tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / factor);
|
||||
if (tscNumOfThreads < 2) {
|
||||
tscNumOfThreads = 2;
|
||||
}
|
||||
|
@ -133,7 +131,8 @@ void taos_init_imp(void) {
|
|||
int64_t refreshTime = 10; // 10 seconds by default
|
||||
if (tscMetaCache == NULL) {
|
||||
tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, tscFreeTableMetaHelper, "tableMeta");
|
||||
tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj);
|
||||
tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj);
|
||||
tscHashMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
|
||||
}
|
||||
|
||||
tscRefId = taosOpenRef(200, tscCloseTscObj);
|
||||
|
|
|
@ -458,29 +458,19 @@ void tscFreeRegisteredSqlObj(void *pSql) {
|
|||
SSqlObj* p = *(SSqlObj**)pSql;
|
||||
STscObj* pTscObj = p->pTscObj;
|
||||
|
||||
assert(p->self != 0);
|
||||
assert(RID_VALID(p->self));
|
||||
|
||||
tscFreeSqlObj(p);
|
||||
taosReleaseRef(tscRefId, pTscObj->rid);
|
||||
|
||||
int32_t ref = T_REF_DEC(pTscObj);
|
||||
assert(ref >= 0);
|
||||
|
||||
tscDebug("%p free sqlObj completed, tscObj:%p ref:%d", p, pTscObj, ref);
|
||||
if (ref == 0) {
|
||||
tscDebug("%p all sqlObj freed, free tscObj:%p", p, pTscObj);
|
||||
taosRemoveRef(tscRefId, pTscObj->rid);
|
||||
}
|
||||
int32_t num = atomic_sub_fetch_32(&pTscObj->numOfObj, 1);
|
||||
int32_t total = atomic_sub_fetch_32(&tscNumOfObj, 1);
|
||||
tscDebug("%p free SqlObj, total in tscObj:%d, total:%d", pSql, num, total);
|
||||
}
|
||||
|
||||
void tscFreeTableMetaHelper(void *pTableMeta) {
|
||||
STableMeta* p = (STableMeta*) pTableMeta;
|
||||
|
||||
int32_t numOfEps = p->vgroupInfo.numOfEps;
|
||||
assert(numOfEps >= 0 && numOfEps <= TSDB_MAX_REPLICA);
|
||||
|
||||
for(int32_t i = 0; i < numOfEps; ++i) {
|
||||
tfree(p->vgroupInfo.epAddr[i].fqdn);
|
||||
}
|
||||
|
||||
int32_t numOfEps1 = p->corVgroupInfo.numOfEps;
|
||||
assert(numOfEps1 >= 0 && numOfEps1 <= TSDB_MAX_REPLICA);
|
||||
|
||||
|
@ -810,6 +800,7 @@ static void extractTableMeta(SSqlCmd* pCmd) {
|
|||
}
|
||||
|
||||
int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
|
||||
const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
||||
void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
|
||||
|
@ -824,7 +815,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
|
|||
STableDataBlocks* dataBuf = NULL;
|
||||
|
||||
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
|
||||
tsInsertHeadSize, 0, pOneTableBlock->tableId, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
|
||||
INSERT_HEAD_SIZE, 0, pOneTableBlock->tableId, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret);
|
||||
taosHashCleanup(pVnodeDataBlockHashList);
|
||||
|
@ -1917,10 +1908,12 @@ void tscResetForNextRetrieve(SSqlRes* pRes) {
|
|||
}
|
||||
|
||||
void registerSqlObj(SSqlObj* pSql) {
|
||||
int32_t ref = T_REF_INC(pSql->pTscObj);
|
||||
tscDebug("%p add to tscObj:%p, ref:%d", pSql, pSql->pTscObj, ref);
|
||||
|
||||
taosAcquireRef(tscRefId, pSql->pTscObj->rid);
|
||||
pSql->self = taosAddRef(tscObjRef, pSql);
|
||||
|
||||
int32_t num = atomic_add_fetch_32(&pSql->pTscObj->numOfObj, 1);
|
||||
int32_t total = atomic_add_fetch_32(&tscNumOfObj, 1);
|
||||
tscDebug("%p new SqlObj from %p, total in tscObj:%d, total:%d", pSql, pSql->pTscObj, num, total);
|
||||
}
|
||||
|
||||
SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cmd) {
|
||||
|
@ -1950,30 +1943,24 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
|
|||
return NULL;
|
||||
}
|
||||
|
||||
pNew->fp = fp;
|
||||
pNew->fp = fp;
|
||||
pNew->fetchFp = fp;
|
||||
pNew->param = param;
|
||||
pNew->param = param;
|
||||
pNew->sqlstr = NULL;
|
||||
pNew->maxRetry = TSDB_MAX_REPLICA;
|
||||
|
||||
pNew->sqlstr = strdup(pSql->sqlstr);
|
||||
if (pNew->sqlstr == NULL) {
|
||||
tscError("%p new subquery failed", pSql);
|
||||
tscFreeSqlObj(pNew);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, 0);
|
||||
|
||||
assert(pSql->cmd.clauseIndex == 0);
|
||||
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
|
||||
|
||||
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
|
||||
|
||||
registerSqlObj(pNew);
|
||||
|
||||
return pNew;
|
||||
}
|
||||
|
||||
static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* pNewQueryInfo, int64_t uid) {
|
||||
static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pNewQueryInfo, int64_t uid) {
|
||||
int32_t numOfOutput = (int32_t)tscSqlExprNumOfExprs(pNewQueryInfo);
|
||||
if (numOfOutput == 0) {
|
||||
return;
|
||||
|
@ -2026,15 +2013,9 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
|
||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, tableIndex);
|
||||
|
||||
pNew->pTscObj = pSql->pTscObj;
|
||||
pNew->pTscObj = pSql->pTscObj;
|
||||
pNew->signature = pNew;
|
||||
|
||||
pNew->sqlstr = strdup(pSql->sqlstr);
|
||||
if (pNew->sqlstr == NULL) {
|
||||
tscError("%p new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql, tableIndex, pTableMetaInfo->vgroupIndex);
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
pNew->sqlstr = NULL;
|
||||
|
||||
SSqlCmd* pnCmd = &pNew->cmd;
|
||||
memcpy(pnCmd, pCmd, sizeof(SSqlCmd));
|
||||
|
@ -2122,23 +2103,22 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
goto _error;
|
||||
}
|
||||
|
||||
doSetSqlExprAndResultFieldInfo(pQueryInfo, pNewQueryInfo, uid);
|
||||
doSetSqlExprAndResultFieldInfo(pNewQueryInfo, uid);
|
||||
|
||||
pNew->fp = fp;
|
||||
pNew->fp = fp;
|
||||
pNew->fetchFp = fp;
|
||||
|
||||
pNew->param = param;
|
||||
pNew->param = param;
|
||||
pNew->maxRetry = TSDB_MAX_REPLICA;
|
||||
|
||||
char* name = pTableMetaInfo->name;
|
||||
STableMetaInfo* pFinalInfo = NULL;
|
||||
|
||||
if (pPrevSql == NULL) {
|
||||
STableMeta* pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMetaInfo->pTableMeta); // get by name may failed due to the cache cleanup
|
||||
if (pPrevSql == NULL) { // get by name may failed due to the cache cleanup
|
||||
STableMeta* pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMetaInfo->pTableMeta);
|
||||
assert(pTableMeta != NULL);
|
||||
|
||||
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList,
|
||||
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
|
||||
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
|
||||
} else { // transfer the ownership of pTableMeta to the newly create sql object.
|
||||
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
|
||||
|
||||
|
@ -2629,4 +2609,4 @@ int32_t copyTagData(STagData* dst, const STagData* src) {
|
|||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -32,8 +32,8 @@ extern uint16_t tsSyncPort;
|
|||
extern uint16_t tsArbitratorPort;
|
||||
extern int32_t tsStatusInterval;
|
||||
extern int32_t tsNumOfMnodes;
|
||||
extern int32_t tsEnableVnodeBak;
|
||||
extern int32_t tsEnableTelemetryReporting;
|
||||
extern int8_t tsEnableVnodeBak;
|
||||
extern int8_t tsEnableTelemetryReporting;
|
||||
extern char tsEmail[];
|
||||
extern char tsArbitrator[];
|
||||
|
||||
|
@ -51,7 +51,7 @@ extern int8_t tsDaylight;
|
|||
extern char tsTimezone[];
|
||||
extern char tsLocale[];
|
||||
extern char tsCharset[]; // default encode string
|
||||
extern int32_t tsEnableCoreFile;
|
||||
extern int8_t tsEnableCoreFile;
|
||||
extern int32_t tsCompressMsgSize;
|
||||
extern char tsTempDir[];
|
||||
|
||||
|
@ -59,12 +59,12 @@ extern char tsTempDir[];
|
|||
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer for each data node during query processing
|
||||
extern int32_t tsRetrieveBlockingModel;// retrieve threads will be blocked
|
||||
|
||||
extern int32_t tsKeepOriginalColumnName;
|
||||
extern int8_t tsKeepOriginalColumnName;
|
||||
|
||||
// client
|
||||
extern int32_t tsTableMetaKeepTimer;
|
||||
extern int32_t tsMaxSQLStringLen;
|
||||
extern int32_t tsTscEnableRecordSql;
|
||||
extern int8_t tsTscEnableRecordSql;
|
||||
extern int32_t tsMaxNumOfOrderedResults;
|
||||
extern int32_t tsMinSlidingTime;
|
||||
extern int32_t tsMinIntervalTime;
|
||||
|
@ -93,48 +93,51 @@ extern int16_t tsWAL;
|
|||
extern int32_t tsFsyncPeriod;
|
||||
extern int32_t tsReplications;
|
||||
extern int32_t tsQuorum;
|
||||
extern int32_t tsUpdate;
|
||||
extern int8_t tsUpdate;
|
||||
extern int8_t tsCacheLastRow;
|
||||
|
||||
// balance
|
||||
extern int32_t tsEnableBalance;
|
||||
extern int32_t tsAlternativeRole;
|
||||
extern int8_t tsEnableBalance;
|
||||
extern int8_t tsAlternativeRole;
|
||||
extern int32_t tsBalanceInterval;
|
||||
extern int32_t tsOfflineThreshold;
|
||||
extern int32_t tsMnodeEqualVnodeNum;
|
||||
extern int32_t tsFlowCtrl;
|
||||
extern int8_t tsEnableFlowCtrl;
|
||||
extern int8_t tsEnableSlaveQuery;
|
||||
extern int8_t tsEnableAdjustMaster;
|
||||
|
||||
// restful
|
||||
extern int32_t tsEnableHttpModule;
|
||||
extern int8_t tsEnableHttpModule;
|
||||
extern int32_t tsRestRowLimit;
|
||||
extern uint16_t tsHttpPort;
|
||||
extern int32_t tsHttpCacheSessions;
|
||||
extern int32_t tsHttpSessionExpire;
|
||||
extern int32_t tsHttpMaxThreads;
|
||||
extern int32_t tsHttpEnableCompress;
|
||||
extern int32_t tsHttpEnableRecordSql;
|
||||
extern int32_t tsTelegrafUseFieldNum;
|
||||
extern int8_t tsHttpEnableCompress;
|
||||
extern int8_t tsHttpEnableRecordSql;
|
||||
extern int8_t tsTelegrafUseFieldNum;
|
||||
|
||||
// mqtt
|
||||
extern int32_t tsEnableMqttModule;
|
||||
extern char tsMqttHostName[];
|
||||
extern char tsMqttPort[];
|
||||
extern char tsMqttUser[];
|
||||
extern char tsMqttPass[];
|
||||
extern char tsMqttClientId[];
|
||||
extern char tsMqttTopic[];
|
||||
extern int8_t tsEnableMqttModule;
|
||||
extern char tsMqttHostName[];
|
||||
extern char tsMqttPort[];
|
||||
extern char tsMqttUser[];
|
||||
extern char tsMqttPass[];
|
||||
extern char tsMqttClientId[];
|
||||
extern char tsMqttTopic[];
|
||||
|
||||
// monitor
|
||||
extern int32_t tsEnableMonitorModule;
|
||||
extern int8_t tsEnableMonitorModule;
|
||||
extern char tsMonitorDbName[];
|
||||
extern char tsInternalPass[];
|
||||
extern int32_t tsMonitorInterval;
|
||||
|
||||
// stream
|
||||
extern int32_t tsEnableStream;
|
||||
extern int8_t tsEnableStream;
|
||||
|
||||
// internal
|
||||
extern int32_t tsPrintAuth;
|
||||
extern uint32_t tscEmbedded;
|
||||
extern int8_t tsPrintAuth;
|
||||
extern int8_t tscEmbedded;
|
||||
extern char configDir[];
|
||||
extern char tsVnodeDir[];
|
||||
extern char tsDnodeDir[];
|
||||
|
@ -170,13 +173,13 @@ extern char gitinfoOfInternal[];
|
|||
extern char buildinfo[];
|
||||
|
||||
// log
|
||||
extern int32_t tsAsyncLog;
|
||||
extern int8_t tsAsyncLog;
|
||||
extern int32_t tsNumOfLogLines;
|
||||
extern int32_t tsLogKeepDays;
|
||||
extern int32_t dDebugFlag;
|
||||
extern int32_t vDebugFlag;
|
||||
extern int32_t mDebugFlag;
|
||||
extern uint32_t cDebugFlag;
|
||||
extern int32_t cDebugFlag;
|
||||
extern int32_t jniDebugFlag;
|
||||
extern int32_t tmrDebugFlag;
|
||||
extern int32_t sdbDebugFlag;
|
||||
|
@ -186,7 +189,7 @@ extern int32_t monDebugFlag;
|
|||
extern int32_t uDebugFlag;
|
||||
extern int32_t rpcDebugFlag;
|
||||
extern int32_t odbcDebugFlag;
|
||||
extern uint32_t qDebugFlag;
|
||||
extern int32_t qDebugFlag;
|
||||
extern int32_t wDebugFlag;
|
||||
extern int32_t cqDebugFlag;
|
||||
extern int32_t debugFlag;
|
||||
|
|
|
@ -23,7 +23,7 @@ extern "C" {
|
|||
#include "tlog.h"
|
||||
|
||||
extern int32_t uDebugFlag;
|
||||
extern uint32_t tscEmbedded;
|
||||
extern int8_t tscEmbedded;
|
||||
|
||||
#define uFatal(...) { if (uDebugFlag & DEBUG_FATAL) { taosPrintLog("UTL FATAL", tscEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
#define uError(...) { if (uDebugFlag & DEBUG_ERROR) { taosPrintLog("UTL ERROR ", tscEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
|
|
|
@ -39,8 +39,8 @@ uint16_t tsSyncPort = 6040;
|
|||
uint16_t tsArbitratorPort = 6042;
|
||||
int32_t tsStatusInterval = 1; // second
|
||||
int32_t tsNumOfMnodes = 3;
|
||||
int32_t tsEnableVnodeBak = 1;
|
||||
int32_t tsEnableTelemetryReporting = 1;
|
||||
int8_t tsEnableVnodeBak = 1;
|
||||
int8_t tsEnableTelemetryReporting = 1;
|
||||
char tsEmail[TSDB_FQDN_LEN] = {0};
|
||||
|
||||
// common
|
||||
|
@ -56,7 +56,7 @@ int8_t tsDaylight = 0;
|
|||
char tsTimezone[TSDB_TIMEZONE_LEN] = {0};
|
||||
char tsLocale[TSDB_LOCALE_LEN] = {0};
|
||||
char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string
|
||||
int32_t tsEnableCoreFile = 0;
|
||||
int8_t tsEnableCoreFile = 0;
|
||||
int32_t tsMaxBinaryDisplayWidth = 30;
|
||||
char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/";
|
||||
|
||||
|
@ -73,7 +73,7 @@ int32_t tsCompressMsgSize = -1;
|
|||
// client
|
||||
int32_t tsTableMetaKeepTimer = 7200; // second
|
||||
int32_t tsMaxSQLStringLen = TSDB_MAX_SQL_LEN;
|
||||
int32_t tsTscEnableRecordSql = 0;
|
||||
int8_t tsTscEnableRecordSql = 0;
|
||||
|
||||
// the maximum number of results for projection query on super table that are returned from
|
||||
// one virtual node, to order according to timestamp
|
||||
|
@ -110,7 +110,7 @@ int32_t tsQueryBufferSize = -1;
|
|||
int32_t tsRetrieveBlockingModel = 0;
|
||||
|
||||
// last_row(*), first(*), last_row(ts, col1, col2) query, the result fields will be the original column name
|
||||
int32_t tsKeepOriginalColumnName = 0;
|
||||
int8_t tsKeepOriginalColumnName = 0;
|
||||
|
||||
// db parameters
|
||||
int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE;
|
||||
|
@ -126,33 +126,36 @@ int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
|
|||
int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
|
||||
int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
|
||||
int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
|
||||
int32_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION;
|
||||
int8_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION;
|
||||
int8_t tsCacheLastRow = TSDB_DEFAULT_CACHE_BLOCK_SIZE;
|
||||
int32_t tsMaxVgroupsPerDb = 0;
|
||||
int32_t tsMinTablePerVnode = TSDB_TABLES_STEP;
|
||||
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
|
||||
int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
|
||||
|
||||
// balance
|
||||
int32_t tsEnableBalance = 1;
|
||||
int32_t tsAlternativeRole = 0;
|
||||
int32_t tsBalanceInterval = 300; // seconds
|
||||
int32_t tsOfflineThreshold = 86400*100; // seconds 10days
|
||||
int8_t tsEnableBalance = 1;
|
||||
int8_t tsAlternativeRole = 0;
|
||||
int32_t tsBalanceInterval = 300; // seconds
|
||||
int32_t tsOfflineThreshold = 86400 * 100; // seconds 10days
|
||||
int32_t tsMnodeEqualVnodeNum = 4;
|
||||
int32_t tsFlowCtrl = 1;
|
||||
int8_t tsEnableFlowCtrl = 1;
|
||||
int8_t tsEnableSlaveQuery = 1;
|
||||
int8_t tsEnableAdjustMaster = 1;
|
||||
|
||||
// restful
|
||||
int32_t tsEnableHttpModule = 1;
|
||||
int8_t tsEnableHttpModule = 1;
|
||||
int32_t tsRestRowLimit = 10240;
|
||||
uint16_t tsHttpPort = 6041; // only tcp, range tcp[6041]
|
||||
int32_t tsHttpCacheSessions = 1000;
|
||||
int32_t tsHttpSessionExpire = 36000;
|
||||
int32_t tsHttpMaxThreads = 2;
|
||||
int32_t tsHttpEnableCompress = 1;
|
||||
int32_t tsHttpEnableRecordSql = 0;
|
||||
int32_t tsTelegrafUseFieldNum = 0;
|
||||
int8_t tsHttpEnableCompress = 1;
|
||||
int8_t tsHttpEnableRecordSql = 0;
|
||||
int8_t tsTelegrafUseFieldNum = 0;
|
||||
|
||||
// mqtt
|
||||
int32_t tsEnableMqttModule = 0; // not finished yet, not started it by default
|
||||
int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default
|
||||
char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org";
|
||||
char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883";
|
||||
char tsMqttUser[TSDB_MQTT_USER_LEN] = {0};
|
||||
|
@ -161,23 +164,24 @@ char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber";
|
|||
char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // #
|
||||
|
||||
// monitor
|
||||
int32_t tsEnableMonitorModule = 1;
|
||||
int8_t tsEnableMonitorModule = 1;
|
||||
char tsMonitorDbName[TSDB_DB_NAME_LEN] = "log";
|
||||
char tsInternalPass[] = "secretkey";
|
||||
int32_t tsMonitorInterval = 30; // seconds
|
||||
|
||||
// stream
|
||||
int32_t tsEnableStream = 1;
|
||||
int8_t tsEnableStream = 1;
|
||||
|
||||
// internal
|
||||
int32_t tsPrintAuth = 0;
|
||||
uint32_t tscEmbedded = 0;
|
||||
char configDir[TSDB_FILENAME_LEN] = {0};
|
||||
char tsVnodeDir[TSDB_FILENAME_LEN] = {0};
|
||||
char tsDnodeDir[TSDB_FILENAME_LEN] = {0};
|
||||
char tsMnodeDir[TSDB_FILENAME_LEN] = {0};
|
||||
char tsDataDir[TSDB_FILENAME_LEN] = {0};
|
||||
char tsScriptDir[TSDB_FILENAME_LEN] = {0};
|
||||
int8_t tsPrintAuth = 0;
|
||||
int8_t tscEmbedded = 0;
|
||||
char configDir[TSDB_FILENAME_LEN] = {0};
|
||||
char tsVnodeDir[TSDB_FILENAME_LEN] = {0};
|
||||
char tsDnodeDir[TSDB_FILENAME_LEN] = {0};
|
||||
char tsMnodeDir[TSDB_FILENAME_LEN] = {0};
|
||||
char tsDataDir[TSDB_FILENAME_LEN] = {0};
|
||||
char tsScriptDir[TSDB_FILENAME_LEN] = {0};
|
||||
|
||||
int32_t tsDiskCfgNum = 0;
|
||||
|
||||
#ifndef _STORAGE
|
||||
|
@ -204,8 +208,8 @@ float tsTotalTmpDirGB = 0;
|
|||
float tsTotalDataDirGB = 0;
|
||||
float tsAvailTmpDirectorySpace = 0;
|
||||
float tsAvailDataDirGB = 0;
|
||||
float tsReservedTmpDirectorySpace = 0.1f;
|
||||
float tsMinimalDataDirGB = 0.5f;
|
||||
float tsReservedTmpDirectorySpace = 1.0f;
|
||||
float tsMinimalDataDirGB = 1.0f;
|
||||
int32_t tsTotalMemoryMB = 0;
|
||||
int32_t tsVersion = 0;
|
||||
|
||||
|
@ -215,13 +219,13 @@ int32_t mDebugFlag = 131;
|
|||
int32_t sdbDebugFlag = 131;
|
||||
int32_t dDebugFlag = 135;
|
||||
int32_t vDebugFlag = 135;
|
||||
uint32_t cDebugFlag = 131;
|
||||
int32_t cDebugFlag = 131;
|
||||
int32_t jniDebugFlag = 131;
|
||||
int32_t odbcDebugFlag = 131;
|
||||
int32_t httpDebugFlag = 131;
|
||||
int32_t mqttDebugFlag = 131;
|
||||
int32_t monDebugFlag = 131;
|
||||
uint32_t qDebugFlag = 131;
|
||||
int32_t qDebugFlag = 131;
|
||||
int32_t rpcDebugFlag = 131;
|
||||
int32_t uDebugFlag = 131;
|
||||
int32_t debugFlag = 0;
|
||||
|
@@ -282,12 +286,16 @@ bool taosCfgDynamicOptions(char *msg) {
  for (int32_t i = 0; i < tsGlobalConfigNum; ++i) {
    SGlobalCfg *cfg = tsGlobalConfig + i;
    //if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
    if (cfg->valType != TAOS_CFG_VTYPE_INT32) continue;
    if (cfg->valType != TAOS_CFG_VTYPE_INT32 && cfg->valType != TAOS_CFG_VTYPE_INT8) continue;

    int32_t cfgLen = (int32_t)strlen(cfg->option);
    if (cfgLen != olen) continue;
    if (strncasecmp(option, cfg->option, olen) != 0) continue;
    *((int32_t *)cfg->ptr) = vint;
    if (cfg->valType != TAOS_CFG_VTYPE_INT32) {
      *((int32_t *)cfg->ptr) = vint;
    } else {
      *((int8_t *)cfg->ptr) = (int8_t)vint;
    }

    if (strncasecmp(cfg->option, "monitor", olen) == 0) {
      if (1 == vint) {

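The hunk above widens dynamic-option handling to accept TAOS_CFG_VTYPE_INT8 switches alongside the existing INT32 options, so that the parsed value is stored through a pointer of the option's declared width. A minimal standalone sketch of that idea under that assumption; the enum values and helper below are illustrative only and not part of the patch:

#include <stdint.h>

/* Illustrative stand-ins for the config value types used above. */
enum { CFG_VTYPE_INT32, CFG_VTYPE_INT8 };

/* Write a parsed option value through the pointer that matches its declared width,
 * so a 1-byte on/off switch (e.g. monitor, balance) is not clobbered by a 4-byte store. */
static void storeCfgValue(void *ptr, int valType, int32_t vint) {
  if (valType == CFG_VTYPE_INT32) {
    *(int32_t *)ptr = vint;           /* 4-byte option, e.g. offlineThreshold */
  } else {
    *(int8_t *)ptr = (int8_t)vint;    /* 1-byte switch, e.g. monitor */
  }
}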
@ -508,7 +516,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "vnodeBak";
|
||||
cfg.ptr = &tsEnableVnodeBak;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -518,7 +526,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "telemetryReporting";
|
||||
cfg.ptr = &tsEnableTelemetryReporting;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -528,7 +536,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "balance";
|
||||
cfg.ptr = &tsEnableBalance;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -549,7 +557,7 @@ static void doInitGlobalConfig(void) {
|
|||
// 0-any; 1-mnode; 2-vnode
|
||||
cfg.option = "role";
|
||||
cfg.ptr = &tsAlternativeRole;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 2;
|
||||
|
@ -582,7 +590,7 @@ static void doInitGlobalConfig(void) {
|
|||
cfg.ptr = &tsOfflineThreshold;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 5;
|
||||
cfg.minValue = 3;
|
||||
cfg.maxValue = 7200000;
|
||||
cfg.ptrLength = 0;
|
||||
cfg.unitType = TAOS_CFG_UTYPE_SECOND;
|
||||
|
@ -851,7 +859,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "update";
|
||||
cfg.ptr = &tsUpdate;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = TSDB_MIN_DB_UPDATE;
|
||||
cfg.maxValue = TSDB_MAX_DB_UPDATE;
|
||||
|
@ -941,7 +949,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "keepColumnName";
|
||||
cfg.ptr = &tsKeepOriginalColumnName;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -1044,8 +1052,28 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
// module configs
|
||||
cfg.option = "flowctrl";
|
||||
cfg.ptr = &tsFlowCtrl;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.ptr = &tsEnableFlowCtrl;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
cfg.ptrLength = 0;
|
||||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
cfg.option = "slaveQuery";
|
||||
cfg.ptr = &tsEnableSlaveQuery;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
cfg.ptrLength = 0;
|
||||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
cfg.option = "adjustMaster";
|
||||
cfg.ptr = &tsEnableAdjustMaster;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -1055,7 +1083,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "http";
|
||||
cfg.ptr = &tsEnableHttpModule;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -1065,7 +1093,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "mqtt";
|
||||
cfg.ptr = &tsEnableMqttModule;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -1075,7 +1103,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "monitor";
|
||||
cfg.ptr = &tsEnableMonitorModule;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -1085,7 +1113,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "stream";
|
||||
cfg.ptr = &tsEnableStream;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -1095,7 +1123,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "httpEnableRecordSql";
|
||||
cfg.ptr = &tsHttpEnableRecordSql;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -1105,7 +1133,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "telegrafUseFieldNum";
|
||||
cfg.ptr = &tsTelegrafUseFieldNum;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -1156,7 +1184,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "asyncLog";
|
||||
cfg.ptr = &tsAsyncLog;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT16;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -1357,7 +1385,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "enableRecordSql";
|
||||
cfg.ptr = &tsTscEnableRecordSql;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -1367,7 +1395,7 @@ static void doInitGlobalConfig(void) {
|
|||
|
||||
cfg.option = "enableCoreFile";
|
||||
cfg.ptr = &tsEnableCoreFile;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
|
@ -1484,6 +1512,12 @@ int32_t taosCheckGlobalCfg() {
|
|||
tsNumOfCores = 1;
|
||||
}
|
||||
|
||||
if (tsMaxTablePerVnode < tsMinTablePerVnode) {
|
||||
uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)",
|
||||
tsMaxTablePerVnode, tsMinTablePerVnode, tsMinTablePerVnode);
|
||||
tsMaxTablePerVnode = tsMinTablePerVnode;
|
||||
}
|
||||
|
||||
// todo refactor
|
||||
tsVersion = 0;
|
||||
for (int i = 0; i < 10; i++) {
|
||||
|
|
|
@ -349,11 +349,12 @@ CTaosInterface.prototype.useResult = function useResult(result) {
|
|||
return fields;
|
||||
}
|
||||
CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
|
||||
let pblock = ref.ref(ref.ref(ref.NULL)); // equal to our raw data
|
||||
let num_of_rows = this.libtaos.taos_fetch_block(result, pblock)
|
||||
if (num_of_rows == 0) {
|
||||
//let pblock = ref.ref(ref.ref(ref.NULL)); // equal to our raw data
|
||||
let pblock = this.libtaos.taos_fetch_row(result);
|
||||
if (pblock == null) {
|
||||
return {block:null, num_of_rows:0};
|
||||
}
|
||||
|
||||
var fieldL = this.libtaos.taos_fetch_lengths(result);
|
||||
|
||||
let isMicro = (this.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO);
|
||||
|
@ -361,7 +362,6 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
|
|||
var fieldlens = [];
|
||||
|
||||
if (ref.isNull(fieldL) == false) {
|
||||
|
||||
for (let i = 0; i < fields.length; i ++) {
|
||||
let plen = ref.reinterpret(fieldL, 4, i*4);
|
||||
let len = plen.readInt32LE(0);
|
||||
|
|
|
@ -25,6 +25,7 @@ int32_t dnodeInitCfg();
|
|||
void dnodeCleanupCfg();
|
||||
void dnodeUpdateCfg(SDnodeCfg *cfg);
|
||||
int32_t dnodeGetDnodeId();
|
||||
void dnodeGetClusterId(char *clusterId);
|
||||
void dnodeGetCfg(int32_t *dnodeId, char *clusterId);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -51,6 +51,12 @@ int32_t dnodeGetDnodeId() {
|
|||
return dnodeId;
|
||||
}
|
||||
|
||||
void dnodeGetClusterId(char *clusterId) {
|
||||
pthread_mutex_lock(&tsCfgMutex);
|
||||
tstrncpy(clusterId, tsCfg.clusterId, TSDB_CLUSTER_ID_LEN);
|
||||
pthread_mutex_unlock(&tsCfgMutex);
|
||||
}
|
||||
|
||||
void dnodeGetCfg(int32_t *dnodeId, char *clusterId) {
|
||||
pthread_mutex_lock(&tsCfgMutex);
|
||||
*dnodeId = tsCfg.dnodeId;
|
||||
|
|
|
@ -16,9 +16,7 @@
|
|||
#define _DEFAULT_SOURCE
|
||||
#include "os.h"
|
||||
#include "tqueue.h"
|
||||
#include "twal.h"
|
||||
#include "mnode.h"
|
||||
#include "dnodeVMgmt.h"
|
||||
#include "dnodeMInfos.h"
|
||||
#include "dnodeMRead.h"
|
||||
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
#include "ttimer.h"
|
||||
#include "tqueue.h"
|
||||
#include "mnode.h"
|
||||
#include "dnodeVMgmt.h"
|
||||
#include "dnodeMInfos.h"
|
||||
#include "dnodeMWrite.h"
|
||||
|
||||
|
|
|
@ -113,6 +113,7 @@ static void dnodeCleanupTmr() {
|
|||
int32_t dnodeInitSystem() {
|
||||
dnodeSetRunStatus(TSDB_RUN_STATUS_INITIALIZE);
|
||||
tscEmbedded = 1;
|
||||
taosIgnSIGPIPE();
|
||||
taosBlockSIGPIPE();
|
||||
taosResolveCRC();
|
||||
taosInitGlobalCfg();
|
||||
|
@ -120,7 +121,6 @@ int32_t dnodeInitSystem() {
|
|||
taosSetCoreDump();
|
||||
taosInitNotes();
|
||||
dnodeInitTmr();
|
||||
signal(SIGPIPE, SIG_IGN);
|
||||
|
||||
if (dnodeCreateDir(tsLogDir) < 0) {
|
||||
printf("failed to create dir: %s, reason: %s\n", tsLogDir, strerror(errno));
|
||||
|
|
|
@ -195,7 +195,7 @@ static void addRuntimeInfo(SBufferWriter* bw) {
|
|||
|
||||
static void sendTelemetryReport() {
|
||||
char buf[128];
|
||||
uint32_t ip = taosGetIpFromFqdn(TELEMETRY_SERVER);
|
||||
uint32_t ip = taosGetIpv4FromFqdn(TELEMETRY_SERVER);
|
||||
if (ip == 0xffffffff) {
|
||||
dTrace("failed to get IP address of " TELEMETRY_SERVER ", reason:%s", strerror(errno));
|
||||
return;
|
||||
|
|
|
@ -129,7 +129,8 @@ static void *dnodeProcessMgmtQueue(void *wparam) {
|
|||
static SCreateVnodeMsg* dnodeParseVnodeMsg(SRpcMsg *rpcMsg) {
|
||||
SCreateVnodeMsg *pCreate = rpcMsg->pCont;
|
||||
pCreate->cfg.vgId = htonl(pCreate->cfg.vgId);
|
||||
pCreate->cfg.cfgVersion = htonl(pCreate->cfg.cfgVersion);
|
||||
pCreate->cfg.dbCfgVersion = htonl(pCreate->cfg.dbCfgVersion);
|
||||
pCreate->cfg.vgCfgVersion = htonl(pCreate->cfg.vgCfgVersion);
|
||||
pCreate->cfg.maxTables = htonl(pCreate->cfg.maxTables);
|
||||
pCreate->cfg.cacheBlockSize = htonl(pCreate->cfg.cacheBlockSize);
|
||||
pCreate->cfg.totalBlocks = htonl(pCreate->cfg.totalBlocks);
|
||||
|
|
|
@ -54,6 +54,7 @@ void dnodeCleanupVRead() {
|
|||
void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
|
||||
int32_t queuedMsgNum = 0;
|
||||
int32_t leftLen = pMsg->contLen;
|
||||
int32_t code = TSDB_CODE_VND_INVALID_VGROUP_ID;
|
||||
char * pCont = pMsg->pCont;
|
||||
|
||||
while (leftLen > 0) {
|
||||
|
@ -64,7 +65,7 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
|
|||
assert(pHead->contLen > 0);
|
||||
void *pVnode = vnodeAcquire(pHead->vgId);
|
||||
if (pVnode != NULL) {
|
||||
int32_t code = vnodeWriteToRQueue(pVnode, pCont, pHead->contLen, TAOS_QTYPE_RPC, pMsg);
|
||||
code = vnodeWriteToRQueue(pVnode, pCont, pHead->contLen, TAOS_QTYPE_RPC, pMsg);
|
||||
if (code == TSDB_CODE_SUCCESS) queuedMsgNum++;
|
||||
vnodeRelease(pVnode);
|
||||
}
|
||||
|
@ -74,7 +75,7 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
|
|||
}
|
||||
|
||||
if (queuedMsgNum == 0) {
|
||||
SRpcMsg rpcRsp = {.handle = pMsg->handle, .code = TSDB_CODE_VND_INVALID_VGROUP_ID};
|
||||
SRpcMsg rpcRsp = {.handle = pMsg->handle, .code = code};
|
||||
rpcSendResponse(&rpcRsp);
|
||||
}
|
||||
|
||||
|
|
|
@ -188,6 +188,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
|
|||
int32_t numOfMsgs;
|
||||
int32_t qtype;
|
||||
|
||||
taosBlockSIGPIPE();
|
||||
dDebug("dnode vwrite worker:%d is running", pWorker->workerId);
|
||||
|
||||
while (1) {
|
||||
|
|
|
@ -245,12 +245,11 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
|
|||
pStatus->lastReboot = htonl(tsRebootTime);
|
||||
pStatus->numOfCores = htons((uint16_t) tsNumOfCores);
|
||||
pStatus->diskAvailable = tsAvailDataDirGB;
|
||||
pStatus->alternativeRole = (uint8_t) tsAlternativeRole;
|
||||
pStatus->alternativeRole = tsAlternativeRole;
|
||||
tstrncpy(pStatus->dnodeEp, tsLocalEp, TSDB_EP_LEN);
|
||||
|
||||
// fill cluster cfg parameters
|
||||
pStatus->clusterCfg.numOfMnodes = htonl(tsNumOfMnodes);
|
||||
pStatus->clusterCfg.enableBalance = htonl(tsEnableBalance);
|
||||
pStatus->clusterCfg.mnodeEqualVnodeNum = htonl(tsMnodeEqualVnodeNum);
|
||||
pStatus->clusterCfg.offlineThreshold = htonl(tsOfflineThreshold);
|
||||
pStatus->clusterCfg.statusInterval = htonl(tsStatusInterval);
|
||||
|
@ -262,8 +261,13 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
|
|||
char timestr[32] = "1970-01-01 00:00:00.00";
|
||||
(void)taosParseTime(timestr, &pStatus->clusterCfg.checkTime, strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0);
|
||||
tstrncpy(pStatus->clusterCfg.locale, tsLocale, TSDB_LOCALE_LEN);
|
||||
tstrncpy(pStatus->clusterCfg.charset, tsCharset, TSDB_LOCALE_LEN);
|
||||
|
||||
tstrncpy(pStatus->clusterCfg.charset, tsCharset, TSDB_LOCALE_LEN);
|
||||
|
||||
pStatus->clusterCfg.enableBalance = tsEnableBalance;
|
||||
pStatus->clusterCfg.flowCtrl = tsEnableFlowCtrl;
|
||||
pStatus->clusterCfg.slaveQuery = tsEnableSlaveQuery;
|
||||
pStatus->clusterCfg.adjustMaster = tsEnableAdjustMaster;
|
||||
|
||||
vnodeBuildStatusMsg(pStatus);
|
||||
contLen = sizeof(SStatusMsg) + pStatus->openVnodes * sizeof(SVnodeLoad);
|
||||
pStatus->openVnodes = htons(pStatus->openVnodes);
|
||||
|
|
|
@ -36,6 +36,8 @@ bool dnodeIsMasterEp(char *ep);
|
|||
void dnodeGetEpSetForPeer(SRpcEpSet *epSet);
|
||||
void dnodeGetEpSetForShell(SRpcEpSet *epSet);
|
||||
int32_t dnodeGetDnodeId();
|
||||
void dnodeGetClusterId(char *clusterId);
|
||||
|
||||
void dnodeUpdateEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port);
|
||||
bool dnodeCheckEpChanged(int32_t dnodeId, char *epstr);
|
||||
bool dnodeStartMnode(SMInfos *pMinfos);
|
||||
|
|
|
@@ -369,6 +369,10 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf
#define TSDB_MAX_DB_UPDATE 1
#define TSDB_DEFAULT_DB_UPDATE_OPTION 0

#define TSDB_MIN_DB_CACHE_LAST_ROW 0
#define TSDB_MAX_DB_CACHE_LAST_ROW 1
#define TSDB_DEFAULT_CACHE_LAST_ROW 0

#define TSDB_MIN_FSYNC_PERIOD 0
#define TSDB_MAX_FSYNC_PERIOD 180000 // millisecond
#define TSDB_DEFAULT_FSYNC_PERIOD 3000 // three second

@ -324,6 +324,7 @@ typedef struct {
|
|||
typedef struct {
|
||||
char acctId[TSDB_ACCT_LEN];
|
||||
char serverVersion[TSDB_VERSION_LEN];
|
||||
char clusterId[TSDB_CLUSTER_ID_LEN];
|
||||
int8_t writeAuth;
|
||||
int8_t superAuth;
|
||||
int8_t reserved1;
|
||||
|
@ -518,14 +519,15 @@ typedef struct SRetrieveTableRsp {
|
|||
|
||||
typedef struct {
|
||||
int32_t vgId;
|
||||
int32_t cfgVersion;
|
||||
int32_t dbCfgVersion;
|
||||
int64_t totalStorage;
|
||||
int64_t compStorage;
|
||||
int64_t pointsWritten;
|
||||
uint8_t status;
|
||||
uint8_t role;
|
||||
uint8_t replica;
|
||||
uint8_t reserved[5];
|
||||
uint8_t reserved;
|
||||
int32_t vgCfgVersion;
|
||||
} SVnodeLoad;
|
||||
|
||||
typedef struct {
|
||||
|
@ -548,7 +550,8 @@ typedef struct {
|
|||
int8_t quorum;
|
||||
int8_t ignoreExist;
|
||||
int8_t update;
|
||||
int8_t reserve[9];
|
||||
int8_t cacheLastRow;
|
||||
int8_t reserve[8];
|
||||
} SCreateDbMsg, SAlterDbMsg;
|
||||
|
||||
typedef struct {
|
||||
|
@ -603,7 +606,6 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
int32_t numOfMnodes; // tsNumOfMnodes
|
||||
int32_t enableBalance; // tsEnableBalance
|
||||
int32_t mnodeEqualVnodeNum; // tsMnodeEqualVnodeNum
|
||||
int32_t offlineThreshold; // tsOfflineThreshold
|
||||
int32_t statusInterval; // tsStatusInterval
|
||||
|
@ -614,6 +616,11 @@ typedef struct {
|
|||
int64_t checkTime; // 1970-01-01 00:00:00.000
|
||||
char locale[TSDB_LOCALE_LEN]; // tsLocale
|
||||
char charset[TSDB_LOCALE_LEN]; // tsCharset
|
||||
int8_t enableBalance; // tsEnableBalance
|
||||
int8_t flowCtrl;
|
||||
int8_t slaveQuery;
|
||||
int8_t adjustMaster;
|
||||
int8_t reserved[4];
|
||||
} SClusterCfg;
|
||||
|
||||
typedef struct {
|
||||
|
@ -641,7 +648,7 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
uint32_t vgId;
|
||||
int32_t cfgVersion;
|
||||
int32_t dbCfgVersion;
|
||||
int32_t maxTables;
|
||||
int32_t cacheBlockSize;
|
||||
int32_t totalBlocks;
|
||||
|
@ -660,7 +667,9 @@ typedef struct {
|
|||
int8_t wals;
|
||||
int8_t quorum;
|
||||
int8_t update;
|
||||
int8_t reserved[15];
|
||||
int8_t cacheLastRow;
|
||||
int32_t vgCfgVersion;
|
||||
int8_t reserved[10];
|
||||
} SVnodeCfg;
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -66,6 +66,7 @@ typedef struct {
|
|||
int8_t precision;
|
||||
int8_t compression;
|
||||
int8_t update;
|
||||
int8_t cacheLastRow;
|
||||
} STsdbCfg;
|
||||
|
||||
// --------- TSDB REPOSITORY USAGE STATISTICS
|
||||
|
@ -119,7 +120,7 @@ STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg);
|
|||
int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg);
|
||||
int tsdbDropTable(TSDB_REPO_T *pRepo, STableId tableId);
|
||||
int tsdbUpdateTableTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg);
|
||||
TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid);
|
||||
// TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid);
|
||||
|
||||
uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size);
|
||||
|
||||
|
|
|
@ -114,114 +114,115 @@
|
|||
#define TK_COMP 96
|
||||
#define TK_PRECISION 97
|
||||
#define TK_UPDATE 98
|
||||
#define TK_LP 99
|
||||
#define TK_RP 100
|
||||
#define TK_TAGS 101
|
||||
#define TK_USING 102
|
||||
#define TK_AS 103
|
||||
#define TK_COMMA 104
|
||||
#define TK_NULL 105
|
||||
#define TK_SELECT 106
|
||||
#define TK_UNION 107
|
||||
#define TK_ALL 108
|
||||
#define TK_FROM 109
|
||||
#define TK_VARIABLE 110
|
||||
#define TK_INTERVAL 111
|
||||
#define TK_FILL 112
|
||||
#define TK_SLIDING 113
|
||||
#define TK_ORDER 114
|
||||
#define TK_BY 115
|
||||
#define TK_ASC 116
|
||||
#define TK_DESC 117
|
||||
#define TK_GROUP 118
|
||||
#define TK_HAVING 119
|
||||
#define TK_LIMIT 120
|
||||
#define TK_OFFSET 121
|
||||
#define TK_SLIMIT 122
|
||||
#define TK_SOFFSET 123
|
||||
#define TK_WHERE 124
|
||||
#define TK_NOW 125
|
||||
#define TK_RESET 126
|
||||
#define TK_QUERY 127
|
||||
#define TK_ADD 128
|
||||
#define TK_COLUMN 129
|
||||
#define TK_TAG 130
|
||||
#define TK_CHANGE 131
|
||||
#define TK_SET 132
|
||||
#define TK_KILL 133
|
||||
#define TK_CONNECTION 134
|
||||
#define TK_STREAM 135
|
||||
#define TK_COLON 136
|
||||
#define TK_ABORT 137
|
||||
#define TK_AFTER 138
|
||||
#define TK_ATTACH 139
|
||||
#define TK_BEFORE 140
|
||||
#define TK_BEGIN 141
|
||||
#define TK_CASCADE 142
|
||||
#define TK_CLUSTER 143
|
||||
#define TK_CONFLICT 144
|
||||
#define TK_COPY 145
|
||||
#define TK_DEFERRED 146
|
||||
#define TK_DELIMITERS 147
|
||||
#define TK_DETACH 148
|
||||
#define TK_EACH 149
|
||||
#define TK_END 150
|
||||
#define TK_EXPLAIN 151
|
||||
#define TK_FAIL 152
|
||||
#define TK_FOR 153
|
||||
#define TK_IGNORE 154
|
||||
#define TK_IMMEDIATE 155
|
||||
#define TK_INITIALLY 156
|
||||
#define TK_INSTEAD 157
|
||||
#define TK_MATCH 158
|
||||
#define TK_KEY 159
|
||||
#define TK_OF 160
|
||||
#define TK_RAISE 161
|
||||
#define TK_REPLACE 162
|
||||
#define TK_RESTRICT 163
|
||||
#define TK_ROW 164
|
||||
#define TK_STATEMENT 165
|
||||
#define TK_TRIGGER 166
|
||||
#define TK_VIEW 167
|
||||
#define TK_COUNT 168
|
||||
#define TK_SUM 169
|
||||
#define TK_AVG 170
|
||||
#define TK_MIN 171
|
||||
#define TK_MAX 172
|
||||
#define TK_FIRST 173
|
||||
#define TK_LAST 174
|
||||
#define TK_TOP 175
|
||||
#define TK_BOTTOM 176
|
||||
#define TK_STDDEV 177
|
||||
#define TK_PERCENTILE 178
|
||||
#define TK_APERCENTILE 179
|
||||
#define TK_LEASTSQUARES 180
|
||||
#define TK_HISTOGRAM 181
|
||||
#define TK_DIFF 182
|
||||
#define TK_SPREAD 183
|
||||
#define TK_TWA 184
|
||||
#define TK_INTERP 185
|
||||
#define TK_LAST_ROW 186
|
||||
#define TK_RATE 187
|
||||
#define TK_IRATE 188
|
||||
#define TK_SUM_RATE 189
|
||||
#define TK_SUM_IRATE 190
|
||||
#define TK_AVG_RATE 191
|
||||
#define TK_AVG_IRATE 192
|
||||
#define TK_TBID 193
|
||||
#define TK_SEMI 194
|
||||
#define TK_NONE 195
|
||||
#define TK_PREV 196
|
||||
#define TK_LINEAR 197
|
||||
#define TK_IMPORT 198
|
||||
#define TK_METRIC 199
|
||||
#define TK_TBNAME 200
|
||||
#define TK_JOIN 201
|
||||
#define TK_METRICS 202
|
||||
#define TK_STABLE 203
|
||||
#define TK_INSERT 204
|
||||
#define TK_INTO 205
|
||||
#define TK_VALUES 206
|
||||
#define TK_CACHELAST 99
|
||||
#define TK_LP 100
|
||||
#define TK_RP 101
|
||||
#define TK_TAGS 102
|
||||
#define TK_USING 103
|
||||
#define TK_AS 104
|
||||
#define TK_COMMA 105
|
||||
#define TK_NULL 106
|
||||
#define TK_SELECT 107
|
||||
#define TK_UNION 108
|
||||
#define TK_ALL 109
|
||||
#define TK_FROM 110
|
||||
#define TK_VARIABLE 111
|
||||
#define TK_INTERVAL 112
|
||||
#define TK_FILL 113
|
||||
#define TK_SLIDING 114
|
||||
#define TK_ORDER 115
|
||||
#define TK_BY 116
|
||||
#define TK_ASC 117
|
||||
#define TK_DESC 118
|
||||
#define TK_GROUP 119
|
||||
#define TK_HAVING 120
|
||||
#define TK_LIMIT 121
|
||||
#define TK_OFFSET 122
|
||||
#define TK_SLIMIT 123
|
||||
#define TK_SOFFSET 124
|
||||
#define TK_WHERE 125
|
||||
#define TK_NOW 126
|
||||
#define TK_RESET 127
|
||||
#define TK_QUERY 128
|
||||
#define TK_ADD 129
|
||||
#define TK_COLUMN 130
|
||||
#define TK_TAG 131
|
||||
#define TK_CHANGE 132
|
||||
#define TK_SET 133
|
||||
#define TK_KILL 134
|
||||
#define TK_CONNECTION 135
|
||||
#define TK_STREAM 136
|
||||
#define TK_COLON 137
|
||||
#define TK_ABORT 138
|
||||
#define TK_AFTER 139
|
||||
#define TK_ATTACH 140
|
||||
#define TK_BEFORE 141
|
||||
#define TK_BEGIN 142
|
||||
#define TK_CASCADE 143
|
||||
#define TK_CLUSTER 144
|
||||
#define TK_CONFLICT 145
|
||||
#define TK_COPY 146
|
||||
#define TK_DEFERRED 147
|
||||
#define TK_DELIMITERS 148
|
||||
#define TK_DETACH 149
|
||||
#define TK_EACH 150
|
||||
#define TK_END 151
|
||||
#define TK_EXPLAIN 152
|
||||
#define TK_FAIL 153
|
||||
#define TK_FOR 154
|
||||
#define TK_IGNORE 155
|
||||
#define TK_IMMEDIATE 156
|
||||
#define TK_INITIALLY 157
|
||||
#define TK_INSTEAD 158
|
||||
#define TK_MATCH 159
|
||||
#define TK_KEY 160
|
||||
#define TK_OF 161
|
||||
#define TK_RAISE 162
|
||||
#define TK_REPLACE 163
|
||||
#define TK_RESTRICT 164
|
||||
#define TK_ROW 165
|
||||
#define TK_STATEMENT 166
|
||||
#define TK_TRIGGER 167
|
||||
#define TK_VIEW 168
|
||||
#define TK_COUNT 169
|
||||
#define TK_SUM 170
|
||||
#define TK_AVG 171
|
||||
#define TK_MIN 172
|
||||
#define TK_MAX 173
|
||||
#define TK_FIRST 174
|
||||
#define TK_LAST 175
|
||||
#define TK_TOP 176
|
||||
#define TK_BOTTOM 177
|
||||
#define TK_STDDEV 178
|
||||
#define TK_PERCENTILE 179
|
||||
#define TK_APERCENTILE 180
|
||||
#define TK_LEASTSQUARES 181
|
||||
#define TK_HISTOGRAM 182
|
||||
#define TK_DIFF 183
|
||||
#define TK_SPREAD 184
|
||||
#define TK_TWA 185
|
||||
#define TK_INTERP 186
|
||||
#define TK_LAST_ROW 187
|
||||
#define TK_RATE 188
|
||||
#define TK_IRATE 189
|
||||
#define TK_SUM_RATE 190
|
||||
#define TK_SUM_IRATE 191
|
||||
#define TK_AVG_RATE 192
|
||||
#define TK_AVG_IRATE 193
|
||||
#define TK_TBID 194
|
||||
#define TK_SEMI 195
|
||||
#define TK_NONE 196
|
||||
#define TK_PREV 197
|
||||
#define TK_LINEAR 198
|
||||
#define TK_IMPORT 199
|
||||
#define TK_METRIC 200
|
||||
#define TK_TBNAME 201
|
||||
#define TK_JOIN 202
|
||||
#define TK_METRICS 203
|
||||
#define TK_STABLE 204
|
||||
#define TK_INSERT 205
|
||||
#define TK_INTO 206
|
||||
#define TK_VALUES 207
|
||||
|
||||
|
||||
#define TK_SPACE 300
|
||||
|
|
|
@ -28,7 +28,7 @@ extern "C" {
|
|||
default: \
|
||||
(_v) = (_finalType)GET_INT32_VAL(_data); \
|
||||
break; \
|
||||
};
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@@ -302,14 +302,12 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {

  st = taosGetTimestampUs();

  TAOS_RES* tmpSql = NULL;
  TAOS_RES* pSql = taos_query_h(con, command, &tmpSql);
  TAOS_RES* pSql = taos_query_h(con, command, &result);
  if (taos_errno(pSql)) {
    taos_error(pSql, st);
    return;
  }

  atomic_store_64(&result, ((SSqlObj*)tmpSql)->self);
  int64_t oresult = atomic_load_64(&result);

  if (regex_match(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) {

@ -95,7 +95,7 @@ typedef struct DemoArguments {
|
|||
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
|
||||
#endif
|
||||
{0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
|
||||
{0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 3},
|
||||
{0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 3},
|
||||
{0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3},
|
||||
{0, 's', "sql file", 0, "The select sql file.", 3},
|
||||
{0, 'M', 0, 0, "Use metric flag.", 13},
|
||||
|
@ -205,10 +205,10 @@ typedef struct DemoArguments {
|
|||
arguments->tb_prefix = arg;
|
||||
break;
|
||||
case 'M':
|
||||
arguments->use_metric = false;
|
||||
arguments->use_metric = true;
|
||||
break;
|
||||
case 'x':
|
||||
arguments->insert_only = false;
|
||||
arguments->insert_only = true;
|
||||
break;
|
||||
case 'c':
|
||||
if (wordexp(arg, &full_path, 0) != 0) {
|
||||
|
@ -406,9 +406,9 @@ typedef struct DemoArguments {
|
|||
} else if (strcmp(argv[i], "-m") == 0) {
|
||||
arguments->tb_prefix = argv[++i];
|
||||
} else if (strcmp(argv[i], "-M") == 0) {
|
||||
arguments->use_metric = false;
|
||||
arguments->use_metric = true;
|
||||
} else if (strcmp(argv[i], "-x") == 0) {
|
||||
arguments->insert_only = false;
|
||||
arguments->insert_only = true;
|
||||
} else if (strcmp(argv[i], "-c") == 0) {
|
||||
strcpy(configDir, argv[++i]);
|
||||
} else if (strcmp(argv[i], "-O") == 0) {
|
||||
|
@ -476,6 +476,14 @@ typedef struct {
|
|||
int notFinished;
|
||||
tsem_t lock_sem;
|
||||
int counter;
|
||||
|
||||
// insert delay statitics
|
||||
int64_t cntDelay;
|
||||
int64_t totalDelay;
|
||||
int64_t avgDelay;
|
||||
int64_t maxDelay;
|
||||
int64_t minDelay;
|
||||
|
||||
} info;
|
||||
|
||||
typedef struct {
|
||||
|
@ -575,7 +583,7 @@ int main(int argc, char *argv[]) {
|
|||
arguments.num_of_DPT = 100000;
|
||||
arguments.num_of_RPR = 1000;
|
||||
arguments.use_metric = true;
|
||||
arguments.insert_only = true;
|
||||
arguments.insert_only = false;
|
||||
// end change
|
||||
|
||||
parse_args(argc, argv, &arguments);
|
||||
|
@ -739,6 +747,9 @@ int main(int argc, char *argv[]) {
|
|||
printf("Inserting data......\n");
|
||||
pthread_t *pids = malloc(threads * sizeof(pthread_t));
|
||||
info *infos = malloc(threads * sizeof(info));
|
||||
|
||||
memset(pids, 0, threads * sizeof(pthread_t));
|
||||
memset(infos, 0, threads * sizeof(info));
|
||||
|
||||
int a = ntables / threads;
|
||||
if (a < 1) {
|
||||
|
@ -768,6 +779,7 @@ int main(int argc, char *argv[]) {
|
|||
t_info->end_table_id = i < b ? last + a : last + a - 1;
|
||||
last = t_info->end_table_id + 1;
|
||||
t_info->counter = 0;
|
||||
t_info->minDelay = INT16_MAX;
|
||||
|
||||
tsem_init(&(t_info->mutex_sem), 0, 1);
|
||||
t_info->notFinished = t_info->end_table_id - t_info->start_table_id + 1;
|
||||
|
@ -799,12 +811,29 @@ int main(int argc, char *argv[]) {
|
|||
t, (int64_t)ntables * nrecords_per_table, nrecords_per_request,
|
||||
(int64_t)ntables * nrecords_per_table / t);
|
||||
|
||||
int64_t totalDelay = 0;
|
||||
int64_t maxDelay = 0;
|
||||
int64_t minDelay = INT16_MAX;
|
||||
int64_t cntDelay = 0;
|
||||
double avgDelay = 0;
|
||||
for (int i = 0; i < threads; i++) {
|
||||
info *t_info = infos + i;
|
||||
taos_close(t_info->taos);
|
||||
tsem_destroy(&(t_info->mutex_sem));
|
||||
tsem_destroy(&(t_info->lock_sem));
|
||||
|
||||
totalDelay += t_info->totalDelay;
|
||||
cntDelay += t_info->cntDelay;
|
||||
if (t_info->maxDelay > maxDelay) maxDelay = t_info->maxDelay;
|
||||
if (t_info->minDelay < minDelay) minDelay = t_info->minDelay;
|
||||
}
|
||||
avgDelay = (double)totalDelay / cntDelay;
|
||||
|
||||
fprintf(fp, "insert delay, avg:%10.6fms, max: %10.6fms, min: %10.6fms\n\n",
|
||||
avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0);
|
||||
|
||||
printf("insert delay, avg: %10.6fms, max: %10.6fms, min: %10.6fms\n\n",
|
||||
avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0);
|
||||
|
||||
free(pids);
|
||||
free(infos);
|
||||
|
@ -859,7 +888,7 @@ int main(int argc, char *argv[]) {
|
|||
}
|
||||
|
||||
|
||||
if (!insert_only) {
|
||||
if (false == insert_only) {
|
||||
// query data
|
||||
pthread_t read_id;
|
||||
info *rInfo = malloc(sizeof(info));
|
||||
|
@ -998,7 +1027,7 @@ void * createTable(void *sarg)
|
|||
/* Create all the tables; */
|
||||
printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
|
||||
for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
|
||||
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
|
||||
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s);", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
|
||||
queryDB(winfo->taos, command);
|
||||
}
|
||||
} else {
|
||||
|
@@ -1204,6 +1233,41 @@ void *readMetric(void *sarg) {
  return NULL;
}

static int queryDbExec(TAOS *taos, char *command, int type) {
  int i;
  TAOS_RES *res = NULL;
  int32_t code = -1;

  for (i = 0; i < 5; i++) {
    if (NULL != res) {
      taos_free_result(res);
      res = NULL;
    }

    res = taos_query(taos, command);
    code = taos_errno(res);
    if (0 == code) {
      break;
    }
  }

  if (code != 0) {
    fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(res));
    taos_free_result(res);
    //taos_close(taos);
    return -1;
  }

  if (1 == type) {
    int affectedRows = taos_affected_rows(res);
    taos_free_result(res);
    return affectedRows;
  }

  taos_free_result(res);
  return 0;
}

void queryDB(TAOS *taos, char *command) {
  int i;
  TAOS_RES *pSql = NULL;

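The new queryDbExec helper retries a statement up to five times and, when type == 1, returns the affected-row count rather than 0/-1; the syncWrite hunk below calls it exactly that way to time each insert batch. A small illustrative call, where the TAOS* handle and the table test.t0 are hypothetical:

/* Sketch only: 'taos' is assumed to be a connected TAOS* handle. */
int affectedRows = queryDbExec(taos, "insert into test.t0 values (now, 1)", 1);
if (affectedRows < 0) {
  /* all five attempts failed; queryDbExec already printed the reason to stderr */
}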
@@ -1273,7 +1337,21 @@ void *syncWrite(void *sarg) {
      }

      /* puts(buffer); */
      queryDB(winfo->taos, buffer);
      int64_t startTs;
      int64_t endTs;
      startTs = taosGetTimestampUs();
      //queryDB(winfo->taos, buffer);
      int affectedRows = queryDbExec(winfo->taos, buffer, 1);

      if (0 <= affectedRows){
        endTs = taosGetTimestampUs();
        int64_t delay = endTs - startTs;
        if (delay > winfo->maxDelay) winfo->maxDelay = delay;
        if (delay < winfo->minDelay) winfo->minDelay = delay;
        winfo->cntDelay++;
        winfo->totalDelay += delay;
        //winfo->avgDelay = (double)winfo->totalDelay / winfo->cntDelay;
      }

      if (tID == winfo->end_table_id) {
        i = inserted;

@ -332,6 +332,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
|||
break;
|
||||
case 'N':
|
||||
arguments->data_batch = atoi(arg);
|
||||
if (arguments->data_batch >= INT16_MAX) {
|
||||
arguments->data_batch = INT16_MAX - 1;
|
||||
}
|
||||
break;
|
||||
case 'L':
|
||||
{
|
||||
|
|
|
@ -144,7 +144,8 @@ typedef struct SVgObj {
|
|||
int8_t status;
|
||||
int8_t reserved0[4];
|
||||
SVnodeGid vnodeGid[TSDB_MAX_REPLICA];
|
||||
int8_t reserved1[12];
|
||||
int32_t vgCfgVersion;
|
||||
int8_t reserved1[8];
|
||||
int8_t updateEnd[4];
|
||||
int32_t refCount;
|
||||
int32_t numOfTables;
|
||||
|
@ -173,7 +174,8 @@ typedef struct {
|
|||
int8_t replications;
|
||||
int8_t quorum;
|
||||
int8_t update;
|
||||
int8_t reserved[11];
|
||||
int8_t cacheLastRow;
|
||||
int8_t reserved[10];
|
||||
} SDbCfg;
|
||||
|
||||
typedef struct SDbObj {
|
||||
|
@ -181,7 +183,7 @@ typedef struct SDbObj {
|
|||
int8_t reserved0[4];
|
||||
char acct[TSDB_USER_LEN];
|
||||
int64_t createdTime;
|
||||
int32_t cfgVersion;
|
||||
int32_t dbCfgVersion;
|
||||
SDbCfg cfg;
|
||||
int8_t status;
|
||||
int8_t reserved1[11];
|
||||
|
|
|
@ -52,15 +52,18 @@ typedef enum EDnodeOfflineReason {
|
|||
TAOS_DN_OFF_TIME_ZONE_NOT_MATCH,
|
||||
TAOS_DN_OFF_LOCALE_NOT_MATCH,
|
||||
TAOS_DN_OFF_CHARSET_NOT_MATCH,
|
||||
TAOS_DN_OFF_FLOW_CTRL_NOT_MATCH,
|
||||
TAOS_DN_OFF_SLAVE_QUERY_NOT_MATCH,
|
||||
TAOS_DN_OFF_ADJUST_MASTER_NOT_MATCH,
|
||||
TAOS_DN_OFF_OTHERS
|
||||
} EDnodeOfflineReason;
|
||||
|
||||
extern char* dnodeStatus[];
|
||||
extern char* dnodeRoles[];
|
||||
|
||||
int32_t mnodeInitDnodes();
|
||||
void mnodeCleanupDnodes();
|
||||
|
||||
char* mnodeGetDnodeStatusStr(int32_t dnodeStatus);
|
||||
void mgmtMonitorDnodeModule();
|
||||
|
||||
int32_t mnodeGetDnodesNum();
|
||||
int32_t mnodeGetOnlinDnodesCpuCoreNum();
|
||||
int32_t mnodeGetOnlineDnodesNum();
|
||||
|
|
|
@ -322,6 +322,11 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
|
|||
return TSDB_CODE_MND_INVALID_DB_OPTION;
|
||||
}
|
||||
|
||||
if (pCfg->cacheLastRow < TSDB_MIN_DB_CACHE_LAST_ROW || pCfg->cacheLastRow > TSDB_MAX_DB_CACHE_LAST_ROW) {
|
||||
mError("invalid db option cacheLastRow:%d valid range: [%d, %d]", pCfg->cacheLastRow, TSDB_MIN_DB_CACHE_LAST_ROW, TSDB_MAX_DB_CACHE_LAST_ROW);
|
||||
return TSDB_CODE_MND_INVALID_DB_OPTION;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -343,6 +348,7 @@ static void mnodeSetDefaultDbCfg(SDbCfg *pCfg) {
|
|||
if (pCfg->replications < 0) pCfg->replications = tsReplications;
|
||||
if (pCfg->quorum < 0) pCfg->quorum = tsQuorum;
|
||||
if (pCfg->update < 0) pCfg->update = tsUpdate;
|
||||
if (pCfg->cacheLastRow < 0) pCfg->cacheLastRow = tsCacheLastRow;
|
||||
}
|
||||
|
||||
static int32_t mnodeCreateDbCb(SMnodeMsg *pMsg, int32_t code) {
|
||||
|
@ -396,7 +402,8 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCreateDbMsg *pCreate, SMnodeMsg *
|
|||
.walLevel = pCreate->walLevel,
|
||||
.replications = pCreate->replications,
|
||||
.quorum = pCreate->quorum,
|
||||
.update = pCreate->update
|
||||
.update = pCreate->update,
|
||||
.cacheLastRow = pCreate->cacheLastRow
|
||||
};
|
||||
|
||||
mnodeSetDefaultDbCfg(&pDb->cfg);
|
||||
|
@ -605,6 +612,12 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
|
|||
strcpy(pSchema[cols].name, "comp");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = 1;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_TINYINT;
|
||||
strcpy(pSchema[cols].name, "cachelast");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
#ifndef __CLOUD_VERSION__
|
||||
}
|
||||
#endif
|
||||
|
@ -750,6 +763,10 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
|
|||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
*(int8_t *)pWrite = pDb->cfg.compression;
|
||||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
*(int8_t *)pWrite = pDb->cfg.cacheLastRow;
|
||||
cols++;
|
||||
#ifndef __CLOUD_VERSION__
|
||||
}
|
||||
#endif
|
||||
|
@ -864,6 +881,7 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SAlterDbMsg *pAlter) {
|
|||
int8_t quorum = pAlter->quorum;
|
||||
int8_t precision = pAlter->precision;
|
||||
int8_t update = pAlter->update;
|
||||
int8_t cacheLastRow = pAlter->cacheLastRow;
|
||||
|
||||
terrno = TSDB_CODE_SUCCESS;
|
||||
|
||||
|
@ -976,6 +994,11 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SAlterDbMsg *pAlter) {
|
|||
#endif
|
||||
}
|
||||
|
||||
if (cacheLastRow >= 0 && cacheLastRow != pDb->cfg.cacheLastRow) {
|
||||
mDebug("db:%s, cacheLastRow:%d change to %d", pDb->name, pDb->cfg.cacheLastRow, cacheLastRow);
|
||||
newCfg.cacheLastRow = cacheLastRow;
|
||||
}
|
||||
|
||||
return newCfg;
|
||||
}
|
||||
|
||||
|
@ -1015,7 +1038,7 @@ static int32_t mnodeAlterDb(SDbObj *pDb, SAlterDbMsg *pAlter, void *pMsg) {
|
|||
|
||||
if (memcmp(&newCfg, &pDb->cfg, sizeof(SDbCfg)) != 0) {
|
||||
pDb->cfg = newCfg;
|
||||
pDb->cfgVersion++;
|
||||
pDb->dbCfgVersion++;
|
||||
SSdbRow row = {
|
||||
.type = SDB_OPER_GLOBAL,
|
||||
.pTable = tsDbSdb,
|
||||
|
|
|
@ -63,7 +63,6 @@ static int32_t mnodeGetVnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
|
|||
static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn);
|
||||
static int32_t mnodeGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
|
||||
static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn);
|
||||
static char* mnodeGetDnodeAlternativeRoleStr(int32_t alternativeRole);
|
||||
static void mnodeUpdateDnodeEps();
|
||||
|
||||
static char* offlineReason[] = {
|
||||
|
@ -376,10 +375,6 @@ static int32_t mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) {
|
|||
mError("\"numOfMnodes\"[%d - %d] cfg parameters inconsistent", clusterCfg->numOfMnodes, htonl(tsNumOfMnodes));
|
||||
return TAOS_DN_OFF_NUM_OF_MNODES_NOT_MATCH;
|
||||
}
|
||||
if (clusterCfg->enableBalance != htonl(tsEnableBalance)) {
|
||||
mError("\"balance\"[%d - %d] cfg parameters inconsistent", clusterCfg->enableBalance, htonl(tsEnableBalance));
|
||||
return TAOS_DN_OFF_ENABLE_BALANCE_NOT_MATCH;
|
||||
}
|
||||
if (clusterCfg->mnodeEqualVnodeNum != htonl(tsMnodeEqualVnodeNum)) {
|
||||
mError("\"mnodeEqualVnodeNum\"[%d - %d] cfg parameters inconsistent", clusterCfg->mnodeEqualVnodeNum,
|
||||
htonl(tsMnodeEqualVnodeNum));
|
||||
|
@ -429,6 +424,23 @@ static int32_t mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) {
|
|||
return TAOS_DN_OFF_CHARSET_NOT_MATCH;
|
||||
}
|
||||
|
||||
if (clusterCfg->enableBalance != tsEnableBalance) {
|
||||
mError("\"balance\"[%d - %d] cfg parameters inconsistent", clusterCfg->enableBalance, tsEnableBalance);
|
||||
return TAOS_DN_OFF_ENABLE_BALANCE_NOT_MATCH;
|
||||
}
|
||||
if (clusterCfg->flowCtrl != tsEnableFlowCtrl) {
|
||||
mError("\"flowCtrl\"[%d - %d] cfg parameters inconsistent", clusterCfg->flowCtrl, tsEnableFlowCtrl);
|
||||
return TAOS_DN_OFF_FLOW_CTRL_NOT_MATCH;
|
||||
}
|
||||
if (clusterCfg->slaveQuery != tsEnableSlaveQuery) {
|
||||
mError("\"slaveQuery\"[%d - %d] cfg parameters inconsistent", clusterCfg->slaveQuery, tsEnableSlaveQuery);
|
||||
return TAOS_DN_OFF_SLAVE_QUERY_NOT_MATCH;
|
||||
}
|
||||
if (clusterCfg->adjustMaster != tsEnableAdjustMaster) {
|
||||
mError("\"adjustMaster\"[%d - %d] cfg parameters inconsistent", clusterCfg->adjustMaster, tsEnableAdjustMaster);
|
||||
return TAOS_DN_OFF_ADJUST_MASTER_NOT_MATCH;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -557,7 +569,8 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
|
|||
for (int32_t j = 0; j < openVnodes; ++j) {
|
||||
SVnodeLoad *pVload = &pStatus->load[j];
|
||||
pVload->vgId = htonl(pVload->vgId);
|
||||
pVload->cfgVersion = htonl(pVload->cfgVersion);
|
||||
pVload->dbCfgVersion = htonl(pVload->dbCfgVersion);
|
||||
pVload->vgCfgVersion = htonl(pVload->vgCfgVersion);
|
||||
|
||||
SVgObj *pVgroup = mnodeGetVgroup(pVload->vgId);
|
||||
if (pVgroup == NULL) {
|
||||
|
@ -833,12 +846,12 @@ static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, vo
|
|||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
char* status = mnodeGetDnodeStatusStr(pDnode->status);
|
||||
char* status = dnodeStatus[pDnode->status];
|
||||
STR_TO_VARSTR(pWrite, status);
|
||||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
char* role = mnodeGetDnodeAlternativeRoleStr(pDnode->alternativeRole);
|
||||
char* role = dnodeRoles[pDnode->alternativeRole];
|
||||
STR_TO_VARSTR(pWrite, role);
|
||||
cols++;
|
||||
|
||||
|
@ -1031,6 +1044,11 @@ static int32_t mnodeRetrieveConfigs(SShowObj *pShow, char *data, int32_t rows, v
|
|||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
switch (cfg->valType) {
|
||||
case TAOS_CFG_VTYPE_INT8:
|
||||
t = snprintf(varDataVal(pWrite), TSDB_CFG_VALUE_LEN, "%d", *((int8_t *)cfg->ptr));
|
||||
varDataSetLen(pWrite, t);
|
||||
numOfRows++;
|
||||
break;
|
||||
case TAOS_CFG_VTYPE_INT16:
|
||||
t = snprintf(varDataVal(pWrite), TSDB_CFG_VALUE_LEN, "%d", *((int16_t *)cfg->ptr));
|
||||
varDataSetLen(pWrite, t);
|
||||
|
@@ -1154,21 +1172,17 @@ static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, vo
return numOfRows;
}

char* mnodeGetDnodeStatusStr(int32_t dnodeStatus) {
switch (dnodeStatus) {
case TAOS_DN_STATUS_OFFLINE: return "offline";
case TAOS_DN_STATUS_DROPPING: return "dropping";
case TAOS_DN_STATUS_BALANCING: return "balancing";
case TAOS_DN_STATUS_READY: return "ready";
default: return "undefined";
}
}
char* dnodeStatus[] = {
"offline",
"dropping",
"balancing",
"ready",
"undefined"
};

static char* mnodeGetDnodeAlternativeRoleStr(int32_t alternativeRole) {
switch (alternativeRole) {
case TAOS_DN_ALTERNATIVE_ROLE_ANY: return "any";
case TAOS_DN_ALTERNATIVE_ROLE_MNODE: return "mnode";
case TAOS_DN_ALTERNATIVE_ROLE_VNODE: return "vnode";
default:return "any";
}
}
char* dnodeRoles[] = {
"any",
"mnode",
"vnode",
"any"
};

@ -377,6 +377,24 @@ static int32_t mnodeCreateMnodeCb(SMnodeMsg *pMsg, int32_t code) {
|
|||
return code;
|
||||
}
|
||||
|
||||
static bool mnodeAllOnline() {
|
||||
void *pIter = NULL;
|
||||
bool allOnline = true;
|
||||
|
||||
while (1) {
|
||||
SMnodeObj *pMnode = NULL;
|
||||
pIter = mnodeGetNextMnode(pIter, &pMnode);
|
||||
if (pMnode == NULL) break;
|
||||
if (pMnode->role != TAOS_SYNC_ROLE_MASTER && pMnode->role != TAOS_SYNC_ROLE_SLAVE) {
|
||||
allOnline = false;
|
||||
mnodeDecMnodeRef(pMnode);
|
||||
}
|
||||
}
|
||||
mnodeCancelGetNextMnode(pIter);
|
||||
|
||||
return allOnline;
|
||||
}
|
||||
|
||||
void mnodeCreateMnode(int32_t dnodeId, char *dnodeEp, bool needConfirm) {
|
||||
SMnodeObj *pMnode = calloc(1, sizeof(SMnodeObj));
|
||||
pMnode->mnodeId = dnodeId;
|
||||
|
@ -389,6 +407,11 @@ void mnodeCreateMnode(int32_t dnodeId, char *dnodeEp, bool needConfirm) {
|
|||
.fpRsp = mnodeCreateMnodeCb
|
||||
};
|
||||
|
||||
if (needConfirm && !mnodeAllOnline()) {
|
||||
mDebug("wait all mnode online then create new mnode");
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (needConfirm) {
|
||||
code = mnodeSendCreateMnodeMsg(dnodeId, dnodeEp);
|
||||
|
|
|
@@ -1081,6 +1081,8 @@ static void *sdbWorkerFp(void *pWorker) {
int32_t qtype;
void * unUsed;

taosBlockSIGPIPE();

while (1) {
int32_t numOfMsgs = taosReadAllQitemsFromQset(tsSdbWQset, tsSdbWQall, &unUsed);
if (numOfMsgs == 0) {

@@ -351,6 +351,8 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {

mnodeGetMnodeEpSetForShell(&pConnectRsp->epSet, false);

dnodeGetClusterId(pConnectRsp->clusterId);

connect_over:
if (code != TSDB_CODE_SUCCESS) {
if (pConnectRsp) rpcFreeCont(pConnectRsp);

@ -256,6 +256,8 @@ SVgObj *mnodeGetVgroup(int32_t vgId) {
|
|||
}
|
||||
|
||||
void mnodeUpdateVgroup(SVgObj *pVgroup) {
|
||||
pVgroup->vgCfgVersion++;
|
||||
|
||||
SSdbRow row = {
|
||||
.type = SDB_OPER_GLOBAL,
|
||||
.pTable = tsVgroupSdb,
|
||||
|
@ -339,10 +341,11 @@ void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVl
|
|||
pVgroup->pointsWritten = htobe64(pVload->pointsWritten);
|
||||
}
|
||||
|
||||
if (pVload->cfgVersion != pVgroup->pDb->cfgVersion || pVload->replica != pVgroup->numOfVnodes) {
|
||||
mError("dnode:%d, vgId:%d, vnode cfgVersion:%d repica:%d not match with mnode cfgVersion:%d replica:%d",
|
||||
pDnode->dnodeId, pVload->vgId, pVload->cfgVersion, pVload->replica, pVgroup->pDb->cfgVersion,
|
||||
pVgroup->numOfVnodes);
|
||||
if (pVload->dbCfgVersion != pVgroup->pDb->dbCfgVersion || pVload->replica != pVgroup->numOfVnodes ||
|
||||
pVload->vgCfgVersion != pVgroup->vgCfgVersion) {
|
||||
mError("dnode:%d, vgId:%d, vnode cfgVersion:%d:%d repica:%d not match with mnode cfgVersion:%d:%d replica:%d",
|
||||
pDnode->dnodeId, pVload->vgId, pVload->dbCfgVersion, pVload->vgCfgVersion, pVload->replica,
|
||||
pVgroup->pDb->dbCfgVersion, pVgroup->vgCfgVersion, pVgroup->numOfVnodes);
|
||||
mnodeSendAlterVgroupMsg(pVgroup);
|
||||
}
|
||||
}
|
||||
|
@ -656,7 +659,7 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
|
|||
|
||||
pShow->bytes[cols] = 4;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_INT;
|
||||
strcpy(pSchema[cols].name, "onlineVnodes");
|
||||
strcpy(pSchema[cols].name, "onlines");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
|
@ -671,13 +674,13 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
|
|||
for (int32_t i = 0; i < pShow->maxReplica; ++i) {
|
||||
pShow->bytes[cols] = 2;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT;
|
||||
snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "v%dDnode", i + 1);
|
||||
snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "v%d_dnode", i + 1);
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = 9 + VARSTR_HEADER_SIZE;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
|
||||
snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "v%dStatus", i + 1);
|
||||
snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "v%d_status", i + 1);
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
}
|
||||
|
@ -840,7 +843,8 @@ static SCreateVnodeMsg *mnodeBuildVnodeMsg(SVgObj *pVgroup) {
|
|||
|
||||
SVnodeCfg *pCfg = &pVnode->cfg;
|
||||
pCfg->vgId = htonl(pVgroup->vgId);
|
||||
pCfg->cfgVersion = htonl(pDb->cfgVersion);
|
||||
pCfg->dbCfgVersion = htonl(pDb->dbCfgVersion);
|
||||
pCfg->vgCfgVersion = htonl(pVgroup->vgCfgVersion);
|
||||
pCfg->cacheBlockSize = htonl(pDb->cfg.cacheBlockSize);
|
||||
pCfg->totalBlocks = htonl(pDb->cfg.totalBlocks);
|
||||
pCfg->maxTables = htonl(maxTables + 1);
|
||||
|
@ -859,6 +863,7 @@ static SCreateVnodeMsg *mnodeBuildVnodeMsg(SVgObj *pVgroup) {
|
|||
pCfg->wals = 3;
|
||||
pCfg->quorum = pDb->cfg.quorum;
|
||||
pCfg->update = pDb->cfg.update;
|
||||
pCfg->cacheLastRow = pDb->cfg.cacheLastRow;
|
||||
|
||||
SVnodeDesc *pNodes = pVnode->nodes;
|
||||
for (int32_t j = 0; j < pVgroup->numOfVnodes; ++j) {
|
||||
|
|
|
@ -59,6 +59,7 @@ extern "C" {
|
|||
|
||||
// TAOS_OS_FUNC_SOCKET
|
||||
int32_t taosSetNonblocking(SOCKET sock, int32_t on);
|
||||
void taosIgnSIGPIPE();
|
||||
void taosBlockSIGPIPE();
|
||||
|
||||
// TAOS_OS_FUNC_SOCKET_SETSOCKETOPT
|
||||
|
|
|
@ -39,6 +39,10 @@ int32_t taosSetNonblocking(SOCKET sock, int32_t on) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
void taosIgnSIGPIPE() {
|
||||
signal(SIGPIPE, SIG_IGN);
|
||||
}
|
||||
|
||||
void taosBlockSIGPIPE() {
|
||||
sigset_t signal_mask;
|
||||
sigemptyset(&signal_mask);
|
||||
|
|
|
@@ -46,6 +46,7 @@ int32_t taosSetNonblocking(SOCKET sock, int32_t on) {
return 0;
}

void taosIgnSIGPIPE() {}
void taosBlockSIGPIPE() {}

int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t optlen) {

@@ -136,7 +136,7 @@ void httpSendErrorResp(HttpContext *pContext, int32_t errNo) {
else
httpCode = 400;

if (pContext->parser->httpCode != 0) {
if (pContext->parser && pContext->parser->httpCode != 0) {
httpCode = pContext->parser->httpCode;
}

@ -33,13 +33,6 @@ struct SColumnFilterElem;
|
|||
typedef bool (*__filter_func_t)(struct SColumnFilterElem* pFilter, char* val1, char* val2);
|
||||
typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int32_t order);
|
||||
|
||||
typedef struct SGroupResInfo {
|
||||
int32_t groupId;
|
||||
int32_t numOfDataPages;
|
||||
int32_t pageId;
|
||||
int32_t rowId;
|
||||
} SGroupResInfo;
|
||||
|
||||
typedef struct SResultRowPool {
|
||||
int32_t elemSize;
|
||||
int32_t blockSize;
|
||||
|
@ -72,6 +65,12 @@ typedef struct SResultRow {
|
|||
union {STimeWindow win; char* key;}; // start key of current time window
|
||||
} SResultRow;
|
||||
|
||||
typedef struct SGroupResInfo {
|
||||
int32_t rowId;
|
||||
int32_t index;
|
||||
SArray* pRows; // SArray<SResultRow*>
|
||||
} SGroupResInfo;
|
||||
|
||||
/**
|
||||
* If the number of generated results is greater than this value,
|
||||
* query query will be halt and return results to client immediate.
|
||||
|
@ -89,7 +88,6 @@ typedef struct SResultRowInfo {
|
|||
int32_t size:24; // number of result set
|
||||
int32_t capacity; // max capacity
|
||||
int32_t curIndex; // current start active index
|
||||
int64_t startTime; // start time of the first time window for sliding query
|
||||
int64_t prevSKey; // previous (not completed) sliding window start key
|
||||
} SResultRowInfo;
|
||||
|
||||
|
|
|
@@ -67,7 +67,7 @@ void tHistogramDestroy(SHistogramInfo** pHisto);

void tHistogramPrint(SHistogramInfo* pHisto);

int32_t vnodeHistobinarySearch(SHistBin* pEntry, int32_t len, double val);
int32_t histoBinarySearch(SHistBin* pEntry, int32_t len, double val);

SHeapEntry* tHeapCreate(int32_t numOfEntries);
void tHeapSort(SHeapEntry* pEntry, int32_t len);

@ -120,7 +120,8 @@ typedef struct SCreateDBInfo {
|
|||
int32_t compressionLevel;
|
||||
SStrToken precision;
|
||||
bool ignoreExists;
|
||||
int8_t update;
|
||||
int8_t update;
|
||||
int8_t cachelast;
|
||||
|
||||
SArray *keep;
|
||||
} SCreateDBInfo;
|
||||
|
|
|
@ -34,17 +34,13 @@ int32_t initResultRowInfo(SResultRowInfo* pResultRowInfo, int32_t size, int16_t
|
|||
void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo);
|
||||
|
||||
void resetResultRowInfo(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo);
|
||||
void popFrontResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, int32_t num);
|
||||
void clearClosedResultRows(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo *pResultRowInfo);
|
||||
int32_t numOfClosedResultRows(SResultRowInfo* pResultRowInfo);
|
||||
void closeAllResultRows(SResultRowInfo* pResultRowInfo);
|
||||
void removeRedundantResultRows(SResultRowInfo *pResultRowInfo, TSKEY lastKey, int32_t order);
|
||||
|
||||
int32_t initResultRow(SResultRow *pResultRow);
|
||||
void closeResultRow(SResultRowInfo* pResultRowInfo, int32_t slot);
|
||||
bool isResultRowClosed(SResultRowInfo *pResultRowInfo, int32_t slot);
|
||||
void clearResultRow(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* pResultRow, int16_t type);
|
||||
void copyResultRow(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* dst, const SResultRow* src, int16_t type);
|
||||
|
||||
SResultRowCellInfo* getResultCell(SQueryRuntimeEnv* pRuntimeEnv, const SResultRow* pRow, int32_t index);
|
||||
|
||||
|
@ -77,7 +73,6 @@ void* destroyResultRowPool(SResultRowPool* p);
|
|||
int32_t getNumOfAllocatedResultRows(SResultRowPool* p);
|
||||
int32_t getNumOfUsedResultRows(SResultRowPool* p);
|
||||
|
||||
uint64_t getResultInfoUId(SQueryRuntimeEnv* pRuntimeEnv);
|
||||
bool isPointInterpoQuery(SQuery *pQuery);
|
||||
|
||||
|
||||
|
|
|
@@ -22,8 +22,8 @@ extern "C" {

#include "tlog.h"

extern uint32_t qDebugFlag;
extern uint32_t tscEmbedded;
extern int32_t qDebugFlag;
extern int8_t tscEmbedded;

#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", 255, __VA_ARGS__); }} while(0)
#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", 255, __VA_ARGS__); }} while(0)

@ -112,29 +112,29 @@ cmd ::= SHOW dbPrefix(X) STABLES. {
|
|||
|
||||
cmd ::= SHOW dbPrefix(X) STABLES LIKE ids(Y). {
|
||||
SStrToken token;
|
||||
setDBName(&token, &X);
|
||||
setDbName(&token, &X);
|
||||
setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &Y);
|
||||
}
|
||||
|
||||
cmd ::= SHOW dbPrefix(X) VGROUPS. {
|
||||
SStrToken token;
|
||||
setDBName(&token, &X);
|
||||
setDbName(&token, &X);
|
||||
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0);
|
||||
}
|
||||
|
||||
cmd ::= SHOW dbPrefix(X) VGROUPS ids(Y). {
|
||||
SStrToken token;
|
||||
setDBName(&token, &X);
|
||||
setDbName(&token, &X);
|
||||
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &Y);
|
||||
}
|
||||
|
||||
//drop configure for tables
|
||||
cmd ::= DROP TABLE ifexists(Y) ids(X) cpxName(Z). {
|
||||
X.n += Z.n;
|
||||
setDropDBTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &X, &Y);
|
||||
setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &X, &Y);
|
||||
}
|
||||
|
||||
cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDropDBTableInfo(pInfo, TSDB_SQL_DROP_DB, &X, &Y); }
|
||||
cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &X, &Y); }
|
||||
cmd ::= DROP DNODE ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &X); }
|
||||
cmd ::= DROP USER ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_USER, 1, &X); }
|
||||
cmd ::= DROP ACCOUNT ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &X); }
|
||||
|
@ -149,16 +149,16 @@ cmd ::= DESCRIBE ids(X) cpxName(Y). {
|
|||
}
|
||||
|
||||
/////////////////////////////////THE ALTER STATEMENT////////////////////////////////////////
|
||||
cmd ::= ALTER USER ids(X) PASS ids(Y). { setAlterUserSQL(pInfo, TSDB_ALTER_USER_PASSWD, &X, &Y, NULL); }
|
||||
cmd ::= ALTER USER ids(X) PRIVILEGE ids(Y). { setAlterUserSQL(pInfo, TSDB_ALTER_USER_PRIVILEGES, &X, NULL, &Y);}
|
||||
cmd ::= ALTER USER ids(X) PASS ids(Y). { setAlterUserSql(pInfo, TSDB_ALTER_USER_PASSWD, &X, &Y, NULL); }
|
||||
cmd ::= ALTER USER ids(X) PRIVILEGE ids(Y). { setAlterUserSql(pInfo, TSDB_ALTER_USER_PRIVILEGES, &X, NULL, &Y);}
|
||||
cmd ::= ALTER DNODE ids(X) ids(Y). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &X, &Y); }
|
||||
cmd ::= ALTER DNODE ids(X) ids(Y) ids(Z). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &X, &Y, &Z); }
|
||||
cmd ::= ALTER LOCAL ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &X); }
|
||||
cmd ::= ALTER LOCAL ids(X) ids(Y). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &X, &Y); }
|
||||
cmd ::= ALTER DATABASE ids(X) alter_db_optr(Y). { SStrToken t = {0}; setCreateDBSQL(pInfo, TSDB_SQL_ALTER_DB, &X, &Y, &t);}
|
||||
|
||||
cmd ::= ALTER ACCOUNT ids(X) acct_optr(Z). { setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &X, NULL, &Z);}
|
||||
cmd ::= ALTER ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). { setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &X, &Y, &Z);}
|
||||
cmd ::= ALTER ACCOUNT ids(X) acct_optr(Z). { setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &X, NULL, &Z);}
|
||||
cmd ::= ALTER ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). { setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &X, &Y, &Z);}
|
||||
|
||||
// An IDENTIFIER can be a generic identifier, or one of several keywords.
|
||||
// Any non-standard keyword can also be an identifier.
|
||||
|
@ -179,9 +179,9 @@ ifnotexists(X) ::= . { X.n = 0;}
|
|||
//create option for dnode/db/user/account
|
||||
cmd ::= CREATE DNODE ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &X);}
|
||||
cmd ::= CREATE ACCOUNT ids(X) PASS ids(Y) acct_optr(Z).
|
||||
{ setCreateAcctSQL(pInfo, TSDB_SQL_CREATE_ACCT, &X, &Y, &Z);}
|
||||
{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &X, &Y, &Z);}
|
||||
cmd ::= CREATE DATABASE ifnotexists(Z) ids(X) db_optr(Y). { setCreateDBSQL(pInfo, TSDB_SQL_CREATE_DB, &X, &Y, &Z);}
|
||||
cmd ::= CREATE USER ids(X) PASS ids(Y). { setCreateUserSQL(pInfo, &X, &Y);}
|
||||
cmd ::= CREATE USER ids(X) PASS ids(Y). { setCreateUserSql(pInfo, &X, &Y);}
|
||||
|
||||
pps(Y) ::= . { Y.n = 0; }
|
||||
pps(Y) ::= PPS INTEGER(X). { Y = X; }
|
||||
|
@ -240,6 +240,7 @@ fsync(Y) ::= FSYNC INTEGER(X). { Y = X; }
|
|||
comp(Y) ::= COMP INTEGER(X). { Y = X; }
|
||||
prec(Y) ::= PRECISION STRING(X). { Y = X; }
|
||||
update(Y) ::= UPDATE INTEGER(X). { Y = X; }
|
||||
cachelast(Y) ::= CACHELAST INTEGER(X). { Y = X; }
|
||||
|
||||
%type db_optr {SCreateDBInfo}
|
||||
db_optr(Y) ::= . {setDefaultCreateDbOption(&Y);}
|
||||
|
@ -258,6 +259,7 @@ db_optr(Y) ::= db_optr(Z) comp(X). { Y = Z; Y.compressionLevel = strto
|
|||
db_optr(Y) ::= db_optr(Z) prec(X). { Y = Z; Y.precision = X; }
|
||||
db_optr(Y) ::= db_optr(Z) keep(X). { Y = Z; Y.keep = X; }
|
||||
db_optr(Y) ::= db_optr(Z) update(X). { Y = Z; Y.update = strtol(X.z, NULL, 10); }
|
||||
db_optr(Y) ::= db_optr(Z) cachelast(X). { Y = Z; Y.cachelast = strtol(X.z, NULL, 10); }
|
||||
|
||||
%type alter_db_optr {SCreateDBInfo}
|
||||
alter_db_optr(Y) ::= . { setDefaultCreateDbOption(&Y);}
|
||||
|
@ -270,21 +272,22 @@ alter_db_optr(Y) ::= alter_db_optr(Z) comp(X). { Y = Z; Y.compressionLeve
|
|||
alter_db_optr(Y) ::= alter_db_optr(Z) wal(X). { Y = Z; Y.walLevel = strtol(X.z, NULL, 10); }
|
||||
alter_db_optr(Y) ::= alter_db_optr(Z) fsync(X). { Y = Z; Y.fsyncPeriod = strtol(X.z, NULL, 10); }
|
||||
alter_db_optr(Y) ::= alter_db_optr(Z) update(X). { Y = Z; Y.update = strtol(X.z, NULL, 10); }
|
||||
alter_db_optr(Y) ::= alter_db_optr(Z) cachelast(X). { Y = Z; Y.cachelast = strtol(X.z, NULL, 10); }
|
||||
|
||||
%type typename {TAOS_FIELD}
|
||||
typename(A) ::= ids(X). {
|
||||
X.type = 0;
|
||||
tSQLSetColumnType (&A, &X);
|
||||
tSqlSetColumnType (&A, &X);
|
||||
}
|
||||
|
||||
//define binary type, e.g., binary(10), nchar(10)
|
||||
typename(A) ::= ids(X) LP signed(Y) RP. {
|
||||
if (Y <= 0) {
|
||||
X.type = 0;
|
||||
tSQLSetColumnType(&A, &X);
|
||||
tSqlSetColumnType(&A, &X);
|
||||
} else {
|
||||
X.type = -Y; // negative value of name length
|
||||
tSQLSetColumnType(&A, &X);
|
||||
tSqlSetColumnType(&A, &X);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -315,8 +318,8 @@ create_table_list(A) ::= create_table_list(X) create_from_stable(Z). {
|
|||
|
||||
%type create_table_args{SCreateTableSQL*}
|
||||
create_table_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) LP columnlist(X) RP. {
|
||||
A = tSetCreateSQLElems(X, NULL, NULL, TSQL_CREATE_TABLE);
|
||||
setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE);
|
||||
A = tSetCreateSqlElems(X, NULL, NULL, TSQL_CREATE_TABLE);
|
||||
setSqlInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE);
|
||||
|
||||
V.n += Z.n;
|
||||
setCreatedTableName(pInfo, &V, &U);
|
||||
|
@ -324,8 +327,8 @@ create_table_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) LP columnlist(X) RP. {
|
|||
|
||||
// create super table
|
||||
create_table_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) LP columnlist(X) RP TAGS LP columnlist(Y) RP. {
|
||||
A = tSetCreateSQLElems(X, Y, NULL, TSQL_CREATE_STABLE);
|
||||
setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE);
|
||||
A = tSetCreateSqlElems(X, Y, NULL, TSQL_CREATE_STABLE);
|
||||
setSqlInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE);
|
||||
|
||||
V.n += Z.n;
|
||||
setCreatedTableName(pInfo, &V, &U);
|
||||
|
@ -343,8 +346,8 @@ create_from_stable(A) ::= ifnotexists(U) ids(V) cpxName(Z) USING ids(X) cpxName(
|
|||
// create stream
|
||||
// create table table_name as select count(*) from super_table_name interval(time)
|
||||
create_table_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) AS select(S). {
|
||||
A = tSetCreateSQLElems(NULL, NULL, S, TSQL_CREATE_STREAM);
|
||||
setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE);
|
||||
A = tSetCreateSqlElems(NULL, NULL, S, TSQL_CREATE_STREAM);
|
||||
setSqlInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE);
|
||||
|
||||
V.n += Z.n;
|
||||
setCreatedTableName(pInfo, &V, &U);
|
||||
|
@ -359,7 +362,7 @@ columnlist(A) ::= column(X). {A = taosArrayInit(4, sizeof(T
|
|||
// The information used for a column is the name and type of column:
|
||||
// tinyint smallint int bigint float double bool timestamp binary(x) nchar(x)
|
||||
column(A) ::= ids(X) typename(Y). {
|
||||
tSQLSetColumnInfo(&A, &X, &Y);
|
||||
tSqlSetColumnInfo(&A, &X, &Y);
|
||||
}
|
||||
|
||||
%type tagitemlist {SArray*}
|
||||
|
@ -407,7 +410,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
|
|||
%type select {SQuerySQL*}
|
||||
%destructor select {doDestroyQuerySql($$);}
|
||||
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
|
||||
A = tSetQuerySQLElems(&T, W, X, Y, P, Z, &K, &S, F, &L, &G);
|
||||
A = tSetQuerySqlElems(&T, W, X, Y, P, Z, &K, &S, F, &L, &G);
|
||||
}
|
||||
|
||||
%type union {SSubclauseInfo*}
|
||||
|
@ -418,33 +421,33 @@ union(Y) ::= LP union(X) RP. { Y = X; }
|
|||
union(Y) ::= union(Z) UNION ALL select(X). { Y = appendSelectClause(Z, X); }
|
||||
union(Y) ::= union(Z) UNION ALL LP select(X) RP. { Y = appendSelectClause(Z, X); }
|
||||
|
||||
cmd ::= union(X). { setSQLInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
|
||||
cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
|
||||
|
||||
// Support for the SQL exprssion without from & where subclauses, e.g.,
|
||||
// select current_database(),
|
||||
// select server_version(), select client_version(),
|
||||
// select server_state();
|
||||
select(A) ::= SELECT(T) selcollist(W). {
|
||||
A = tSetQuerySQLElems(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
|
||||
A = tSetQuerySqlElems(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
|
||||
}
|
||||
|
||||
// selcollist is a list of expressions that are to become the return
|
||||
// values of the SELECT statement. The "*" in statements like
|
||||
// "SELECT * FROM ..." is encoded as a special expression with an opcode of TK_ALL.
|
||||
%type selcollist {tSQLExprList*}
|
||||
%destructor selcollist {tSQLExprListDestroy($$);}
|
||||
%destructor selcollist {tSqlExprListDestroy($$);}
|
||||
|
||||
%type sclp {tSQLExprList*}
|
||||
%destructor sclp {tSQLExprListDestroy($$);}
|
||||
%destructor sclp {tSqlExprListDestroy($$);}
|
||||
sclp(A) ::= selcollist(X) COMMA. {A = X;}
|
||||
sclp(A) ::= . {A = 0;}
|
||||
selcollist(A) ::= sclp(P) expr(X) as(Y). {
|
||||
A = tSQLExprListAppend(P, X, Y.n?&Y:0);
|
||||
A = tSqlExprListAppend(P, X, Y.n?&Y:0);
|
||||
}
|
||||
|
||||
selcollist(A) ::= sclp(P) STAR. {
|
||||
tSQLExpr *pNode = tSQLExprIdValueCreate(NULL, TK_ALL);
|
||||
A = tSQLExprListAppend(P, pNode, 0);
|
||||
tSQLExpr *pNode = tSqlExprIdValueCreate(NULL, TK_ALL);
|
||||
A = tSqlExprListAppend(P, pNode, 0);
|
||||
}
|
||||
|
||||
// An option "AS <id>" phrase that can follow one of the expressions that
|
||||
|
@@ -573,7 +576,7 @@ grouplist(A) ::= item(X). {

//having clause, ignore the input condition in having
%type having_opt {tSQLExpr*}
%destructor having_opt {tSQLExprDestroy($$);}
%destructor having_opt {tSqlExprDestroy($$);}
having_opt(A) ::=. {A = 0;}
having_opt(A) ::= HAVING expr(X). {A = X;}

@ -595,7 +598,7 @@ slimit_opt(A) ::= SLIMIT signed(X) COMMA signed(Y).
|
|||
{A.limit = Y; A.offset = X;}
|
||||
|
||||
%type where_opt {tSQLExpr*}
|
||||
%destructor where_opt {tSQLExprDestroy($$);}
|
||||
%destructor where_opt {tSqlExprDestroy($$);}
|
||||
|
||||
where_opt(A) ::= . {A = 0;}
|
||||
where_opt(A) ::= WHERE expr(X). {A = X;}
|
||||
|
@ -603,67 +606,67 @@ where_opt(A) ::= WHERE expr(X). {A = X;}
|
|||
/////////////////////////// Expression Processing /////////////////////////////
|
||||
//
|
||||
%type expr {tSQLExpr*}
|
||||
%destructor expr {tSQLExprDestroy($$);}
|
||||
%destructor expr {tSqlExprDestroy($$);}
|
||||
|
||||
expr(A) ::= LP(X) expr(Y) RP(Z). {A = Y; A->token.z = X.z; A->token.n = (Z.z - X.z + 1);}
|
||||
|
||||
expr(A) ::= ID(X). { A = tSQLExprIdValueCreate(&X, TK_ID);}
|
||||
expr(A) ::= ID(X) DOT ID(Y). { X.n += (1+Y.n); A = tSQLExprIdValueCreate(&X, TK_ID);}
|
||||
expr(A) ::= ID(X) DOT STAR(Y). { X.n += (1+Y.n); A = tSQLExprIdValueCreate(&X, TK_ALL);}
|
||||
expr(A) ::= ID(X). { A = tSqlExprIdValueCreate(&X, TK_ID);}
|
||||
expr(A) ::= ID(X) DOT ID(Y). { X.n += (1+Y.n); A = tSqlExprIdValueCreate(&X, TK_ID);}
|
||||
expr(A) ::= ID(X) DOT STAR(Y). { X.n += (1+Y.n); A = tSqlExprIdValueCreate(&X, TK_ALL);}
|
||||
|
||||
expr(A) ::= INTEGER(X). { A = tSQLExprIdValueCreate(&X, TK_INTEGER);}
|
||||
expr(A) ::= MINUS(X) INTEGER(Y). { X.n += Y.n; X.type = TK_INTEGER; A = tSQLExprIdValueCreate(&X, TK_INTEGER);}
|
||||
expr(A) ::= PLUS(X) INTEGER(Y). { X.n += Y.n; X.type = TK_INTEGER; A = tSQLExprIdValueCreate(&X, TK_INTEGER);}
|
||||
expr(A) ::= FLOAT(X). { A = tSQLExprIdValueCreate(&X, TK_FLOAT);}
|
||||
expr(A) ::= MINUS(X) FLOAT(Y). { X.n += Y.n; X.type = TK_FLOAT; A = tSQLExprIdValueCreate(&X, TK_FLOAT);}
|
||||
expr(A) ::= PLUS(X) FLOAT(Y). { X.n += Y.n; X.type = TK_FLOAT; A = tSQLExprIdValueCreate(&X, TK_FLOAT);}
|
||||
expr(A) ::= STRING(X). { A = tSQLExprIdValueCreate(&X, TK_STRING);}
|
||||
expr(A) ::= NOW(X). { A = tSQLExprIdValueCreate(&X, TK_NOW); }
|
||||
expr(A) ::= VARIABLE(X). { A = tSQLExprIdValueCreate(&X, TK_VARIABLE);}
|
||||
expr(A) ::= BOOL(X). { A = tSQLExprIdValueCreate(&X, TK_BOOL);}
|
||||
expr(A) ::= INTEGER(X). { A = tSqlExprIdValueCreate(&X, TK_INTEGER);}
|
||||
expr(A) ::= MINUS(X) INTEGER(Y). { X.n += Y.n; X.type = TK_INTEGER; A = tSqlExprIdValueCreate(&X, TK_INTEGER);}
|
||||
expr(A) ::= PLUS(X) INTEGER(Y). { X.n += Y.n; X.type = TK_INTEGER; A = tSqlExprIdValueCreate(&X, TK_INTEGER);}
|
||||
expr(A) ::= FLOAT(X). { A = tSqlExprIdValueCreate(&X, TK_FLOAT);}
|
||||
expr(A) ::= MINUS(X) FLOAT(Y). { X.n += Y.n; X.type = TK_FLOAT; A = tSqlExprIdValueCreate(&X, TK_FLOAT);}
|
||||
expr(A) ::= PLUS(X) FLOAT(Y). { X.n += Y.n; X.type = TK_FLOAT; A = tSqlExprIdValueCreate(&X, TK_FLOAT);}
|
||||
expr(A) ::= STRING(X). { A = tSqlExprIdValueCreate(&X, TK_STRING);}
|
||||
expr(A) ::= NOW(X). { A = tSqlExprIdValueCreate(&X, TK_NOW); }
|
||||
expr(A) ::= VARIABLE(X). { A = tSqlExprIdValueCreate(&X, TK_VARIABLE);}
|
||||
expr(A) ::= BOOL(X). { A = tSqlExprIdValueCreate(&X, TK_BOOL);}
|
||||
|
||||
// ordinary functions: min(x), max(x), top(k, 20)
|
||||
expr(A) ::= ID(X) LP exprlist(Y) RP(E). { A = tSQLExprCreateFunction(Y, &X, &E, X.type); }
|
||||
expr(A) ::= ID(X) LP exprlist(Y) RP(E). { A = tSqlExprCreateFunction(Y, &X, &E, X.type); }
|
||||
|
||||
// for parsing sql functions with wildcard for parameters. e.g., count(*)/first(*)/last(*) operation
|
||||
expr(A) ::= ID(X) LP STAR RP(Y). { A = tSQLExprCreateFunction(NULL, &X, &Y, X.type); }
|
||||
expr(A) ::= ID(X) LP STAR RP(Y). { A = tSqlExprCreateFunction(NULL, &X, &Y, X.type); }
|
||||
|
||||
// is (not) null expression
|
||||
expr(A) ::= expr(X) IS NULL. {A = tSQLExprCreate(X, NULL, TK_ISNULL);}
|
||||
expr(A) ::= expr(X) IS NOT NULL. {A = tSQLExprCreate(X, NULL, TK_NOTNULL);}
|
||||
expr(A) ::= expr(X) IS NULL. {A = tSqlExprCreate(X, NULL, TK_ISNULL);}
|
||||
expr(A) ::= expr(X) IS NOT NULL. {A = tSqlExprCreate(X, NULL, TK_NOTNULL);}
|
||||
|
||||
// relational expression
|
||||
expr(A) ::= expr(X) LT expr(Y). {A = tSQLExprCreate(X, Y, TK_LT);}
|
||||
expr(A) ::= expr(X) GT expr(Y). {A = tSQLExprCreate(X, Y, TK_GT);}
|
||||
expr(A) ::= expr(X) LE expr(Y). {A = tSQLExprCreate(X, Y, TK_LE);}
|
||||
expr(A) ::= expr(X) GE expr(Y). {A = tSQLExprCreate(X, Y, TK_GE);}
|
||||
expr(A) ::= expr(X) NE expr(Y). {A = tSQLExprCreate(X, Y, TK_NE);}
|
||||
expr(A) ::= expr(X) EQ expr(Y). {A = tSQLExprCreate(X, Y, TK_EQ);}
|
||||
expr(A) ::= expr(X) LT expr(Y). {A = tSqlExprCreate(X, Y, TK_LT);}
|
||||
expr(A) ::= expr(X) GT expr(Y). {A = tSqlExprCreate(X, Y, TK_GT);}
|
||||
expr(A) ::= expr(X) LE expr(Y). {A = tSqlExprCreate(X, Y, TK_LE);}
|
||||
expr(A) ::= expr(X) GE expr(Y). {A = tSqlExprCreate(X, Y, TK_GE);}
|
||||
expr(A) ::= expr(X) NE expr(Y). {A = tSqlExprCreate(X, Y, TK_NE);}
|
||||
expr(A) ::= expr(X) EQ expr(Y). {A = tSqlExprCreate(X, Y, TK_EQ);}
|
||||
|
||||
expr(A) ::= expr(X) AND expr(Y). {A = tSQLExprCreate(X, Y, TK_AND);}
|
||||
expr(A) ::= expr(X) OR expr(Y). {A = tSQLExprCreate(X, Y, TK_OR); }
|
||||
expr(A) ::= expr(X) AND expr(Y). {A = tSqlExprCreate(X, Y, TK_AND);}
|
||||
expr(A) ::= expr(X) OR expr(Y). {A = tSqlExprCreate(X, Y, TK_OR); }
|
||||
|
||||
// binary arithmetic expression
|
||||
expr(A) ::= expr(X) PLUS expr(Y). {A = tSQLExprCreate(X, Y, TK_PLUS); }
|
||||
expr(A) ::= expr(X) MINUS expr(Y). {A = tSQLExprCreate(X, Y, TK_MINUS); }
|
||||
expr(A) ::= expr(X) STAR expr(Y). {A = tSQLExprCreate(X, Y, TK_STAR); }
|
||||
expr(A) ::= expr(X) SLASH expr(Y). {A = tSQLExprCreate(X, Y, TK_DIVIDE);}
|
||||
expr(A) ::= expr(X) REM expr(Y). {A = tSQLExprCreate(X, Y, TK_REM); }
|
||||
expr(A) ::= expr(X) PLUS expr(Y). {A = tSqlExprCreate(X, Y, TK_PLUS); }
|
||||
expr(A) ::= expr(X) MINUS expr(Y). {A = tSqlExprCreate(X, Y, TK_MINUS); }
|
||||
expr(A) ::= expr(X) STAR expr(Y). {A = tSqlExprCreate(X, Y, TK_STAR); }
|
||||
expr(A) ::= expr(X) SLASH expr(Y). {A = tSqlExprCreate(X, Y, TK_DIVIDE);}
|
||||
expr(A) ::= expr(X) REM expr(Y). {A = tSqlExprCreate(X, Y, TK_REM); }
|
||||
|
||||
// like expression
|
||||
expr(A) ::= expr(X) LIKE expr(Y). {A = tSQLExprCreate(X, Y, TK_LIKE); }
|
||||
expr(A) ::= expr(X) LIKE expr(Y). {A = tSqlExprCreate(X, Y, TK_LIKE); }
|
||||
|
||||
//in expression
|
||||
expr(A) ::= expr(X) IN LP exprlist(Y) RP. {A = tSQLExprCreate(X, (tSQLExpr*)Y, TK_IN); }
|
||||
expr(A) ::= expr(X) IN LP exprlist(Y) RP. {A = tSqlExprCreate(X, (tSQLExpr*)Y, TK_IN); }
|
||||
|
||||
%type exprlist {tSQLExprList*}
|
||||
%destructor exprlist {tSQLExprListDestroy($$);}
|
||||
%destructor exprlist {tSqlExprListDestroy($$);}
|
||||
|
||||
%type expritem {tSQLExpr*}
|
||||
%destructor expritem {tSQLExprDestroy($$);}
|
||||
%destructor expritem {tSqlExprDestroy($$);}
|
||||
|
||||
exprlist(A) ::= exprlist(X) COMMA expritem(Y). {A = tSQLExprListAppend(X,Y,0);}
|
||||
exprlist(A) ::= expritem(X). {A = tSQLExprListAppend(0,X,0);}
|
||||
exprlist(A) ::= exprlist(X) COMMA expritem(Y). {A = tSqlExprListAppend(X,Y,0);}
|
||||
exprlist(A) ::= expritem(X). {A = tSqlExprListAppend(0,X,0);}
|
||||
expritem(A) ::= expr(X). {A = X;}
|
||||
expritem(A) ::= . {A = 0;}
|
||||
|
||||
|
@ -673,8 +676,8 @@ cmd ::= RESET QUERY CACHE. { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
|
|||
///////////////////////////////////ALTER TABLE statement//////////////////////////////////
|
||||
cmd ::= ALTER TABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). {
|
||||
X.n += F.n;
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_COLUMN);
|
||||
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_COLUMN);
|
||||
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
}
|
||||
|
||||
cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
|
||||
|
@ -683,15 +686,15 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
|
|||
toTSDBType(A.type);
|
||||
SArray* K = tVariantListAppendToken(NULL, &A, -1);
|
||||
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN);
|
||||
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN);
|
||||
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
}
|
||||
|
||||
//////////////////////////////////ALTER TAGS statement/////////////////////////////////////
|
||||
cmd ::= ALTER TABLE ids(X) cpxName(Y) ADD TAG columnlist(A). {
|
||||
X.n += Y.n;
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN);
|
||||
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN);
|
||||
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
}
|
||||
cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). {
|
||||
X.n += Z.n;
|
||||
|
@ -699,8 +702,8 @@ cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). {
|
|||
toTSDBType(Y.type);
|
||||
SArray* A = tVariantListAppendToken(NULL, &Y, -1);
|
||||
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN);
|
||||
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN);
|
||||
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
}
|
||||
|
||||
cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). {
|
||||
|
@ -712,8 +715,8 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). {
|
|||
toTSDBType(Z.type);
|
||||
A = tVariantListAppendToken(A, &Z, -1);
|
||||
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN);
|
||||
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN);
|
||||
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
}
|
||||
|
||||
cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
|
||||
|
@ -723,14 +726,14 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
|
|||
SArray* A = tVariantListAppendToken(NULL, &Y, -1);
|
||||
A = tVariantListAppend(A, &Z, -1);
|
||||
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL);
|
||||
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL);
|
||||
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
|
||||
}
|
||||
|
||||
////////////////////////////////////////kill statement///////////////////////////////////////
|
||||
cmd ::= KILL CONNECTION INTEGER(Y). {setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &Y);}
|
||||
cmd ::= KILL STREAM INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &X);}
|
||||
cmd ::= KILL QUERY INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &X);}
|
||||
cmd ::= KILL CONNECTION INTEGER(Y). {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &Y);}
|
||||
cmd ::= KILL STREAM INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &X);}
|
||||
cmd ::= KILL QUERY INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &X);}
|
||||
|
||||
%fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED
|
||||
DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD
|
||||
|
|
File diff suppressed because it is too large
@@ -158,8 +158,8 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) {
}

#if defined(USE_ARRAYLIST)
int32_t idx = vnodeHistobinarySearch((*pHisto)->elems, (*pHisto)->numOfEntries, val);
assert(idx >= 0 && idx <= (*pHisto)->maxEntries);
int32_t idx = histoBinarySearch((*pHisto)->elems, (*pHisto)->numOfEntries, val);
assert(idx >= 0 && idx <= (*pHisto)->maxEntries && (*pHisto)->elems != NULL);

if ((*pHisto)->elems[idx].val == val && idx >= 0) {
(*pHisto)->elems[idx].num += 1;
@@ -356,7 +356,7 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) {
return 0;
}

int32_t vnodeHistobinarySearch(SHistBin* pEntry, int32_t len, double val) {
int32_t histoBinarySearch(SHistBin* pEntry, int32_t len, double val) {
int32_t end = len - 1;
int32_t start = 0;

@@ -466,7 +466,7 @@ void tHistogramPrint(SHistogramInfo* pHisto) {
*/
int64_t tHistogramSum(SHistogramInfo* pHisto, double v) {
#if defined(USE_ARRAYLIST)
int32_t slotIdx = vnodeHistobinarySearch(pHisto->elems, pHisto->numOfEntries, v);
int32_t slotIdx = histoBinarySearch(pHisto->elems, pHisto->numOfEntries, v);
if (pHisto->elems[slotIdx].val != v) {
slotIdx -= 1;

@@ -384,7 +384,12 @@ void tSqlSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType)
pField->name[pName->n] = 0;

pField->type = pType->type;
pField->bytes = pType->bytes;
if(pField->type < TSDB_DATA_TYPE_BOOL || pField->type > TSDB_DATA_TYPE_NCHAR){
pField->bytes = 0;
} else {
pField->bytes = pType->bytes;
}

}

void tSqlSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
@@ -841,5 +846,6 @@ void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo) {
pDBInfo->keep = NULL;

pDBInfo->update = -1;
pDBInfo->cachelast = 0;
memset(&pDBInfo->precision, 0, sizeof(SStrToken));
}

@@ -313,7 +313,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32

// allocate buf
if (availablePage == NULL) {
pi->pData = calloc(1, pResultBuf->pageSize + POINTER_BYTES);
pi->pData = calloc(1, pResultBuf->pageSize + POINTER_BYTES + 2); // add extract bytes in case of zipped buffer increased.
} else {
pi->pData = availablePage;
}

@@ -238,6 +238,7 @@ static SKeyword keywordTable[] = {
{"SUM_IRATE", TK_SUM_IRATE},
{"AVG_RATE", TK_AVG_RATE},
{"AVG_IRATE", TK_AVG_IRATE},
{"CACHELAST", TK_CACHELAST},
};

static const char isIdChar[] = {

@ -20,18 +20,6 @@
|
|||
#include "qExecutor.h"
|
||||
#include "qUtil.h"
|
||||
|
||||
static int32_t getResultRowKeyInfo(SResultRow* pResult, int16_t type, char** key, int16_t* bytes) {
|
||||
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
|
||||
*key = varDataVal(pResult->key);
|
||||
*bytes = varDataLen(pResult->key);
|
||||
} else {
|
||||
*key = (char*) &pResult->win.skey;
|
||||
*bytes = tDataTypeDesc[type].nSize;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t getOutputInterResultBufSize(SQuery* pQuery) {
|
||||
int32_t size = 0;
|
||||
|
||||
|
@ -96,78 +84,9 @@ void resetResultRowInfo(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRo
|
|||
|
||||
pResultRowInfo->curIndex = -1;
|
||||
pResultRowInfo->size = 0;
|
||||
|
||||
pResultRowInfo->startTime = TSKEY_INITIAL_VAL;
|
||||
pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL;
|
||||
}
|
||||
|
||||
void popFrontResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, int32_t num) {
|
||||
if (pResultRowInfo == NULL || pResultRowInfo->capacity == 0 || pResultRowInfo->size == 0 || num == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t numOfClosed = numOfClosedResultRows(pResultRowInfo);
|
||||
assert(num >= 0 && num <= numOfClosed);
|
||||
|
||||
int16_t type = pResultRowInfo->type;
|
||||
int64_t uid = getResultInfoUId(pRuntimeEnv);
|
||||
|
||||
char *key = NULL;
|
||||
int16_t bytes = -1;
|
||||
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
SResultRow *pResult = pResultRowInfo->pResult[i];
|
||||
if (pResult->closed) { // remove the window slot from hash table
|
||||
getResultRowKeyInfo(pResult, type, &key, &bytes);
|
||||
SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, key, bytes, uid);
|
||||
taosHashRemove(pRuntimeEnv->pResultRowHashTable, (const char *)pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t remain = pResultRowInfo->size - num;
|
||||
|
||||
// clear all the closed windows from the window list
|
||||
for (int32_t k = 0; k < remain; ++k) {
|
||||
copyResultRow(pRuntimeEnv, pResultRowInfo->pResult[k], pResultRowInfo->pResult[num + k], type);
|
||||
}
|
||||
|
||||
// move the unclosed window in the front of the window list
|
||||
for (int32_t k = remain; k < pResultRowInfo->size; ++k) {
|
||||
SResultRow *pWindowRes = pResultRowInfo->pResult[k];
|
||||
clearResultRow(pRuntimeEnv, pWindowRes, pResultRowInfo->type);
|
||||
}
|
||||
|
||||
pResultRowInfo->size = remain;
|
||||
|
||||
for (int32_t k = 0; k < pResultRowInfo->size; ++k) {
|
||||
SResultRow *pResult = pResultRowInfo->pResult[k];
|
||||
getResultRowKeyInfo(pResult, type, &key, &bytes);
|
||||
SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, key, bytes, uid);
|
||||
|
||||
int32_t *p = (int32_t *)taosHashGet(pRuntimeEnv->pResultRowHashTable, (const char *)pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
|
||||
assert(p != NULL);
|
||||
|
||||
int32_t v = (*p - num);
|
||||
assert(v >= 0 && v <= pResultRowInfo->size);
|
||||
|
||||
SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, key, bytes, uid);
|
||||
taosHashPut(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), (char *)&v, sizeof(int32_t));
|
||||
}
|
||||
|
||||
pResultRowInfo->curIndex = -1;
|
||||
}
|
||||
|
||||
void clearClosedResultRows(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo) {
|
||||
if (pResultRowInfo == NULL || pResultRowInfo->capacity == 0 || pResultRowInfo->size == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t numOfClosed = numOfClosedResultRows(pResultRowInfo);
|
||||
popFrontResultRow(pRuntimeEnv, &pRuntimeEnv->windowResInfo, numOfClosed);
|
||||
}
|
||||
|
||||
int32_t numOfClosedResultRows(SResultRowInfo *pResultRowInfo) {
|
||||
int32_t i = 0;
|
||||
while (i < pResultRowInfo->size && pResultRowInfo->pResult[i]->closed) {
|
||||
|
@ -181,45 +100,12 @@ void closeAllResultRows(SResultRowInfo *pResultRowInfo) {
|
|||
assert(pResultRowInfo->size >= 0 && pResultRowInfo->capacity >= pResultRowInfo->size);
|
||||
|
||||
for (int32_t i = 0; i < pResultRowInfo->size; ++i) {
|
||||
if (pResultRowInfo->pResult[i]->closed) {
|
||||
SResultRow* pRow = pResultRowInfo->pResult[i];
|
||||
if (pRow->closed) {
|
||||
continue;
|
||||
}
|
||||
|
||||
pResultRowInfo->pResult[i]->closed = true;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* remove the results that are not the FIRST time window that spreads beyond the
|
||||
* the last qualified time stamp in case of sliding query, which the sliding time is not equalled to the interval time.
|
||||
* NOTE: remove redundant, only when the result set order equals to traverse order
|
||||
*/
|
||||
void removeRedundantResultRows(SResultRowInfo *pResultRowInfo, TSKEY lastKey, int32_t order) {
|
||||
assert(pResultRowInfo->size >= 0 && pResultRowInfo->capacity >= pResultRowInfo->size);
|
||||
if (pResultRowInfo->size <= 1) {
|
||||
return;
|
||||
}
|
||||
|
||||
// get the result order
|
||||
int32_t resultOrder = (pResultRowInfo->pResult[0]->win.skey < pResultRowInfo->pResult[1]->win.skey)? 1:-1;
|
||||
if (order != resultOrder) {
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t i = 0;
|
||||
if (order == QUERY_ASC_FORWARD_STEP) {
|
||||
TSKEY ekey = pResultRowInfo->pResult[i]->win.ekey;
|
||||
while (i < pResultRowInfo->size && (ekey < lastKey)) {
|
||||
++i;
|
||||
}
|
||||
} else if (order == QUERY_DESC_FORWARD_STEP) {
|
||||
while (i < pResultRowInfo->size && (pResultRowInfo->pResult[i]->win.skey > lastKey)) {
|
||||
++i;
|
||||
}
|
||||
}
|
||||
|
||||
if (i < pResultRowInfo->size) {
|
||||
pResultRowInfo->size = (i + 1);
|
||||
pRow->closed = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -263,47 +149,6 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The source window result pos attribution of the source window result does not assign to the destination,
|
||||
* since the attribute of "Pos" is bound to each window result when the window result is created in the
|
||||
* disk-based result buffer.
|
||||
*/
|
||||
void copyResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *dst, const SResultRow *src, int16_t type) {
|
||||
dst->numOfRows = src->numOfRows;
|
||||
|
||||
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
|
||||
dst->key = realloc(dst->key, varDataTLen(src->key));
|
||||
varDataCopy(dst->key, src->key);
|
||||
} else {
|
||||
dst->win = src->win;
|
||||
}
|
||||
dst->closed = src->closed;
|
||||
|
||||
int32_t nOutputCols = pRuntimeEnv->pQuery->numOfOutput;
|
||||
|
||||
for (int32_t i = 0; i < nOutputCols; ++i) {
|
||||
SResultRowCellInfo *pDst = getResultCell(pRuntimeEnv, dst, i);
|
||||
SResultRowCellInfo *pSrc = getResultCell(pRuntimeEnv, src, i);
|
||||
|
||||
// char *buf = pDst->interResultBuf;
|
||||
memcpy(pDst, pSrc, sizeof(SResultRowCellInfo) + pRuntimeEnv->pCtx[i].interBufBytes);
|
||||
// pDst->interResultBuf = buf; // restore the allocated buffer
|
||||
|
||||
// copy the result info struct
|
||||
// memcpy(pDst->interResultBuf, pSrc->interResultBuf, pRuntimeEnv->pCtx[i].interBufBytes);
|
||||
|
||||
// copy the output buffer data from src to dst, the position info keep unchanged
|
||||
tFilePage *dstpage = getResBufPage(pRuntimeEnv->pResultBuf, dst->pageId);
|
||||
char * dstBuf = getPosInResultPage(pRuntimeEnv, i, dst, dstpage);
|
||||
|
||||
tFilePage *srcpage = getResBufPage(pRuntimeEnv->pResultBuf, src->pageId);
|
||||
char * srcBuf = getPosInResultPage(pRuntimeEnv, i, (SResultRow *)src, srcpage);
|
||||
size_t s = pRuntimeEnv->pQuery->pExpr1[i].bytes;
|
||||
|
||||
memcpy(dstBuf, srcBuf, s);
|
||||
}
|
||||
}
|
||||
|
||||
SResultRowCellInfo* getResultCell(SQueryRuntimeEnv* pRuntimeEnv, const SResultRow* pRow, int32_t index) {
|
||||
assert(index >= 0 && index < pRuntimeEnv->pQuery->numOfOutput);
|
||||
return (SResultRowCellInfo*)((char*) pRow->pCellInfo + pRuntimeEnv->rowCellInfoOffset[index]);
|
||||
|
@ -383,18 +228,4 @@ void* destroyResultRowPool(SResultRowPool* p) {
|
|||
|
||||
tfree(p);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
uint64_t getResultInfoUId(SQueryRuntimeEnv* pRuntimeEnv) {
|
||||
if (!pRuntimeEnv->stableQuery) {
|
||||
return 0; // for simple table query, the uid is always set to be 0;
|
||||
}
|
||||
|
||||
SQuery* pQuery = pRuntimeEnv->pQuery;
|
||||
if (pQuery->interval.interval == 0 || isPointInterpoQuery(pQuery) || pRuntimeEnv->groupbyNormalCol) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
STableId* id = TSDB_TABLEID(pRuntimeEnv->pQuery->current->pTable);
|
||||
return id->uid;
|
||||
}
|
src/query/src/sql.c (2455): file diff suppressed because it is too large

@ -21,19 +21,19 @@ TEST(testCase, histogram_binary_search) {
|
|||
pHisto->elems[i].val = i;
|
||||
}
|
||||
|
||||
int32_t idx = vnodeHistobinarySearch(pHisto->elems, pHisto->numOfEntries, 1);
|
||||
int32_t idx = histoBinarySearch(pHisto->elems, pHisto->numOfEntries, 1);
|
||||
assert(idx == 1);
|
||||
|
||||
idx = vnodeHistobinarySearch(pHisto->elems, pHisto->numOfEntries, 9);
|
||||
idx = histoBinarySearch(pHisto->elems, pHisto->numOfEntries, 9);
|
||||
assert(idx == 9);
|
||||
|
||||
idx = vnodeHistobinarySearch(pHisto->elems, pHisto->numOfEntries, 20);
|
||||
idx = histoBinarySearch(pHisto->elems, pHisto->numOfEntries, 20);
|
||||
assert(idx == 10);
|
||||
|
||||
idx = vnodeHistobinarySearch(pHisto->elems, pHisto->numOfEntries, -1);
|
||||
idx = histoBinarySearch(pHisto->elems, pHisto->numOfEntries, -1);
|
||||
assert(idx == 0);
|
||||
|
||||
idx = vnodeHistobinarySearch(pHisto->elems, pHisto->numOfEntries, 3.9);
|
||||
idx = histoBinarySearch(pHisto->elems, pHisto->numOfEntries, 3.9);
|
||||
assert(idx == 4);
|
||||
|
||||
free(pHisto);
|
||||
|
|
|
@@ -23,7 +23,7 @@ extern "C" {
#include "tlog.h"

extern int32_t rpcDebugFlag;
extern uint32_t tscEmbedded;
extern int8_t tscEmbedded;

#define tFatal(...) { if (rpcDebugFlag & DEBUG_FATAL) { taosPrintLog("RPC FATAL ", tscEmbedded ? 255 : rpcDebugFlag, __VA_ARGS__); }}
#define tError(...) { if (rpcDebugFlag & DEBUG_ERROR) { taosPrintLog("RPC ERROR ", tscEmbedded ? 255 : rpcDebugFlag, __VA_ARGS__); }}

@@ -576,7 +576,7 @@ static void rpcFreeMsg(void *msg) {
static SRpcConn *rpcOpenConn(SRpcInfo *pRpc, char *peerFqdn, uint16_t peerPort, int8_t connType) {
SRpcConn *pConn;

uint32_t peerIp = taosGetIpFromFqdn(peerFqdn);
uint32_t peerIp = taosGetIpv4FromFqdn(peerFqdn);
if (peerIp == 0xFFFFFFFF) {
tError("%s, failed to resolve FQDN:%s", pRpc->label, peerFqdn);
terrno = TSDB_CODE_RPC_FQDN_ERROR;

@@ -38,7 +38,7 @@ extern "C" {
#define SYNC_MAX_FWDS 512
#define SYNC_FWD_TIMER 300
#define SYNC_ROLE_TIMER 15000 // ms
#define SYNC_CHECK_INTERVAL 1 // ms
#define SYNC_CHECK_INTERVAL 1000 // ms
#define SYNC_WAIT_AFTER_CHOOSE_MASTER 10 // ms

#define nodeRole pNode->peerInfo[pNode->selfIndex]->role
@ -86,9 +86,10 @@ typedef struct SsyncPeer {
|
|||
int32_t peerFd; // forward FD
|
||||
int32_t numOfRetrieves; // number of retrieves tried
|
||||
int32_t fileChanged; // a flag to indicate file is changed during retrieving process
|
||||
int32_t refCount;
|
||||
int64_t rid;
|
||||
void * timer;
|
||||
void * pConn;
|
||||
int32_t refCount; // reference count
|
||||
struct SSyncNode *pSyncNode;
|
||||
} SSyncPeer;
|
||||
|
||||
|
@ -98,6 +99,7 @@ typedef struct SSyncNode {
|
|||
int8_t quorum;
|
||||
int8_t selfIndex;
|
||||
uint32_t vgId;
|
||||
int32_t refCount;
|
||||
int64_t rid;
|
||||
SSyncPeer * peerInfo[TAOS_SYNC_MAX_REPLICA + 1]; // extra one for arbitrator
|
||||
SSyncPeer * pMaster;
|
||||
|
@ -121,13 +123,13 @@ extern int32_t tsSyncNum;
|
|||
extern char tsNodeFqdn[TSDB_FQDN_LEN];
|
||||
extern char * syncStatus[];
|
||||
|
||||
void *syncRetrieveData(void *param);
|
||||
void *syncRestoreData(void *param);
|
||||
int32_t syncSaveIntoBuffer(SSyncPeer *pPeer, SWalHead *pHead);
|
||||
void syncRestartConnection(SSyncPeer *pPeer);
|
||||
void syncBroadcastStatus(SSyncNode *pNode);
|
||||
void syncAddPeerRef(SSyncPeer *pPeer);
|
||||
int32_t syncDecPeerRef(SSyncPeer *pPeer);
|
||||
void * syncRetrieveData(void *param);
|
||||
void * syncRestoreData(void *param);
|
||||
int32_t syncSaveIntoBuffer(SSyncPeer *pPeer, SWalHead *pHead);
|
||||
void syncRestartConnection(SSyncPeer *pPeer);
|
||||
void syncBroadcastStatus(SSyncNode *pNode);
|
||||
SSyncPeer *syncAcquirePeer(int64_t rid);
|
||||
void syncReleasePeer(SSyncPeer *pPeer);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -25,14 +25,14 @@ typedef struct {
|
|||
uint32_t serverIp;
|
||||
int16_t port;
|
||||
int32_t bufferSize;
|
||||
void (*processBrokenLink)(void *ahandle);
|
||||
int32_t (*processIncomingMsg)(void *ahandle, void *buffer);
|
||||
void (*processBrokenLink)(int64_t handleId);
|
||||
int32_t (*processIncomingMsg)(int64_t handleId, void *buffer);
|
||||
void (*processIncomingConn)(int32_t fd, uint32_t ip);
|
||||
} SPoolInfo;
|
||||
|
||||
void *syncOpenTcpThreadPool(SPoolInfo *pInfo);
|
||||
void syncCloseTcpThreadPool(void *);
|
||||
void *syncAllocateTcpConn(void *, void *ahandle, int32_t connFd);
|
||||
void *syncAllocateTcpConn(void *, int64_t rid, int32_t connFd);
|
||||
void syncFreeTcpConn(void *);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -29,8 +29,8 @@

static void    arbSignalHandler(int32_t signum, siginfo_t *sigInfo, void *context);
static void    arbProcessIncommingConnection(int32_t connFd, uint32_t sourceIp);
static void    arbProcessBrokenLink(void *param);
static int32_t arbProcessPeerMsg(void *param, void *buffer);
static void    arbProcessBrokenLink(int64_t rid);
static int32_t arbProcessPeerMsg(int64_t rid, void *buffer);
static tsem_t  tsArbSem;
static void *  tsArbTcpPool;
@ -138,20 +138,20 @@ static void arbProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) {

  sDebug("%s, arbitrator request is accepted", pNode->id);
  pNode->nodeFd = connFd;
  pNode->pConn = syncAllocateTcpConn(tsArbTcpPool, pNode, connFd);
  pNode->pConn = syncAllocateTcpConn(tsArbTcpPool, (int64_t)pNode, connFd);

  return;
}

static void arbProcessBrokenLink(void *param) {
  SNodeConn *pNode = param;
static void arbProcessBrokenLink(int64_t rid) {
  SNodeConn *pNode = (SNodeConn *)rid;

  sDebug("%s, TCP link is broken since %s, close connection", pNode->id, strerror(errno));
  tfree(pNode);
}

static int32_t arbProcessPeerMsg(void *param, void *buffer) {
  SNodeConn *pNode = param;
static int32_t arbProcessPeerMsg(int64_t rid, void *buffer) {
  SNodeConn *pNode = (SNodeConn *)rid;
  SSyncHead  head;
  int32_t    bytes = 0;
  char *     cont = (char *)buffer;
@ -35,19 +35,21 @@ char tsNodeFqdn[TSDB_FQDN_LEN] = {0};
static void *  tsTcpPool = NULL;
static void *  tsSyncTmrCtrl = NULL;
static void *  tsVgIdHash = NULL;
static int32_t tsSyncRefId = -1;
static int32_t tsNodeRefId = -1;
static int32_t tsPeerRefId = -1;

// local functions
static void    syncProcessSyncRequest(char *pMsg, SSyncPeer *pPeer);
static void    syncRecoverFromMaster(SSyncPeer *pPeer);
static void    syncCheckPeerConnection(void *param, void *tmrId);
static void    syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type, uint16_t tranId);
static void    syncProcessBrokenLink(void *param);
static int32_t syncProcessPeerMsg(void *param, void *buffer);
static int32_t syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type, uint16_t tranId);
static void    syncProcessBrokenLink(int64_t rid);
static int32_t syncProcessPeerMsg(int64_t rid, void *buffer);
static void    syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp);
static void    syncRemovePeer(SSyncPeer *pPeer);
static void    syncAddArbitrator(SSyncNode *pNode);
static void    syncFreeNode(void *);
static void    syncFreePeer(void *);
static void    syncRemoveConfirmedFwdInfo(SSyncNode *pNode);
static void    syncMonitorFwdInfos(void *param, void *tmrId);
static void    syncMonitorNodeRole(void *param, void *tmrId);
@ -55,7 +57,12 @@ static void syncProcessFwdAck(SSyncNode *pNode, SFwdInfo *pFwdInfo, int32_t c
static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle);
static void    syncRestartPeer(SSyncPeer *pPeer);
static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle, int32_t qtyp);

static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo);
static void       syncStartCheckPeerConn(SSyncPeer *pPeer);
static void       syncStopCheckPeerConn(SSyncPeer *pPeer);
static SSyncNode *syncAcquireNode(int64_t rid);
static void       syncReleaseNode(SSyncNode *pNode);

char* syncRole[] = {
  "offline",
@ -87,29 +94,34 @@ int32_t syncInit() {
  tsTcpPool = syncOpenTcpThreadPool(&info);
  if (tsTcpPool == NULL) {
    sError("failed to init tcpPool");
    syncCleanUp();
    return -1;
  }

  tsSyncTmrCtrl = taosTmrInit(1000, 50, 10000, "SYNC");
  if (tsSyncTmrCtrl == NULL) {
    sError("failed to init tmrCtrl");
    syncCloseTcpThreadPool(tsTcpPool);
    tsTcpPool = NULL;
    syncCleanUp();
    return -1;
  }

  tsVgIdHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
  if (tsVgIdHash == NULL) {
    sError("failed to init vgIdHash");
    taosTmrCleanUp(tsSyncTmrCtrl);
    syncCloseTcpThreadPool(tsTcpPool);
    tsTcpPool = NULL;
    tsSyncTmrCtrl = NULL;
    syncCleanUp();
    return -1;
  }

  tsSyncRefId = taosOpenRef(200, syncFreeNode);
  if (tsSyncRefId < 0) {
  tsNodeRefId = taosOpenRef(200, syncFreeNode);
  if (tsNodeRefId < 0) {
    sError("failed to init node ref");
    syncCleanUp();
    return -1;
  }

  tsPeerRefId = taosOpenRef(1000, syncFreePeer);
  if (tsPeerRefId < 0) {
    sError("failed to init peer ref");
    syncCleanUp();
    return -1;
  }
@ -121,12 +133,12 @@ int32_t syncInit() {
}

void syncCleanUp() {
  if (tsTcpPool) {
  if (tsTcpPool != NULL) {
    syncCloseTcpThreadPool(tsTcpPool);
    tsTcpPool = NULL;
  }

  if (tsSyncTmrCtrl) {
  if (tsSyncTmrCtrl != NULL) {
    taosTmrCleanUp(tsSyncTmrCtrl);
    tsSyncTmrCtrl = NULL;
  }

@ -136,8 +148,15 @@ void syncCleanUp() {
    tsVgIdHash = NULL;
  }

  taosCloseRef(tsSyncRefId);
  tsSyncRefId = -1;
  if (tsNodeRefId != -1) {
    taosCloseRef(tsNodeRefId);
    tsNodeRefId = -1;
  }

  if (tsPeerRefId != -1) {
    taosCloseRef(tsPeerRefId);
    tsPeerRefId = -1;
  }

  sInfo("sync module is cleaned up");
}
@ -170,7 +189,8 @@ int64_t syncStart(const SSyncInfo *pInfo) {
|
|||
pNode->quorum = pCfg->quorum;
|
||||
if (pNode->quorum > pNode->replica) pNode->quorum = pNode->replica;
|
||||
|
||||
pNode->rid = taosAddRef(tsSyncRefId, pNode);
|
||||
pNode->refCount = 1;
|
||||
pNode->rid = taosAddRef(tsNodeRefId, pNode);
|
||||
if (pNode->rid < 0) {
|
||||
syncFreeNode(pNode);
|
||||
return -1;
|
||||
|
@ -232,13 +252,18 @@ int64_t syncStart(const SSyncInfo *pInfo) {
|
|||
(*pNode->notifyRole)(pNode->vgId, nodeRole);
|
||||
}
|
||||
|
||||
syncStartCheckPeerConn(pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]); // arb
|
||||
for (int32_t index = 0; index < pNode->replica; ++index) {
|
||||
syncStartCheckPeerConn(pNode->peerInfo[index]);
|
||||
}
|
||||
|
||||
return pNode->rid;
|
||||
}
|
||||
|
||||
void syncStop(int64_t rid) {
|
||||
SSyncPeer *pPeer;
|
||||
|
||||
SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid);
|
||||
SSyncNode *pNode = syncAcquireNode(rid);
|
||||
if (pNode == NULL) return;
|
||||
|
||||
sInfo("vgId:%d, cleanup sync", pNode->vgId);
|
||||
|
@ -259,14 +284,14 @@ void syncStop(int64_t rid) {
|
|||
|
||||
pthread_mutex_unlock(&pNode->mutex);
|
||||
|
||||
taosReleaseRef(tsSyncRefId, rid);
|
||||
taosRemoveRef(tsSyncRefId, rid);
|
||||
syncReleaseNode(pNode);
|
||||
taosRemoveRef(tsNodeRefId, rid);
|
||||
}
|
||||
|
||||
int32_t syncReconfig(int64_t rid, const SSyncCfg *pNewCfg) {
|
||||
int32_t i, j;
|
||||
|
||||
SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid);
|
||||
SSyncNode *pNode = syncAcquireNode(rid);
|
||||
if (pNode == NULL) return TSDB_CODE_SYN_INVALID_CONFIG;
|
||||
|
||||
sInfo("vgId:%d, reconfig, role:%s replica:%d old:%d", pNode->vgId, syncRole[nodeRole], pNewCfg->replica,
|
||||
|
@ -274,6 +299,11 @@ int32_t syncReconfig(int64_t rid, const SSyncCfg *pNewCfg) {
|
|||
|
||||
pthread_mutex_lock(&pNode->mutex);
|
||||
|
||||
syncStopCheckPeerConn(pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]); // arb
|
||||
for (int32_t index = 0; index < pNode->replica; ++index) {
|
||||
syncStopCheckPeerConn(pNode->peerInfo[index]);
|
||||
}
|
||||
|
||||
for (i = 0; i < pNode->replica; ++i) {
|
||||
for (j = 0; j < pNewCfg->replica; ++j) {
|
||||
if ((strcmp(pNode->peerInfo[i]->fqdn, pNewCfg->nodeInfo[j].nodeFqdn) == 0) &&
|
||||
|
@ -330,28 +360,32 @@ int32_t syncReconfig(int64_t rid, const SSyncCfg *pNewCfg) {
|
|||
(*pNode->notifyRole)(pNode->vgId, nodeRole);
|
||||
}
|
||||
|
||||
syncStartCheckPeerConn(pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]); // arb
|
||||
for (int32_t index = 0; index < pNode->replica; ++index) {
|
||||
syncStartCheckPeerConn(pNode->peerInfo[index]);
|
||||
}
|
||||
|
||||
pthread_mutex_unlock(&pNode->mutex);
|
||||
|
||||
sInfo("vgId:%d, %d replicas are configured, quorum:%d", pNode->vgId, pNode->replica, pNode->quorum);
|
||||
syncBroadcastStatus(pNode);
|
||||
|
||||
taosReleaseRef(tsSyncRefId, rid);
|
||||
syncReleaseNode(pNode);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncForwardToPeer(int64_t rid, void *data, void *mhandle, int32_t qtype) {
|
||||
SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid);
|
||||
if (pNode == NULL) return 0;
|
||||
SSyncNode *pNode = syncAcquireNode(rid);
|
||||
if (pNode == NULL) return 0;
|
||||
|
||||
int32_t code = syncForwardToPeerImpl(pNode, data, mhandle, qtype);
|
||||
|
||||
taosReleaseRef(tsSyncRefId, rid);
|
||||
|
||||
syncReleaseNode(pNode);
|
||||
return code;
|
||||
}
|
||||
|
||||
void syncConfirmForward(int64_t rid, uint64_t version, int32_t code) {
|
||||
SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid);
|
||||
SSyncNode *pNode = syncAcquireNode(rid);
|
||||
if (pNode == NULL) return;
|
||||
|
||||
SSyncPeer *pPeer = pNode->pMaster;
|
||||
|
@ -367,14 +401,14 @@ void syncConfirmForward(int64_t rid, uint64_t version, int32_t code) {
|
|||
}
|
||||
}
|
||||
|
||||
taosReleaseRef(tsSyncRefId, rid);
|
||||
syncReleaseNode(pNode);
|
||||
}
|
||||
|
||||
#if 0
|
||||
void syncRecover(int64_t rid) {
|
||||
SSyncPeer *pPeer;
|
||||
|
||||
SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid);
|
||||
SSyncNode *pNode = syncAcquireNode(rid);
|
||||
if (pNode == NULL) return;
|
||||
|
||||
// to do: add a few lines to check if recover is OK
|
||||
|
@ -395,12 +429,12 @@ void syncRecover(int64_t rid) {
|
|||
|
||||
pthread_mutex_unlock(&pNode->mutex);
|
||||
|
||||
taosReleaseRef(tsSyncRefId, rid);
|
||||
syncReleaseNode(pNode);
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t syncGetNodesRole(int64_t rid, SNodesRole *pNodesRole) {
|
||||
SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid);
|
||||
SSyncNode *pNode = syncAcquireNode(rid);
|
||||
if (pNode == NULL) return -1;
|
||||
|
||||
pNodesRole->selfIndex = pNode->selfIndex;
|
||||
|
@ -409,8 +443,7 @@ int32_t syncGetNodesRole(int64_t rid, SNodesRole *pNodesRole) {
|
|||
pNodesRole->role[i] = pNode->peerInfo[i]->role;
|
||||
}
|
||||
|
||||
taosReleaseRef(tsSyncRefId, rid);
|
||||
|
||||
syncReleaseNode(pNode);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -446,24 +479,61 @@ static void syncAddArbitrator(SSyncNode *pNode) {
|
|||
static void syncFreeNode(void *param) {
|
||||
SSyncNode *pNode = param;
|
||||
|
||||
int32_t refCount = atomic_sub_fetch_32(&pNode->refCount, 1);
|
||||
sDebug("vgId:%d, syncnode is freed, refCount:%d", pNode->vgId, refCount);
|
||||
|
||||
pthread_mutex_destroy(&pNode->mutex);
|
||||
tfree(pNode->pRecv);
|
||||
tfree(pNode->pSyncFwds);
|
||||
tfree(pNode);
|
||||
}
|
||||
|
||||
void syncAddPeerRef(SSyncPeer *pPeer) { atomic_add_fetch_32(&pPeer->refCount, 1); }
|
||||
|
||||
int32_t syncDecPeerRef(SSyncPeer *pPeer) {
|
||||
if (atomic_sub_fetch_32(&pPeer->refCount, 1) == 0) {
|
||||
taosReleaseRef(tsSyncRefId, pPeer->pSyncNode->rid);
|
||||
|
||||
sDebug("%s, resource is freed", pPeer->id);
|
||||
tfree(pPeer);
|
||||
return 0;
|
||||
SSyncNode *syncAcquireNode(int64_t rid) {
|
||||
SSyncNode *pNode = taosAcquireRef(tsNodeRefId, rid);
|
||||
if (pNode == NULL) {
|
||||
sDebug("failed to acquire syncnode from refId:%" PRId64, rid);
|
||||
} else {
|
||||
int32_t refCount = atomic_add_fetch_32(&pNode->refCount, 1);
|
||||
sTrace("vgId:%d, acquire syncnode refId:%" PRId64 ", refCount:%d", pNode->vgId, rid, refCount);
|
||||
}
|
||||
|
||||
return 1;
|
||||
return pNode;
|
||||
}
|
||||
|
||||
void syncReleaseNode(SSyncNode *pNode) {
|
||||
int32_t refCount = atomic_sub_fetch_32(&pNode->refCount, 1);
|
||||
sTrace("vgId:%d, dec syncnode refId:%" PRId64 " refCount:%d", pNode->vgId, pNode->rid, refCount);
|
||||
|
||||
taosReleaseRef(tsNodeRefId, pNode->rid);
|
||||
}
|
||||
|
||||
static void syncFreePeer(void *param) {
|
||||
SSyncPeer *pPeer = param;
|
||||
|
||||
int32_t refCount = atomic_sub_fetch_32(&pPeer->refCount, 1);
|
||||
sDebug("%s, peer is freed, refCount:%d", pPeer->id, refCount);
|
||||
|
||||
syncReleaseNode(pPeer->pSyncNode);
|
||||
tfree(pPeer);
|
||||
}
|
||||
|
||||
SSyncPeer *syncAcquirePeer(int64_t rid) {
|
||||
SSyncPeer *pPeer = taosAcquireRef(tsPeerRefId, rid);
|
||||
if (pPeer == NULL) {
|
||||
sDebug("failed to acquire peer from refId:%" PRId64, rid);
|
||||
} else {
|
||||
int32_t refCount = atomic_add_fetch_32(&pPeer->refCount, 1);
|
||||
sTrace("%s, acquire peer refId:%" PRId64 ", refCount:%d", pPeer->id, rid, refCount);
|
||||
}
|
||||
|
||||
return pPeer;
|
||||
}
|
||||
|
||||
void syncReleasePeer(SSyncPeer *pPeer) {
|
||||
int32_t refCount = atomic_sub_fetch_32(&pPeer->refCount, 1);
|
||||
sTrace("%s, dec peer refId:%" PRId64 ", refCount:%d", pPeer->id, pPeer->rid, refCount);
|
||||
|
||||
taosReleaseRef(tsPeerRefId, pPeer->rid);
|
||||
}
|
||||
|
||||
static void syncClosePeerConn(SSyncPeer *pPeer) {
|
||||
|
@ -473,7 +543,8 @@ static void syncClosePeerConn(SSyncPeer *pPeer) {
|
|||
taosClose(pPeer->syncFd);
|
||||
if (pPeer->peerFd >= 0) {
|
||||
pPeer->peerFd = -1;
|
||||
syncFreeTcpConn(pPeer->pConn);
|
||||
void *pConn = pPeer->pConn;
|
||||
if (pConn != NULL) syncFreeTcpConn(pPeer->pConn);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -482,11 +553,32 @@ static void syncRemovePeer(SSyncPeer *pPeer) {
|
|||
|
||||
pPeer->ip = 0;
|
||||
syncClosePeerConn(pPeer);
|
||||
syncDecPeerRef(pPeer);
|
||||
//taosRemoveRef(tsPeerRefId, pPeer->rid);
|
||||
syncReleasePeer(pPeer);
|
||||
}
|
||||
|
||||
static void syncStartCheckPeerConn(SSyncPeer *pPeer) {
|
||||
if (pPeer == NULL) return;
|
||||
SSyncNode *pNode = pPeer->pSyncNode;
|
||||
|
||||
int32_t ret = strcmp(pPeer->fqdn, tsNodeFqdn);
|
||||
if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) {
|
||||
int32_t checkMs = 100 + (pNode->vgId * 10) % 100;
|
||||
if (pNode->vgId > 1) checkMs = tsStatusInterval * 1000 + checkMs;
|
||||
sDebug("%s, check peer connection after %d ms", pPeer->id, checkMs);
|
||||
taosTmrReset(syncCheckPeerConnection, checkMs, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer);
|
||||
}
|
||||
}
|
||||
|
||||
static void syncStopCheckPeerConn(SSyncPeer *pPeer) {
|
||||
if (pPeer == NULL) return;
|
||||
|
||||
taosTmrStopA(&pPeer->timer);
|
||||
sDebug("%s, stop check peer connection", pPeer->id);
|
||||
}
|
||||
|
||||
static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
|
||||
uint32_t ip = taosGetIpFromFqdn(pInfo->nodeFqdn);
|
||||
uint32_t ip = taosGetIpv4FromFqdn(pInfo->nodeFqdn);
|
||||
if (ip == 0xFFFFFFFF) {
|
||||
sError("failed to add peer, can resolve fqdn:%s since %s", pInfo->nodeFqdn, strerror(errno));
|
||||
terrno = TSDB_CODE_RPC_FQDN_ERROR;
|
||||
|
@ -508,17 +600,11 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
|
|||
pPeer->role = TAOS_SYNC_ROLE_OFFLINE;
|
||||
pPeer->pSyncNode = pNode;
|
||||
pPeer->refCount = 1;
|
||||
pPeer->rid = taosAddRef(tsPeerRefId, pPeer);
|
||||
|
||||
sInfo("%s, it is configured, ep:%s:%u", pPeer->id, pPeer->fqdn, pPeer->port);
|
||||
int32_t ret = strcmp(pPeer->fqdn, tsNodeFqdn);
|
||||
if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) {
|
||||
int32_t checkMs = 100 + (pNode->vgId * 10) % 100;
|
||||
if (pNode->vgId > 1) checkMs = tsStatusInterval * 1000 + checkMs;
|
||||
sDebug("%s, check peer connection after %d ms", pPeer->id, checkMs);
|
||||
taosTmrReset(syncCheckPeerConnection, checkMs, pPeer, tsSyncTmrCtrl, &pPeer->timer);
|
||||
}
|
||||
sInfo("%s, %p it is configured, ep:%s:%u rid:%" PRId64, pPeer->id, pPeer, pPeer->fqdn, pPeer->port, pPeer->rid);
|
||||
|
||||
taosAcquireRef(tsSyncRefId, pNode->rid);
|
||||
(void)syncAcquireNode(pNode->rid);
|
||||
return pPeer;
|
||||
}
|
||||
|
||||
|
@ -560,6 +646,9 @@ static void syncChooseMaster(SSyncNode *pNode) {
|
|||
index = i;
|
||||
}
|
||||
}
|
||||
sDebug("vgId:%d, master:%s may be choosed, index:%d", pNode->vgId, pNode->peerInfo[index]->id, index);
|
||||
} else {
|
||||
sDebug("vgId:%d, no master election since onlineNum:%d replica:%d", pNode->vgId, onlineNum, pNode->replica);
|
||||
}
|
||||
|
||||
// add arbitrator connection
|
||||
|
@ -580,6 +669,11 @@ static void syncChooseMaster(SSyncNode *pNode) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (index >= 0) {
|
||||
sDebug("vgId:%d, master:%s may be choosed, index:%d onlineNum(arb):%d replica:%d", pNode->vgId,
|
||||
pNode->peerInfo[index]->id, index, onlineNum, replica);
|
||||
}
|
||||
}
|
||||
|
||||
if (index >= 0) {
|
||||
|
@ -621,9 +715,13 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
|
|||
|
||||
if (onlineNum <= replica * 0.5) {
|
||||
if (nodeRole != TAOS_SYNC_ROLE_UNSYNCED) {
|
||||
nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
|
||||
if (nodeRole == TAOS_SYNC_ROLE_MASTER && onlineNum == replica * 0.5 && onlineNum >= 1) {
|
||||
sInfo("vgId:%d, self keep work as master, online:%d replica:%d", pNode->vgId, onlineNum, replica);
|
||||
} else {
|
||||
nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
|
||||
sInfo("vgId:%d, self change to unsynced state, online:%d replica:%d", pNode->vgId, onlineNum, replica);
|
||||
}
|
||||
(*pNode->notifyRole)(pNode->vgId, nodeRole);
|
||||
sInfo("vgId:%d, self change to unsynced state, online:%d replica:%d", pNode->vgId, onlineNum, replica);
|
||||
}
|
||||
} else {
|
||||
for (int32_t index = 0; index < pNode->replica; ++index) {
|
||||
|
@ -678,7 +776,7 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus* peersStatus, int8_t new
|
|||
if (pMaster) {
|
||||
// master is there
|
||||
pNode->pMaster = pMaster;
|
||||
sDebug("%s, it is the master, sver:%" PRIu64, pMaster->id, pMaster->version);
|
||||
sDebug("%s, it is the master, replica:%d sver:%" PRIu64, pMaster->id, pNode->replica, pMaster->version);
|
||||
|
||||
if (syncValidateMaster(pPeer) < 0) return;
|
||||
|
||||
|
@ -711,10 +809,10 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus* peersStatus, int8_t new
|
|||
}
|
||||
|
||||
if (consistent) {
|
||||
sDebug("vgId:%d, choose master", pNode->vgId);
|
||||
sDebug("vgId:%d, choose master, replica:%d", pNode->vgId, pNode->replica);
|
||||
syncChooseMaster(pNode);
|
||||
} else {
|
||||
sDebug("vgId:%d, cannot choose master since roles inequality", pNode->vgId);
|
||||
sDebug("vgId:%d, cannot choose master since roles inequality, replica:%d", pNode->vgId, pNode->replica);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -743,7 +841,7 @@ static void syncRestartPeer(SSyncPeer *pPeer) {
|
|||
int32_t ret = strcmp(pPeer->fqdn, tsNodeFqdn);
|
||||
if (ret > 0 || (ret == 0 && pPeer->port > tsSyncPort)) {
|
||||
sDebug("%s, check peer connection in 1000 ms", pPeer->id);
|
||||
taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, pPeer, tsSyncTmrCtrl, &pPeer->timer);
|
||||
taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -772,25 +870,30 @@ static void syncProcessSyncRequest(char *msg, SSyncPeer *pPeer) {
|
|||
}
|
||||
|
||||
// start a new thread to retrieve the data
|
||||
syncAddPeerRef(pPeer);
|
||||
(void)syncAcquirePeer(pPeer->rid);
|
||||
|
||||
pthread_attr_t thattr;
|
||||
pthread_t thread;
|
||||
pthread_attr_init(&thattr);
|
||||
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED);
|
||||
int32_t ret = pthread_create(&thread, &thattr, syncRetrieveData, pPeer);
|
||||
int32_t ret = pthread_create(&thread, &thattr, syncRetrieveData, (void *)pPeer->rid);
|
||||
pthread_attr_destroy(&thattr);
|
||||
|
||||
if (ret != 0) {
|
||||
sError("%s, failed to create sync thread since %s", pPeer->id, strerror(errno));
|
||||
syncDecPeerRef(pPeer);
|
||||
} else {
|
||||
pPeer->sstatus = TAOS_SYNC_STATUS_START;
|
||||
sDebug("%s, thread is created to retrieve data, set sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]);
|
||||
}
|
||||
|
||||
syncReleasePeer(pPeer);
|
||||
}
|
||||
|
||||
static void syncNotStarted(void *param, void *tmrId) {
|
||||
SSyncPeer *pPeer = param;
|
||||
int64_t rid = (int64_t)param;
|
||||
SSyncPeer *pPeer = syncAcquirePeer(rid);
|
||||
if (pPeer == NULL) return;
|
||||
|
||||
SSyncNode *pNode = pPeer->pSyncNode;
|
||||
|
||||
pthread_mutex_lock(&pNode->mutex);
|
||||
|
@ -799,15 +902,22 @@ static void syncNotStarted(void *param, void *tmrId) {
|
|||
sInfo("%s, sync conn is still not up, restart and set sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]);
|
||||
syncRestartConnection(pPeer);
|
||||
pthread_mutex_unlock(&pNode->mutex);
|
||||
|
||||
syncReleasePeer(pPeer);
|
||||
}
|
||||
|
||||
static void syncTryRecoverFromMaster(void *param, void *tmrId) {
|
||||
SSyncPeer *pPeer = param;
|
||||
int64_t rid = (int64_t)param;
|
||||
SSyncPeer *pPeer = syncAcquirePeer(rid);
|
||||
if (pPeer == NULL) return;
|
||||
|
||||
SSyncNode *pNode = pPeer->pSyncNode;
|
||||
|
||||
pthread_mutex_lock(&pNode->mutex);
|
||||
syncRecoverFromMaster(pPeer);
|
||||
pthread_mutex_unlock(&pNode->mutex);
|
||||
|
||||
syncReleasePeer(pPeer);
|
||||
}
|
||||
|
||||
static void syncRecoverFromMaster(SSyncPeer *pPeer) {
|
||||
|
@ -823,7 +933,7 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) {
|
|||
// Ensure the sync of mnode not interrupted
|
||||
if (pNode->vgId != 1 && tsSyncNum >= SYNC_MAX_NUM) {
|
||||
sInfo("%s, %d syncs are in process, try later", pPeer->id, tsSyncNum);
|
||||
taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId * 10) % 200, pPeer, tsSyncTmrCtrl, &pPeer->timer);
|
||||
taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId * 10) % 200, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -832,7 +942,7 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) {
|
|||
SSyncMsg msg;
|
||||
syncBuildSyncReqMsg(&msg, pNode->vgId);
|
||||
|
||||
taosTmrReset(syncNotStarted, SYNC_CHECK_INTERVAL, pPeer, tsSyncTmrCtrl, &pPeer->timer);
|
||||
taosTmrReset(syncNotStarted, SYNC_CHECK_INTERVAL, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer);
|
||||
|
||||
if (taosWriteMsg(pPeer->peerFd, &msg, sizeof(SSyncMsg)) != sizeof(SSyncMsg)) {
|
||||
sError("%s, failed to send sync-req to peer", pPeer->id);
|
||||
|
@ -920,8 +1030,10 @@ static int32_t syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t syncProcessPeerMsg(void *param, void *buffer) {
|
||||
SSyncPeer *pPeer = param;
|
||||
static int32_t syncProcessPeerMsg(int64_t rid, void *buffer) {
|
||||
SSyncPeer *pPeer = syncAcquirePeer(rid);
|
||||
if (pPeer == NULL) return -1;
|
||||
|
||||
SSyncHead *pHead = buffer;
|
||||
SSyncNode *pNode = pPeer->pSyncNode;
|
||||
|
||||
|
@ -942,12 +1054,17 @@ static int32_t syncProcessPeerMsg(void *param, void *buffer) {
|
|||
}
|
||||
|
||||
pthread_mutex_unlock(&pNode->mutex);
|
||||
syncReleasePeer(pPeer);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type, uint16_t tranId) {
|
||||
if (pPeer->peerFd < 0 || pPeer->ip == 0) return;
|
||||
static int32_t syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type, uint16_t tranId) {
|
||||
if (pPeer->peerFd < 0 || pPeer->ip == 0) {
|
||||
sDebug("%s, failed to send status msg, restart fd:%d", pPeer->id, pPeer->peerFd);
|
||||
syncRestartConnection(pPeer);
|
||||
return -1;
|
||||
}
|
||||
|
||||
SSyncNode * pNode = pPeer->pSyncNode;
|
||||
SPeersStatus msg;
|
||||
|
@ -970,9 +1087,11 @@ static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type
|
|||
sDebug("%s, status is sent, self:%s:%s:%" PRIu64 ", peer:%s:%s:%" PRIu64 ", ack:%d tranId:%u type:%s pfd:%d",
|
||||
pPeer->id, syncRole[nodeRole], syncStatus[nodeSStatus], nodeVersion, syncRole[pPeer->role],
|
||||
syncStatus[pPeer->sstatus], pPeer->version, ack, tranId, statusType[type], pPeer->peerFd);
|
||||
return 0;
|
||||
} else {
|
||||
sDebug("%s, failed to send status msg, restart", pPeer->id);
|
||||
syncRestartConnection(pPeer);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -989,7 +1108,7 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) {
|
|||
int32_t connFd = taosOpenTcpClientSocket(pPeer->ip, pPeer->port, 0);
|
||||
if (connFd < 0) {
|
||||
sDebug("%s, failed to open tcp socket since %s", pPeer->id, strerror(errno));
|
||||
taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, pPeer, tsSyncTmrCtrl, &pPeer->timer);
|
||||
taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1000,17 +1119,19 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) {
|
|||
sDebug("%s, connection to peer server is setup, pfd:%d sfd:%d tranId:%u", pPeer->id, connFd, pPeer->syncFd, msg.tranId);
|
||||
pPeer->peerFd = connFd;
|
||||
pPeer->role = TAOS_SYNC_ROLE_UNSYNCED;
|
||||
pPeer->pConn = syncAllocateTcpConn(tsTcpPool, pPeer, connFd);
|
||||
syncAddPeerRef(pPeer);
|
||||
pPeer->pConn = syncAllocateTcpConn(tsTcpPool, pPeer->rid, connFd);
|
||||
} else {
|
||||
sDebug("%s, failed to setup peer connection to server since %s, try later", pPeer->id, strerror(errno));
|
||||
taosClose(connFd);
|
||||
taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, pPeer, tsSyncTmrCtrl, &pPeer->timer);
|
||||
taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer);
|
||||
}
|
||||
}
|
||||
|
||||
static void syncCheckPeerConnection(void *param, void *tmrId) {
|
||||
SSyncPeer *pPeer = param;
|
||||
int64_t rid = (int64_t)param;
|
||||
SSyncPeer *pPeer = syncAcquirePeer(rid);
|
||||
if (pPeer == NULL) return;
|
||||
|
||||
SSyncNode *pNode = pPeer->pSyncNode;
|
||||
|
||||
pthread_mutex_lock(&pNode->mutex);
|
||||
|
@ -1019,6 +1140,8 @@ static void syncCheckPeerConnection(void *param, void *tmrId) {
|
|||
syncSetupPeerConnection(pPeer);
|
||||
|
||||
pthread_mutex_unlock(&pNode->mutex);
|
||||
|
||||
syncReleasePeer(pPeer);
|
||||
}
|
||||
|
||||
static void syncCreateRestoreDataThread(SSyncPeer *pPeer) {
|
||||
|
@ -1029,8 +1152,9 @@ static void syncCreateRestoreDataThread(SSyncPeer *pPeer) {
|
|||
pthread_attr_init(&thattr);
|
||||
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED);
|
||||
|
||||
syncAddPeerRef(pPeer);
|
||||
int32_t ret = pthread_create(&(thread), &thattr, (void *)syncRestoreData, pPeer);
|
||||
(void)syncAcquirePeer(pPeer->rid);
|
||||
|
||||
int32_t ret = pthread_create(&(thread), &thattr, (void *)syncRestoreData, (void *)pPeer->rid);
|
||||
pthread_attr_destroy(&thattr);
|
||||
|
||||
if (ret < 0) {
|
||||
|
@ -1038,10 +1162,11 @@ static void syncCreateRestoreDataThread(SSyncPeer *pPeer) {
|
|||
nodeSStatus = TAOS_SYNC_STATUS_INIT;
|
||||
sError("%s, failed to create sync thread, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]);
|
||||
taosClose(pPeer->syncFd);
|
||||
syncDecPeerRef(pPeer);
|
||||
} else {
|
||||
sInfo("%s, sync connection is up", pPeer->id);
|
||||
}
|
||||
|
||||
syncReleasePeer(pPeer);
|
||||
}
|
||||
|
||||
static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) {
|
||||
|
@ -1073,7 +1198,7 @@ static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) {
|
|||
return;
|
||||
}
|
||||
|
||||
sDebug("vgId:%d, sync msg is received, tranId:%u", vgId, msg.tranId);
|
||||
sDebug("vgId:%d, sync connection is incomming, tranId:%u", vgId, msg.tranId);
|
||||
|
||||
SSyncNode *pNode = *ppNode;
|
||||
pthread_mutex_lock(&pNode->mutex);
|
||||
|
@ -1101,8 +1226,7 @@ static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) {
|
|||
sDebug("%s, TCP connection is up, pfd:%d sfd:%d, old pfd:%d", pPeer->id, connFd, pPeer->syncFd, pPeer->peerFd);
|
||||
syncClosePeerConn(pPeer);
|
||||
pPeer->peerFd = connFd;
|
||||
pPeer->pConn = syncAllocateTcpConn(tsTcpPool, pPeer, connFd);
|
||||
syncAddPeerRef(pPeer);
|
||||
pPeer->pConn = syncAllocateTcpConn(tsTcpPool, pPeer->rid, connFd);
|
||||
sDebug("%s, ready to exchange data", pPeer->id);
|
||||
syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_EXCHANGE_DATA, syncGenTranId());
|
||||
}
|
||||
|
@ -1111,23 +1235,21 @@ static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) {
|
|||
pthread_mutex_unlock(&pNode->mutex);
|
||||
}
|
||||
|
||||
static void syncProcessBrokenLink(void *param) {
|
||||
if (param == NULL) return; // the connection for arbitrator
|
||||
SSyncPeer *pPeer = param;
|
||||
static void syncProcessBrokenLink(int64_t rid) {
|
||||
SSyncPeer *pPeer = syncAcquirePeer(rid);
|
||||
if (pPeer == NULL) return;
|
||||
|
||||
SSyncNode *pNode = pPeer->pSyncNode;
|
||||
|
||||
if (taosAcquireRef(tsSyncRefId, pNode->rid) == NULL) return;
|
||||
pthread_mutex_lock(&pNode->mutex);
|
||||
|
||||
sDebug("%s, TCP link is broken since %s, pfd:%d sfd:%d", pPeer->id, strerror(errno), pPeer->peerFd, pPeer->syncFd);
|
||||
pPeer->peerFd = -1;
|
||||
|
||||
if (syncDecPeerRef(pPeer) != 0) {
|
||||
syncRestartConnection(pPeer);
|
||||
}
|
||||
|
||||
syncRestartConnection(pPeer);
|
||||
pthread_mutex_unlock(&pNode->mutex);
|
||||
taosReleaseRef(tsSyncRefId, pNode->rid);
|
||||
|
||||
syncReleasePeer(pPeer);
|
||||
}
|
||||
|
||||
static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle) {
|
||||
|
@ -1198,7 +1320,7 @@ static void syncProcessFwdAck(SSyncNode *pNode, SFwdInfo *pFwdInfo, int32_t code
|
|||
|
||||
static void syncMonitorNodeRole(void *param, void *tmrId) {
|
||||
int64_t rid = (int64_t)param;
|
||||
SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid);
|
||||
SSyncNode *pNode = syncAcquireNode(rid);
|
||||
if (pNode == NULL) return;
|
||||
|
||||
for (int32_t index = 0; index < pNode->replica; index++) {
|
||||
|
@ -1215,12 +1337,12 @@ static void syncMonitorNodeRole(void *param, void *tmrId) {
|
|||
}
|
||||
|
||||
pNode->pRoleTimer = taosTmrStart(syncMonitorNodeRole, SYNC_ROLE_TIMER, (void *)pNode->rid, tsSyncTmrCtrl);
|
||||
taosReleaseRef(tsSyncRefId, rid);
|
||||
syncReleaseNode(pNode);
|
||||
}
|
||||
|
||||
static void syncMonitorFwdInfos(void *param, void *tmrId) {
|
||||
int64_t rid = (int64_t)param;
|
||||
SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid);
|
||||
SSyncNode *pNode = syncAcquireNode(rid);
|
||||
if (pNode == NULL) return;
|
||||
|
||||
SSyncFwds *pSyncFwds = pNode->pSyncFwds;
|
||||
|
@ -1246,7 +1368,7 @@ static void syncMonitorFwdInfos(void *param, void *tmrId) {
|
|||
pNode->pFwdTimer = taosTmrStart(syncMonitorFwdInfos, SYNC_FWD_TIMER, (void *)pNode->rid, tsSyncTmrCtrl);
|
||||
}
|
||||
|
||||
taosReleaseRef(tsSyncRefId, rid);
|
||||
syncReleaseNode(pNode);
|
||||
}
|
||||
|
||||
static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle, int32_t qtype) {
|
||||
|
|
|
@ -90,15 +90,18 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) {
|
|||
break;
|
||||
}
|
||||
|
||||
sDebug("%s, file:%s info is received from master, index:%d size:%" PRId64 " fver:%" PRIu64 " magic:%d", pPeer->id,
|
||||
minfo.name, minfo.index, minfo.size, minfo.fversion, minfo.magic);
|
||||
|
||||
// remove extra files on slave between the current and last index
|
||||
syncRemoveExtraFile(pPeer, pindex + 1, minfo.index - 1);
|
||||
pindex = minfo.index;
|
||||
|
||||
// check the file info
|
||||
sinfo = minfo;
|
||||
sDebug("%s, get file:%s info size:%" PRId64, pPeer->id, minfo.name, minfo.size);
|
||||
sinfo.magic = (*pNode->getFileInfo)(pNode->vgId, sinfo.name, &sinfo.index, TAOS_SYNC_MAX_INDEX, &sinfo.size,
|
||||
&sinfo.fversion);
|
||||
sinfo.magic = (*pNode->getFileInfo)(pNode->vgId, sinfo.name, &sinfo.index, TAOS_SYNC_MAX_INDEX, &sinfo.size, &sinfo.fversion);
|
||||
sDebug("%s, local file:%s info, index:%d size:%" PRId64 " fver:%" PRIu64 " magic:%d", pPeer->id, sinfo.name,
|
||||
sinfo.index, sinfo.size, sinfo.fversion, sinfo.magic);
|
||||
|
||||
// if file not there or magic is not the same, file shall be synced
|
||||
memset(&fileAck, 0, sizeof(SFileAck));
|
||||
|
@ -116,6 +119,8 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) {
|
|||
if (fileAck.sync == 0) {
|
||||
sDebug("%s, %s is the same", pPeer->id, minfo.name);
|
||||
continue;
|
||||
} else {
|
||||
sDebug("%s, %s will be received, size:%" PRId64, pPeer->id, minfo.name, minfo.size);
|
||||
}
|
||||
|
||||
// if sync is required, open file, receive from master, and write to file
|
||||
|
@ -155,7 +160,7 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) {
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t syncRestoreWal(SSyncPeer *pPeer) {
|
||||
static int32_t syncRestoreWal(SSyncPeer *pPeer, uint64_t *wver) {
|
||||
SSyncNode *pNode = pPeer->pSyncNode;
|
||||
int32_t ret, code = -1;
|
||||
uint64_t lastVer = 0;
|
||||
|
@ -198,6 +203,7 @@ static int32_t syncRestoreWal(SSyncPeer *pPeer) {
|
|||
}
|
||||
|
||||
free(pHead);
|
||||
*wver = lastVer;
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -321,12 +327,19 @@ static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) {
|
|||
|
||||
nodeVersion = fversion;
|
||||
|
||||
sInfo("%s, start to restore wal", pPeer->id);
|
||||
if (syncRestoreWal(pPeer) < 0) {
|
||||
sError("%s, failed to restore wal", pPeer->id);
|
||||
sInfo("%s, start to restore wal, fver:%" PRIu64, pPeer->id, nodeVersion);
|
||||
uint64_t wver = 0;
|
||||
code = syncRestoreWal(pPeer, &wver); // lastwar
|
||||
if (code < 0) {
|
||||
sError("%s, failed to restore wal, code:%d", pPeer->id, code);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (wver != 0) {
|
||||
nodeVersion = wver;
|
||||
sDebug("%s, restore wal finished, set sver:%" PRIu64, pPeer->id, nodeVersion);
|
||||
}
|
||||
|
||||
nodeSStatus = TAOS_SYNC_STATUS_CACHE;
|
||||
sInfo("%s, start to insert buffered points, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]);
|
||||
if (syncProcessBufferedFwd(pPeer) < 0) {
|
||||
|
@ -338,7 +351,10 @@ static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) {
|
|||
}
|
||||
|
||||
void *syncRestoreData(void *param) {
|
||||
SSyncPeer *pPeer = param;
|
||||
int64_t rid = (int64_t)param;
|
||||
SSyncPeer *pPeer = syncAcquirePeer(rid);
|
||||
if (pPeer == NULL) return NULL;
|
||||
|
||||
SSyncNode *pNode = pPeer->pSyncNode;
|
||||
|
||||
taosBlockSIGPIPE();
|
||||
|
@ -369,7 +385,7 @@ void *syncRestoreData(void *param) {
|
|||
taosClose(pPeer->syncFd);
|
||||
syncCloseRecvBuffer(pNode);
|
||||
__sync_fetch_and_sub(&tsSyncNum, 1);
|
||||
syncDecPeerRef(pPeer);
|
||||
syncReleasePeer(pPeer);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -104,7 +104,8 @@ static int32_t syncRetrieveFile(SSyncPeer *pPeer) {
|
|||
fileInfo.magic = (*pNode->getFileInfo)(pNode->vgId, fileInfo.name, &fileInfo.index, TAOS_SYNC_MAX_INDEX,
|
||||
&fileInfo.size, &fileInfo.fversion);
|
||||
syncBuildFileInfo(&fileInfo, pNode->vgId);
|
||||
sDebug("%s, file:%s info is sent, size:%" PRId64, pPeer->id, fileInfo.name, fileInfo.size);
|
||||
sDebug("%s, file:%s info is sent, index:%d size:%" PRId64 " fver:%" PRIu64 " magic:%d", pPeer->id, fileInfo.name,
|
||||
fileInfo.index, fileInfo.size, fileInfo.fversion, fileInfo.magic);
|
||||
|
||||
// send the file info
|
||||
int32_t ret = taosWriteMsg(pPeer->syncFd, &(fileInfo), sizeof(SFileInfo));
|
||||
|
@ -144,6 +145,8 @@ static int32_t syncRetrieveFile(SSyncPeer *pPeer) {
|
|||
fileInfo.index++;
|
||||
sDebug("%s, %s is the same", pPeer->id, fileInfo.name);
|
||||
continue;
|
||||
} else {
|
||||
sDebug("%s, %s will be sent", pPeer->id, fileInfo.name);
|
||||
}
|
||||
|
||||
// get the full path to file
|
||||
|
@ -461,7 +464,10 @@ static int32_t syncRetrieveDataStepByStep(SSyncPeer *pPeer) {
|
|||
}
|
||||
|
||||
void *syncRetrieveData(void *param) {
|
||||
SSyncPeer *pPeer = (SSyncPeer *)param;
|
||||
int64_t rid = (int64_t)param;
|
||||
SSyncPeer *pPeer = syncAcquirePeer(rid);
|
||||
if (pPeer == NULL) return NULL;
|
||||
|
||||
SSyncNode *pNode = pPeer->pSyncNode;
|
||||
taosBlockSIGPIPE();
|
||||
|
||||
|
@ -490,7 +496,7 @@ void *syncRetrieveData(void *param) {
|
|||
|
||||
pPeer->fileChanged = 0;
|
||||
taosClose(pPeer->syncFd);
|
||||
syncDecPeerRef(pPeer);
|
||||
syncReleasePeer(pPeer);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -42,7 +42,7 @@ typedef struct SPoolObj {
|
|||
|
||||
typedef struct {
|
||||
SThreadObj *pThread;
|
||||
void * ahandle;
|
||||
int64_t handleId;
|
||||
int32_t fd;
|
||||
int32_t closedByApp;
|
||||
} SConnObj;
|
||||
|
@ -112,7 +112,7 @@ void syncCloseTcpThreadPool(void *param) {
|
|||
tfree(pPool);
|
||||
}
|
||||
|
||||
void *syncAllocateTcpConn(void *param, void *pPeer, int32_t connFd) {
|
||||
void *syncAllocateTcpConn(void *param, int64_t rid, int32_t connFd) {
|
||||
struct epoll_event event;
|
||||
SPoolObj *pPool = param;
|
||||
|
||||
|
@ -130,7 +130,7 @@ void *syncAllocateTcpConn(void *param, void *pPeer, int32_t connFd) {
|
|||
|
||||
pConn->fd = connFd;
|
||||
pConn->pThread = pThread;
|
||||
pConn->ahandle = pPeer;
|
||||
pConn->handleId = rid;
|
||||
pConn->closedByApp = 0;
|
||||
|
||||
event.events = EPOLLIN | EPOLLRDHUP;
|
||||
|
@ -164,7 +164,7 @@ static void taosProcessBrokenLink(SConnObj *pConn) {
|
|||
SPoolInfo * pInfo = &pPool->info;
|
||||
|
||||
if (pConn->closedByApp == 0) shutdown(pConn->fd, SHUT_WR);
|
||||
(*pInfo->processBrokenLink)(pConn->ahandle);
|
||||
(*pInfo->processBrokenLink)(pConn->handleId);
|
||||
|
||||
pThread->numOfFds--;
|
||||
epoll_ctl(pThread->pollFd, EPOLL_CTL_DEL, pConn->fd, NULL);
|
||||
|
@ -221,7 +221,7 @@ static void *syncProcessTcpData(void *param) {
|
|||
}
|
||||
|
||||
if (pConn->closedByApp == 0) {
|
||||
if ((*pInfo->processIncomingMsg)(pConn->ahandle, buffer) < 0) {
|
||||
if ((*pInfo->processIncomingMsg)(pConn->handleId, buffer) < 0) {
|
||||
syncFreeTcpConn(pConn);
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -32,19 +32,19 @@
extern "C" {
#endif

extern int tsdbDebugFlag;
extern int32_t tsdbDebugFlag;

#define tsdbFatal(...) { if (tsdbDebugFlag & DEBUG_FATAL) { taosPrintLog("TDB FATAL ", 255, __VA_ARGS__); }}
#define tsdbError(...) { if (tsdbDebugFlag & DEBUG_ERROR) { taosPrintLog("TDB ERROR ", 255, __VA_ARGS__); }}
#define tsdbWarn(...)  { if (tsdbDebugFlag & DEBUG_WARN)  { taosPrintLog("TDB WARN ", 255, __VA_ARGS__); }}
#define tsdbInfo(...)  { if (tsdbDebugFlag & DEBUG_INFO)  { taosPrintLog("TDB ", 255, __VA_ARGS__); }}
#define tsdbDebug(...) { if (tsdbDebugFlag & DEBUG_DEBUG) { taosPrintLog("TDB ", tsdbDebugFlag, __VA_ARGS__); }}
#define tsdbTrace(...) { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TDB ", tsdbDebugFlag, __VA_ARGS__); }}
#define tsdbFatal(...) do { if (tsdbDebugFlag & DEBUG_FATAL) { taosPrintLog("TDB FATAL ", 255, __VA_ARGS__); }} while(0)
#define tsdbError(...) do { if (tsdbDebugFlag & DEBUG_ERROR) { taosPrintLog("TDB ERROR ", 255, __VA_ARGS__); }} while(0)
#define tsdbWarn(...)  do { if (tsdbDebugFlag & DEBUG_WARN)  { taosPrintLog("TDB WARN ", 255, __VA_ARGS__); }} while(0)
#define tsdbInfo(...)  do { if (tsdbDebugFlag & DEBUG_INFO)  { taosPrintLog("TDB ", 255, __VA_ARGS__); }} while(0)
#define tsdbDebug(...) do { if (tsdbDebugFlag & DEBUG_DEBUG) { taosPrintLog("TDB ", tsdbDebugFlag, __VA_ARGS__); }} while(0)
#define tsdbTrace(...) do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TDB ", tsdbDebugFlag, __VA_ARGS__); }} while(0)

#define TSDB_MAX_TABLE_SCHEMAS 16
#define TSDB_FILE_HEAD_SIZE    512
#define TSDB_FILE_DELIMITER    0xF00AFA0F
#define TSDB_FILE_INIT_MAGIC   0xFFFFFFFF
#define TSDB_FILE_HEAD_SIZE  512
#define TSDB_FILE_DELIMITER  0xF00AFA0F
#define TSDB_FILE_INIT_MAGIC 0xFFFFFFFF

#define TAOS_IN_RANGE(key, keyMin, keyLast) (((key) >= (keyMin)) && ((key) <= (keyMax)))
@ -67,7 +67,8 @@ typedef struct STable {
|
|||
SSkipList* pIndex; // For TSDB_SUPER_TABLE, it is the skiplist index
|
||||
void* eventHandler; // TODO
|
||||
void* streamHandler; // TODO
|
||||
TSKEY lastKey; // lastkey inserted in this table, initialized as 0, TODO: make a structure
|
||||
TSKEY lastKey;
|
||||
SDataRow lastRow;
|
||||
char* sql;
|
||||
void* cqhandle;
|
||||
SRWLatch latch; // TODO: implementa latch functions
|
||||
|
@ -370,8 +371,11 @@ typedef struct {
|
|||
#define TABLE_UID(t) (t)->tableId.uid
|
||||
#define TABLE_TID(t) (t)->tableId.tid
|
||||
#define TABLE_SUID(t) (t)->suid
|
||||
#define TABLE_LASTKEY(t) (t)->lastKey
|
||||
#define TSDB_META_FILE_MAGIC(m) KVSTORE_MAGIC((m)->pStore)
|
||||
#define TSDB_RLOCK_TABLE(t) taosRLockLatch(&((t)->latch))
|
||||
#define TSDB_RUNLOCK_TABLE(t) taosRUnLockLatch(&((t)->latch))
|
||||
#define TSDB_WLOCK_TABLE(t) taosWLockLatch(&((t)->latch))
|
||||
#define TSDB_WUNLOCK_TABLE(t) taosWUnLockLatch(&((t)->latch))
|
||||
|
||||
STsdbMeta* tsdbNewMeta(STsdbCfg* pCfg);
|
||||
void tsdbFreeMeta(STsdbMeta* pMeta);
|
||||
|
@ -401,7 +405,7 @@ static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock,
|
|||
STSchema* pSchema = NULL;
|
||||
STSchema* pTSchema = NULL;
|
||||
|
||||
if (lock) taosRLockLatch(&(pDTable->latch));
|
||||
if (lock) TSDB_RLOCK_TABLE(pDTable);
|
||||
if (version < 0) { // get the latest version of schema
|
||||
pTSchema = pDTable->schema[pDTable->numOfSchemas - 1];
|
||||
} else { // get the schema with version
|
||||
|
@ -423,7 +427,7 @@ static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock,
|
|||
}
|
||||
|
||||
_exit:
|
||||
if (lock) taosRUnLockLatch(&(pDTable->latch));
|
||||
if (lock) TSDB_RUNLOCK_TABLE(pDTable);
|
||||
return pSchema;
|
||||
}
|
||||
|
||||
|
@ -443,6 +447,11 @@ static FORCE_INLINE STSchema *tsdbGetTableTagSchema(STable *pTable) {
|
|||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE TSKEY tsdbGetTableLastKeyImpl(STable* pTable) {
|
||||
ASSERT(pTable->lastRow == NULL || pTable->lastKey == dataRowKey(pTable->lastRow));
|
||||
return pTable->lastKey;
|
||||
}
|
||||
|
||||
// ------------------ tsdbBuffer.c
|
||||
#define TSDB_BUFFER_RESERVE 1024 // Reseve 1K as commit threshold
|
||||
|
||||
|
|
|
@ -159,6 +159,11 @@ _err:
|
|||
|
||||
static void tsdbEndCommit(STsdbRepo *pRepo, int eno) {
|
||||
if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER, eno);
|
||||
SMemTable *pIMem = pRepo->imem;
|
||||
tsdbLockRepo(pRepo);
|
||||
pRepo->imem = NULL;
|
||||
tsdbUnlockRepo(pRepo);
|
||||
tsdbUnRefMemTable(pRepo, pIMem);
|
||||
sem_post(&(pRepo->readyToCommit));
|
||||
}
|
||||
|
||||
|
@ -216,7 +221,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitH *pch) {
|
|||
SCommitIter *pIter = iters + tid;
|
||||
if (pIter->pTable == NULL) continue;
|
||||
|
||||
taosRLockLatch(&(pIter->pTable->latch));
|
||||
TSDB_RLOCK_TABLE(pIter->pTable);
|
||||
|
||||
if (tsdbSetHelperTable(pHelper, pIter->pTable, pRepo) < 0) goto _err;
|
||||
|
||||
|
@ -227,7 +232,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitH *pch) {
|
|||
}
|
||||
|
||||
if (tsdbCommitTableData(pHelper, pIter, pDataCols, maxKey) < 0) {
|
||||
taosRUnLockLatch(&(pIter->pTable->latch));
|
||||
TSDB_RUNLOCK_TABLE(pIter->pTable);
|
||||
tsdbError("vgId:%d failed to write data of table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo),
|
||||
TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable),
|
||||
tstrerror(terrno));
|
||||
|
@ -235,7 +240,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitH *pch) {
|
|||
}
|
||||
}
|
||||
|
||||
taosRUnLockLatch(&(pIter->pTable->latch));
|
||||
TSDB_RUNLOCK_TABLE(pIter->pTable);
|
||||
|
||||
// Move the last block to the new .l file if neccessary
|
||||
if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) {
|
||||
|
|
|
@ -74,9 +74,9 @@ int32_t tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg) {
|
|||
|
||||
tsdbDebug(
|
||||
"vgId:%d tsdb env create succeed! cacheBlockSize %d totalBlocks %d daysPerFile %d keep "
|
||||
"%d minRowsPerFileBlock %d maxRowsPerFileBlock %d precision %d compression %d",
|
||||
"%d minRowsPerFileBlock %d maxRowsPerFileBlock %d precision %d compression %d update %d cacheLastRow %d",
|
||||
pCfg->tsdbId, pCfg->cacheBlockSize, pCfg->totalBlocks, pCfg->daysPerFile, pCfg->keep, pCfg->minRowsPerFileBlock,
|
||||
pCfg->maxRowsPerFileBlock, pCfg->precision, pCfg->compression);
|
||||
pCfg->maxRowsPerFileBlock, pCfg->precision, pCfg->compression, pCfg->update, pCfg->cacheLastRow);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -281,6 +281,10 @@ int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg) {
|
|||
config.totalBlocks = pCfg->totalBlocks;
|
||||
configChanged = true;
|
||||
}
|
||||
if (pRCfg->cacheLastRow != pCfg->cacheLastRow) {
|
||||
config.cacheLastRow = pCfg->cacheLastRow;
|
||||
configChanged = true;
|
||||
}
|
||||
|
||||
if (configChanged) {
|
||||
if (tsdbSaveConfig(pRepo->rootDir, &config) < 0) {
|
||||
|
@ -475,6 +479,9 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) {
|
|||
// update check
|
||||
if (pCfg->update != 0) pCfg->update = 1;
|
||||
|
||||
// update cacheLastRow
|
||||
if (pCfg->cacheLastRow != 0) pCfg->cacheLastRow = 1;
|
||||
|
||||
return 0;
|
||||
|
||||
_err:
|
||||
|
@ -696,10 +703,12 @@ static void tsdbFreeRepo(STsdbRepo *pRepo) {
|
|||
}
|
||||
}
|
||||
|
||||
static int tsdbRestoreInfo(STsdbRepo *pRepo) {
|
||||
static int tsdbRestoreInfo(STsdbRepo *pRepo) { // TODO
|
||||
STsdbMeta * pMeta = pRepo->tsdbMeta;
|
||||
STsdbFileH *pFileH = pRepo->tsdbFileH;
|
||||
SFileGroup *pFGroup = NULL;
|
||||
STsdbCfg * pCfg = &(pRepo->config);
|
||||
SCompBlock *pBlock = NULL;
|
||||
|
||||
SFileGroupIter iter;
|
||||
SRWHelper rhelper = {0};
|
||||
|
@ -717,7 +726,32 @@ static int tsdbRestoreInfo(STsdbRepo *pRepo) {
|
|||
if (tsdbSetHelperTable(&rhelper, pTable, pRepo) < 0) goto _err;
|
||||
SBlockIdx *pIdx = &(rhelper.curCompIdx);
|
||||
|
||||
if (pIdx->offset > 0 && pTable->lastKey < pIdx->maxKey) pTable->lastKey = pIdx->maxKey;
|
||||
TSKEY lastKey = tsdbGetTableLastKeyImpl(pTable);
|
||||
if (pIdx->offset > 0 && lastKey < pIdx->maxKey) {
|
||||
pTable->lastKey = pIdx->maxKey;
|
||||
if (pCfg->cacheLastRow) { // load the block of data
|
||||
if (tsdbLoadCompInfo(&rhelper, NULL) < 0) goto _err;
|
||||
|
||||
pBlock = rhelper.pCompInfo->blocks + pIdx->numOfBlocks - 1;
|
||||
if (tsdbLoadBlockData(&rhelper, pBlock, NULL) < 0) goto _err;
|
||||
|
||||
// construct the data row
|
||||
ASSERT(pTable->lastRow == NULL);
|
||||
STSchema *pSchema = tsdbGetTableSchema(pTable);
|
||||
pTable->lastRow = taosTMalloc(schemaTLen(pSchema));
|
||||
if (pTable->lastRow == NULL) {
|
||||
goto _err;
|
||||
}
|
||||
|
||||
tdInitDataRow(pTable->lastRow, pSchema);
|
||||
for (int icol = 0; icol < schemaNCols(pSchema); icol++) {
|
||||
STColumn *pCol = schemaColAt(pSchema, icol);
|
||||
SDataCol *pDataCol = rhelper.pDataCols[0]->cols + icol;
|
||||
tdAppendColVal(pTable->lastRow, tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type, pCol->bytes,
|
||||
pCol->offset);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -804,6 +838,7 @@ static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg) {
|
|||
tlen += taosEncodeFixedI8(buf, pCfg->precision);
|
||||
tlen += taosEncodeFixedI8(buf, pCfg->compression);
|
||||
tlen += taosEncodeFixedI8(buf, pCfg->update);
|
||||
tlen += taosEncodeFixedI8(buf, pCfg->cacheLastRow);
|
||||
|
||||
return tlen;
|
||||
}
|
||||
|
@ -821,6 +856,7 @@ static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) {
|
|||
buf = taosDecodeFixedI8(buf, &(pCfg->precision));
|
||||
buf = taosDecodeFixedI8(buf, &(pCfg->compression));
|
||||
buf = taosDecodeFixedI8(buf, &(pCfg->update));
|
||||
buf = taosDecodeFixedI8(buf, &(pCfg->cacheLastRow));
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include "tsdbMain.h"
|
||||
|
||||
#define TSDB_DATA_SKIPLIST_LEVEL 5
|
||||
#define TSDB_MAX_INSERT_BATCH 512
|
||||
|
||||
static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo);
|
||||
static void tsdbFreeMemTable(SMemTable *pMemTable);
|
||||
|
@ -35,6 +36,7 @@ static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPB
|
|||
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable);
|
||||
static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter);
|
||||
static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter);
|
||||
static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SDataRow row);
|
||||
|
||||
static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey,
|
||||
TSKEY now);
|
||||
|
@ -205,7 +207,7 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
|
|||
int tsdbAsyncCommit(STsdbRepo *pRepo) {
|
||||
if (pRepo->mem == NULL) return 0;
|
||||
|
||||
SMemTable *pIMem = pRepo->imem;
|
||||
ASSERT(pRepo->imem == NULL);
|
||||
|
||||
sem_wait(&(pRepo->readyToCommit));
|
||||
|
||||
|
@ -220,8 +222,6 @@ int tsdbAsyncCommit(STsdbRepo *pRepo) {
|
|||
tsdbScheduleCommit(pRepo);
|
||||
if (tsdbUnlockRepo(pRepo) < 0) return -1;
|
||||
|
||||
if (tsdbUnRefMemTable(pRepo, pIMem) < 0) return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -609,19 +609,13 @@ static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *
|
|||
STable * pTable = NULL;
|
||||
SSubmitBlkIter blkIter = {0};
|
||||
SDataRow row = NULL;
|
||||
void ** rows = NULL;
|
||||
void * rows[TSDB_MAX_INSERT_BATCH] = {0};
|
||||
int rowCounter = 0;
|
||||
|
||||
ASSERT(pBlock->tid < pMeta->maxTables);
|
||||
pTable = pMeta->tables[pBlock->tid];
|
||||
ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
|
||||
|
||||
rows = (void **)calloc(pBlock->numOfRows, sizeof(void *));
|
||||
if (rows == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
tsdbInitSubmitBlkIter(pBlock, &blkIter);
|
||||
while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) {
|
||||
if (tsdbCopyRowToMem(pRepo, row, pTable, &(rows[rowCounter])) < 0) {
|
||||
|
@ -635,9 +629,18 @@ static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *
|
|||
if (rows[rowCounter] != NULL) {
|
||||
rowCounter++;
|
||||
}
|
||||
|
||||
if (rowCounter == TSDB_MAX_INSERT_BATCH) {
|
||||
if (tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
|
||||
rowCounter = 0;
|
||||
memset(rows, 0, sizeof(rows));
|
||||
}
|
||||
}
|
||||
|
||||
if (tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) {
|
||||
if (rowCounter > 0 && tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
|
||||
|
@ -645,11 +648,9 @@ static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *
|
|||
pRepo->stat.pointsWritten += points * schemaNCols(pSchema);
|
||||
pRepo->stat.totalStorage += points * schemaVLen(pSchema);
|
||||
|
||||
free(rows);
|
||||
return 0;
|
||||
|
||||
_err:
|
||||
free(rows);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -666,9 +667,10 @@ static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void
|
|||
return -1;
|
||||
}
|
||||
|
||||
if (key > TABLE_LASTKEY(pTable)) {
|
||||
TSKEY lastKey = tsdbGetTableLastKeyImpl(pTable);
|
||||
if (key > lastKey) {
|
||||
tsdbTrace("vgId:%d skip to delete row key %" PRId64 " which is larger than table lastKey %" PRId64,
|
||||
REPO_ID(pRepo), key, TABLE_LASTKEY(pTable));
|
||||
REPO_ID(pRepo), key, lastKey);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
@ -849,8 +851,10 @@ static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **ro
|
|||
if (pTableData->keyLast < dataRowKey(rows[rowCounter - 1])) pTableData->keyLast = dataRowKey(rows[rowCounter - 1]);
|
||||
pTableData->numOfRows += dsize;
|
||||
|
||||
// TODO: impl delete row thing
|
||||
if (TABLE_LASTKEY(pTable) < dataRowKey(rows[rowCounter-1])) TABLE_LASTKEY(pTable) = dataRowKey(rows[rowCounter-1]);
|
||||
// update table latest info
|
||||
if (tsdbUpdateTableLatestInfo(pRepo, pTable, rows[rowCounter - 1]) < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -892,4 +896,38 @@ static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) {
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SDataRow row) {
|
||||
STsdbCfg *pCfg = &pRepo->config;
|
||||
|
||||
if (tsdbGetTableLastKeyImpl(pTable) < dataRowKey(row)) {
|
||||
if (pCfg->cacheLastRow || pTable->lastRow != NULL) {
|
||||
SDataRow nrow = pTable->lastRow;
|
||||
if (taosTSizeof(nrow) < dataRowLen(row)) {
|
||||
SDataRow orow = nrow;
|
||||
nrow = taosTMalloc(dataRowLen(row));
|
||||
if (nrow == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
dataRowCpy(nrow, row);
|
||||
TSDB_WLOCK_TABLE(pTable);
|
||||
pTable->lastKey = dataRowKey(row);
|
||||
pTable->lastRow = nrow;
|
||||
TSDB_WUNLOCK_TABLE(pTable);
|
||||
taosTZfree(orow);
|
||||
} else {
|
||||
TSDB_WLOCK_TABLE(pTable);
|
||||
pTable->lastKey = dataRowKey(row);
|
||||
dataRowCpy(nrow, row);
|
||||
TSDB_WUNLOCK_TABLE(pTable);
|
||||
}
|
||||
} else {
|
||||
pTable->lastKey = dataRowKey(row);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More