Merge branch 'develop' into feature/query

This commit is contained in:
Haojun Liao 2020-09-12 16:05:39 +08:00
commit 5e8ff5103c
100 changed files with 2679 additions and 932 deletions

View File

@ -121,7 +121,21 @@ func (alert *Alert) refresh(rule *Rule, values map[string]interface{}) {
	alert.Values = values
	res := rule.Expr.Eval(func(key string) interface{} {
		// ToLower is required as column name in result is in lower case
		i := alert.Values[strings.ToLower(key)]
		switch v := i.(type) {
		case int8:
			return int64(v)
		case int16:
			return int64(v)
		case int:
			return int64(v)
		case int32:
			return int64(v)
		case float32:
			return float64(v)
		default:
			return v
		}
	})
	val, ok := res.(bool)
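The change above widens narrow integers (int8/int16/int32/int) to int64 and float32 to float64 before the rule expression sees them, so comparisons against numeric literals behave consistently regardless of the column type returned by the query. A minimal, self-contained sketch of the same widening idea (the helper name `normalize` and the sample values are illustrative and not part of the alert module):

```go
package main

import "fmt"

// normalize widens the narrower numeric types a query result may carry
// (int8/int16/int32/int, float32) to int64/float64, so an expression
// evaluator comparing against literal numbers sees one consistent type.
func normalize(i interface{}) interface{} {
	switch v := i.(type) {
	case int8:
		return int64(v)
	case int16:
		return int64(v)
	case int:
		return int64(v)
	case int32:
		return int64(v)
	case float32:
		return float64(v)
	default:
		return v
	}
}

func main() {
	// Without widening, comparing int8(42) against an int64 literal would
	// require handling many type pairs; after normalize, both sides can be
	// treated as int64 (or float64 for floating-point values).
	raw := map[string]interface{}{"temperature": int8(42), "load": float32(0.75)}
	for k, v := range raw {
		fmt.Printf("%s: %v -> %T\n", k, normalize(v), normalize(v))
	}
}
```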

View File

@ -119,7 +119,7 @@ WantedBy=multi-user.target
	return nil
}

const version = "TDengine alert v2.0.0.1"

func main() {
	var (

View File

@ -128,24 +128,84 @@ When a new dnode joins a TDengine cluster, the following cluster-related parameters are involved
- maxTablesPerVnode: maximum number of tables that can be created in each vnode. Default: 1000000.
- maxVgroupsPerDb: maximum number of vnodes that can be used by each database.
- arbitrator: end point of the arbitrator in the system; empty by default.
- timezone, locale, charset: see the client configuration section below.
## Client Configuration

The front-end interactive client application of TDengine is taos, which shares the configuration file taos.cfg with taosd. When running taos, use the -c parameter to specify the configuration file directory, e.g. taos -c /home/cfg means the parameters in taos.cfg under /home/cfg/ are used; the default directory is /etc/taos. This section mainly describes the parameters the taos client application uses in the taos.cfg configuration file.

Client configuration parameters and their descriptions:

- firstEp: end point of the first taosd instance in the cluster that taos actively connects to at startup. Default: localhost:6030.
- secondEp: end point of the second taosd instance in the cluster that taos tries if firstEp cannot be reached. Default: empty.

- locale

> Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or through the API.

TDengine provides a dedicated field type, nchar, for storing wide characters in non-ASCII encodings such as Chinese, Japanese and Korean. Data written to an nchar field is always encoded as UCS4-LE before being sent to the server. Note that it is the client that guarantees the encoding is correct. Therefore, to use nchar fields to store non-ASCII characters such as Chinese, Japanese or Korean correctly, the client's encoding must be configured correctly.

Characters entered on the client use the operating system's current default encoding, which on Linux is usually UTF-8, while some Chinese systems may use GB18030 or GBK; in a docker environment the default encoding is POSIX, and on Chinese-language Windows it is CP936. The client must set the character set it actually uses, i.e. the current encoding of the operating system it runs on, so that data in nchar fields can be converted to UCS4-LE correctly.

On Linux, the locale naming rule is <language>_<region>.<charset>, e.g. zh_CN.UTF-8, where zh stands for Chinese, CN for mainland China, and UTF-8 is the character set. The character set tells the client how to convert and parse local strings. Linux and Mac OSX can determine the system character encoding through the locale; since the locale format used by Windows is not the POSIX standard locale format, Windows needs a separate configuration parameter, charset, to specify the character encoding. charset can also be used on Linux to specify the character encoding.
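A short, self-contained sketch of what "encoded as UCS4-LE" means in practice (this only illustrates the encoding; it is not the client library's actual implementation): each Unicode code point of a UTF-8 input string is written as a 4-byte little-endian integer.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// toUCS4LE encodes a Go (UTF-8) string as UCS-4/UTF-32 little-endian bytes:
// each code point becomes one 4-byte little-endian integer.
func toUCS4LE(s string) []byte {
	out := make([]byte, 0, 4*len(s))
	for _, r := range s { // ranging over a string yields runes (code points)
		var buf [4]byte
		binary.LittleEndian.PutUint32(buf[:], uint32(r))
		out = append(out, buf[:]...)
	}
	return out
}

func main() {
	// Prints: 61 00 00 00 62 00 00 00 2d 4e 00 00  ('a', 'b', U+4E2D)
	fmt.Printf("% x\n", toUCS4LE("ab中"))
}
```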
- charset

> Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or through the API.

If charset is not set in the configuration file, on Linux taos reads the system's current locale at startup and extracts the charset encoding from it. If reading the locale fails, it tries to read the charset setting; if that also fails, the startup process is aborted.

On Linux the locale already contains the character encoding, so once the Linux locale is set correctly there is no need to set charset separately. For example:
```
locale zh_CN.UTF-8
```
On Windows the current system encoding cannot be obtained from the locale. If the character encoding cannot be read from the configuration file, taos falls back to CP936, which is equivalent to adding the following line to the configuration file:
```
charset CP936
```
If you need a different character encoding, check which encoding the current operating system uses and set it correctly in the configuration file.

On Linux, if both locale and charset are set and they disagree, the value set later overrides the one set earlier.
```
locale zh_CN.UTF-8
charset GBK
```
Here the effective charset is GBK.
```
charset GBK
locale zh_CN.UTF-8
```
Here the effective charset is UTF-8.
The log configuration parameters are exactly the same as those of the server.

- timezone

Default: the current time zone setting, obtained dynamically from the system.

The time zone of the system the client runs on. To handle data writes and queries across multiple time zones, TDengine uses Unix timestamps to record and store time. The nature of the Unix timestamp means that, at any given moment, the timestamp produced in any time zone is identical. Note that the conversion to a Unix timestamp is done on the client, so the correct time zone must be set for the client to convert other time formats into the correct Unix timestamp.

On Linux the client automatically reads the time zone configured in the system. The time zone can also be set in the configuration file in several ways. For example:
```
timezone UTC-8
timezone GMT-8
timezone Asia/Shanghai
```
are all valid ways to configure the UTC+8 (East Eight) time zone.

The time zone setting affects how non-Unix-timestamp content in queries and writes (timestamp strings and the keyword now) is parsed. For example:
```
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
```
In the UTC+8 time zone this statement is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554955268000;
```
In the UTC time zone it is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```
To avoid the ambiguity of string time formats, Unix timestamps can be used directly. In addition, timestamp strings with an explicit time zone can be used in SQL statements, for example an RFC3339 timestamp string (2013-04-12T15:52:01.123+08:00) or an ISO-8601 timestamp string (2013-04-12T15:52:01.123+0800). The conversion of these two strings to Unix timestamps does not depend on the time zone of the system.
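The equivalence of the two statements above can be checked with a short Go program (illustrative only; in practice the conversion is done inside the TDengine client library):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05"

	// Interpreted in the UTC+8 time zone (e.g. Asia/Shanghai):
	cst := time.FixedZone("UTC+8", 8*3600)
	t1, _ := time.ParseInLocation(layout, "2019-04-11 12:01:08", cst)
	fmt.Println(t1.UnixNano() / int64(time.Millisecond)) // 1554955268000

	// Interpreted in UTC:
	t2, _ := time.ParseInLocation(layout, "2019-04-11 12:01:08", time.UTC)
	fmt.Println(t2.UnixNano() / int64(time.Millisecond)) // 1554984068000
}
```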
## User Management

View File

@ -95,6 +95,8 @@ TDengine is an efficient platform for storing, querying and analyzing time-series big data
- [Data Query](https://www.taosdata.com/cn/documentation20/taos-sql/#数据查询): supports time ranges, value filtering, sorting, manual paging of query results, etc.
- [SQL Functions](https://www.taosdata.com/cn/documentation20/taos-sql/#SQL函数): supports various aggregation, selection and computation functions such as avg, min, diff, etc.
- [Time-Dimension Aggregation](https://www.taosdata.com/cn/documentation20/taos-sql/#时间维度聚合): slices table data by time window and aggregates it, reducing dimensionality
- [Boundary Limits](https://www.taosdata.com/cn/documentation20/taos-sql/#TAOS-SQL-边界限制): boundary limits of TAOS SQL
- [Error Codes](https://www.taosdata.com/cn/documentation20/Taos-Error-Code): TDengine 2.0 error codes and their corresponding decimal codes
## Technical Design of TDengine

View File

@ -86,13 +86,29 @@ TDengine缺省的时间戳是毫秒精度但通过修改配置参数enableMic
```mysql ```mysql
ALTER DATABASE db_name COMP 2; ALTER DATABASE db_name COMP 2;
``` ```
修改数据库文件压缩标志位有效数字为012. 0表示不压缩1表示一阶段压缩2表示两阶段压缩。修改后可以使用show databases命令查看是否修改成功 COMP参数是指修改数据库文件压缩标志位取值范围为[0, 2]. 0表示不压缩1表示一阶段压缩2表示两阶段压缩。
```mysql ```mysql
ALTER DATABASE db_name REPLICA 2; ALTER DATABASE db_name REPLICA 2;
``` ```
修改数据库副本数有效副本数为1到3。在集群中使用副本数必须小于dnode的数目。修改后可以使用show databases命令查看是否修改成功 REPLICA参数是指修改数据库副本数取值范围[1, 3]。在集群中使用副本数必须小于dnode的数目。
```mysql
ALTER DATABASE db_name KEEP 365;
```
KEEP参数是指修改数据文件保存的天数缺省值为3650取值范围[days, 365000]必须大于或等于days参数值。
```mysql
ALTER DATABASE db_name QUORUM 365;
```
QUORUM参数是指数据写入成功所需要的确认数。取值范围[1, 3]。对于异步复制quorum设为1具有master角色的虚拟节点自己确认即可。对于同步复制需要至少大于等于2。原则上Quorum >=1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。
```mysql
ALTER DATABASE db_name BLOCKS 365;
```
BLOCKS参数是每个VNODE (TSDB) 中有多少cache大小的内存块因此一个VNODE的用的内存大小粗略为cache * blocks。取值范围[3, 1000]。
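For a rough sense of scale, using purely illustrative numbers rather than defaults taken from this document: if cache were 16 MB and blocks were 6, a single vnode would use roughly 16 MB × 6 = 96 MB for these memory blocks.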
**Tips**: After any of the parameters above is changed, you can run show databases to check whether the change took effect.
- **Show all databases in the system**
```mysql

View File

@ -0,0 +1,173 @@
# TDengine 2.0 错误码以及对应的十进制码
| Code | bit | error code | 错误描述 | 十进制错误码 |
|-----------------------| :---: | :---------: | :------------------------ | ---------------- |
|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647|
|TSDB_CODE_RPC_AUTH_REQUIRED| 0 | 0x0002 | "Authentication required"| -2147483646|
|TSDB_CODE_RPC_AUTH_FAILURE| 0| 0x0003 | "Authentication failure"| -2147483645|
|TSDB_CODE_RPC_REDIRECT |0 | 0x0004| "Redirect"| -2147483644|
|TSDB_CODE_RPC_NOT_READY| 0 | 0x0005 | "System not ready"| -2147483643|
|TSDB_CODE_RPC_ALREADY_PROCESSED| 0 | 0x0006 |"Message already processed"| -2147483642|
|TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED| 0 |0x0007| "Last session not finished"| -2147483641|
|TSDB_CODE_RPC_MISMATCHED_LINK_ID| 0| 0x0008 | "Mismatched meter id"| -2147483640|
|TSDB_CODE_RPC_TOO_SLOW| 0 | 0x0009 | "Processing of request timed out"| -2147483639|
|TSDB_CODE_RPC_MAX_SESSIONS| 0 | 0x000A | "Number of sessions reached limit"| -2147483638|
|TSDB_CODE_RPC_NETWORK_UNAVAIL| 0 |0x000B | "Unable to establish connection" |-2147483637|
|TSDB_CODE_RPC_APP_ERROR| 0| 0x000C | "Unexpected generic error in RPC"| -2147483636|
|TSDB_CODE_RPC_UNEXPECTED_RESPONSE| 0 |0x000D | "Unexpected response"| -2147483635|
|TSDB_CODE_RPC_INVALID_VALUE| 0 | 0x000E | "Invalid value"| -2147483634|
|TSDB_CODE_RPC_INVALID_TRAN_ID| 0 | 0x000F | "Invalid transaction id"| -2147483633|
|TSDB_CODE_RPC_INVALID_SESSION_ID| 0| 0x0010 | "Invalid session id"| -2147483632|
|TSDB_CODE_RPC_INVALID_MSG_TYPE| 0| 0x0011| "Invalid message type"| -2147483631|
|TSDB_CODE_RPC_INVALID_RESPONSE_TYPE| 0 | 0x0012| "Invalid response type"| -2147483630|
|TSDB_CODE_RPC_INVALID_TIME_STAMP| 0| 0x0013| "Invalid timestamp"| -2147483629|
|TSDB_CODE_COM_OPS_NOT_SUPPORT| 0 | 0x0100| "Operation not supported"| -2147483392|
|TSDB_CODE_COM_MEMORY_CORRUPTED |0| 0x0101 | "Memory corrupted"| -2147483391|
|TSDB_CODE_COM_OUT_OF_MEMORY| 0| 0x0102| "Out of memory"| -2147483390|
|TSDB_CODE_COM_INVALID_CFG_MSG| 0 | 0x0103| "Invalid config message"| -2147483389|
|TSDB_CODE_COM_FILE_CORRUPTED| 0| 0x0104| "Data file corrupted" |-2147483388|
|TSDB_CODE_TSC_INVALID_SQL| 0| 0x0200 | "Invalid SQL statement"| -2147483136|
|TSDB_CODE_TSC_INVALID_QHANDLE| 0 | 0x0201 | "Invalid qhandle"| -2147483135|
|TSDB_CODE_TSC_INVALID_TIME_STAMP| 0 | 0x0202 | "Invalid combination of client/service time"| -2147483134|
|TSDB_CODE_TSC_INVALID_VALUE| 0 | 0x0203| "Invalid value in client"| -2147483133|
|TSDB_CODE_TSC_INVALID_VERSION| 0 | 0x0204 | "Invalid client version" |-2147483132|
|TSDB_CODE_TSC_INVALID_IE| 0 | 0x0205 | "Invalid client ie" |-2147483131|
|TSDB_CODE_TSC_INVALID_FQDN| 0 | 0x0206| "Invalid host name"| -2147483130|
|TSDB_CODE_TSC_INVALID_USER_LENGTH| 0 | 0x0207| "Invalid user name"| -2147483129|
|TSDB_CODE_TSC_INVALID_PASS_LENGTH| 0 | 0x0208 | "Invalid password"| -2147483128|
|TSDB_CODE_TSC_INVALID_DB_LENGTH| 0 | 0x0209| "Database name too long"| -2147483127|
|TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH| 0 | 0x020A | "Table name too long"| -2147483126|
|TSDB_CODE_TSC_INVALID_CONNECTION| 0 | 0x020B| "Invalid connection"| -2147483125|
|TSDB_CODE_TSC_OUT_OF_MEMORY| 0 | 0x020C | "System out of memory" |-2147483124|
|TSDB_CODE_TSC_NO_DISKSPACE| 0 | 0x020D | "System out of disk space"| -2147483123|
|TSDB_CODE_TSC_QUERY_CACHE_ERASED| 0 | 0x020E| "Query cache erased"| -2147483122|
|TSDB_CODE_TSC_QUERY_CANCELLED| 0 | 0x020F |"Query terminated"| -2147483121|
|TSDB_CODE_TSC_SORTED_RES_TOO_MANY| 0 |0x0210 | "Result set too large to be sorted"| -2147483120|
|TSDB_CODE_TSC_APP_ERROR| 0 | 0x0211 | "Application error"| -2147483119|
|TSDB_CODE_TSC_ACTION_IN_PROGRESS| 0 |0x0212 | "Action in progress"| -2147483118|
|TSDB_CODE_TSC_DISCONNECTED| 0 | 0x0213 |"Disconnected from service" |-2147483117|
|TSDB_CODE_TSC_NO_WRITE_AUTH| 0 | 0x0214 | "No write permission" |-2147483116|
|TSDB_CODE_MND_MSG_NOT_PROCESSED| 0| 0x0300| "Message not processed"| -2147482880|
|TSDB_CODE_MND_ACTION_IN_PROGRESS| 0 | 0x0301 |"Message is progressing"| -2147482879|
|TSDB_CODE_MND_ACTION_NEED_REPROCESSED| 0 | 0x0302 |"Messag need to be reprocessed"| -2147482878|
|TSDB_CODE_MND_NO_RIGHTS| 0 | 0x0303| "Insufficient privilege for operation"| -2147482877|
|TSDB_CODE_MND_APP_ERROR| 0 | 0x0304 | "Unexpected generic error in mnode"| -2147482876|
|TSDB_CODE_MND_INVALID_CONNECTION| 0 | 0x0305 | "Invalid message connection"| -2147482875|
|TSDB_CODE_MND_INVALID_MSG_VERSION| 0 | 0x0306 | "Incompatible protocol version"| -2147482874|
|TSDB_CODE_MND_INVALID_MSG_LEN| 0| 0x0307 | "Invalid message length"| -2147482873|
|TSDB_CODE_MND_INVALID_MSG_TYPE| 0 | 0x0308 | "Invalid message type" |-2147482872|
|TSDB_CODE_MND_TOO_MANY_SHELL_CONNS| 0 |0x0309 | "Too many connections"| -2147482871|
|TSDB_CODE_MND_OUT_OF_MEMORY| 0 |0x030A | "Out of memory in mnode"| -2147482870|
|TSDB_CODE_MND_INVALID_SHOWOBJ| 0 | 0x030B |"Data expired"| -2147482869|
|TSDB_CODE_MND_INVALID_QUERY_ID |0 | 0x030C |"Invalid query id" |-2147482868|
|TSDB_CODE_MND_INVALID_STREAM_ID| 0 |0x030D | "Invalid stream id"| -2147482867|
|TSDB_CODE_MND_INVALID_CONN_ID| 0| 0x030E | "Invalid connection id" |-2147482866|
|TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE| 0 | 0x0320| "Object already there"| -2147482848|
|TSDB_CODE_MND_SDB_ERROR| 0 |0x0321 | "Unexpected generic error in sdb" |-2147482847|
|TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE| 0 | 0x0322| "Invalid table type" |-2147482846|
|TSDB_CODE_MND_SDB_OBJ_NOT_THERE| 0 | 0x0323 |"Object not there" |-2147482845|
|TSDB_CODE_MND_SDB_INVAID_META_ROW| 0 | 0x0324| "Invalid meta row" |-2147482844|
|TSDB_CODE_MND_SDB_INVAID_KEY_TYPE| 0 | 0x0325 |"Invalid key type" |-2147482843|
|TSDB_CODE_MND_DNODE_ALREADY_EXIST| 0 | 0x0330 | "DNode already exists"| -2147482832|
|TSDB_CODE_MND_DNODE_NOT_EXIST| 0 | 0x0331| "DNode does not exist" |-2147482831|
|TSDB_CODE_MND_VGROUP_NOT_EXIST| 0 | 0x0332 |"VGroup does not exist"| -2147482830|
|TSDB_CODE_MND_NO_REMOVE_MASTER |0 | 0x0333 | "Master DNode cannot be removed"| -2147482829|
|TSDB_CODE_MND_NO_ENOUGH_DNODES |0 | 0x0334| "Out of DNodes"| -2147482828|
|TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT |0 | 0x0335 | "Cluster cfg inconsistent"| -2147482827|
|TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION| 0 | 0x0336 | "Invalid dnode cfg option"| -2147482826|
|TSDB_CODE_MND_BALANCE_ENABLED| 0 | 0x0337 | "Balance already enabled" |-2147482825|
|TSDB_CODE_MND_VGROUP_NOT_IN_DNODE| 0 |0x0338 | "Vgroup not in dnode"| -2147482824|
|TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE| 0 | 0x0339 | "Vgroup already in dnode"| -2147482823|
|TSDB_CODE_MND_DNODE_NOT_FREE |0 | 0x033A |"Dnode not avaliable"| -2147482822|
|TSDB_CODE_MND_INVALID_CLUSTER_ID |0 |0x033B | "Cluster id not match"| -2147482821|
|TSDB_CODE_MND_NOT_READY| 0 | 0x033C |"Cluster not ready"| -2147482820|
|TSDB_CODE_MND_ACCT_ALREADY_EXIST| 0 | 0x0340 | "Account already exists" |-2147482816|
|TSDB_CODE_MND_INVALID_ACCT| 0 | 0x0341| "Invalid account"| -2147482815|
|TSDB_CODE_MND_INVALID_ACCT_OPTION| 0 | 0x0342 | "Invalid account options"| -2147482814|
|TSDB_CODE_MND_USER_ALREADY_EXIST| 0 | 0x0350 | "User already exists"| -2147482800|
|TSDB_CODE_MND_INVALID_USER |0 | 0x0351 | "Invalid user" |-2147482799|
|TSDB_CODE_MND_INVALID_USER_FORMAT| 0 |0x0352 |"Invalid user format" |-2147482798|
|TSDB_CODE_MND_INVALID_PASS_FORMAT| 0| 0x0353 | "Invalid password format"| -2147482797|
|TSDB_CODE_MND_NO_USER_FROM_CONN| 0 | 0x0354 | "Can not get user from conn"| -2147482796|
|TSDB_CODE_MND_TOO_MANY_USERS| 0 | 0x0355| "Too many users"| -2147482795|
|TSDB_CODE_MND_TABLE_ALREADY_EXIST| 0| 0x0360| "Table already exists"| -2147482784|
|TSDB_CODE_MND_INVALID_TABLE_ID| 0| 0x0361| "Table name too long"| -2147482783|
|TSDB_CODE_MND_INVALID_TABLE_NAME| 0| 0x0362 | "Table does not exist"| -2147482782|
|TSDB_CODE_MND_INVALID_TABLE_TYPE| 0| 0x0363 | "Invalid table type in tsdb"| -2147482781|
|TSDB_CODE_MND_TOO_MANY_TAGS| 0 | 0x0364| "Too many tags"| -2147482780|
|TSDB_CODE_MND_TOO_MANY_TIMESERIES| 0| 0x0366| "Too many time series"| -2147482778|
|TSDB_CODE_MND_NOT_SUPER_TABLE| 0 |0x0367| "Not super table"| -2147482777|
|TSDB_CODE_MND_COL_NAME_TOO_LONG| 0| 0x0368| "Tag name too long"| -2147482776|
|TSDB_CODE_MND_TAG_ALREAY_EXIST| 0| 0x0369| "Tag already exists"| -2147482775|
|TSDB_CODE_MND_TAG_NOT_EXIST| 0 |0x036A | "Tag does not exist" |-2147482774|
|TSDB_CODE_MND_FIELD_ALREAY_EXIST| 0 | 0x036B| "Field already exists"| -2147482773|
|TSDB_CODE_MND_FIELD_NOT_EXIST| 0 | 0x036C | "Field does not exist"| -2147482772|
|TSDB_CODE_MND_INVALID_STABLE_NAME |0 | 0x036D |"Super table does not exist" |-2147482771|
|TSDB_CODE_MND_DB_NOT_SELECTED| 0 | 0x0380 | "Database not specified or available"| -2147482752|
|TSDB_CODE_MND_DB_ALREADY_EXIST| 0 | 0x0381 | "Database already exists"| -2147482751|
|TSDB_CODE_MND_INVALID_DB_OPTION| 0 | 0x0382 | "Invalid database options"| -2147482750|
|TSDB_CODE_MND_INVALID_DB| 0 | 0x0383 | "Invalid database name"| -2147482749|
|TSDB_CODE_MND_MONITOR_DB_FORBIDDEN| 0 | 0x0384 | "Cannot delete monitor database"| -2147482748|
|TSDB_CODE_MND_TOO_MANY_DATABASES| 0| 0x0385 | "Too many databases for account"| -2147482747|
|TSDB_CODE_MND_DB_IN_DROPPING| 0 | 0x0386| "Database not available" |-2147482746|
|TSDB_CODE_DND_MSG_NOT_PROCESSED| 0| 0x0400 | "Message not processed"| -2147482624|
|TSDB_CODE_DND_OUT_OF_MEMORY |0 | 0x0401 | "Dnode out of memory"| -2147482623|
|TSDB_CODE_DND_NO_WRITE_ACCESS| 0 | 0x0402 | "No permission for disk files in dnode"| -2147482622|
|TSDB_CODE_DND_INVALID_MSG_LEN| 0 | 0x0403 | "Invalid message length"| -2147482621|
|TSDB_CODE_VND_ACTION_IN_PROGRESS |0 |0x0500| "Action in progress" |-2147482368|
|TSDB_CODE_VND_MSG_NOT_PROCESSED| 0 |0x0501 | "Message not processed" |-2147482367|
|TSDB_CODE_VND_ACTION_NEED_REPROCESSED |0 |0x0502| "Action need to be reprocessed"| -2147482366|
|TSDB_CODE_VND_INVALID_VGROUP_ID |0 | 0x0503| "Invalid Vgroup ID"| -2147482365|
|TSDB_CODE_VND_INIT_FAILED| 0 | 0x0504 | "Vnode initialization failed"| -2147482364|
|TSDB_CODE_VND_NO_DISKSPACE| 0 |0x0505| "System out of disk space" |-2147482363|
|TSDB_CODE_VND_NO_DISK_PERMISSIONS| 0 | 0x0506| "No write permission for disk files" |-2147482362|
|TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR| 0 | 0x0507 | "Missing data file"| -2147482361|
|TSDB_CODE_VND_OUT_OF_MEMORY |0| 0x0508 | "Out of memory"| -2147482360|
|TSDB_CODE_VND_APP_ERROR| 0| 0x0509 | "Unexpected generic error in vnode"| -2147482359|
|TSDB_CODE_VND_INVALID_STATUS |0| 0x0510 | "Database not ready"| -2147482352|
|TSDB_CODE_VND_NOT_SYNCED| 0 | 0x0511 | "Database suspended"| -2147482351|
|TSDB_CODE_VND_NO_WRITE_AUTH| 0 | 0x0512| "Write operation denied" |-2147482350|
|TSDB_CODE_TDB_INVALID_TABLE_ID |0 | 0x0600 | "Invalid table ID"| -2147482112|
|TSDB_CODE_TDB_INVALID_TABLE_TYPE| 0| 0x0601 |"Invalid table type"| -2147482111|
|TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION| 0| 0x0602| "Invalid table schema version"| -2147482110|
|TSDB_CODE_TDB_TABLE_ALREADY_EXIST| 0 | 0x0603| "Table already exists"| -2147482109|
|TSDB_CODE_TDB_INVALID_CONFIG| 0 | 0x0604| "Invalid configuration"| -2147482108|
|TSDB_CODE_TDB_INIT_FAILED| 0 | 0x0605| "Tsdb init failed"| -2147482107|
|TSDB_CODE_TDB_NO_DISKSPACE| 0 | 0x0606| "No diskspace for tsdb"| -2147482106|
|TSDB_CODE_TDB_NO_DISK_PERMISSIONS| 0 | 0x0607| "No permission for disk files"| -2147482105|
|TSDB_CODE_TDB_FILE_CORRUPTED| 0 | 0x0608| "Data file(s) corrupted"| -2147482104|
|TSDB_CODE_TDB_OUT_OF_MEMORY| 0 | 0x0609| "Out of memory"| -2147482103|
|TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE| 0 | 0x060A| "Tag too old"| -2147482102|
|TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE |0| 0x060B | "Timestamp data out of range"| -2147482101|
|TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP| 0| 0x060C| "Submit message is messed up"| -2147482100|
|TSDB_CODE_TDB_INVALID_ACTION| 0 | 0x060D | "Invalid operation"| -2147482099|
|TSDB_CODE_TDB_INVALID_CREATE_TB_MSG| 0 | 0x060E| "Invalid creation of table"| -2147482098|
|TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM| 0 | 0x060F| "No table data in memory skiplist" |-2147482097|
|TSDB_CODE_TDB_FILE_ALREADY_EXISTS| 0 | 0x0610| "File already exists"| -2147482096|
|TSDB_CODE_TDB_TABLE_RECONFIGURE| 0 | 0x0611| "Need to reconfigure table"| -2147482095|
|TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO| 0 | 0x0612| "Invalid information to create table"| -2147482094|
|TSDB_CODE_QRY_INVALID_QHANDLE| 0 | 0x0700| "Invalid handle"| -2147481856|
|TSDB_CODE_QRY_INVALID_MSG| 0 | 0x0701| "Invalid message"| -2147481855|
|TSDB_CODE_QRY_NO_DISKSPACE| 0 | 0x0702 | "No diskspace for query"| -2147481854|
|TSDB_CODE_QRY_OUT_OF_MEMORY| 0 | 0x0703 | "System out of memory"| -2147481853|
|TSDB_CODE_QRY_APP_ERROR| 0 | 0x0704 | "Unexpected generic error in query"| -2147481852|
|TSDB_CODE_QRY_DUP_JOIN_KEY| 0 | 0x0705| "Duplicated join key"| -2147481851|
|TSDB_CODE_QRY_EXCEED_TAGS_LIMIT| 0 | 0x0706 | "Tag conditon too many"| -2147481850|
|TSDB_CODE_QRY_NOT_READY |0| 0x0707 | "Query not ready" |-2147481849|
|TSDB_CODE_QRY_HAS_RSP| 0 | 0x0708| "Query should response"| -2147481848|
|TSDB_CODE_GRANT_EXPIRED| 0 | 0x0800| "License expired"| -2147481600|
|TSDB_CODE_GRANT_DNODE_LIMITED| 0 | 0x0801 | "DNode creation limited by licence"| -2147481599|
|TSDB_CODE_GRANT_ACCT_LIMITED |0| 0x0802 |"Account creation limited by license"| -2147481598|
|TSDB_CODE_GRANT_TIMESERIES_LIMITED| 0 | 0x0803 | "Table creation limited by license"| -2147481597|
|TSDB_CODE_GRANT_DB_LIMITED| 0 | 0x0804 | "DB creation limited by license"| -2147481596|
|TSDB_CODE_GRANT_USER_LIMITED| 0 | 0x0805 | "User creation limited by license"| -2147481595|
|TSDB_CODE_GRANT_CONN_LIMITED| 0| 0x0806 | "Conn creation limited by license" |-2147481594|
|TSDB_CODE_GRANT_STREAM_LIMITED| 0 | 0x0807 | "Stream creation limited by license"| -2147481593|
|TSDB_CODE_GRANT_SPEED_LIMITED| 0 | 0x0808 | "Write speed limited by license" |-2147481592|
|TSDB_CODE_GRANT_STORAGE_LIMITED| 0 |0x0809 | "Storage capacity limited by license"| -2147481591|
|TSDB_CODE_GRANT_QUERYTIME_LIMITED| 0 | 0x080A | "Query time limited by license" |-2147481590|
|TSDB_CODE_GRANT_CPU_LIMITED| 0 |0x080B |"CPU cores limited by license"| -2147481589|
|TSDB_CODE_SYN_INVALID_CONFIG| 0 | 0x0900| "Invalid Sync Configuration"| -2147481344|
|TSDB_CODE_SYN_NOT_ENABLED| 0 | 0x0901 | "Sync module not enabled" |-2147481343|
|TSDB_CODE_WAL_APP_ERROR| 0| 0x1000 | "Unexpected generic error in wal" |-2147479552|

View File

@ -93,6 +93,7 @@ The TDengine background service is provided by taosd and can be adjusted in the configuration file taos.cfg
- role: optional role of the dnode. 0-any: can serve as an mnode and can also be assigned vnodes; 1-mgmt: can only serve as an mnode and cannot be assigned vnodes; 2-dnode: cannot serve as an mnode and can only be assigned vnodes
- debugFlag: runtime log switch. 131: output error and warning logs; 135: output error, warning and debug logs; 143: output error, warning, debug and trace logs. Default: 131 or 135 (different modules have different defaults)
- numOfLogLines: maximum number of lines in a single log file. Default: 10,000,000 lines.
- logKeepDays: maximum retention time of log files. When greater than 0, log files are renamed to taosdlog.xxx, where xxx is the timestamp (in seconds) of the log file's last modification. Default: 0 days.
- maxSQLLength: maximum allowed length of a single SQL statement. Default: 65380 bytes.
- telemetryReporting: whether TDengine is allowed to collect and report basic usage information; 0 means not allowed, 1 means allowed. Default: 1.
@ -130,23 +131,83 @@ When a new dnode joins a TDengine cluster, the following cluster-related parameters are involved
- maxTablesPerVnode: maximum number of tables that can be created in each vnode. Default: 1000000.
- maxVgroupsPerDb: maximum number of vnodes that can be used by each database.
- arbitrator: end point of the arbitrator in the system; empty by default.
- timezone, locale, charset: see the client configuration section below.

## Client Configuration

The front-end interactive client application of TDengine is taos, which shares the configuration file taos.cfg with taosd. When running taos, use the -c parameter to specify the configuration file directory, e.g. taos -c /home/cfg means the parameters in taos.cfg under /home/cfg/ are used; the default directory is /etc/taos. This section mainly describes the parameters the taos client application uses in the taos.cfg configuration file.

Client configuration parameters:

- firstEp: end point of the first taosd instance in the cluster that taos actively connects to at startup. Default: localhost:6030.
- secondEp: end point of the second taosd instance in the cluster that taos tries if firstEp cannot be reached. Default: empty.
- maxBinaryDisplayWidth: maximum display width of binary and nchar fields in the shell; anything beyond this limit is hidden. Default: 30. It can be changed dynamically in the shell with the command set max_binary_display_width *nn*.

- locale

> Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or through the API.

TDengine provides a dedicated field type, nchar, for storing wide characters in non-ASCII encodings such as Chinese, Japanese and Korean. Data written to an nchar field is always encoded as UCS4-LE before being sent to the server. Note that it is the client that guarantees the encoding is correct. Therefore, to use nchar fields to store non-ASCII characters such as Chinese, Japanese or Korean correctly, the client's encoding must be configured correctly.

Characters entered on the client use the operating system's current default encoding, which on Linux is usually UTF-8, while some Chinese systems may use GB18030 or GBK; in a docker environment the default encoding is POSIX, and on Chinese-language Windows it is CP936. The client must set the character set it actually uses, i.e. the current encoding of the operating system it runs on, so that data in nchar fields can be converted to UCS4-LE correctly.

On Linux, the locale naming rule is <language>_<region>.<charset>, e.g. zh_CN.UTF-8, where zh stands for Chinese, CN for mainland China, and UTF-8 is the character set. The character set tells the client how to convert and parse local strings. Linux and Mac OSX can determine the system character encoding through the locale; since the locale format used by Windows is not the POSIX standard locale format, Windows needs a separate configuration parameter, charset, to specify the character encoding. charset can also be used on Linux to specify the character encoding.

- charset

> Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or through the API.

If charset is not set in the configuration file, on Linux taos reads the system's current locale at startup and extracts the charset encoding from it. If reading the locale fails, it tries to read the charset setting; if that also fails, the startup process is aborted.

On Linux the locale already contains the character encoding, so once the Linux locale is set correctly there is no need to set charset separately. For example:
```
locale zh_CN.UTF-8
```
On Windows the current system encoding cannot be obtained from the locale. If the character encoding cannot be read from the configuration file, taos falls back to CP936, which is equivalent to adding the following line to the configuration file:
```
charset CP936
```
If you need a different character encoding, check which encoding the current operating system uses and set it correctly in the configuration file.

On Linux, if both locale and charset are set and they disagree, the value set later overrides the one set earlier.
```
locale zh_CN.UTF-8
charset GBK
```
Here the effective charset is GBK.
```
charset GBK
locale zh_CN.UTF-8
```
Here the effective charset is UTF-8.

The log configuration parameters are exactly the same as those of the server.

- timezone

Default: the current time zone setting, obtained dynamically from the system.

The time zone of the system the client runs on. To handle data writes and queries across multiple time zones, TDengine uses Unix timestamps to record and store time. The nature of the Unix timestamp means that, at any given moment, the timestamp produced in any time zone is identical. Note that the conversion to a Unix timestamp is done on the client, so the correct time zone must be set for the client to convert other time formats into the correct Unix timestamp.

On Linux the client automatically reads the time zone configured in the system. The time zone can also be set in the configuration file in several ways. For example:
```
timezone UTC-8
timezone GMT-8
timezone Asia/Shanghai
```
are all valid ways to configure the UTC+8 (East Eight) time zone.

The time zone setting affects how non-Unix-timestamp content in queries and writes (timestamp strings and the keyword now) is parsed. For example:
```
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
```
In the UTC+8 time zone this statement is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554955268000;
```
In the UTC time zone it is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```
To avoid the ambiguity of string time formats, Unix timestamps can be used directly. In addition, timestamp strings with an explicit time zone can be used in SQL statements, for example an RFC3339 timestamp string (2013-04-12T15:52:01.123+08:00) or an ISO-8601 timestamp string (2013-04-12T15:52:01.123+0800). The conversion of these two strings to Unix timestamps does not depend on the time zone of the system.
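As a quick check of the claim that such strings are time-zone independent, the following sketch parses the RFC3339 example above and prints the same Unix timestamp on any host, regardless of its local time zone (illustrative only, not part of TDengine):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The offset is part of the string, so the result does not depend on
	// the local time zone of the host that parses it.
	t, err := time.Parse(time.RFC3339, "2013-04-12T15:52:01.123+08:00")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UnixNano() / int64(time.Millisecond)) // 1365753121123
}
```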
When starting taos, the end point of a taosd instance can also be specified on the command line; otherwise it is read from taos.cfg.

View File

@ -2,13 +2,15 @@
Multiple running instances of taosd can form a cluster to guarantee highly reliable operation of TDengine and to provide horizontal scalability. To understand cluster management in TDengine 2.0, you need to be familiar with the basic cluster concepts; please read the chapter on the overall architecture of TDengine 2.0. Also, before installing a cluster, please install and try out the single-node features following the chapter [Getting Started](https://www.taosdata.com/cn/getting-started20/).
Each node of the cluster is uniquely identified by its End Point, which consists of an FQDN (Fully Qualified Domain Name) plus a port, e.g. h1.taosdata.com:6030. The FQDN is usually the server's hostname, which can be obtained with the Linux command `hostname -f`; for FQDN configuration see [Understanding the FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html). The port is the port on which this node serves externally; the default is 6030, but it can be changed with the serverPort parameter in taos.cfg. A node may be configured with multiple hostnames; TDengine automatically picks the first one, but it can also be specified with the fqdn parameter in taos.cfg. If you prefer direct access via IP address, set fqdn to the IP address of the node.
Cluster management in TDengine is extremely simple: apart from adding and removing nodes, which require manual intervention, everything else is fully automatic, minimizing the operational workload. This chapter describes cluster management operations in detail.
## Preparations
**Step 1**: If any node used to build the cluster contains previous test data, has had version 1.X installed, or has had another version of TDengine installed, delete it first and clear all data; for detailed steps see the blog post [Installing and uninstalling the various TDengine packages](https://www.taosdata.com/blog/2019/08/09/566.html).
**Note 1**: Because the FQDN information is written to files, if the FQDN was not configured or was changed while TDengine was already started, make sure the data is useless or has been backed up, then clean up the previous data with rm -rf /var/lib/taos/.
**Note 2**: The client also needs to be configured so that it can correctly resolve the FQDN of every node, whether through a DNS service or the hosts file.
**Step 2**: It is recommended to close the firewall, or at least make sure the TCP and UDP ports 6030 - 6042 are open. It is **strongly recommended** to close the firewall first and configure the ports only after the cluster has been set up;
@ -23,7 +25,7 @@ Cluster management in TDengine is extremely simple: apart from adding and removing nodes, which require manual intervention
**Step 5**: Modify TDengine's configuration file (the file /etc/taos/taos.cfg on every node must be modified). Assuming the End Point of the first node to be started is h1.taosdata.com:6030, the following parameters are related to the cluster:
```
// firstEp: this configuration is identical on every node of the cluster; after the first access to it, a node obtains the information of the whole cluster
firstEp h1.taosdata.com:6030
// Configure the FQDN of this node; if the machine has only one hostname, this can be omitted
@ -32,7 +34,7 @@ fqdn h1.taosdata.com
// Configure the port of this node; default is 6030
serverPort 6030
// Required when the number of server nodes is even; see the section "Using an Arbitrator"
arbitrator ha.taosdata.com:6042
```

View File

@ -32,7 +32,7 @@
3. On the server, run `systemctl status taosd` to check whether *taosd* is running. If it is not running, start *taosd*.
4. Make sure the client connection specifies the correct server FQDN (Fully Qualified Domain Name, which can be obtained by running the Linux command hostname -f on the server); for FQDN configuration see [Understanding the FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html).
5. Ping the server FQDN. If there is no response, check your network, the DNS settings, or the hosts file of the machine the client runs on.
@ -47,69 +47,14 @@
Check whether the TCP port on the server side works: `nc -l {port}`
Check whether the TCP port connection on the client side works: `nc {hostIP} {port}`
10. You can also use the network connectivity test built into the taos program to verify whether the specified port connections between server and client work, covering both TCP and UDP: [Guide to TDengine's built-in network connectivity test](https://www.taosdata.com/blog/2020/09/08/1816.html).
taos uses the -n parameter to decide whether to run the server-side or the client-side test: -n server runs the server-side test, -n client runs the client-side test.
1) First stop the taosd service on the server;
2) On the server, run the server side of taos's built-in connectivity test: taos -n server -P 6030 -e 6042 -l 1000;
3) On the client, run the client side of taos's built-in connectivity test: taos -n client -h host -P 6030 -e 6042 -l 1000.
-n: run the server-side or client-side connectivity test; empty by default, meaning the connectivity test is not started;
-h: the server name, either an IP address or an FQDN, e.g. 192.168.1.160, 192.168.1.160:6030, hostname1 or hostname1:6030. Default: 127.0.0.1;
-P: the starting port number to test; default: 6030;
-e: the ending port number to test; it must be greater than or equal to the starting port; default: 6042;
-l: the packet length used for the port connectivity test, at most 64000 bytes; default: 1000 bytes. Server and client must use the same value during the test;
The port range (start to end) configured on the server must cover the port range configured on the client;
The starting port can be set in three ways: the default value, via -h, or via -P; the priority is -P > -h > default.
Sample output on the client (a minimal TCP-only sketch of this kind of check follows the sample output below):
`sum@sum-virtualBox /home/sum $ taos -n client -h ubuntu-vbox6`
`host: ubuntu-vbox6 start port: 6030 end port: 6042 packet len: 1000`
`tcp port:6030 test ok. udp port:6030 test ok.`
`tcp port:6031 test ok. udp port:6031 test ok.`
`tcp port:6032 test ok. udp port:6032 test ok.`
`tcp port:6033 test ok. udp port:6033 test ok.`
`tcp port:6034 test ok. udp port:6034 test ok.`
`tcp port:6035 test ok. udp port:6035 test ok.`
`tcp port:6036 test ok. udp port:6036 test ok.`
`tcp port:6037 test ok. udp port:6037 test ok.`
`tcp port:6038 test ok. udp port:6038 test ok.`
`tcp port:6039 test ok. udp port:6039 test ok.`
`tcp port:6040 test ok. udp port:6040 test ok.`
`tcp port:6041 test ok. udp port:6041 test ok.`
`tcp port:6042 test ok. udp port:6042 test ok.`
If a port is not open, a message like `portxxxx test fail` is printed.
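For reference, here is a minimal TCP-only sketch of the same kind of port sweep. It is not the taos tool (which also tests UDP and payload sizes), and the host name is a placeholder to replace with your own server:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	host := "h1.taosdata.com" // placeholder FQDN; replace with your server
	for port := 6030; port <= 6042; port++ {
		addr := fmt.Sprintf("%s:%d", host, port)
		conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
		if err != nil {
			fmt.Printf("tcp port:%d test fail (%v)\n", port, err)
			continue
		}
		conn.Close()
		fmt.Printf("tcp port:%d test ok.\n", port)
	}
}
```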
## 6. What can I do about the error "Unexpected generic error in RPC" or "TDengine Error: Unable to resolve FQDN"?
This error occurs because the client or a data node cannot resolve the FQDN (Fully Qualified Domain Name). For the TAOS Shell or client applications, check the following:
1. Check that the FQDN of the server being connected to is correct; for FQDN configuration see [Understanding the FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html).
2. If the network has a DNS server configured, check that it is working properly.
3. If the network has no DNS server, check the hosts file of the client machine to see whether the FQDN is configured and maps to the correct IP address.
4. If the network configuration is fine, the client machine must be able to ping the FQDN being connected to; otherwise the client cannot connect to the server.

View File

@ -189,6 +189,9 @@
# max number of rows per log filters
# numOfLogLines 10000000

# time of keeping log files, days
# logKeepDays 0

# enable/disable async log
# asyncLog 1

View File

@ -232,8 +232,9 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} else if (pInfo->type == TSDB_SQL_DROP_TABLE) { } else if (pInfo->type == TSDB_SQL_DROP_TABLE) {
assert(pInfo->pDCLInfo->nTokens == 1); assert(pInfo->pDCLInfo->nTokens == 1);
if (tscSetTableFullName(pTableMetaInfo, pzName, pSql) != TSDB_CODE_SUCCESS) { code = tscSetTableFullName(pTableMetaInfo, pzName, pSql);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); if(code != TSDB_CODE_SUCCESS) {
return code;
} }
} else if (pInfo->type == TSDB_SQL_DROP_DNODE) { } else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
pzName->n = strdequote(pzName->z); pzName->n = strdequote(pzName->z);
@ -348,8 +349,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_DESCRIBE_TABLE: { case TSDB_SQL_DESCRIBE_TABLE: {
SStrToken* pToken = &pInfo->pDCLInfo->a[0]; SStrToken* pToken = &pInfo->pDCLInfo->a[0];
const char* msg2 = "table name is too long";
const char* msg1 = "invalid table name"; const char* msg1 = "invalid table name";
const char* msg2 = "table name is too long";
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
@ -710,7 +711,9 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
} }
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) { int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) {
const char* msg = "name too long"; const char* msg1 = "name too long";
const char* msg2 = "invalid db name";
const char *msg = msg1;
SSqlCmd* pCmd = &pSql->cmd; SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
@ -728,16 +731,14 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableNa
} else { // get current DB name first, then set it into path } else { // get current DB name first, then set it into path
SStrToken t = {0}; SStrToken t = {0};
getCurrentDBName(pSql, &t); getCurrentDBName(pSql, &t);
if (t.n == 0) {
msg = msg2;
}
code = setObjFullName(pTableMetaInfo->name, NULL, &t, pzTableName, NULL); code = setObjFullName(pTableMetaInfo->name, NULL, &t, pzTableName, NULL);
} }
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
} free(oldName);
if (code != TSDB_CODE_SUCCESS) {
free(oldName);
return code; return code;
} }
@ -1072,7 +1073,7 @@ int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStr
/* db name is not specified, the tableName dose not include db name */ /* db name is not specified, the tableName dose not include db name */
if (pDB != NULL) { if (pDB != NULL) {
if (pDB->n >= TSDB_ACCT_LEN + TSDB_DB_NAME_LEN) { if (pDB->n >= TSDB_ACCT_LEN + TSDB_DB_NAME_LEN || pDB->n == 0) {
return TSDB_CODE_TSC_INVALID_SQL; return TSDB_CODE_TSC_INVALID_SQL;
} }
@ -1596,13 +1597,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
SColumnIndex index = COLUMN_INDEX_INITIALIZER; SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (pItem->pNode->pParam != NULL) { if (pItem->pNode->pParam != NULL) {
SStrToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo;
if (pToken->z == NULL || pToken->n == 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0]; tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0];
if (pParamElem->pNode->nSQLOptr == TK_ALL) { SStrToken* pToken = &pParamElem->pNode->colInfo;
short sqlOptr = pParamElem->pNode->nSQLOptr;
if ((pToken->z == NULL || pToken->n == 0)
&& (TK_INTEGER != sqlOptr)) /*select count(1) from table*/ {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (sqlOptr == TK_ALL) {
// select table.* // select table.*
// check if the table name is valid or not // check if the table name is valid or not
SStrToken tmpToken = pParamElem->pNode->colInfo; SStrToken tmpToken = pParamElem->pNode->colInfo;
@ -1614,6 +1616,21 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false); pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false);
} else if (sqlOptr == TK_INTEGER) { // select count(1) from table1
char buf[8] = {0};
int64_t val = -1;
tVariant* pVariant = &pParamElem->pNode->val;
if (pVariant->nType == TSDB_DATA_TYPE_BIGINT) {
tVariantDump(pVariant, buf, TSDB_DATA_TYPE_BIGINT, true);
val = GET_INT64_VAL(buf);
}
if (val == 1) {
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false);
} else {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
} else { } else {
// count the number of meters created according to the super table // count the number of meters created according to the super table
if (getColumnIndexByName(pCmd, pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { if (getColumnIndexByName(pCmd, pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
@ -2741,27 +2758,31 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
} }
} }
int32_t retVal = TSDB_CODE_SUCCESS;
if (pExpr->nSQLOptr == TK_LE || pExpr->nSQLOptr == TK_LT) { if (pExpr->nSQLOptr == TK_LE || pExpr->nSQLOptr == TK_LT) {
tVariantDump(&pRight->val, (char*)&pColumnFilter->upperBndd, colType, false); retVal = tVariantDump(&pRight->val, (char*)&pColumnFilter->upperBndd, colType, false);
} else { // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd
if (colType == TSDB_DATA_TYPE_BINARY) {
pColumnFilter->pz = (int64_t)calloc(1, pRight->val.nLen + TSDB_NCHAR_SIZE);
pColumnFilter->len = pRight->val.nLen;
tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false); // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd
} else if (colType == TSDB_DATA_TYPE_NCHAR) { } else if (colType == TSDB_DATA_TYPE_BINARY) {
// pRight->val.nLen + 1 is larger than the actual nchar string length pColumnFilter->pz = (int64_t)calloc(1, pRight->val.nLen + TSDB_NCHAR_SIZE);
pColumnFilter->pz = (int64_t)calloc(1, (pRight->val.nLen + 1) * TSDB_NCHAR_SIZE); pColumnFilter->len = pRight->val.nLen;
retVal = tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false);
tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false); } else if (colType == TSDB_DATA_TYPE_NCHAR) {
// pRight->val.nLen + 1 is larger than the actual nchar string length
pColumnFilter->pz = (int64_t)calloc(1, (pRight->val.nLen + 1) * TSDB_NCHAR_SIZE);
retVal = tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false);
size_t len = twcslen((wchar_t*)pColumnFilter->pz);
pColumnFilter->len = len * TSDB_NCHAR_SIZE;
size_t len = twcslen((wchar_t*)pColumnFilter->pz); } else {
pColumnFilter->len = len * TSDB_NCHAR_SIZE; retVal = tVariantDump(&pRight->val, (char*)&pColumnFilter->lowerBndd, colType, false);
} else {
tVariantDump(&pRight->val, (char*)&pColumnFilter->lowerBndd, colType, false);
}
} }
if (retVal != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
switch (pExpr->nSQLOptr) { switch (pExpr->nSQLOptr) {
case TK_LE: case TK_LE:
pColumnFilter->upperRelOptr = TSDB_RELATION_LESS_EQUAL; pColumnFilter->upperRelOptr = TSDB_RELATION_LESS_EQUAL;
@ -4424,7 +4445,6 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const int32_t DEFAULT_TABLE_INDEX = 0; const int32_t DEFAULT_TABLE_INDEX = 0;
const char* msg1 = "invalid table name"; const char* msg1 = "invalid table name";
const char* msg2 = "table name too long";
const char* msg3 = "manipulation of tag available for super table"; const char* msg3 = "manipulation of tag available for super table";
const char* msg4 = "set tag value only available for table"; const char* msg4 = "set tag value only available for table";
const char* msg5 = "only support add one tag"; const char* msg5 = "only support add one tag";
@ -4457,7 +4477,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} }
if (tscSetTableFullName(pTableMetaInfo, &(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) { if (tscSetTableFullName(pTableMetaInfo, &(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return TSDB_CODE_TSC_INVALID_SQL;
} }
int32_t ret = tscGetTableMeta(pSql, pTableMetaInfo); int32_t ret = tscGetTableMeta(pSql, pTableMetaInfo);
@ -5734,7 +5754,6 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) {
int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo) { int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo) {
const char* msg1 = "invalid table name"; const char* msg1 = "invalid table name";
const char* msg2 = "table name too long";
SSqlCmd* pCmd = &pSql->cmd; SSqlCmd* pCmd = &pSql->cmd;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex); SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);
@ -5755,7 +5774,7 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p
} }
if (tscSetTableFullName(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) { if (tscSetTableFullName(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return TSDB_CODE_TSC_INVALID_SQL;
} }
if (!validateTableColumnInfo(pFieldList, pCmd) || if (!validateTableColumnInfo(pFieldList, pCmd) ||
@ -5810,7 +5829,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
} }
if (tscSetTableFullName(pStableMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) { if (tscSetTableFullName(pStableMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return TSDB_CODE_TSC_INVALID_SQL;
} }
// get meter meta from mnode // get meter meta from mnode
@ -6002,7 +6021,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0)); assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0));
const char* msg0 = "invalid table name"; const char* msg0 = "invalid table name";
const char* msg1 = "table name too long"; //const char* msg1 = "table name too long";
const char* msg2 = "point interpolation query needs timestamp"; const char* msg2 = "point interpolation query needs timestamp";
const char* msg5 = "fill only available for interval query"; const char* msg5 = "fill only available for interval query";
const char* msg6 = "start(end) time of query range required or time range too large"; const char* msg6 = "start(end) time of query range required or time range too large";
@ -6075,7 +6094,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
SStrToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz}; SStrToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz};
if (tscSetTableFullName(pTableMetaInfo1, &t, pSql) != TSDB_CODE_SUCCESS) { if (tscSetTableFullName(pTableMetaInfo1, &t, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return TSDB_CODE_TSC_INVALID_SQL;
} }
tVariant* pTableItem1 = &pQuerySql->from->a[i + 1].pVar; tVariant* pTableItem1 = &pQuerySql->from->a[i + 1].pVar;

View File

@ -2019,7 +2019,8 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
  return 0;
}

int tscProcessDropDbRsp(SSqlObj *pSql) {
  pSql->pTscObj->db[0] = 0;
  taosCacheEmpty(tscCacheHandle);
  return 0;
}
@ -2095,6 +2096,10 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
  SSqlCmd *pCmd = &pSql->cmd;
  SRetrieveTableRsp *pRetrieve = (SRetrieveTableRsp *)pRes->pRsp;
  if (pRetrieve == NULL) {
    pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
    return pRes->code;
  }

  pRes->numOfRows = htonl(pRetrieve->numOfRows);
  pRes->precision = htons(pRetrieve->precision);

View File

@ -325,8 +325,6 @@ void tdResetKVRowBuilder(SKVRowBuilder *pBuilder);
SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder); SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) { static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) {
ASSERT(pBuilder->nCols == 0 || colId > pBuilder->pColIdx[pBuilder->nCols - 1].colId);
if (pBuilder->nCols >= pBuilder->tCols) { if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2; pBuilder->tCols *= 2;
pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);

View File

@ -158,6 +158,7 @@ extern char buildinfo[];
// log // log
extern int32_t tsAsyncLog; extern int32_t tsAsyncLog;
extern int32_t tsNumOfLogLines; extern int32_t tsNumOfLogLines;
extern int32_t tsLogKeepDays;
extern int32_t dDebugFlag; extern int32_t dDebugFlag;
extern int32_t vDebugFlag; extern int32_t vDebugFlag;
extern int32_t mDebugFlag; extern int32_t mDebugFlag;

View File

@ -254,7 +254,7 @@ bool taosCfgDynamicOptions(char *msg) {
//if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; //if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
if (cfg->valType != TAOS_CFG_VTYPE_INT32) continue; if (cfg->valType != TAOS_CFG_VTYPE_INT32) continue;
int32_t cfgLen = strlen(cfg->option); int32_t cfgLen = (int32_t)strlen(cfg->option);
if (cfgLen != olen) continue; if (cfgLen != olen) continue;
if (strncasecmp(option, cfg->option, olen) != 0) continue; if (strncasecmp(option, cfg->option, olen) != 0) continue;
*((int32_t *)cfg->ptr) = vint; *((int32_t *)cfg->ptr) = vint;
@ -1013,12 +1013,22 @@ static void doInitGlobalConfig(void) {
cfg.ptr = &tsNumOfLogLines; cfg.ptr = &tsNumOfLogLines;
cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 10000; cfg.minValue = 1000;
cfg.maxValue = 2000000000; cfg.maxValue = 2000000000;
cfg.ptrLength = 0; cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "logKeepDays";
cfg.ptr = &tsLogKeepDays;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 0;
cfg.maxValue = 365000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "asyncLog"; cfg.option = "asyncLog";
cfg.ptr = &tsAsyncLog; cfg.ptr = &tsAsyncLog;
cfg.valType = TAOS_CFG_VTYPE_INT16; cfg.valType = TAOS_CFG_VTYPE_INT16;

View File

@ -93,14 +93,13 @@
<version>3.6.1</version> <version>3.6.1</version>
<configuration> <configuration>
<encoding>UTF-8</encoding> <encoding>UTF-8</encoding>
<source>11</source> <source>8</source>
<target>11</target> <target>8</target>
<debug>true</debug> <debug>true</debug>
<showDeprecation>true</showDeprecation> <showDeprecation>true</showDeprecation>
</configuration> </configuration>
</plugin> </plugin>
<plugin> <plugin>
<groupId>org.apache.maven.plugins</groupId> <groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId> <artifactId>maven-source-plugin</artifactId>

View File

@ -57,9 +57,9 @@ public class TSDBConnection implements Connection {
File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR)); File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0]; File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0];
List<String> endpoints = loadConfigEndpoints(cfgFile); List<String> endpoints = loadConfigEndpoints(cfgFile);
if (!endpoints.isEmpty()){ if (!endpoints.isEmpty()) {
info.setProperty(TSDBDriver.PROPERTY_KEY_HOST,endpoints.get(0).split(":")[0]); info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
info.setProperty(TSDBDriver.PROPERTY_KEY_PORT,endpoints.get(0).split(":")[1]); info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
} }
//load taos.cfg end //load taos.cfg end
@ -69,15 +69,15 @@ public class TSDBConnection implements Connection {
info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD)); info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
} }
private List<String> loadConfigEndpoints(File cfgFile){ private List<String> loadConfigEndpoints(File cfgFile) {
List<String> endpoints = new ArrayList<>(); List<String> endpoints = new ArrayList<>();
try(BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) { try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
String line = null; String line = null;
while ((line = reader.readLine())!=null){ while ((line = reader.readLine()) != null) {
if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")){ if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
endpoints.add(line.substring(line.indexOf('p')+1).trim()); endpoints.add(line.substring(line.indexOf('p') + 1).trim());
} }
if (endpoints.size()>1) if (endpoints.size() > 1)
break; break;
} }
} catch (FileNotFoundException e) { } catch (FileNotFoundException e) {
@ -91,7 +91,7 @@ public class TSDBConnection implements Connection {
/** /**
* @param cfgDirPath * @param cfgDirPath
* @return return the config dir * @return return the config dir
* **/ **/
private File loadConfigDir(String cfgDirPath) { private File loadConfigDir(String cfgDirPath) {
if (cfgDirPath == null) if (cfgDirPath == null)
return loadDefaultConfigDir(); return loadDefaultConfigDir();
@ -103,8 +103,8 @@ public class TSDBConnection implements Connection {
/** /**
* @return search the default config dir, if the config dir is not exist will return null * @return search the default config dir, if the config dir is not exist will return null
* */ */
private File loadDefaultConfigDir(){ private File loadDefaultConfigDir() {
File cfgDir; File cfgDir;
File cfgDir_linux = new File("/etc/taos"); File cfgDir_linux = new File("/etc/taos");
cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null; cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
@ -132,7 +132,9 @@ public class TSDBConnection implements Connection {
public Statement createStatement() throws SQLException { public Statement createStatement() throws SQLException {
if (!this.connector.isClosed()) { if (!this.connector.isClosed()) {
return new TSDBStatement(this.connector); TSDBStatement statement = new TSDBStatement(this, this.connector);
statement.setConnection(this);
return statement;
} else { } else {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} }
@ -153,7 +155,7 @@ public class TSDBConnection implements Connection {
public PreparedStatement prepareStatement(String sql) throws SQLException { public PreparedStatement prepareStatement(String sql) throws SQLException {
if (!this.connector.isClosed()) { if (!this.connector.isClosed()) {
return new TSDBPreparedStatement(this.connector, sql); return new TSDBPreparedStatement(this, this.connector, sql);
} else { } else {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} }

View File

@ -42,8 +42,8 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
private SavedPreparedStatement savedPreparedStatement; private SavedPreparedStatement savedPreparedStatement;
TSDBPreparedStatement(TSDBJNIConnector connecter, String sql) { TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) {
super(connecter); super(connection, connecter);
init(sql); init(sql);
} }

View File

@ -19,153 +19,164 @@ import java.util.ArrayList;
import java.util.List; import java.util.List;
public class TSDBStatement implements Statement { public class TSDBStatement implements Statement {
private TSDBJNIConnector connecter = null; private TSDBJNIConnector connecter = null;
/** To store batched commands */ /**
protected List<String> batchedArgs; * To store batched commands
*/
protected List<String> batchedArgs;
/** Timeout for a query */ /**
protected int queryTimeout = 0; * Timeout for a query
*/
protected int queryTimeout = 0;
private Long pSql = 0l; private Long pSql = 0l;
/** /**
* Status of current statement * Status of current statement
*/ */
private boolean isClosed = true; private boolean isClosed = true;
private int affectedRows = 0; private int affectedRows = 0;
TSDBStatement(TSDBJNIConnector connecter) { private TSDBConnection connection;
this.connecter = connecter;
this.isClosed = false;
}
public <T> T unwrap(Class<T> iface) throws SQLException { public void setConnection(TSDBConnection connection) {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); this.connection = connection;
} }
public boolean isWrapperFor(Class<?> iface) throws SQLException { TSDBStatement(TSDBConnection connection, TSDBJNIConnector connecter) {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); this.connection = connection;
} this.connecter = connecter;
this.isClosed = false;
}
public ResultSet executeQuery(String sql) throws SQLException { public <T> T unwrap(Class<T> iface) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public boolean isWrapperFor(Class<?> iface) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public ResultSet executeQuery(String sql) throws SQLException {
if (isClosed) { if (isClosed) {
throw new SQLException("Invalid method call on a closed statement."); throw new SQLException("Invalid method call on a closed statement.");
} }
// TODO make sure it is not a update query // TODO make sure it is not a update query
pSql = this.connecter.executeQuery(sql); pSql = this.connecter.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet(); long resultSetPointer = this.connecter.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql); this.connecter.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} }
// create/insert/update/delete/alter // create/insert/update/delete/alter
if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
this.connecter.freeResultSet(pSql); this.connecter.freeResultSet(pSql);
return null; return null;
} }
if (!this.connecter.isUpdateQuery(pSql)) { if (!this.connecter.isUpdateQuery(pSql)) {
return new TSDBResultSet(this.connecter, resultSetPointer); return new TSDBResultSet(this.connecter, resultSetPointer);
} else { } else {
this.connecter.freeResultSet(pSql); this.connecter.freeResultSet(pSql);
return null; return null;
} }
} }
public int executeUpdate(String sql) throws SQLException { public int executeUpdate(String sql) throws SQLException {
if (isClosed) { if (isClosed) {
throw new SQLException("Invalid method call on a closed statement."); throw new SQLException("Invalid method call on a closed statement.");
} }
// TODO check if current query is update query // TODO check if current query is update query
pSql = this.connecter.executeQuery(sql); pSql = this.connecter.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet(); long resultSetPointer = this.connecter.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql); this.connecter.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} }
this.affectedRows = this.connecter.getAffectedRows(pSql); this.affectedRows = this.connecter.getAffectedRows(pSql);
this.connecter.freeResultSet(pSql); this.connecter.freeResultSet(pSql);
return this.affectedRows; return this.affectedRows;
} }
public String getErrorMsg(long pSql) { public String getErrorMsg(long pSql) {
return this.connecter.getErrMsg(pSql); return this.connecter.getErrMsg(pSql);
} }
public void close() throws SQLException { public void close() throws SQLException {
if (!isClosed) { if (!isClosed) {
if (!this.connecter.isResultsetClosed()) { if (!this.connecter.isResultsetClosed()) {
this.connecter.freeResultSet(); this.connecter.freeResultSet();
} }
isClosed = true; isClosed = true;
} }
} }
public int getMaxFieldSize() throws SQLException { public int getMaxFieldSize() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public void setMaxFieldSize(int max) throws SQLException { public void setMaxFieldSize(int max) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public int getMaxRows() throws SQLException { public int getMaxRows() throws SQLException {
// always set maxRows to zero, meaning unlimited rows in a resultSet // always set maxRows to zero, meaning unlimited rows in a resultSet
return 0; return 0;
} }
public void setMaxRows(int max) throws SQLException { public void setMaxRows(int max) throws SQLException {
// always set maxRows to zero, meaning unlimited rows in a resultSet // always set maxRows to zero, meaning unlimited rows in a resultSet
} }
public void setEscapeProcessing(boolean enable) throws SQLException { public void setEscapeProcessing(boolean enable) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public int getQueryTimeout() throws SQLException { public int getQueryTimeout() throws SQLException {
return queryTimeout; return queryTimeout;
} }
public void setQueryTimeout(int seconds) throws SQLException { public void setQueryTimeout(int seconds) throws SQLException {
this.queryTimeout = seconds; this.queryTimeout = seconds;
} }
public void cancel() throws SQLException { public void cancel() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public SQLWarning getWarnings() throws SQLException { public SQLWarning getWarnings() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public void clearWarnings() throws SQLException { public void clearWarnings() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public void setCursorName(String name) throws SQLException { public void setCursorName(String name) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public boolean execute(String sql) throws SQLException { public boolean execute(String sql) throws SQLException {
if (isClosed) { if (isClosed) {
throw new SQLException("Invalid method call on a closed statement."); throw new SQLException("Invalid method call on a closed statement.");
} }
boolean res = true; boolean res = true;
pSql = this.connecter.executeQuery(sql); pSql = this.connecter.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet(); long resultSetPointer = this.connecter.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql); this.connecter.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { } else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
// no result set is retrieved // no result set is retrieved
@ -173,145 +184,147 @@ public class TSDBStatement implements Statement {
res = false; res = false;
} }
return res; return res;
} }
public ResultSet getResultSet() throws SQLException { public ResultSet getResultSet() throws SQLException {
if (isClosed) { if (isClosed) {
throw new SQLException("Invalid method call on a closed statement."); throw new SQLException("Invalid method call on a closed statement.");
} }
long resultSetPointer = connecter.getResultSet(); long resultSetPointer = connecter.getResultSet();
TSDBResultSet resSet = null; TSDBResultSet resSet = null;
if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) { if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
resSet = new TSDBResultSet(connecter, resultSetPointer); resSet = new TSDBResultSet(connecter, resultSetPointer);
} }
return resSet; return resSet;
} }
public int getUpdateCount() throws SQLException { public int getUpdateCount() throws SQLException {
if (isClosed) { if (isClosed) {
throw new SQLException("Invalid method call on a closed statement."); throw new SQLException("Invalid method call on a closed statement.");
} }
return this.affectedRows; return this.affectedRows;
} }
public boolean getMoreResults() throws SQLException { public boolean getMoreResults() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public void setFetchDirection(int direction) throws SQLException { public void setFetchDirection(int direction) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public int getFetchDirection() throws SQLException { public int getFetchDirection() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
/* /*
* used by spark * used by spark
*/ */
public void setFetchSize(int rows) throws SQLException { public void setFetchSize(int rows) throws SQLException {
} }
/* /*
* used by spark * used by spark
*/ */
public int getFetchSize() throws SQLException { public int getFetchSize() throws SQLException {
return 4096; return 4096;
} }
public int getResultSetConcurrency() throws SQLException { public int getResultSetConcurrency() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public int getResultSetType() throws SQLException { public int getResultSetType() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public void addBatch(String sql) throws SQLException { public void addBatch(String sql) throws SQLException {
if (batchedArgs == null) { if (batchedArgs == null) {
batchedArgs = new ArrayList<String>(); batchedArgs = new ArrayList<String>();
} }
batchedArgs.add(sql); batchedArgs.add(sql);
} }
public void clearBatch() throws SQLException { public void clearBatch() throws SQLException {
batchedArgs.clear(); batchedArgs.clear();
} }
public int[] executeBatch() throws SQLException { public int[] executeBatch() throws SQLException {
if (isClosed) { if (isClosed) {
throw new SQLException("Invalid method call on a closed statement."); throw new SQLException("Invalid method call on a closed statement.");
} }
if (batchedArgs == null) { if (batchedArgs == null) {
throw new SQLException(TSDBConstants.WrapErrMsg("Batch is empty!")); throw new SQLException(TSDBConstants.WrapErrMsg("Batch is empty!"));
} else { } else {
int[] res = new int[batchedArgs.size()]; int[] res = new int[batchedArgs.size()];
for (int i = 0; i < batchedArgs.size(); i++) { for (int i = 0; i < batchedArgs.size(); i++) {
res[i] = executeUpdate(batchedArgs.get(i)); res[i] = executeUpdate(batchedArgs.get(i));
} }
return res; return res;
} }
} }
public Connection getConnection() throws SQLException { public Connection getConnection() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); if (this.connecter != null)
} return this.connection;
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public boolean getMoreResults(int current) throws SQLException { public boolean getMoreResults(int current) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public ResultSet getGeneratedKeys() throws SQLException { public ResultSet getGeneratedKeys() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public int executeUpdate(String sql, String[] columnNames) throws SQLException { public int executeUpdate(String sql, String[] columnNames) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public boolean execute(String sql, int[] columnIndexes) throws SQLException { public boolean execute(String sql, int[] columnIndexes) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public boolean execute(String sql, String[] columnNames) throws SQLException { public boolean execute(String sql, String[] columnNames) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public int getResultSetHoldability() throws SQLException { public int getResultSetHoldability() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public boolean isClosed() throws SQLException { public boolean isClosed() throws SQLException {
return isClosed; return isClosed;
} }
public void setPoolable(boolean poolable) throws SQLException { public void setPoolable(boolean poolable) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public boolean isPoolable() throws SQLException { public boolean isPoolable() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public void closeOnCompletion() throws SQLException { public void closeOnCompletion() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public boolean isCloseOnCompletion() throws SQLException { public boolean isCloseOnCompletion() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
} }
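
For reference, a minimal usage sketch of this Statement implementation through the standard JDBC API follows. It is not part of this changeset; the host, database and table names are illustrative assumptions, and the connection properties mirror the pattern used by the tests in this repository. As the implementation above shows, executeQuery() returns a result set only for SELECT statements, so DDL/DML should go through executeUpdate().

    import com.taosdata.jdbc.TSDBDriver;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;
    import java.util.Properties;

    public class StatementUsageSketch {
        public static void main(String[] args) throws Exception {
            Class.forName("com.taosdata.jdbc.TSDBDriver");
            Properties props = new Properties();
            props.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost"); // assumed local taosd
            try (Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:0/", props);
                 Statement stmt = conn.createStatement()) {
                stmt.executeUpdate("create database if not exists demo");
                stmt.executeUpdate("use demo");
                stmt.executeUpdate("create table if not exists tb (ts timestamp, v int)");
                int rows = stmt.executeUpdate("insert into tb values (now, 1)");
                System.out.println("affected rows: " + rows);
                // executeQuery returns a result set only for SELECT; for other statements it yields null
                ResultSet rs = stmt.executeQuery("select * from tb");
                while (rs.next()) {
                    System.out.println(rs.getTimestamp(1) + " -> " + rs.getInt(2));
                }
                rs.close();
            }
        }
    }
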


@ -23,6 +23,7 @@ import java.sql.SQLException;
public class SqlSyntaxValidator { public class SqlSyntaxValidator {
private TSDBConnection tsdbConnection; private TSDBConnection tsdbConnection;
public SqlSyntaxValidator(Connection connection) { public SqlSyntaxValidator(Connection connection) {
this.tsdbConnection = (TSDBConnection) connection; this.tsdbConnection = (TSDBConnection) connection;
} }


@ -8,8 +8,7 @@ import org.junit.BeforeClass;
public class BaseTest { public class BaseTest {
private static boolean testCluster = false; private static boolean testCluster = false;
private static TDNodes nodes = new TDNodes(); private static TDNodes nodes = new TDNodes();
@BeforeClass @BeforeClass
public static void setupEnv() { public static void setupEnv() {
@ -19,11 +18,9 @@ public class BaseTest {
nodes.getTDNode(1).setRunning(1); nodes.getTDNode(1).setRunning(1);
nodes.stop(1); nodes.stop(1);
} }
nodes.setTestCluster(testCluster); nodes.setTestCluster(testCluster);
nodes.deploy(1); nodes.deploy(1);
nodes.start(1); nodes.start(1);
} catch (Exception e) { } catch (Exception e) {
e.printStackTrace(); e.printStackTrace();
} }


@ -7,13 +7,11 @@ import org.junit.Test;
import java.sql.*; import java.sql.*;
import java.util.Properties; import java.util.Properties;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.*;
import static org.junit.Assert.assertTrue;
public class BatchInsertTest extends BaseTest { public class BatchInsertTest extends BaseTest {


@ -0,0 +1,81 @@
package com.taosdata.jdbc;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
import java.util.Properties;
import java.util.Random;
import static org.junit.Assert.assertEquals;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.*;
import static org.junit.Assert.assertTrue;
public class QueryDataTest extends BaseTest {
static Connection connection = null;
static Statement statement = null;
static String dbName = "test";
static String stbName = "meters";
static String host = "localhost";
static int numOfTables = 30;
final static int numOfRecordsPerTable = 1000;
static long ts = 1496732686000l;
final static String tablePrefix = "t";
@Before
public void createDatabase() throws SQLException {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("use " + dbName);
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(6))";
statement.executeUpdate(createTableSql);
}
@Test
public void testQueryBinaryData() throws SQLException{
String insertSql = "insert into " + stbName + " values(now, 'taosda')";
System.out.println(insertSql);
statement.executeUpdate(insertSql);
String querySql = "select * from " + stbName;
ResultSet rs = statement.executeQuery(querySql);
while(rs.next()) {
String name = rs.getString(2) + "001";
System.out.println("name = " + name);
assertEquals(name, "taosda001");
}
rs.close();
}
@After
public void close() throws Exception {
statement.close();
connection.close();
Thread.sleep(10);
}
}


@ -34,7 +34,6 @@ public class SelectTest extends BaseTest {
statement.executeUpdate("drop database if exists " + dbName); statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName); statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)"); statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
} }
@Test @Test
@ -66,6 +65,5 @@ public class SelectTest extends BaseTest {
statement.close(); statement.close();
connection.close(); connection.close();
Thread.sleep(10); Thread.sleep(10);
} }
} }


@ -0,0 +1,108 @@
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.lib.TSDBCommon;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
public class BatchInsertTest {
static String host = "localhost";
static String dbName = "test";
static String stbName = "meters";
static int numOfTables = 30;
final static int numOfRecordsPerTable = 1000;
static long ts = 1496732686000l;
final static String tablePrefix = "t";
private Connection connection;
@Before
public void before() {
try {
connection = TSDBCommon.getConn(host);
TSDBCommon.createDatabase(connection, dbName);
TSDBCommon.createStable(connection, stbName);
TSDBCommon.createTables(connection, numOfTables, stbName, tablePrefix);
} catch (Exception e) {
e.printStackTrace();
}
}
@Test
public void testBatchInsert(){
ExecutorService executorService = Executors.newFixedThreadPool(numOfTables);
for (int i = 0; i < numOfTables; i++) {
final int index = i;
executorService.execute(new Runnable() {
@Override
public void run() {
try {
long startTime = System.currentTimeMillis();
Statement statement = connection.createStatement(); // get statement
StringBuilder sb = new StringBuilder();
sb.append("INSERT INTO " + tablePrefix + index + " VALUES");
Random rand = new Random();
for (int j = 1; j <= numOfRecordsPerTable; j++) {
sb.append("(" + (ts + j) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ")");
}
statement.addBatch(sb.toString());
statement.executeBatch();
long endTime = System.currentTimeMillis();
System.out.println("Thread " + index + " takes " + (endTime - startTime) + " milliseconds");
connection.commit();
statement.close();
} catch (Exception e) {
e.printStackTrace();
}
}
});
}
executorService.shutdown();
try {
executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
e.printStackTrace();
}
try{
Statement statement = connection.createStatement();
ResultSet rs = statement.executeQuery("select * from meters");
int num = 0;
while (rs.next()) {
num++;
}
assertEquals(num, numOfTables * numOfRecordsPerTable);
rs.close();
}catch (Exception e){
e.printStackTrace();
}
}
@After
public void after() {
try {
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
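
The verification step in the test above counts rows on the client by iterating the result set. Assuming a server-side aggregate is preferred, the same assertion could be written as the following drop-in snippet inside testBatchInsert(); it reuses the fields of the test class and is a sketch, not part of this changeset.

    try (Statement stmt = connection.createStatement();
         ResultSet rs = stmt.executeQuery("select count(*) from meters")) {
        if (rs.next()) {
            // expected value first, actual value from count(*) second
            assertEquals(numOfTables * numOfRecordsPerTable, rs.getLong(1));
        }
    }
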


@ -0,0 +1,47 @@
package com.taosdata.jdbc.lib;
import com.taosdata.jdbc.TSDBDriver;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
public class TSDBCommon {
public static Connection getConn(String host) throws SQLException, ClassNotFoundException {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
return DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
}
public static void createDatabase(Connection connection, String dbName) throws SQLException {
Statement statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("use " + dbName);
statement.close();
}
public static void createStable(Connection connection, String stbName) throws SQLException {
Statement statement = connection.createStatement();
String createTableSql = "create table " + stbName + "(ts timestamp, f1 int, f2 int, f3 int) tags(areaid int, loc binary(20))";
statement.executeUpdate(createTableSql);
statement.close();
}
public static void createTables(Connection connection, int numOfTables, String stbName,String tablePrefix) throws SQLException {
Statement statement = connection.createStatement();
for(int i = 0; i < numOfTables; i++) {
String loc = i % 2 == 0 ? "beijing" : "shanghai";
String createSubTalbesSql = "create table " + tablePrefix + i + " using " + stbName + " tags(" + i + ", '" + loc + "')";
statement.executeUpdate(createSubTalbesSql);
}
statement.close();
}
}
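
For context, the intended call order of these helpers in a new test would look roughly like the sketch below. Names are illustrative, and the checked exceptions (SQLException, ClassNotFoundException) must be handled or declared by the caller.

    Connection conn = TSDBCommon.getConn("localhost");  // loads the driver and sets charset/locale/timezone properties
    TSDBCommon.createDatabase(conn, "demo");            // drop + create + use the database
    TSDBCommon.createStable(conn, "meters");            // super table: (ts, f1, f2, f3) tags(areaid, loc)
    TSDBCommon.createTables(conn, 10, "meters", "d");   // sub tables d0..d9 created from the super table
    // ... createStatement(), insert and query as needed ...
    conn.close();
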


@ -16,11 +16,13 @@
#define _DEFAULT_SOURCE #define _DEFAULT_SOURCE
#include "os.h" #include "os.h"
#include "taosdef.h" #include "taosdef.h"
#include "taosmsg.h"
#include "tglobal.h" #include "tglobal.h"
#include "mnode.h" #include "mnode.h"
#include "http.h" #include "http.h"
#include "tmqtt.h" #include "tmqtt.h"
#include "monitor.h" #include "monitor.h"
#include "dnode.h"
#include "dnodeInt.h" #include "dnodeInt.h"
#include "dnodeModule.h" #include "dnodeModule.h"
@ -129,17 +131,34 @@ void dnodeProcessModuleStatus(uint32_t moduleStatus) {
for (int32_t module = TSDB_MOD_MNODE; module < TSDB_MOD_HTTP; ++module) { for (int32_t module = TSDB_MOD_MNODE; module < TSDB_MOD_HTTP; ++module) {
bool enableModule = moduleStatus & (1 << module); bool enableModule = moduleStatus & (1 << module);
if (!tsModule[module].enable && enableModule) { if (!tsModule[module].enable && enableModule) {
dInfo("module status:%u is received, start %s module", tsModuleStatus, tsModule[module].name); dInfo("module status:%u is set, start %s module", moduleStatus, tsModule[module].name);
tsModule[module].enable = true; tsModule[module].enable = true;
dnodeSetModuleStatus(module); dnodeSetModuleStatus(module);
(*tsModule[module].startFp)(); (*tsModule[module].startFp)();
} }
if (tsModule[module].enable && !enableModule) { if (tsModule[module].enable && !enableModule) {
dInfo("module status:%u is received, stop %s module", tsModuleStatus, tsModule[module].name); dInfo("module status:%u is set, stop %s module", moduleStatus, tsModule[module].name);
tsModule[module].enable = false; tsModule[module].enable = false;
dnodeUnSetModuleStatus(module); dnodeUnSetModuleStatus(module);
(*tsModule[module].stopFp)(); (*tsModule[module].stopFp)();
} }
} }
} }
bool dnodeCheckMnodeStarting() {
if (tsModuleStatus & TSDB_MOD_MNODE) return false;
SDMMnodeInfos *mnodes = dnodeGetMnodeInfos();
for (int32_t i = 0; i < mnodes->nodeNum; ++i) {
SDMMnodeInfo *node = &mnodes->nodeInfos[i];
if (node->nodeId == dnodeGetDnodeId()) {
uint32_t moduleStatus = tsModuleStatus | (1 << TSDB_MOD_MNODE);
dInfo("start mnode module, module status:%d, new status:%d", tsModuleStatus, moduleStatus);
dnodeProcessModuleStatus(moduleStatus);
return true;
}
}
return false;
}


@ -154,15 +154,15 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char
rpcMsg.contLen = sizeof(SDMAuthMsg); rpcMsg.contLen = sizeof(SDMAuthMsg);
rpcMsg.msgType = TSDB_MSG_TYPE_DM_AUTH; rpcMsg.msgType = TSDB_MSG_TYPE_DM_AUTH;
dDebug("user:%s, send auth msg to mnode", user); dDebug("user:%s, send auth msg to mnodes", user);
SRpcMsg rpcRsp = {0}; SRpcMsg rpcRsp = {0};
dnodeSendMsgToDnodeRecv(&rpcMsg, &rpcRsp); dnodeSendMsgToDnodeRecv(&rpcMsg, &rpcRsp);
if (rpcRsp.code != 0) { if (rpcRsp.code != 0) {
dError("user:%s, auth msg received from mnode, error:%s", user, tstrerror(rpcRsp.code)); dError("user:%s, auth msg received from mnodes, error:%s", user, tstrerror(rpcRsp.code));
} else { } else {
SDMAuthRsp *pRsp = rpcRsp.pCont; SDMAuthRsp *pRsp = rpcRsp.pCont;
dDebug("user:%s, auth msg received from mnode", user); dDebug("user:%s, auth msg received from mnodes", user);
memcpy(secret, pRsp->secret, TSDB_KEY_LEN); memcpy(secret, pRsp->secret, TSDB_KEY_LEN);
memcpy(ckey, pRsp->ckey, TSDB_KEY_LEN); memcpy(ckey, pRsp->ckey, TSDB_KEY_LEN);
*spi = pRsp->spi; *spi = pRsp->spi;


@ -43,6 +43,7 @@ void dnodeGetMnodeEpSetForPeer(void *epSet);
void dnodeGetMnodeEpSetForShell(void *epSet); void dnodeGetMnodeEpSetForShell(void *epSet);
void * dnodeGetMnodeInfos(); void * dnodeGetMnodeInfos();
int32_t dnodeGetDnodeId(); int32_t dnodeGetDnodeId();
bool dnodeCheckMnodeStarting();
void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg)); void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg));
void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg); void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg);


@ -103,6 +103,9 @@ typedef struct {
typedef void* tsync_h; typedef void* tsync_h;
int32_t syncInit();
void syncCleanUp();
tsync_h syncStart(const SSyncInfo *); tsync_h syncStart(const SSyncInfo *);
void syncStop(tsync_h shandle); void syncStop(tsync_h shandle);
int32_t syncReconfig(tsync_h shandle, const SSyncCfg *); int32_t syncReconfig(tsync_h shandle, const SSyncCfg *);


@ -786,7 +786,7 @@ void read_history() {
} }
void write_history() { void write_history() {
char f_history[128]; char f_history[TSDB_FILENAME_LEN];
get_history_path(f_history); get_history_path(f_history);
FILE *f = fopen(f_history, "w"); FILE *f = fopen(f_history, "w");


@ -409,7 +409,7 @@ void set_terminal_mode() {
} }
} }
void get_history_path(char *history) { sprintf(history, "%s/%s", getenv("HOME"), HISTORY_FILE); } void get_history_path(char *history) { snprintf(history, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), HISTORY_FILE); }
void clearScreen(int ecmd_pos, int cursor_pos) { void clearScreen(int ecmd_pos, int cursor_pos) {
struct winsize w; struct winsize w;


@ -211,8 +211,8 @@ static int32_t mnodeCreateRootAcct() {
strcpy(pAcct->user, TSDB_DEFAULT_USER); strcpy(pAcct->user, TSDB_DEFAULT_USER);
taosEncryptPass((uint8_t *)TSDB_DEFAULT_PASS, strlen(TSDB_DEFAULT_PASS), pAcct->pass); taosEncryptPass((uint8_t *)TSDB_DEFAULT_PASS, strlen(TSDB_DEFAULT_PASS), pAcct->pass);
pAcct->cfg = (SAcctCfg){ pAcct->cfg = (SAcctCfg){
.maxUsers = 10, .maxUsers = 128,
.maxDbs = 64, .maxDbs = 128,
.maxTimeSeries = INT32_MAX, .maxTimeSeries = INT32_MAX,
.maxConnections = 1024, .maxConnections = 1024,
.maxStreams = 1000, .maxStreams = 1000,


@ -242,6 +242,7 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
return TSDB_CODE_MND_INVALID_DB_OPTION; return TSDB_CODE_MND_INVALID_DB_OPTION;
} }
#if 0
if (pCfg->daysToKeep2 < TSDB_MIN_KEEP || pCfg->daysToKeep2 > pCfg->daysToKeep) { if (pCfg->daysToKeep2 < TSDB_MIN_KEEP || pCfg->daysToKeep2 > pCfg->daysToKeep) {
mError("invalid db option daysToKeep2:%d valid range: [%d, %d]", pCfg->daysToKeep, TSDB_MIN_KEEP, pCfg->daysToKeep); mError("invalid db option daysToKeep2:%d valid range: [%d, %d]", pCfg->daysToKeep, TSDB_MIN_KEEP, pCfg->daysToKeep);
return TSDB_CODE_MND_INVALID_DB_OPTION; return TSDB_CODE_MND_INVALID_DB_OPTION;
@ -251,6 +252,7 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
mError("invalid db option daysToKeep1:%d valid range: [%d, %d]", pCfg->daysToKeep1, TSDB_MIN_KEEP, pCfg->daysToKeep2); mError("invalid db option daysToKeep1:%d valid range: [%d, %d]", pCfg->daysToKeep1, TSDB_MIN_KEEP, pCfg->daysToKeep2);
return TSDB_CODE_MND_INVALID_DB_OPTION; return TSDB_CODE_MND_INVALID_DB_OPTION;
} }
#endif
if (pCfg->maxRowsPerFileBlock < TSDB_MIN_MAX_ROW_FBLOCK || pCfg->maxRowsPerFileBlock > TSDB_MAX_MAX_ROW_FBLOCK) { if (pCfg->maxRowsPerFileBlock < TSDB_MIN_MAX_ROW_FBLOCK || pCfg->maxRowsPerFileBlock > TSDB_MAX_MAX_ROW_FBLOCK) {
mError("invalid db option maxRowsPerFileBlock:%d valid range: [%d, %d]", pCfg->maxRowsPerFileBlock, mError("invalid db option maxRowsPerFileBlock:%d valid range: [%d, %d]", pCfg->maxRowsPerFileBlock,


@ -72,6 +72,7 @@ static int32_t mnodeDnodeActionInsert(SSdbOper *pOper) {
pDnode->lastAccess = tsAccessSquence; pDnode->lastAccess = tsAccessSquence;
} }
mInfo("dnode:%d, fqdn:%s ep:%s port:%d, do insert action", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }


@ -68,6 +68,7 @@ static int32_t mnodeMnodeActionInsert(SSdbOper *pOper) {
pDnode->isMgmt = true; pDnode->isMgmt = true;
mnodeDecDnodeRef(pDnode); mnodeDecDnodeRef(pDnode);
mInfo("mnode:%d, fqdn:%s ep:%s port:%d, do insert action", pMnode->mnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }


@ -22,6 +22,7 @@
#include "tqueue.h" #include "tqueue.h"
#include "twal.h" #include "twal.h"
#include "tsync.h" #include "tsync.h"
#include "ttimer.h"
#include "tglobal.h" #include "tglobal.h"
#include "dnode.h" #include "dnode.h"
#include "mnode.h" #include "mnode.h"
@ -64,6 +65,7 @@ typedef struct _SSdbTable {
int32_t (*encodeFp)(SSdbOper *pOper); int32_t (*encodeFp)(SSdbOper *pOper);
int32_t (*destroyFp)(SSdbOper *pOper); int32_t (*destroyFp)(SSdbOper *pOper);
int32_t (*restoredFp)(); int32_t (*restoredFp)();
pthread_mutex_t mutex;
} SSdbTable; } SSdbTable;
typedef struct { typedef struct {
@ -88,6 +90,8 @@ typedef struct {
SSdbWriteWorker *writeWorker; SSdbWriteWorker *writeWorker;
} SSdbWriteWorkerPool; } SSdbWriteWorkerPool;
extern void * tsMnodeTmr;
static void * tsUpdateSyncTmr;
static SSdbObject tsSdbObj = {0}; static SSdbObject tsSdbObj = {0};
static taos_qset tsSdbWriteQset; static taos_qset tsSdbWriteQset;
static taos_qall tsSdbWriteQall; static taos_qall tsSdbWriteQall;
@ -290,11 +294,17 @@ static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
taosFreeQitem(pOper); taosFreeQitem(pOper);
} }
static void sdbUpdateSyncTmrFp(void *param, void *tmrId) { sdbUpdateSync(); }
void sdbUpdateSync() { void sdbUpdateSync() {
if (!mnodeIsRunning()) { if (!mnodeIsRunning()) {
mDebug("mnode not start yet, update sync info later"); mDebug("mnode not start yet, update sync info later");
if (dnodeCheckMnodeStarting()) {
taosTmrReset(sdbUpdateSyncTmrFp, 1000, NULL, tsMnodeTmr, &tsUpdateSyncTmr);
}
return; return;
} }
mDebug("update sync info in sdb");
SSyncCfg syncCfg = {0}; SSyncCfg syncCfg = {0};
int32_t index = 0; int32_t index = 0;
@ -387,8 +397,6 @@ int32_t sdbInit() {
tsSdbObj.role = TAOS_SYNC_ROLE_MASTER; tsSdbObj.role = TAOS_SYNC_ROLE_MASTER;
} }
sdbUpdateSync();
tsSdbObj.status = SDB_STATUS_SERVING; tsSdbObj.status = SDB_STATUS_SERVING;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -448,8 +456,9 @@ static void *sdbGetRowMeta(SSdbTable *pTable, void *key) {
} }
void **ppRow = (void **)taosHashGet(pTable->iHandle, key, keySize); void **ppRow = (void **)taosHashGet(pTable->iHandle, key, keySize);
if (ppRow == NULL) return NULL; if (ppRow != NULL) return *ppRow;
return *ppRow;
return NULL;
} }
static void *sdbGetRowMetaFromObj(SSdbTable *pTable, void *key) { static void *sdbGetRowMetaFromObj(SSdbTable *pTable, void *key) {
@ -457,13 +466,14 @@ static void *sdbGetRowMetaFromObj(SSdbTable *pTable, void *key) {
} }
void *sdbGetRow(void *handle, void *key) { void *sdbGetRow(void *handle, void *key) {
SSdbTable *pTable = handle;
pthread_mutex_lock(&pTable->mutex);
void *pRow = sdbGetRowMeta(handle, key); void *pRow = sdbGetRowMeta(handle, key);
if (pRow) { if (pRow) sdbIncRef(handle, pRow);
sdbIncRef(handle, pRow); pthread_mutex_unlock(&pTable->mutex);
return pRow;
} else { return pRow;
return NULL;
}
} }
static void *sdbGetRowFromObj(SSdbTable *pTable, void *key) { static void *sdbGetRowFromObj(SSdbTable *pTable, void *key) {
@ -478,7 +488,9 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
keySize = strlen((char *)key); keySize = strlen((char *)key);
} }
pthread_mutex_lock(&pTable->mutex);
taosHashPut(pTable->iHandle, key, keySize, &pOper->pObj, sizeof(int64_t)); taosHashPut(pTable->iHandle, key, keySize, &pOper->pObj, sizeof(int64_t));
pthread_mutex_unlock(&pTable->mutex);
sdbIncRef(pTable, pOper->pObj); sdbIncRef(pTable, pOper->pObj);
atomic_add_fetch_32(&pTable->numOfRows, 1); atomic_add_fetch_32(&pTable->numOfRows, 1);
@ -519,7 +531,10 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
keySize = strlen((char *)key); keySize = strlen((char *)key);
} }
pthread_mutex_lock(&pTable->mutex);
taosHashRemove(pTable->iHandle, key, keySize); taosHashRemove(pTable->iHandle, key, keySize);
pthread_mutex_unlock(&pTable->mutex);
atomic_sub_fetch_32(&pTable->numOfRows, 1); atomic_sub_fetch_32(&pTable->numOfRows, 1);
sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName, sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName,
@ -612,8 +627,8 @@ static int sdbWrite(void *param, void *data, int type) {
} else if (action == SDB_ACTION_DELETE) { } else if (action == SDB_ACTION_DELETE) {
void *pRow = sdbGetRowMeta(pTable, pHead->cont); void *pRow = sdbGetRowMeta(pTable, pHead->cont);
if (pRow == NULL) { if (pRow == NULL) {
sdbError("table:%s, failed to get object:%s from wal while dispose delete action", pTable->tableName, sdbDebug("table:%s, object:%s not exist in hash, ignore delete action", pTable->tableName,
pHead->cont); sdbGetKeyStr(pTable, pHead->cont));
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
SSdbOper oper = {.table = pTable, .pObj = pRow}; SSdbOper oper = {.table = pTable, .pObj = pRow};
@ -621,8 +636,8 @@ static int sdbWrite(void *param, void *data, int type) {
} else if (action == SDB_ACTION_UPDATE) { } else if (action == SDB_ACTION_UPDATE) {
void *pRow = sdbGetRowMeta(pTable, pHead->cont); void *pRow = sdbGetRowMeta(pTable, pHead->cont);
if (pRow == NULL) { if (pRow == NULL) {
sdbError("table:%s, failed to get object:%s from wal while dispose update action", pTable->tableName, sdbDebug("table:%s, object:%s not exist in hash, ignore update action", pTable->tableName,
pHead->cont); sdbGetKeyStr(pTable, pHead->cont));
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable}; SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable};
@ -861,6 +876,7 @@ void *sdbOpenTable(SSdbTableDesc *pDesc) {
if (pTable == NULL) return NULL; if (pTable == NULL) return NULL;
pthread_mutex_init(&pTable->mutex, NULL);
tstrncpy(pTable->tableName, pDesc->tableName, SDB_TABLE_LEN); tstrncpy(pTable->tableName, pDesc->tableName, SDB_TABLE_LEN);
pTable->keyType = pDesc->keyType; pTable->keyType = pDesc->keyType;
pTable->tableId = pDesc->tableId; pTable->tableId = pDesc->tableId;
@ -908,6 +924,7 @@ void sdbCloseTable(void *handle) {
taosHashDestroyIter(pIter); taosHashDestroyIter(pIter);
taosHashCleanup(pTable->iHandle); taosHashCleanup(pTable->iHandle);
pthread_mutex_destroy(&pTable->mutex);
sdbDebug("table:%s, is closed, numOfTables:%d", pTable->tableName, tsSdbObj.numOfTables); sdbDebug("table:%s, is closed, numOfTables:%d", pTable->tableName, tsSdbObj.numOfTables);
free(pTable); free(pTable);


@ -24,6 +24,7 @@ extern "C" {
void taosRemoveDir(char *rootDir); void taosRemoveDir(char *rootDir);
int taosMkDir(const char *pathname, mode_t mode); int taosMkDir(const char *pathname, mode_t mode);
void taosRename(char* oldName, char *newName); void taosRename(char* oldName, char *newName);
void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays);
#ifdef __cplusplus #ifdef __cplusplus
} }


@ -39,6 +39,7 @@
#include <Winsock2.h> #include <Winsock2.h>
#include <time.h> #include <time.h>
#include <inttypes.h> #include <inttypes.h>
#include <conio.h>
#include "msvcProcess.h" #include "msvcProcess.h"
#include "msvcDirect.h" #include "msvcDirect.h"
#include "msvcFcntl.h" #include "msvcFcntl.h"
@ -58,8 +59,6 @@ extern "C" {
int32_t BUILDIN_CTZL(uint64_t val); int32_t BUILDIN_CTZL(uint64_t val);
int32_t BUILDIN_CTZ(uint32_t val); int32_t BUILDIN_CTZ(uint32_t val);
#define TAOS_OS_FUNC_DIR
#define TAOS_OS_FUNC_FILE #define TAOS_OS_FUNC_FILE
#define TAOS_OS_FUNC_FILE_ISREG #define TAOS_OS_FUNC_FILE_ISREG
#define TAOS_OS_FUNC_FILE_ISDIR #define TAOS_OS_FUNC_FILE_ISDIR


@ -18,8 +18,6 @@
#include "tglobal.h" #include "tglobal.h"
#include "tulog.h" #include "tulog.h"
#ifndef TAOS_OS_FUNC_DIR
void taosRemoveDir(char *rootDir) { void taosRemoveDir(char *rootDir) {
DIR *dir = opendir(rootDir); DIR *dir = opendir(rootDir);
if (dir == NULL) return; if (dir == NULL) return;
@ -51,18 +49,54 @@ int taosMkDir(const char *path, mode_t mode) {
} }
void taosRename(char* oldName, char *newName) { void taosRename(char* oldName, char *newName) {
if (0 == tsEnableVnodeBak) {
uInfo("vnode backup not enabled");
return;
}
// if newName is not empty, rename will fail. // if newName is not empty, rename will fail.
// the newName must be empty or must not exist // the newName must be empty or must not exist
if (rename(oldName, newName)) { if (rename(oldName, newName)) {
uError("%s is modify to %s fail, reason:%s", oldName, newName, strerror(errno)); uError("failed to rename file %s to %s, reason:%s", oldName, newName, strerror(errno));
} else { } else {
uInfo("%s is modify to %s success!", oldName, newName); uInfo("successfully renamed file %s to %s", oldName, newName);
} }
} }
#endif void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays) {
DIR *dir = opendir(rootDir);
if (dir == NULL) return;
int64_t sec = taosGetTimestampSec();
struct dirent *de = NULL;
while ((de = readdir(dir)) != NULL) {
if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue;
char filename[1024];
snprintf(filename, 1023, "%s/%s", rootDir, de->d_name);
if (de->d_type & DT_DIR) {
continue;
} else {
// struct stat fState;
// if (stat(fname, &fState) < 0) {
// continue;
// }
int32_t len = (int32_t)strlen(filename);
int64_t fileSec = 0;
for (int i = len - 1; i >= 0; i--) {
if (filename[i] == '.') {
fileSec = atoll(filename + i + 1);
break;
}
}
if (fileSec <= 100) continue;
int32_t days = (int32_t)(ABS(sec - fileSec) / 86400 + 1);
if (days > keepDays) {
(void)remove(filename);
uInfo("file:%s is removed, days:%d keepDays:%d", filename, days, keepDays);
} else {
uTrace("file:%s won't be removed, days:%d keepDays:%d", filename, days, keepDays);
}
}
}
closedir(dir);
rmdir(rootDir);
}


@ -1,31 +0,0 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "tulog.h"
void taosRemoveDir(char *rootDir) {
uError("%s not implemented yet", __FUNCTION__);
}
int taosMkDir(const char *path, mode_t mode) {
uError("%s not implemented yet", __FUNCTION__);
return 0;
}
void taosMvDir(char* destDir, char *srcDir) {
uError("%s not implemented yet", __FUNCTION__);
}


@ -58,11 +58,20 @@ char *strsep(char **stringp, const char *delim) {
char *getpass(const char *prefix) { char *getpass(const char *prefix) {
static char passwd[TSDB_KEY_LEN] = {0}; static char passwd[TSDB_KEY_LEN] = {0};
memset(passwd, 0, TSDB_KEY_LEN);
printf("%s", prefix); printf("%s", prefix);
scanf("%s", passwd);
char n = getchar(); int32_t index = 0;
char ch;
while (index < TSDB_KEY_LEN) {
ch = getch();
if (ch == '\n' || ch == '\r') {
break;
} else {
passwd[index++] = ch;
}
}
return passwd; return passwd;
} }
@ -131,11 +140,11 @@ int tasoUcs4Compare(void *f1_ucs4, void *f2_ucs4, int bytes) {
} }
/* Copy memory to memory until the specified number of bytes /* Copy memory to memory until the specified number of bytes
has been copied, return pointer to following byte. has been copied, return pointer to following byte.
Overlap is NOT handled correctly. */ Overlap is NOT handled correctly. */
void *mempcpy(void *dest, const void *src, size_t len) { void *mempcpy(void *dest, const void *src, size_t len) {
return (char*)memcpy(dest, src, len) + len; return (char*)memcpy(dest, src, len) + len;
} }
/* Copy SRC to DEST, returning the address of the terminating '\0' in DEST. */ /* Copy SRC to DEST, returning the address of the terminating '\0' in DEST. */


@ -115,6 +115,10 @@ static void *monitorThreadFunc(void *param) {
monitorDebug("starting to initialize monitor module ..."); monitorDebug("starting to initialize monitor module ...");
while (1) { while (1) {
static int32_t accessTimes = 0;
accessTimes++;
taosMsleep(1000);
if (tsMonitor.quiting) { if (tsMonitor.quiting) {
tsMonitor.state = MON_STATE_NOT_INIT; tsMonitor.state = MON_STATE_NOT_INIT;
monitorInfo("monitor thread will quit, for taosd is quiting"); monitorInfo("monitor thread will quit, for taosd is quiting");
@ -126,11 +130,7 @@ static void *monitorThreadFunc(void *param) {
if (tsMonitor.start == 0) { if (tsMonitor.start == 0) {
continue; continue;
} }
static int32_t accessTimes = 0;
accessTimes++;
taosMsleep(1000);
if (dnodeGetDnodeId() <= 0) { if (dnodeGetDnodeId() <= 0) {
monitorDebug("dnode not initialized, waiting for 3000 ms to start monitor module"); monitorDebug("dnode not initialized, waiting for 3000 ms to start monitor module");
continue; continue;


@ -709,21 +709,21 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
} }
if (terrno != 0) { if (terrno != 0) {
taosFreeId(pRpc->idPool, sid); // sid shall be released taosFreeId(pRpc->idPool, sid); // sid shall be released
pConn = NULL; pConn = NULL;
} }
} }
} }
if (pConn) { if (pConn) {
if (pRecv->connType == RPC_CONN_UDPS && pRpc->numOfThreads > 1) { if (pRecv->connType == RPC_CONN_UDPS && pRpc->numOfThreads > 1) {
// UDP server, assign to new connection // UDP server, assign to new connection
pRpc->index = (pRpc->index+1) % pRpc->numOfThreads; pRpc->index = (pRpc->index + 1) % pRpc->numOfThreads;
pConn->localPort = (pRpc->localPort + pRpc->index); pConn->localPort = (pRpc->localPort + pRpc->index);
} }
taosHashPut(pRpc->hash, hashstr, size, (char *)&pConn, POINTER_BYTES); taosHashPut(pRpc->hash, hashstr, size, (char *)&pConn, POINTER_BYTES);
tDebug("%s %p server connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid); tDebug("%s %p server connection is allocated, uid:0x%x sid:%d key:%s", pRpc->label, pConn, pConn->linkUid, sid, hashstr);
} }
return pConn; return pConn;


@ -30,23 +30,19 @@
#include "syncInt.h" #include "syncInt.h"
// global configurable // global configurable
int tsMaxSyncNum = 2; int tsMaxSyncNum = 2;
int tsSyncTcpThreads = 2; int tsSyncTcpThreads = 2;
int tsMaxWatchFiles = 500; int tsMaxWatchFiles = 500;
int tsMaxFwdInfo = 200; int tsMaxFwdInfo = 200;
int tsSyncTimer = 1; int tsSyncTimer = 1;
//int sDebugFlag = 135;
//char tsArbitrator[TSDB_FQDN_LEN] = {0};
// module global, not configurable // module global, not configurable
int tsSyncNum; // number of sync in process in whole system int tsSyncNum; // number of sync in process in whole system
char tsNodeFqdn[TSDB_FQDN_LEN]; char tsNodeFqdn[TSDB_FQDN_LEN];
static int tsNodeNum; // number of nodes in system static ttpool_h tsTcpPool;
static ttpool_h tsTcpPool; static void * syncTmrCtrl = NULL;
static void *syncTmrCtrl = NULL; static void * vgIdHash;
static void *vgIdHash;
static pthread_once_t syncModuleInit = PTHREAD_ONCE_INIT;
// local functions // local functions
static void syncProcessSyncRequest(char *pMsg, SSyncPeer *pPeer); static void syncProcessSyncRequest(char *pMsg, SSyncPeer *pPeer);
@ -75,7 +71,7 @@ char* syncRole[] = {
"master" "master"
}; };
static void syncModuleInitFunc() { int32_t syncInit() {
SPoolInfo info; SPoolInfo info;
info.numOfThreads = tsSyncTcpThreads; info.numOfThreads = tsSyncTcpThreads;
@ -87,25 +83,52 @@ static void syncModuleInitFunc() {
info.processIncomingConn = syncProcessIncommingConnection; info.processIncomingConn = syncProcessIncommingConnection;
tsTcpPool = taosOpenTcpThreadPool(&info); tsTcpPool = taosOpenTcpThreadPool(&info);
if (tsTcpPool == NULL) return; if (tsTcpPool == NULL) {
sError("failed to init tcpPool");
return -1;
}
syncTmrCtrl = taosTmrInit(1000, 50, 10000, "SYNC"); syncTmrCtrl = taosTmrInit(1000, 50, 10000, "SYNC");
if (syncTmrCtrl == NULL) { if (syncTmrCtrl == NULL) {
sError("failed to init tmrCtrl");
taosCloseTcpThreadPool(tsTcpPool); taosCloseTcpThreadPool(tsTcpPool);
tsTcpPool = NULL; tsTcpPool = NULL;
return; return -1;
} }
vgIdHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true); vgIdHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true);
if (vgIdHash == NULL) { if (vgIdHash == NULL) {
sError("failed to init vgIdHash");
taosTmrCleanUp(syncTmrCtrl); taosTmrCleanUp(syncTmrCtrl);
taosCloseTcpThreadPool(tsTcpPool); taosCloseTcpThreadPool(tsTcpPool);
tsTcpPool = NULL; tsTcpPool = NULL;
syncTmrCtrl = NULL; syncTmrCtrl = NULL;
return; return -1;
} }
tstrncpy(tsNodeFqdn, tsLocalFqdn, sizeof(tsNodeFqdn)); tstrncpy(tsNodeFqdn, tsLocalFqdn, sizeof(tsNodeFqdn));
sInfo("sync module initialized successfully");
return 0;
}
void syncCleanUp() {
if (tsTcpPool) {
taosCloseTcpThreadPool(tsTcpPool);
tsTcpPool = NULL;
}
if (syncTmrCtrl) {
taosTmrCleanUp(syncTmrCtrl);
syncTmrCtrl = NULL;
}
if (vgIdHash) {
taosHashCleanup(vgIdHash);
vgIdHash = NULL;
}
sInfo("sync module is cleaned up");
} }
void *syncStart(const SSyncInfo *pInfo) { void *syncStart(const SSyncInfo *pInfo) {
@ -118,15 +141,6 @@ void *syncStart(const SSyncInfo *pInfo) {
return NULL; return NULL;
} }
pthread_once(&syncModuleInit, syncModuleInitFunc);
if (tsTcpPool == NULL) {
free(pNode);
syncModuleInit = PTHREAD_ONCE_INIT;
sError("failed to init sync module(%s)", tstrerror(errno));
return NULL;
}
atomic_add_fetch_32(&tsNodeNum, 1);
tstrncpy(pNode->path, pInfo->path, sizeof(pNode->path)); tstrncpy(pNode->path, pInfo->path, sizeof(pNode->path));
pthread_mutex_init(&pNode->mutex, NULL); pthread_mutex_init(&pNode->mutex, NULL);
@ -138,7 +152,7 @@ void *syncStart(const SSyncInfo *pInfo) {
pNode->confirmForward = pInfo->confirmForward; pNode->confirmForward = pInfo->confirmForward;
pNode->notifyFlowCtrl = pInfo->notifyFlowCtrl; pNode->notifyFlowCtrl = pInfo->notifyFlowCtrl;
pNode->notifyFileSynced = pInfo->notifyFileSynced; pNode->notifyFileSynced = pInfo->notifyFileSynced;
pNode->selfIndex = -1; pNode->selfIndex = -1;
pNode->vgId = pInfo->vgId; pNode->vgId = pInfo->vgId;
pNode->replica = pCfg->replica; pNode->replica = pCfg->replica;
@ -148,8 +162,9 @@ void *syncStart(const SSyncInfo *pInfo) {
for (int i = 0; i < pCfg->replica; ++i) { for (int i = 0; i < pCfg->replica; ++i) {
const SNodeInfo *pNodeInfo = pCfg->nodeInfo + i; const SNodeInfo *pNodeInfo = pCfg->nodeInfo + i;
pNode->peerInfo[i] = syncAddPeer(pNode, pNodeInfo); pNode->peerInfo[i] = syncAddPeer(pNode, pNodeInfo);
if ((strcmp(pNodeInfo->nodeFqdn, tsNodeFqdn) == 0) && (pNodeInfo->nodePort == tsSyncPort)) if ((strcmp(pNodeInfo->nodeFqdn, tsNodeFqdn) == 0) && (pNodeInfo->nodePort == tsSyncPort)) {
pNode->selfIndex = i; pNode->selfIndex = i;
}
} }
if (pNode->selfIndex < 0) { if (pNode->selfIndex < 0) {
@ -181,16 +196,17 @@ void *syncStart(const SSyncInfo *pInfo) {
syncAddArbitrator(pNode); syncAddArbitrator(pNode);
syncAddNodeRef(pNode); syncAddNodeRef(pNode);
taosHashPut(vgIdHash, (const char *)&pNode->vgId, sizeof(int32_t), (char *)(&pNode), sizeof(SSyncNode *)); taosHashPut(vgIdHash, (const char *)&pNode->vgId, sizeof(int32_t), (char *)(&pNode), sizeof(SSyncNode *));
if (pNode->notifyRole) if (pNode->notifyRole) {
(*pNode->notifyRole)(pNode->ahandle, nodeRole); (*pNode->notifyRole)(pNode->ahandle, nodeRole);
}
return pNode; return pNode;
} }
void syncStop(void *param) { void syncStop(void *param) {
SSyncNode * pNode = param; SSyncNode *pNode = param;
SSyncPeer *pPeer; SSyncPeer *pPeer;
if (pNode == NULL) return; if (pNode == NULL) return;
sInfo("vgId:%d, cleanup sync", pNode->vgId); sInfo("vgId:%d, cleanup sync", pNode->vgId);
@ -199,7 +215,7 @@ void syncStop(void *param) {
for (int i = 0; i < pNode->replica; ++i) { for (int i = 0; i < pNode->replica; ++i) {
pPeer = pNode->peerInfo[i]; pPeer = pNode->peerInfo[i];
if (pPeer) syncRemovePeer(pPeer); if (pPeer) syncRemovePeer(pPeer);
} }
pPeer = pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]; pPeer = pNode->peerInfo[TAOS_SYNC_MAX_REPLICA];
@ -214,19 +230,19 @@ void syncStop(void *param) {
} }
int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) { int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) {
SSyncNode * pNode = param; SSyncNode *pNode = param;
int i, j; int i, j;
if (pNode == NULL) return TSDB_CODE_SYN_INVALID_CONFIG; if (pNode == NULL) return TSDB_CODE_SYN_INVALID_CONFIG;
sInfo("vgId:%d, reconfig, role:%s replica:%d old:%d", pNode->vgId, syncRole[nodeRole], sInfo("vgId:%d, reconfig, role:%s replica:%d old:%d", pNode->vgId, syncRole[nodeRole], pNewCfg->replica,
pNewCfg->replica, pNode->replica); pNode->replica);
pthread_mutex_lock(&(pNode->mutex)); pthread_mutex_lock(&(pNode->mutex));
for (i = 0; i < pNode->replica; ++i) { for (i = 0; i < pNode->replica; ++i) {
for (j = 0; j < pNewCfg->replica; ++j) { for (j = 0; j < pNewCfg->replica; ++j) {
if ((strcmp(pNode->peerInfo[i]->fqdn, pNewCfg->nodeInfo[j].nodeFqdn) == 0) && if ((strcmp(pNode->peerInfo[i]->fqdn, pNewCfg->nodeInfo[j].nodeFqdn) == 0) &&
(pNode->peerInfo[i]->port == pNewCfg->nodeInfo[j].nodePort)) (pNode->peerInfo[i]->port == pNewCfg->nodeInfo[j].nodePort))
break; break;
} }
@ -241,8 +257,8 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) {
const SNodeInfo *pNewNode = &pNewCfg->nodeInfo[i]; const SNodeInfo *pNewNode = &pNewCfg->nodeInfo[i];
for (j = 0; j < pNode->replica; ++j) { for (j = 0; j < pNode->replica; ++j) {
if (pNode->peerInfo[j] && (strcmp(pNode->peerInfo[j]->fqdn, pNewNode->nodeFqdn) == 0) && if (pNode->peerInfo[j] && (strcmp(pNode->peerInfo[j]->fqdn, pNewNode->nodeFqdn) == 0) &&
(pNode->peerInfo[j]->port == pNewNode->nodePort)) (pNode->peerInfo[j]->port == pNewNode->nodePort))
break; break;
} }
@ -252,8 +268,9 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) {
newPeers[i] = pNode->peerInfo[j]; newPeers[i] = pNode->peerInfo[j];
} }
if ((strcmp(pNewNode->nodeFqdn, tsNodeFqdn) == 0) && (pNewNode->nodePort == tsSyncPort)) if ((strcmp(pNewNode->nodeFqdn, tsNodeFqdn) == 0) && (pNewNode->nodePort == tsSyncPort)) {
pNode->selfIndex = i; pNode->selfIndex = i;
}
} }
pNode->replica = pNewCfg->replica; pNode->replica = pNewCfg->replica;
@ -261,8 +278,9 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) {
if (pNode->quorum > pNode->replica) pNode->quorum = pNode->replica; if (pNode->quorum > pNode->replica) pNode->quorum = pNode->replica;
memcpy(pNode->peerInfo, newPeers, sizeof(SSyncPeer *) * pNewCfg->replica); memcpy(pNode->peerInfo, newPeers, sizeof(SSyncPeer *) * pNewCfg->replica);
for (i = pNewCfg->replica; i < TAOS_SYNC_MAX_REPLICA; ++i) for (i = pNewCfg->replica; i < TAOS_SYNC_MAX_REPLICA; ++i) {
pNode->peerInfo[i] = NULL; pNode->peerInfo[i] = NULL;
}
syncAddArbitrator(pNode); syncAddArbitrator(pNode);
@ -274,43 +292,44 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) {
pthread_mutex_unlock(&(pNode->mutex)); pthread_mutex_unlock(&(pNode->mutex));
sInfo("vgId:%d, %d replicas are configured, quorum:%d role:%s", pNode->vgId, pNode->replica, pNode->quorum, syncRole[nodeRole]); sInfo("vgId:%d, %d replicas are configured, quorum:%d role:%s", pNode->vgId, pNode->replica, pNode->quorum,
syncRole[nodeRole]);
syncBroadcastStatus(pNode); syncBroadcastStatus(pNode);
return 0; return 0;
} }
int32_t syncForwardToPeer(void *param, void *data, void *mhandle, int qtype) { int32_t syncForwardToPeer(void *param, void *data, void *mhandle, int qtype) {
SSyncNode * pNode = param; SSyncNode *pNode = param;
SSyncPeer * pPeer; SSyncPeer *pPeer;
SSyncHead *pSyncHead; SSyncHead *pSyncHead;
SWalHead *pWalHead = data; SWalHead * pWalHead = data;
int fwdLen; int fwdLen;
int code = 0; int code = 0;
if (pNode == NULL) return 0; if (pNode == NULL) return 0;
// always update version // always update version
nodeVersion = pWalHead->version; nodeVersion = pWalHead->version;
if (pNode->replica == 1 || nodeRole != TAOS_SYNC_ROLE_MASTER ) return 0; if (pNode->replica == 1 || nodeRole != TAOS_SYNC_ROLE_MASTER) return 0;
// only pkt from RPC or CQ can be forwarded // only pkt from RPC or CQ can be forwarded
if (qtype != TAOS_QTYPE_RPC && qtype != TAOS_QTYPE_CQ) return 0; if (qtype != TAOS_QTYPE_RPC && qtype != TAOS_QTYPE_CQ) return 0;
// a hacker way to improve the performance // a hacker way to improve the performance
pSyncHead = (SSyncHead *) ( ((char *)pWalHead) - sizeof(SSyncHead)); pSyncHead = (SSyncHead *)(((char *)pWalHead) - sizeof(SSyncHead));
pSyncHead->type = TAOS_SMSG_FORWARD; pSyncHead->type = TAOS_SMSG_FORWARD;
pSyncHead->pversion = 0; pSyncHead->pversion = 0;
pSyncHead->len = sizeof(SWalHead) + pWalHead->len; pSyncHead->len = sizeof(SWalHead) + pWalHead->len;
fwdLen = pSyncHead->len + sizeof(SSyncHead); //include the WAL and SYNC head fwdLen = pSyncHead->len + sizeof(SSyncHead); // include the WAL and SYNC head
pthread_mutex_lock(&(pNode->mutex)); pthread_mutex_lock(&(pNode->mutex));
for (int i = 0; i < pNode->replica; ++i) { for (int i = 0; i < pNode->replica; ++i) {
pPeer = pNode->peerInfo[i]; pPeer = pNode->peerInfo[i];
if (pPeer == NULL || pPeer->peerFd <0) continue; if (pPeer == NULL || pPeer->peerFd < 0) continue;
if (pPeer->role != TAOS_SYNC_ROLE_SLAVE && pPeer->sstatus != TAOS_SYNC_STATUS_CACHE) continue; if (pPeer->role != TAOS_SYNC_ROLE_SLAVE && pPeer->sstatus != TAOS_SYNC_STATUS_CACHE) continue;
if (pNode->quorum > 1 && code == 0) { if (pNode->quorum > 1 && code == 0) {
syncSaveFwdInfo(pNode, pWalHead->version, mhandle); syncSaveFwdInfo(pNode, pWalHead->version, mhandle);
code = 1; code = 1;
@ -335,12 +354,12 @@ void syncConfirmForward(void *param, uint64_t version, int32_t code) {
if (pNode == NULL) return; if (pNode == NULL) return;
if (pNode->quorum <= 1) return; if (pNode->quorum <= 1) return;
SSyncPeer *pPeer = pNode->pMaster; SSyncPeer *pPeer = pNode->pMaster;
if (pPeer == NULL) return; if (pPeer == NULL) return;
char msg[sizeof(SSyncHead) + sizeof(SFwdRsp)] = {0}; char msg[sizeof(SSyncHead) + sizeof(SFwdRsp)] = {0};
SSyncHead *pHead = (SSyncHead *) msg; SSyncHead *pHead = (SSyncHead *)msg;
pHead->type = TAOS_SMSG_FORWARD_RSP; pHead->type = TAOS_SMSG_FORWARD_RSP;
pHead->len = sizeof(SFwdRsp); pHead->len = sizeof(SFwdRsp);
@ -363,7 +382,7 @@ void syncRecover(void *param) {
SSyncNode *pNode = param; SSyncNode *pNode = param;
SSyncPeer *pPeer; SSyncPeer *pPeer;
// to do: add a few lines to check if recover is OK // to do: add a few lines to check if recover is OK
// if take this node to unsync state, the whole system may not work // if take this node to unsync state, the whole system may not work
nodeRole = TAOS_SYNC_ROLE_UNSYNCED; nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
@ -373,7 +392,7 @@ void syncRecover(void *param) {
pthread_mutex_lock(&(pNode->mutex)); pthread_mutex_lock(&(pNode->mutex));
for (int i = 0; i < pNode->replica; ++i) { for (int i = 0; i < pNode->replica; ++i) {
pPeer = (SSyncPeer *) pNode->peerInfo[i]; pPeer = (SSyncPeer *)pNode->peerInfo[i];
if (pPeer->peerFd >= 0) { if (pPeer->peerFd >= 0) {
syncRestartConnection(pPeer); syncRestartConnection(pPeer);
} }
@ -386,7 +405,7 @@ int syncGetNodesRole(void *param, SNodesRole *pNodesRole) {
SSyncNode *pNode = param; SSyncNode *pNode = param;
pNodesRole->selfIndex = pNode->selfIndex; pNodesRole->selfIndex = pNode->selfIndex;
for (int i=0; i<pNode->replica; ++i) { for (int i = 0; i < pNode->replica; ++i) {
pNodesRole->nodeId[i] = pNode->peerInfo[i]->nodeId; pNodesRole->nodeId[i] = pNode->peerInfo[i]->nodeId;
pNodesRole->role[i] = pNode->peerInfo[i]->role; pNodesRole->role[i] = pNode->peerInfo[i]->role;
} }
@ -410,7 +429,7 @@ static void syncAddArbitrator(SSyncNode *pNode) {
if (-1 == ret) { if (-1 == ret) {
nodeInfo.nodePort = tsArbitratorPort; nodeInfo.nodePort = tsArbitratorPort;
} }
if (pPeer) { if (pPeer) {
if ((strcmp(nodeInfo.nodeFqdn, pPeer->fqdn) == 0) && (nodeInfo.nodePort == pPeer->port)) { if ((strcmp(nodeInfo.nodeFqdn, pPeer->fqdn) == 0) && (nodeInfo.nodePort == pPeer->port)) {
return; return;
@ -418,39 +437,26 @@ static void syncAddArbitrator(SSyncNode *pNode) {
syncRemovePeer(pPeer); syncRemovePeer(pPeer);
pNode->peerInfo[TAOS_SYNC_MAX_REPLICA] = NULL; pNode->peerInfo[TAOS_SYNC_MAX_REPLICA] = NULL;
} }
} }
pNode->peerInfo[TAOS_SYNC_MAX_REPLICA] = syncAddPeer(pNode, &nodeInfo); pNode->peerInfo[TAOS_SYNC_MAX_REPLICA] = syncAddPeer(pNode, &nodeInfo);
} }
static void syncAddNodeRef(SSyncNode *pNode) static void syncAddNodeRef(SSyncNode *pNode) {
{ atomic_add_fetch_8(&pNode->refCount, 1);
atomic_add_fetch_8(&pNode->refCount, 1);
} }
static void syncDecNodeRef(SSyncNode *pNode) static void syncDecNodeRef(SSyncNode *pNode) {
{
if (atomic_sub_fetch_8(&pNode->refCount, 1) == 0) { if (atomic_sub_fetch_8(&pNode->refCount, 1) == 0) {
pthread_mutex_destroy(&pNode->mutex); pthread_mutex_destroy(&pNode->mutex);
taosTFree(pNode->pRecv); taosTFree(pNode->pRecv);
taosTFree(pNode->pSyncFwds); taosTFree(pNode->pSyncFwds);
taosTFree(pNode); taosTFree(pNode);
if (atomic_sub_fetch_32(&tsNodeNum, 1) == 0) {
if (tsTcpPool) taosCloseTcpThreadPool(tsTcpPool);
if (syncTmrCtrl) taosTmrCleanUp(syncTmrCtrl);
if (vgIdHash) taosHashCleanup(vgIdHash);
syncTmrCtrl = NULL;
tsTcpPool = NULL;
vgIdHash = NULL;
syncModuleInit = PTHREAD_ONCE_INIT;
sDebug("sync module is cleaned up");
}
} }
} }
void syncAddPeerRef(SSyncPeer *pPeer) { void syncAddPeerRef(SSyncPeer *pPeer) {
atomic_add_fetch_8(&pPeer->refCount, 1); atomic_add_fetch_8(&pPeer->refCount, 1);
} }
int syncDecPeerRef(SSyncPeer *pPeer) { int syncDecPeerRef(SSyncPeer *pPeer) {
@ -486,8 +492,8 @@ static void syncRemovePeer(SSyncPeer *pPeer) {
static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) { static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
uint32_t ip = taosGetIpFromFqdn(pInfo->nodeFqdn); uint32_t ip = taosGetIpFromFqdn(pInfo->nodeFqdn);
if (ip == -1) return NULL; if (ip == -1) return NULL;
SSyncPeer *pPeer = (SSyncPeer *) calloc(1, sizeof(SSyncPeer)); SSyncPeer *pPeer = (SSyncPeer *)calloc(1, sizeof(SSyncPeer));
if (pPeer == NULL) return NULL; if (pPeer == NULL) return NULL;
pPeer->nodeId = pInfo->nodeId; pPeer->nodeId = pInfo->nodeId;
@ -506,9 +512,11 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
int ret = strcmp(pPeer->fqdn, tsNodeFqdn); int ret = strcmp(pPeer->fqdn, tsNodeFqdn);
if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) { if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) {
sDebug("%s, start to check peer connection", pPeer->id); sDebug("%s, start to check peer connection", pPeer->id);
taosTmrReset(syncCheckPeerConnection, 100 + (pNode->vgId*10)%100, pPeer, syncTmrCtrl, &pPeer->timer); int32_t checkMs = 100 + (pNode->vgId * 10) % 100;
if (pNode->vgId) checkMs = tsStatusInterval * 2000 + 100;
taosTmrReset(syncCheckPeerConnection, checkMs, pPeer, syncTmrCtrl, &pPeer->timer);
} }
syncAddNodeRef(pNode); syncAddNodeRef(pNode);
return pPeer; return pPeer;
} }
@ -542,16 +550,18 @@ static void syncChooseMaster(SSyncNode *pNode) {
sDebug("vgId:%d, choose master", pNode->vgId); sDebug("vgId:%d, choose master", pNode->vgId);
for (int i = 0; i < pNode->replica; ++i) { for (int i = 0; i < pNode->replica; ++i) {
if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE) if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE) {
onlineNum++; onlineNum++;
}
} }
if (onlineNum == pNode->replica) { if (onlineNum == pNode->replica) {
// if all peers are online, peer with highest version shall be master // if all peers are online, peer with highest version shall be master
index = 0; index = 0;
for (int i = 1; i < pNode->replica; ++i) { for (int i = 1; i < pNode->replica; ++i) {
if (pNode->peerInfo[i]->version > pNode->peerInfo[index]->version) if (pNode->peerInfo[i]->version > pNode->peerInfo[index]->version) {
index = i; index = i;
}
} }
} }
@ -568,8 +578,9 @@ static void syncChooseMaster(SSyncNode *pNode) {
//slave with highest version shall be master //slave with highest version shall be master
pPeer = pNode->peerInfo[i]; pPeer = pNode->peerInfo[i];
if (pPeer->role == TAOS_SYNC_ROLE_SLAVE || pPeer->role == TAOS_SYNC_ROLE_MASTER) { if (pPeer->role == TAOS_SYNC_ROLE_SLAVE || pPeer->role == TAOS_SYNC_ROLE_MASTER) {
if (index < 0 || pPeer->version > pNode->peerInfo[index]->version) if (index < 0 || pPeer->version > pNode->peerInfo[index]->version) {
index = i; index = i;
}
} }
} }
} }
@ -595,8 +606,9 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
int replica = pNode->replica; int replica = pNode->replica;
for (int i = 0; i < pNode->replica; ++i) { for (int i = 0; i < pNode->replica; ++i) {
if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE) if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE) {
onlineNum++; onlineNum++;
}
} }
// add arbitrator connection // add arbitrator connection
@ -644,7 +656,7 @@ static int syncValidateMaster(SSyncPeer *pPeer) {
code = -1; code = -1;
for (int i = 0; i < pNode->replica; ++i) { for (int i = 0; i < pNode->replica; ++i) {
if ( i == pNode->selfIndex ) continue; if (i == pNode->selfIndex) continue;
syncRestartPeer(pNode->peerInfo[i]); syncRestartPeer(pNode->peerInfo[i]);
} }
} }
@ -661,12 +673,11 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus peersStatus[], int8_t ne
pNode->peerInfo[pNode->selfIndex]->version = nodeVersion; pNode->peerInfo[pNode->selfIndex]->version = nodeVersion;
pPeer->role = newRole; pPeer->role = newRole;
sDebug("%s, own role:%s, new peer role:%s", pPeer->id, sDebug("%s, own role:%s, new peer role:%s", pPeer->id, syncRole[nodeRole], syncRole[pPeer->role]);
syncRole[nodeRole], syncRole[pPeer->role]);
SSyncPeer *pMaster = syncCheckMaster(pNode); SSyncPeer *pMaster = syncCheckMaster(pNode);
if ( pMaster ) { if (pMaster) {
// master is there // master is there
pNode->pMaster = pMaster; pNode->pMaster = pMaster;
sDebug("%s, it is the master, ver:%" PRIu64, pMaster->id, pMaster->version); sDebug("%s, it is the master, ver:%" PRIu64, pMaster->id, pMaster->version);
@ -691,27 +702,30 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus peersStatus[], int8_t ne
for (i = 0; i < pNode->replica; ++i) { for (i = 0; i < pNode->replica; ++i) {
SSyncPeer *pTemp = pNode->peerInfo[i]; SSyncPeer *pTemp = pNode->peerInfo[i];
if (pTemp->role != peersStatus[i].role) break; if (pTemp->role != peersStatus[i].role) break;
if ((pTemp->role != TAOS_SYNC_ROLE_OFFLINE) && (pTemp->version != peersStatus[i].version)) break; if ((pTemp->role != TAOS_SYNC_ROLE_OFFLINE) && (pTemp->version != peersStatus[i].version)) break;
} }
if (i >= pNode->replica) consistent = 1; if (i >= pNode->replica) consistent = 1;
} else { } else {
if (pNode->replica == 2) consistent = 1; if (pNode->replica == 2) consistent = 1;
} }
if (consistent) if (consistent) {
syncChooseMaster(pNode); syncChooseMaster(pNode);
}
} }
if (syncRequired) { if (syncRequired) {
syncRecoverFromMaster(pMaster); syncRecoverFromMaster(pMaster);
} }
if (peerOldRole != newRole || nodeRole != selfOldRole) if (peerOldRole != newRole || nodeRole != selfOldRole) {
syncBroadcastStatus(pNode); syncBroadcastStatus(pNode);
}
if (nodeRole != TAOS_SYNC_ROLE_MASTER) if (nodeRole != TAOS_SYNC_ROLE_MASTER) {
syncResetFlowCtrl(pNode); syncResetFlowCtrl(pNode);
}
} }
static void syncRestartPeer(SSyncPeer *pPeer) { static void syncRestartPeer(SSyncPeer *pPeer) {
@ -722,8 +736,9 @@ static void syncRestartPeer(SSyncPeer *pPeer) {
pPeer->sstatus = TAOS_SYNC_STATUS_INIT; pPeer->sstatus = TAOS_SYNC_STATUS_INIT;
int ret = strcmp(pPeer->fqdn, tsNodeFqdn); int ret = strcmp(pPeer->fqdn, tsNodeFqdn);
if (ret > 0 || (ret == 0 && pPeer->port > tsSyncPort)) if (ret > 0 || (ret == 0 && pPeer->port > tsSyncPort)) {
taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer); taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer);
}
} }
void syncRestartConnection(SSyncPeer *pPeer) { void syncRestartConnection(SSyncPeer *pPeer) {
@ -747,13 +762,13 @@ static void syncProcessSyncRequest(char *msg, SSyncPeer *pPeer) {
if (pPeer->sstatus != TAOS_SYNC_STATUS_INIT) { if (pPeer->sstatus != TAOS_SYNC_STATUS_INIT) {
sDebug("%s, sync is already started", pPeer->id); sDebug("%s, sync is already started", pPeer->id);
return; // already started return; // already started
} }
// start a new thread to retrieve the data // start a new thread to retrieve the data
syncAddPeerRef(pPeer); syncAddPeerRef(pPeer);
pthread_attr_t thattr; pthread_attr_t thattr;
pthread_t thread; pthread_t thread;
pthread_attr_init(&thattr); pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED);
int ret = pthread_create(&thread, &thattr, syncRetrieveData, pPeer); int ret = pthread_create(&thread, &thattr, syncRetrieveData, pPeer);
@ -780,8 +795,8 @@ static void syncNotStarted(void *param, void *tmrId) {
} }
static void syncTryRecoverFromMaster(void *param, void *tmrId) { static void syncTryRecoverFromMaster(void *param, void *tmrId) {
SSyncPeer *pPeer = param; SSyncPeer *pPeer = param;
SSyncNode *pNode = pPeer->pSyncNode; SSyncNode *pNode = pPeer->pSyncNode;
pthread_mutex_lock(&(pNode->mutex)); pthread_mutex_lock(&(pNode->mutex));
syncRecoverFromMaster(pPeer); syncRecoverFromMaster(pPeer);
@ -797,13 +812,15 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) {
} }
taosTmrStopA(&pPeer->timer); taosTmrStopA(&pPeer->timer);
if (tsSyncNum >= tsMaxSyncNum) {
// Ensure the sync of mnode not interrupted
if (pNode->vgId != 1 && tsSyncNum >= tsMaxSyncNum) {
sInfo("%s, %d syncs are in process, try later", pPeer->id, tsSyncNum); sInfo("%s, %d syncs are in process, try later", pPeer->id, tsSyncNum);
taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId*10)%200, pPeer, syncTmrCtrl, &pPeer->timer); taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId * 10) % 200, pPeer, syncTmrCtrl, &pPeer->timer);
return; return;
} }
sDebug("%s, try to sync", pPeer->id) sDebug("%s, try to sync", pPeer->id);
SFirstPkt firstPkt; SFirstPkt firstPkt;
memset(&firstPkt, 0, sizeof(firstPkt)); memset(&firstPkt, 0, sizeof(firstPkt));
@ -812,49 +829,47 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) {
firstPkt.syncHead.len = sizeof(firstPkt) - sizeof(SSyncHead); firstPkt.syncHead.len = sizeof(firstPkt) - sizeof(SSyncHead);
tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn)); tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn));
firstPkt.port = tsSyncPort; firstPkt.port = tsSyncPort;
taosTmrReset(syncNotStarted, tsSyncTimer*1000, pPeer, syncTmrCtrl, &pPeer->timer); taosTmrReset(syncNotStarted, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer);
if (write(pPeer->peerFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt) ) { if (write(pPeer->peerFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) {
sError("%s, failed to send sync-req to peer", pPeer->id); sError("%s, failed to send sync-req to peer", pPeer->id);
} else { } else {
nodeSStatus = TAOS_SYNC_STATUS_START; nodeSStatus = TAOS_SYNC_STATUS_START;
sInfo("%s, sync-req is sent", pPeer->id); sInfo("%s, sync-req is sent", pPeer->id);
} }
return;
} }
static void syncProcessFwdResponse(char *cont, SSyncPeer *pPeer) { static void syncProcessFwdResponse(char *cont, SSyncPeer *pPeer) {
SSyncNode * pNode = pPeer->pSyncNode; SSyncNode *pNode = pPeer->pSyncNode;
SFwdRsp *pFwdRsp = (SFwdRsp *) cont; SFwdRsp * pFwdRsp = (SFwdRsp *)cont;
SSyncFwds *pSyncFwds = pNode->pSyncFwds; SSyncFwds *pSyncFwds = pNode->pSyncFwds;
SFwdInfo *pFwdInfo; SFwdInfo * pFwdInfo;
sDebug("%s, forward-rsp is received, ver:%" PRIu64, pPeer->id, pFwdRsp->version); sDebug("%s, forward-rsp is received, ver:%" PRIu64, pPeer->id, pFwdRsp->version);
SFwdInfo *pFirst = pSyncFwds->fwdInfo + pSyncFwds->first; SFwdInfo *pFirst = pSyncFwds->fwdInfo + pSyncFwds->first;
if (pFirst->version <= pFwdRsp->version && pSyncFwds->fwds > 0) { if (pFirst->version <= pFwdRsp->version && pSyncFwds->fwds > 0) {
// find the forwardInfo from first // find the forwardInfo from first
for (int i=0; i<pSyncFwds->fwds; ++i) { for (int i = 0; i < pSyncFwds->fwds; ++i) {
pFwdInfo = pSyncFwds->fwdInfo + (i+pSyncFwds->first)%tsMaxFwdInfo; pFwdInfo = pSyncFwds->fwdInfo + (i + pSyncFwds->first) % tsMaxFwdInfo;
if (pFwdRsp->version == pFwdInfo->version) break; if (pFwdRsp->version == pFwdInfo->version) break;
} }
syncProcessFwdAck(pNode, pFwdInfo, pFwdRsp->code); syncProcessFwdAck(pNode, pFwdInfo, pFwdRsp->code);
syncRemoveConfirmedFwdInfo(pNode); syncRemoveConfirmedFwdInfo(pNode);
} }
} }
static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) { static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) {
SSyncNode * pNode = pPeer->pSyncNode; SSyncNode *pNode = pPeer->pSyncNode;
SWalHead *pHead = (SWalHead *)cont; SWalHead * pHead = (SWalHead *)cont;
sDebug("%s, forward is received, ver:%" PRIu64, pPeer->id, pHead->version); sDebug("%s, forward is received, ver:%" PRIu64, pPeer->id, pHead->version);
if (nodeRole == TAOS_SYNC_ROLE_SLAVE) { if (nodeRole == TAOS_SYNC_ROLE_SLAVE) {
//nodeVersion = pHead->version; // nodeVersion = pHead->version;
(*pNode->writeToCache)(pNode->ahandle, pHead, TAOS_QTYPE_FWD); (*pNode->writeToCache)(pNode->ahandle, pHead, TAOS_QTYPE_FWD);
} else { } else {
if (nodeSStatus != TAOS_SYNC_STATUS_INIT) { if (nodeSStatus != TAOS_SYNC_STATUS_INIT) {
syncSaveIntoBuffer(pPeer, pHead); syncSaveIntoBuffer(pPeer, pHead);
} else { } else {
@ -875,12 +890,13 @@ static void syncProcessPeersStatusMsg(char *cont, SSyncPeer *pPeer) {
pPeer->version = pPeersStatus->version; pPeer->version = pPeersStatus->version;
syncCheckRole(pPeer, pPeersStatus->peersStatus, pPeersStatus->role); syncCheckRole(pPeer, pPeersStatus->peersStatus, pPeersStatus->role);
if (pPeersStatus->ack) if (pPeersStatus->ack) {
syncSendPeersStatusMsgToPeer(pPeer, 0); syncSendPeersStatusMsgToPeer(pPeer, 0);
}
} }
static int syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead, char *cont) { static int syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead, char *cont) {
if (pPeer->peerFd <0) return -1; if (pPeer->peerFd < 0) return -1;
int hlen = taosReadMsg(pPeer->peerFd, pHead, sizeof(SSyncHead)); int hlen = taosReadMsg(pPeer->peerFd, pHead, sizeof(SSyncHead));
if (hlen != sizeof(SSyncHead)) { if (hlen != sizeof(SSyncHead)) {
@ -904,9 +920,9 @@ static int syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead, char *cont) {
} }
static int syncProcessPeerMsg(void *param, void *buffer) { static int syncProcessPeerMsg(void *param, void *buffer) {
SSyncPeer * pPeer = param; SSyncPeer *pPeer = param;
SSyncHead head; SSyncHead head;
char *cont = (char *)buffer; char * cont = (char *)buffer;
SSyncNode *pNode = pPeer->pSyncNode; SSyncNode *pNode = pPeer->pSyncNode;
pthread_mutex_lock(&(pNode->mutex)); pthread_mutex_lock(&(pNode->mutex));
@ -930,16 +946,16 @@ static int syncProcessPeerMsg(void *param, void *buffer) {
return code; return code;
} }
#define statusMsgLen sizeof(SSyncHead)+sizeof(SPeersStatus)+sizeof(SPeerStatus)*TAOS_SYNC_MAX_REPLICA #define statusMsgLen sizeof(SSyncHead) + sizeof(SPeersStatus) + sizeof(SPeerStatus) * TAOS_SYNC_MAX_REPLICA
static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack) { static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack) {
SSyncNode *pNode = pPeer->pSyncNode; SSyncNode *pNode = pPeer->pSyncNode;
char msg[statusMsgLen] = {0}; char msg[statusMsgLen] = {0};
if (pPeer->peerFd <0 || pPeer->ip ==0) return; if (pPeer->peerFd < 0 || pPeer->ip == 0) return;
SSyncHead *pHead = (SSyncHead *) msg; SSyncHead * pHead = (SSyncHead *)msg;
SPeersStatus *pPeersStatus = (SPeersStatus *) (msg + sizeof(SSyncHead)); SPeersStatus *pPeersStatus = (SPeersStatus *)(msg + sizeof(SSyncHead));
pHead->type = TAOS_SMSG_STATUS; pHead->type = TAOS_SMSG_STATUS;
pHead->len = statusMsgLen - sizeof(SSyncHead); pHead->len = statusMsgLen - sizeof(SSyncHead);
@ -977,28 +993,28 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) {
int connFd = taosOpenTcpClientSocket(pPeer->ip, pPeer->port, 0); int connFd = taosOpenTcpClientSocket(pPeer->ip, pPeer->port, 0);
if (connFd < 0) { if (connFd < 0) {
sDebug("%s, failed to open tcp socket(%s)", pPeer->id, strerror(errno)); sDebug("%s, failed to open tcp socket(%s)", pPeer->id, strerror(errno));
taosTmrReset(syncCheckPeerConnection, tsSyncTimer *1000, pPeer, syncTmrCtrl, &pPeer->timer); taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer);
return; return;
} }
SFirstPkt firstPkt; SFirstPkt firstPkt;
memset(&firstPkt, 0, sizeof(firstPkt)); memset(&firstPkt, 0, sizeof(firstPkt));
firstPkt.syncHead.vgId = pPeer->nodeId ? pNode->vgId:0; firstPkt.syncHead.vgId = pPeer->nodeId ? pNode->vgId : 0;
firstPkt.syncHead.type = TAOS_SMSG_STATUS; firstPkt.syncHead.type = TAOS_SMSG_STATUS;
tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn)); tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn));
firstPkt.port = tsSyncPort; firstPkt.port = tsSyncPort;
firstPkt.sourceId = pNode->vgId; // tell arbitrator its vgId firstPkt.sourceId = pNode->vgId; // tell arbitrator its vgId
if (write(connFd, &firstPkt, sizeof(firstPkt)) == sizeof(firstPkt)) { if (write(connFd, &firstPkt, sizeof(firstPkt)) == sizeof(firstPkt)) {
sDebug("%s, connection to peer server is setup", pPeer->id); sDebug("%s, connection to peer server is setup", pPeer->id);
pPeer->peerFd = connFd; pPeer->peerFd = connFd;
pPeer->role = TAOS_SYNC_ROLE_UNSYNCED; pPeer->role = TAOS_SYNC_ROLE_UNSYNCED;
pPeer->pConn = taosAllocateTcpConn(tsTcpPool, pPeer, connFd); pPeer->pConn = taosAllocateTcpConn(tsTcpPool, pPeer, connFd);
syncAddPeerRef(pPeer); syncAddPeerRef(pPeer);
} else { } else {
sDebug("try later"); sDebug("try later");
close(connFd); close(connFd);
taosTmrReset(syncCheckPeerConnection, tsSyncTimer *1000, pPeer, syncTmrCtrl, &pPeer->timer); taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer);
} }
} }
@ -1009,7 +1025,7 @@ static void syncCheckPeerConnection(void *param, void *tmrId) {
pthread_mutex_lock(&(pNode->mutex)); pthread_mutex_lock(&(pNode->mutex));
sDebug("%s, check peer connection", pPeer->id); sDebug("%s, check peer connection", pPeer->id);
syncSetupPeerConnection(pPeer); syncSetupPeerConnection(pPeer);
pthread_mutex_unlock(&(pNode->mutex)); pthread_mutex_unlock(&(pNode->mutex));
} }
@ -1018,7 +1034,7 @@ static void syncCreateRestoreDataThread(SSyncPeer *pPeer) {
taosTmrStopA(&pPeer->timer); taosTmrStopA(&pPeer->timer);
pthread_attr_t thattr; pthread_attr_t thattr;
pthread_t thread; pthread_t thread;
pthread_attr_init(&thattr); pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED);
@ -1030,15 +1046,15 @@ static void syncCreateRestoreDataThread(SSyncPeer *pPeer) {
sError("%s, failed to create sync thread", pPeer->id); sError("%s, failed to create sync thread", pPeer->id);
taosClose(pPeer->syncFd); taosClose(pPeer->syncFd);
syncDecPeerRef(pPeer); syncDecPeerRef(pPeer);
} else { } else {
sInfo("%s, sync connection is up", pPeer->id); sInfo("%s, sync connection is up", pPeer->id);
} }
} }
static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp) { static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp) {
char ipstr[24]; char ipstr[24];
int i; int i;
tinet_ntoa(ipstr, sourceIp); tinet_ntoa(ipstr, sourceIp);
sDebug("peer TCP connection from ip:%s", ipstr); sDebug("peer TCP connection from ip:%s", ipstr);
@ -1063,8 +1079,7 @@ static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp) {
SSyncPeer *pPeer; SSyncPeer *pPeer;
for (i = 0; i < pNode->replica; ++i) { for (i = 0; i < pNode->replica; ++i) {
pPeer = pNode->peerInfo[i]; pPeer = pNode->peerInfo[i];
if (pPeer && (strcmp(pPeer->fqdn, firstPkt.fqdn) == 0) && (pPeer->port == firstPkt.port)) if (pPeer && (strcmp(pPeer->fqdn, firstPkt.fqdn) == 0) && (pPeer->port == firstPkt.port)) break;
break;
} }
pPeer = (i < pNode->replica) ? pNode->peerInfo[i] : NULL; pPeer = (i < pNode->replica) ? pNode->peerInfo[i] : NULL;
@ -1089,8 +1104,6 @@ static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp) {
} }
pthread_mutex_unlock(&(pNode->mutex)); pthread_mutex_unlock(&(pNode->mutex));
return;
} }
static void syncProcessBrokenLink(void *param) { static void syncProcessBrokenLink(void *param) {
@ -1119,10 +1132,12 @@ static void syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle) {
if (pSyncFwds->fwds >= tsMaxFwdInfo) { if (pSyncFwds->fwds >= tsMaxFwdInfo) {
pSyncFwds->first = (pSyncFwds->first + 1) % tsMaxFwdInfo; pSyncFwds->first = (pSyncFwds->first + 1) % tsMaxFwdInfo;
pSyncFwds->fwds--; pSyncFwds->fwds--;
} }
if (pSyncFwds->fwds > 0) {
pSyncFwds->last = (pSyncFwds->last + 1) % tsMaxFwdInfo;
}
if (pSyncFwds->fwds > 0)
pSyncFwds->last = (pSyncFwds->last+1) % tsMaxFwdInfo;
SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->last; SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->last;
pFwdInfo->version = version; pFwdInfo->version = version;
pFwdInfo->mhandle = mhandle; pFwdInfo->mhandle = mhandle;
@ -1138,14 +1153,14 @@ static void syncRemoveConfirmedFwdInfo(SSyncNode *pNode) {
SSyncFwds *pSyncFwds = pNode->pSyncFwds; SSyncFwds *pSyncFwds = pNode->pSyncFwds;
int fwds = pSyncFwds->fwds; int fwds = pSyncFwds->fwds;
for (int i=0; i<fwds; ++i) { for (int i = 0; i < fwds; ++i) {
SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->first; SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->first;
if (pFwdInfo->confirmed == 0) break; if (pFwdInfo->confirmed == 0) break;
pSyncFwds->first = (pSyncFwds->first+1) % tsMaxFwdInfo; pSyncFwds->first = (pSyncFwds->first + 1) % tsMaxFwdInfo;
pSyncFwds->fwds--; pSyncFwds->fwds--;
if (pSyncFwds->fwds == 0) pSyncFwds->first = pSyncFwds->last; if (pSyncFwds->fwds == 0) pSyncFwds->first = pSyncFwds->last;
//sDebug("vgId:%d, fwd info is removed, ver:%d, fwds:%d", // sDebug("vgId:%d, fwd info is removed, ver:%d, fwds:%d",
// pNode->vgId, pFwdInfo->version, pSyncFwds->fwds); // pNode->vgId, pFwdInfo->version, pSyncFwds->fwds);
memset(pFwdInfo, 0, sizeof(SFwdInfo)); memset(pFwdInfo, 0, sizeof(SFwdInfo));
} }
@ -1157,12 +1172,14 @@ static void syncProcessFwdAck(SSyncNode *pNode, SFwdInfo *pFwdInfo, int32_t code
if (code == 0) { if (code == 0) {
pFwdInfo->acks++; pFwdInfo->acks++;
if (pFwdInfo->acks >= pNode->quorum-1) if (pFwdInfo->acks >= pNode->quorum - 1) {
confirm = 1; confirm = 1;
}
} else { } else {
pFwdInfo->nacks++; pFwdInfo->nacks++;
if (pFwdInfo->nacks > pNode->replica-pNode->quorum) if (pFwdInfo->nacks > pNode->replica - pNode->quorum) {
confirm = 1; confirm = 1;
}
} }
if (confirm && pFwdInfo->confirmed == 0) { if (confirm && pFwdInfo->confirmed == 0) {
@ -1179,15 +1196,15 @@ static void syncMonitorFwdInfos(void *param, void *tmrId) {
if (pSyncFwds->fwds > 0) { if (pSyncFwds->fwds > 0) {
pthread_mutex_lock(&(pNode->mutex)); pthread_mutex_lock(&(pNode->mutex));
for (int i=0; i<pSyncFwds->fwds; ++i) { for (int i = 0; i < pSyncFwds->fwds; ++i) {
SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first+i) % tsMaxFwdInfo; SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % tsMaxFwdInfo;
if (time - pFwdInfo->time < 2000) break; if (time - pFwdInfo->time < 2000) break;
syncProcessFwdAck(pNode, pFwdInfo, TSDB_CODE_RPC_NETWORK_UNAVAIL); syncProcessFwdAck(pNode, pFwdInfo, TSDB_CODE_RPC_NETWORK_UNAVAIL);
} }
syncRemoveConfirmedFwdInfo(pNode); syncRemoveConfirmedFwdInfo(pNode);
pthread_mutex_unlock(&(pNode->mutex)); pthread_mutex_unlock(&(pNode->mutex));
} }
pNode->pFwdTimer = taosTmrStart(syncMonitorFwdInfos, 300, pNode, syncTmrCtrl); pNode->pFwdTimer = taosTmrStart(syncMonitorFwdInfos, 300, pNode, syncTmrCtrl);
} }
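For readers following the forward/ack path, here is a minimal sketch of the confirmation rule applied in syncProcessFwdAck above, written in Java only to match the examples later in this commit. The reading that the master's own write supplies the remaining quorum vote is an assumption; names and values are illustrative, not the actual sync module API.

```
public class QuorumSketch {
    // A forwarded write is settled once acks from peers reach quorum - 1
    // (the remaining vote presumably coming from the master's own copy),
    // or settled as failed once more than replica - quorum peers rejected it.
    static boolean settled(int replica, int quorum, int acks, int nacks) {
        if (acks >= quorum - 1) return true;        // success path
        if (nacks > replica - quorum) return true;  // quorum can no longer be reached
        return false;
    }

    public static void main(String[] args) {
        System.out.println(settled(3, 2, 1, 0)); // true: one peer ack is enough
        System.out.println(settled(3, 2, 0, 1)); // false: still waiting
        System.out.println(settled(3, 2, 0, 2)); // true: settled as failed
    }
}
```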

View File

@ -26,11 +26,13 @@
const char *tsdbFileSuffix[] = {".head", ".data", ".last", ".stat", ".h", ".d", ".l", ".s"}; const char *tsdbFileSuffix[] = {".head", ".data", ".last", ".stat", ".h", ".d", ".l", ".s"};
static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type); static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type);
static void tsdbDestroyFile(SFile *pFile); static void tsdbDestroyFile(SFile *pFile);
static int compFGroup(const void *arg1, const void *arg2); static int compFGroup(const void *arg1, const void *arg2);
static int keyFGroupCompFunc(const void *key, const void *fgroup); static int keyFGroupCompFunc(const void *key, const void *fgroup);
static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo); static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo);
static TSKEY tsdbGetCurrMinKey(int8_t precision, int32_t keep);
static int tsdbGetCurrMinFid(int8_t precision, int32_t keep, int32_t days);
// ---------------- INTERNAL FUNCTIONS ---------------- // ---------------- INTERNAL FUNCTIONS ----------------
STsdbFileH *tsdbNewFileH(STsdbCfg *pCfg) { STsdbFileH *tsdbNewFileH(STsdbCfg *pCfg) {
@ -79,9 +81,11 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
int vid = 0; int vid = 0;
regex_t regex1, regex2; regex_t regex1, regex2;
int code = 0; int code = 0;
char fname[TSDB_FILENAME_LEN] = "\0";
SFileGroup fileGroup = {0}; SFileGroup fileGroup = {0};
STsdbFileH *pFileH = pRepo->tsdbFileH; STsdbFileH *pFileH = pRepo->tsdbFileH;
STsdbCfg * pCfg = &(pRepo->config);
tDataDir = tsdbGetDataDirName(pRepo->rootDir); tDataDir = tsdbGetDataDirName(pRepo->rootDir);
if (tDataDir == NULL) { if (tDataDir == NULL) {
@ -108,6 +112,8 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
goto _err; goto _err;
} }
int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile);
struct dirent *dp = NULL; struct dirent *dp = NULL;
while ((dp = readdir(dir)) != NULL) { while ((dp = readdir(dir)) != NULL) {
if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) continue; if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) continue;
@ -120,6 +126,14 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
continue; continue;
} }
if (fid < mfid) {
for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) {
tsdbGetDataFileName(pRepo->rootDir, pCfg->tsdbId, fid, type, fname);
(void)remove(fname);
}
continue;
}
if (tsdbSearchFGroup(pFileH, fid, TD_EQ) != NULL) continue; if (tsdbSearchFGroup(pFileH, fid, TD_EQ) != NULL) continue;
memset((void *)(&fileGroup), 0, sizeof(SFileGroup)); memset((void *)(&fileGroup), 0, sizeof(SFileGroup));
fileGroup.fileId = fid; fileGroup.fileId = fid;
@ -128,12 +142,30 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
} else if (code == REG_NOMATCH) { } else if (code == REG_NOMATCH) {
code = regexec(&regex2, dp->d_name, 0, NULL, 0); code = regexec(&regex2, dp->d_name, 0, NULL, 0);
if (code == 0) { if (code == 0) {
tsdbDebug("vgId:%d invalid file %s exists, remove it", REPO_ID(pRepo), dp->d_name); size_t tsize = strlen(tDataDir) + strlen(dp->d_name) + 2;
char *fname = malloc(strlen(tDataDir) + strlen(dp->d_name) + 2); char * fname1 = malloc(tsize);
if (fname == NULL) goto _err; if (fname1 == NULL) {
sprintf(fname, "%s/%s", tDataDir, dp->d_name); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
(void)remove(fname); goto _err;
free(fname); }
sprintf(fname1, "%s/%s", tDataDir, dp->d_name);
tsize = tsize + 64;
char *fname2 = malloc(tsize);
if (fname2 == NULL) {
free(fname1);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
}
sprintf(fname2, "%s/%s_back_%" PRId64, tDataDir, dp->d_name, taosGetTimestamp(TSDB_TIME_PRECISION_MILLI));
(void)rename(fname1, fname2);
tsdbDebug("vgId:%d file %s exists, backup it as %s", REPO_ID(pRepo), fname1, fname2);
free(fname1);
free(fname2);
continue;
} else if (code == REG_NOMATCH) { } else if (code == REG_NOMATCH) {
tsdbError("vgId:%d invalid file %s exists, ignore it", REPO_ID(pRepo), dp->d_name); tsdbError("vgId:%d invalid file %s exists, ignore it", REPO_ID(pRepo), dp->d_name);
continue; continue;
@ -146,6 +178,7 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
pFileH->pFGroup[pFileH->nFGroups++] = fileGroup; pFileH->pFGroup[pFileH->nFGroups++] = fileGroup;
qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup); qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup);
tsdbDebug("vgId:%d file group %d is restored, nFGroups %d", REPO_ID(pRepo), fileGroup.fileId, pFileH->nFGroups);
} }
regfree(&regex1); regfree(&regex1);
@ -179,8 +212,18 @@ void tsdbCloseFileH(STsdbRepo *pRepo) {
SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) { SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) {
STsdbFileH *pFileH = pRepo->tsdbFileH; STsdbFileH *pFileH = pRepo->tsdbFileH;
STsdbCfg * pCfg = &(pRepo->config);
if (pFileH->nFGroups >= pFileH->maxFGroups) return NULL; if (pFileH->nFGroups >= pFileH->maxFGroups) {
int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile);
if (pFileH->pFGroup[0].fileId < mfid) {
pthread_rwlock_wrlock(&pFileH->fhlock);
tsdbRemoveFileGroup(pRepo, &(pFileH->pFGroup[0]));
pthread_rwlock_unlock(&pFileH->fhlock);
}
}
ASSERT(pFileH->nFGroups < pFileH->maxFGroups);
SFileGroup fGroup; SFileGroup fGroup;
SFileGroup *pFGroup = &fGroup; SFileGroup *pFGroup = &fGroup;
@ -342,8 +385,7 @@ void tsdbFitRetention(STsdbRepo *pRepo) {
STsdbFileH *pFileH = pRepo->tsdbFileH; STsdbFileH *pFileH = pRepo->tsdbFileH;
SFileGroup *pGroup = pFileH->pFGroup; SFileGroup *pGroup = pFileH->pFGroup;
int mfid = (int)(TSDB_KEY_FILEID(taosGetTimestamp(pCfg->precision), pCfg->daysPerFile, pCfg->precision) - int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile);
TSDB_MAX_FILE(pCfg->keep, pCfg->daysPerFile));
pthread_rwlock_wrlock(&(pFileH->fhlock)); pthread_rwlock_wrlock(&(pFileH->fhlock));
@ -547,3 +589,11 @@ static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo) {
} }
} }
} }
static TSKEY tsdbGetCurrMinKey(int8_t precision, int32_t keep) {
return (TSKEY)(taosGetTimestamp(precision) - keep * tsMsPerDay[precision]);
}
static int tsdbGetCurrMinFid(int8_t precision, int32_t keep, int32_t days) {
return (int)(TSDB_KEY_FILEID(tsdbGetCurrMinKey(precision, keep), days, precision));
}
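The two helpers above derive the oldest key still inside the keep window and its file id, which the open path uses to drop expired file groups (the `fid < mfid` branch). A minimal sketch of that arithmetic, in Java for consistency with the examples later in this commit, assuming TSDB_KEY_FILEID amounts to integer division of the key by the file interval (daysPerFile * msPerDay); the real macro may differ.

```
public class RetentionSketch {
    static final long MS_PER_DAY = 86_400_000L; // millisecond precision assumed

    // oldest timestamp still covered by the keep window
    static long currMinKey(long nowMs, int keepDays) {
        return nowMs - (long) keepDays * MS_PER_DAY;
    }

    // file id holding that timestamp; files with a smaller id fall outside
    // retention and are removed when the file handle is opened
    static long currMinFid(long nowMs, int keepDays, int daysPerFile) {
        return Math.floorDiv(currMinKey(nowMs, keepDays), daysPerFile * MS_PER_DAY);
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        System.out.println("min retained fid: " + currMinFid(now, 3650, 10));
    }
}
```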

View File

@ -767,7 +767,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
int32_t lsize = tsize; int32_t lsize = tsize;
int32_t keyLen = 0; int32_t keyLen = 0;
for (int ncol = 0; ncol < pDataCols->numOfCols; ncol++) { for (int ncol = 0; ncol < pDataCols->numOfCols; ncol++) {
if (tcol >= nColsNotAllNull) break; if (ncol != 0 && tcol >= nColsNotAllNull) break;
SDataCol *pDataCol = pDataCols->cols + ncol; SDataCol *pDataCol = pDataCols->cols + ncol;
SCompCol *pCompCol = pCompData->cols + tcol; SCompCol *pCompCol = pCompData->cols + tcol;

View File

@ -270,7 +270,7 @@ void taosReadGlobalLogCfg() {
} }
wordfree(&full_path); wordfree(&full_path);
taosReadLogOption("tsLogDir", tsLogDir); taosReadLogOption("logDir", tsLogDir);
sprintf(fileName, "%s/taos.cfg", configDir); sprintf(fileName, "%s/taos.cfg", configDir);
fp = fopen(fileName, "r"); fp = fopen(fileName, "r");
@ -288,9 +288,9 @@ void taosReadGlobalLogCfg() {
option = value = NULL; option = value = NULL;
olen = vlen = 0; olen = vlen = 0;
taosGetline(&line, &len, fp); taosGetline(&line, &len, fp);
line[len - 1] = 0; line[len - 1] = 0;
paGetToken(line, &option, &olen); paGetToken(line, &option, &olen);
if (olen == 0) continue; if (olen == 0) continue;
option[olen] = 0; option[olen] = 0;

View File

@ -62,6 +62,7 @@ typedef struct {
pthread_mutex_t logMutex; pthread_mutex_t logMutex;
} SLogObj; } SLogObj;
int32_t tsLogKeepDays = 0;
int32_t tsAsyncLog = 1; int32_t tsAsyncLog = 1;
float tsTotalLogDirGB = 0; float tsTotalLogDirGB = 0;
float tsAvailLogDirGB = 0; float tsAvailLogDirGB = 0;
@ -78,6 +79,7 @@ static int32_t taosPushLogBuffer(SLogBuff *tLogBuff, char *msg, int32_t msgLen
static SLogBuff *taosLogBuffNew(int32_t bufSize); static SLogBuff *taosLogBuffNew(int32_t bufSize);
static void taosCloseLogByFd(int32_t oldFd); static void taosCloseLogByFd(int32_t oldFd);
static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum); static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum);
extern void taosPrintGlobalCfg();
static int32_t taosStartLog() { static int32_t taosStartLog() {
pthread_attr_t threadAttr; pthread_attr_t threadAttr;
@ -136,11 +138,24 @@ static void taosUnLockFile(int32_t fd) {
} }
} }
static void taosKeepOldLog(char *oldName) {
if (tsLogKeepDays <= 0) return;
int64_t fileSec = taosGetTimestampSec();
char fileName[LOG_FILE_NAME_LEN + 20];
snprintf(fileName, LOG_FILE_NAME_LEN + 20, "%s.%" PRId64, tsLogObj.logName, fileSec);
taosRename(oldName, fileName);
taosRemoveOldLogFiles(tsLogDir, tsLogKeepDays);
}
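taosKeepOldLog stamps the rotated-out file with an epoch-second suffix and then prunes old copies through taosRemoveOldLogFiles. A hedged sketch of that rotation idea, in Java to match the examples later in this commit; the directory layout, prefix handling and mtime-based pruning are assumptions for illustration, not the actual taosRemoveOldLogFiles implementation.

```
import java.io.File;

public class LogRotateSketch {
    private static final long MS_PER_DAY = 86_400_000L;

    // Rename the file being rotated out to <prefix>.<epoch-seconds> and delete
    // suffixed copies older than keepDays. Illustration only; real code would
    // also have to skip the active .0/.1 files.
    public static void keepOldLog(File logDir, String prefix, File oldLog, int keepDays) {
        long nowSec = System.currentTimeMillis() / 1000;
        oldLog.renameTo(new File(logDir, prefix + "." + nowSec));

        long cutoffMs = System.currentTimeMillis() - keepDays * MS_PER_DAY;
        File[] candidates = logDir.listFiles((dir, name) -> name.startsWith(prefix + "."));
        if (candidates == null) return;
        for (File f : candidates) {
            if (f.lastModified() < cutoffMs) f.delete();
        }
    }
}
```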
static void *taosThreadToOpenNewFile(void *param) { static void *taosThreadToOpenNewFile(void *param) {
char name[LOG_FILE_NAME_LEN + 20]; char keepName[LOG_FILE_NAME_LEN + 20];
sprintf(keepName, "%s.%d", tsLogObj.logName, tsLogObj.flag);
tsLogObj.flag ^= 1; tsLogObj.flag ^= 1;
tsLogObj.lines = 0; tsLogObj.lines = 0;
char name[LOG_FILE_NAME_LEN + 20];
sprintf(name, "%s.%d", tsLogObj.logName, tsLogObj.flag); sprintf(name, "%s.%d", tsLogObj.logName, tsLogObj.flag);
umask(0); umask(0);
@ -150,6 +165,7 @@ static void *taosThreadToOpenNewFile(void *param) {
uError("open new log file fail! fd:%d reason:%s", fd, strerror(errno)); uError("open new log file fail! fd:%d reason:%s", fd, strerror(errno));
return NULL; return NULL;
} }
taosLockFile(fd); taosLockFile(fd);
(void)lseek(fd, 0, SEEK_SET); (void)lseek(fd, 0, SEEK_SET);
@ -157,9 +173,13 @@ static void *taosThreadToOpenNewFile(void *param) {
tsLogObj.logHandle->fd = fd; tsLogObj.logHandle->fd = fd;
tsLogObj.lines = 0; tsLogObj.lines = 0;
tsLogObj.openInProgress = 0; tsLogObj.openInProgress = 0;
uInfo("new log file is opened!!!");
taosCloseLogByFd(oldFd); taosCloseLogByFd(oldFd);
uInfo(" new log file:%d is opened", tsLogObj.flag);
uInfo("==================================");
taosPrintGlobalCfg();
taosKeepOldLog(keepName);
return NULL; return NULL;
} }
@ -264,20 +284,23 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) {
strcat(name, ".0"); strcat(name, ".0");
} }
if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) {
strcpy(name, fn);
strcat(name, ".1");
}
bool log0Exist = stat(name, &logstat0) >= 0;
bool log1Exist = stat(name, &logstat1) >= 0;
// if none of the log files exist, open 0, if both exists, open the old one // if none of the log files exist, open 0, if both exists, open the old one
if (stat(name, &logstat0) < 0) { if (!log0Exist && !log1Exist) {
tsLogObj.flag = 0; tsLogObj.flag = 0;
} else if (!log1Exist) {
tsLogObj.flag = 0;
} else if (!log0Exist) {
tsLogObj.flag = 1;
} else { } else {
if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { tsLogObj.flag = (logstat0.st_mtime > logstat1.st_mtime) ? 0 : 1;
strcpy(name, fn);
strcat(name, ".1");
}
if (stat(name, &logstat1) < 0) {
tsLogObj.flag = 1;
} else {
tsLogObj.flag = (logstat0.st_mtime > logstat1.st_mtime) ? 0 : 1;
}
} }
char fileName[LOG_FILE_NAME_LEN + 50] = "\0"; char fileName[LOG_FILE_NAME_LEN + 50] = "\0";

View File

@ -57,6 +57,9 @@ void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code) {}
#endif #endif
int32_t vnodeInitResources() { int32_t vnodeInitResources() {
int code = syncInit();
if (code != 0) return code;
vnodeInitWriteFp(); vnodeInitWriteFp();
vnodeInitReadFp(); vnodeInitReadFp();
@ -70,11 +73,12 @@ int32_t vnodeInitResources() {
} }
void vnodeCleanupResources() { void vnodeCleanupResources() {
if (tsDnodeVnodesHash != NULL) { if (tsDnodeVnodesHash != NULL) {
taosHashCleanup(tsDnodeVnodesHash); taosHashCleanup(tsDnodeVnodesHash);
tsDnodeVnodesHash = NULL; tsDnodeVnodesHash = NULL;
} }
syncCleanUp();
} }
int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) { int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
@ -382,7 +386,13 @@ void vnodeRelease(void *pVnodeRaw) {
char newDir[TSDB_FILENAME_LEN] = {0}; char newDir[TSDB_FILENAME_LEN] = {0};
sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId); sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId);
sprintf(newDir, "%s/vnode%d", tsVnodeBakDir, vgId); sprintf(newDir, "%s/vnode%d", tsVnodeBakDir, vgId);
taosRename(rootDir, newDir);
if (0 == tsEnableVnodeBak) {
vInfo("vgId:%d, vnode backup not enabled", pVnode->vgId);
} else {
taosRename(rootDir, newDir);
}
taosRemoveDir(rootDir); taosRemoveDir(rootDir);
dnodeSendStatusMsgToMnode(); dnodeSendStatusMsgToMnode();
} }
@ -671,9 +681,13 @@ static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pVnodeCfg->cfg.quorum); len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pVnodeCfg->cfg.quorum);
len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n"); len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n");
vInfo("vgId:%d, save vnode cfg, replica:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.replications);
for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) { for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) {
len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", pVnodeCfg->nodes[i].nodeId); len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", pVnodeCfg->nodes[i].nodeId);
len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", pVnodeCfg->nodes[i].nodeEp); len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", pVnodeCfg->nodes[i].nodeEp);
vInfo("vgId:%d, save vnode cfg, nodeId:%d nodeEp:%s", pVnodeCfg->cfg.vgId, pVnodeCfg->nodes[i].nodeId,
pVnodeCfg->nodes[i].nodeEp);
if (i < pVnodeCfg->cfg.replications - 1) { if (i < pVnodeCfg->cfg.replications - 1) {
len += snprintf(content + len, maxLen - len, " },{\n"); len += snprintf(content + len, maxLen - len, " },{\n");

View File

@ -93,11 +93,12 @@ static int32_t vnodeDumpQueryResult(SRspRet *pRet, void* pVnode, void** handle,
vDebug("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle); vDebug("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle);
} }
} else { } else {
SRetrieveTableRsp* pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
memset(pRsp, 0, sizeof(SRetrieveTableRsp)); memset(pRsp, 0, sizeof(SRetrieveTableRsp));
pRsp->completed = true; pRsp->completed = true;
pRet->rsp = pRsp; pRet->rsp = pRsp;
pRet->len = sizeof(SRetrieveTableRsp);
*freeHandle = true; *freeHandle = true;
} }
@ -270,6 +271,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
//TODO handle malloc failure //TODO handle malloc failure
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
pRet->len = sizeof(SRetrieveTableRsp);
memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
freeHandle = true; freeHandle = true;
} else { // result is not ready, return immediately } else { // result is not ready, return immediately

View File

@ -1,85 +1,91 @@
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId> <groupId>com.taosdata.jdbc</groupId>
<artifactId>SpringJdbcTemplate</artifactId> <artifactId>SpringJdbcTemplate</artifactId>
<version>1.0-SNAPSHOT</version> <version>1.0-SNAPSHOT</version>
<name>SpringJdbcTemplate</name> <name>SpringJdbcTemplate</name>
<url>http://www.taosdata.com</url> <url>http://www.taosdata.com</url>
<properties> <properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>1.8</maven.compiler.source> <maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target> <maven.compiler.target>1.8</maven.compiler.target>
</properties> </properties>
<dependencies> <dependencies>
<dependency> <dependency>
<groupId>org.springframework</groupId> <groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId> <artifactId>spring-context</artifactId>
<version>4.3.2.RELEASE</version> <version>5.2.8.RELEASE</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.springframework</groupId> <groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId> <artifactId>spring-jdbc</artifactId>
<version>4.3.2.RELEASE</version> <version>5.1.9.RELEASE</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>junit</groupId> <groupId>org.springframework</groupId>
<artifactId>junit</artifactId> <artifactId>spring-test</artifactId>
<version>4.11</version> <version>5.1.9.RELEASE</version>
<scope>test</scope> </dependency>
</dependency>
<dependency> <dependency>
<groupId>com.taosdata.jdbc</groupId> <groupId>junit</groupId>
<artifactId>taos-jdbcdriver</artifactId> <artifactId>junit</artifactId>
<version>2.0.2</version> <version>4.13</version>
</dependency> <scope>test</scope>
</dependency>
</dependencies> <dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.4</version>
</dependency>
<build> </dependencies>
<plugins>
<plugin> <build>
<artifactId>maven-compiler-plugin</artifactId> <plugins>
<version>3.8.0</version> <plugin>
<configuration> <artifactId>maven-compiler-plugin</artifactId>
<source>1.8</source> <version>3.8.0</version>
<target>1.8</target> <configuration>
</configuration> <source>1.8</source>
</plugin> <target>1.8</target>
<plugin> </configuration>
<groupId>org.apache.maven.plugins</groupId> </plugin>
<artifactId>maven-assembly-plugin</artifactId> <plugin>
<version>3.1.0</version> <groupId>org.apache.maven.plugins</groupId>
<configuration> <artifactId>maven-assembly-plugin</artifactId>
<archive> <version>3.1.0</version>
<manifest> <configuration>
<mainClass>com.taosdata.jdbc.App</mainClass> <archive>
</manifest> <manifest>
</archive> <mainClass>com.taosdata.jdbc.example.jdbcTemplate.App</mainClass>
<descriptorRefs> </manifest>
<descriptorRef>jar-with-dependencies</descriptorRef> </archive>
</descriptorRefs> <descriptorRefs>
</configuration> <descriptorRef>jar-with-dependencies</descriptorRef>
<executions> </descriptorRefs>
<execution> </configuration>
<id>make-assembly</id> <executions>
<phase>package</phase> <execution>
<goals> <id>make-assembly</id>
<goal>single</goal> <phase>package</phase>
</goals> <goals>
</execution> <goal>single</goal>
</executions> </goals>
</plugin> </execution>
</plugins> </executions>
</build> </plugin>
</plugins>
</build>
</project> </project>

View File

@ -1,44 +0,0 @@
package com.taosdata.jdbc;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.util.CollectionUtils;
import java.util.List;
import java.util.Map;
public class App {
public static void main( String[] args ) {
ApplicationContext ctx = new ClassPathXmlApplicationContext("applicationContext.xml");
JdbcTemplate jdbcTemplate = (JdbcTemplate) ctx.getBean("jdbcTemplate");
// create database
jdbcTemplate.execute("create database if not exists db ");
// create table
jdbcTemplate.execute("create table if not exists db.tb (ts timestamp, temperature int, humidity float)");
String insertSql = "insert into db.tb values(now, 23, 10.3) (now + 1s, 20, 9.3)";
// insert rows
int affectedRows = jdbcTemplate.update(insertSql);
System.out.println("insert success " + affectedRows + " rows.");
// query for list
List<Map<String, Object>> resultList = jdbcTemplate.queryForList("select * from db.tb");
if(!CollectionUtils.isEmpty(resultList)){
for (Map<String, Object> row : resultList){
System.out.printf("%s, %d, %s\n", row.get("ts"), row.get("temperature"), row.get("humidity"));
}
}
}
}

View File

@ -0,0 +1,48 @@
package com.taosdata.jdbc.example.jdbcTemplate;
import com.taosdata.jdbc.example.jdbcTemplate.dao.ExecuteAsStatement;
import com.taosdata.jdbc.example.jdbcTemplate.dao.WeatherDao;
import com.taosdata.jdbc.example.jdbcTemplate.domain.Weather;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import java.sql.Timestamp;
import java.util.Date;
import java.util.List;
import java.util.Random;
public class App {
private static Random random = new Random(System.currentTimeMillis());
public static void main(String[] args) {
ApplicationContext ctx = new ClassPathXmlApplicationContext("applicationContext.xml");
ExecuteAsStatement executor = ctx.getBean(ExecuteAsStatement.class);
// drop database
executor.doExecute("drop database if exists test");
// create database
executor.doExecute("create database if not exists test");
//use database
executor.doExecute("use test");
// create table
executor.doExecute("create table if not exists test.weather (ts timestamp, temperature int, humidity float)");
WeatherDao weatherDao = ctx.getBean(WeatherDao.class);
Weather weather = new Weather(new Timestamp(new Date().getTime()), random.nextFloat() * 50.0f, random.nextInt(100));
// insert rows
int affectedRows = weatherDao.add(weather);
System.out.println("insert success " + affectedRows + " rows.");
// query for list
int limit = 10, offset = 0;
List<Weather> weatherList = weatherDao.queryForList(limit, offset);
for (Weather w : weatherList) {
System.out.println(w);
}
}
}

View File

@ -0,0 +1,6 @@
package com.taosdata.jdbc.example.jdbcTemplate.dao;
public interface ExecuteAsStatement{
void doExecute(String sql);
}

View File

@ -0,0 +1,17 @@
package com.taosdata.jdbc.example.jdbcTemplate.dao;
import com.taosdata.jdbc.example.jdbcTemplate.domain.Weather;
import java.util.List;
public interface WeatherDao {
int add(Weather weather);
int[] batchInsert(List<Weather> weatherList);
List<Weather> queryForList(int limit, int offset);
int count();
}

View File

@ -0,0 +1,19 @@
package com.taosdata.jdbc.example.jdbcTemplate.dao.impl;
import com.taosdata.jdbc.example.jdbcTemplate.dao.ExecuteAsStatement;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Repository;
@Repository
public class ExecuteAsStatementImpl implements ExecuteAsStatement {
@Autowired
private JdbcTemplate jdbcTemplate;
@Override
public void doExecute(String sql) {
jdbcTemplate.execute(sql);
}
}

View File

@ -0,0 +1,64 @@
package com.taosdata.jdbc.example.jdbcTemplate.dao.impl;
import com.taosdata.jdbc.example.jdbcTemplate.dao.WeatherDao;
import com.taosdata.jdbc.example.jdbcTemplate.domain.Weather;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.namedparam.SqlParameterSourceUtils;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import org.springframework.stereotype.Repository;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Repository
public class WeatherDaoImpl implements WeatherDao {
@Autowired
private JdbcTemplate jdbcTemplate;
@Override
public int add(Weather weather) {
return jdbcTemplate.update(
"insert into test.weather(ts, temperature, humidity) VALUES(?,?,?)",
weather.getTs(), weather.getTemperature(), weather.getHumidity()
);
}
@Override
public int[] batchInsert(List<Weather> weatherList) {
return jdbcTemplate.batchUpdate("insert into test.weather(ts, temperature, humidity) values( ?, ?, ?)", new BatchPreparedStatementSetter() {
@Override
public void setValues(PreparedStatement ps, int i) throws SQLException {
ps.setTimestamp(1, weatherList.get(i).getTs());
ps.setFloat(2, weatherList.get(i).getTemperature());
ps.setInt(3, weatherList.get(i).getHumidity());
}
@Override
public int getBatchSize() {
return weatherList.size();
}
});
}
@Override
public List<Weather> queryForList(int limit, int offset) {
return jdbcTemplate.query("select * from test.weather limit ? offset ?", (rs, rowNum) -> {
Timestamp ts = rs.getTimestamp("ts");
float temperature = rs.getFloat("temperature");
int humidity = rs.getInt("humidity");
return new Weather(ts, temperature, humidity);
}, limit, offset);
}
@Override
public int count() {
return jdbcTemplate.queryForObject("select count(*) from test.weather", Integer.class);
}
}

View File

@ -0,0 +1,54 @@
package com.taosdata.jdbc.example.jdbcTemplate.domain;
import java.sql.Timestamp;
public class Weather {
private Timestamp ts;
private float temperature;
private int humidity;
public Weather() {
}
public Weather(Timestamp ts, float temperature, int humidity) {
this.ts = ts;
this.temperature = temperature;
this.humidity = humidity;
}
@Override
public String toString() {
return "Weather{" +
"ts=" + ts +
", temperature=" + temperature +
", humidity=" + humidity +
'}';
}
public Timestamp getTs() {
return ts;
}
public void setTs(Timestamp ts) {
this.ts = ts;
}
public float getTemperature() {
return temperature;
}
public void setTemperature(float temperature) {
this.temperature = temperature;
}
public int getHumidity() {
return humidity;
}
public void setHumidity(int humidity) {
this.humidity = humidity;
}
}

View File

@ -5,20 +5,21 @@
xsi:schemaLocation=" xsi:schemaLocation="
http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans.xsd http://www.springframework.org/schema/beans/spring-beans.xsd
http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd"
"
default-autowire="byName"> default-autowire="byName">
<bean id="dataSource" class="org.springframework.jdbc.datasource.DriverManagerDataSource"> <bean id="dataSource" class="org.springframework.jdbc.datasource.DriverManagerDataSource">
<property name="driverClassName" value="com.taosdata.jdbc.TSDBDriver"></property> <property name="driverClassName" value="com.taosdata.jdbc.TSDBDriver"></property>
<property name="url" value="jdbc:TAOS://127.0.0.1:6030/log"></property> <property name="url" value="jdbc:TAOS://192.168.236.137:6030/"></property>
<property name="username" value="root"></property> <property name="username" value="root"></property>
<property name="password" value="taosdata"></property> <property name="password" value="taosdata"></property>
</bean> </bean>
<bean id = "jdbcTemplate" class="org.springframework.jdbc.core.JdbcTemplate" > <bean id="jdbcTemplate" class="org.springframework.jdbc.core.JdbcTemplate">
<property name="dataSource" ref = "dataSource" ></property> <property name="dataSource" ref="dataSource"></property>
</bean> </bean>
<context:component-scan base-package="com.taosdata.jdbc.example.jdbcTemplate"/>
</beans> </beans>

View File

@ -7,14 +7,12 @@ import org.junit.Test;
/** /**
* Unit test for simple App. * Unit test for simple App.
*/ */
public class AppTest public class AppTest {
{
/** /**
* Rigorous Test :-) * Rigorous Test :-)
*/ */
@Test @Test
public void shouldAnswerWithTrue() public void shouldAnswerWithTrue() {
{ assertTrue(true);
assertTrue( true );
} }
} }

View File

@ -0,0 +1,64 @@
package com.taosdata.jdbc.example.jdbcTemplate;
import com.taosdata.jdbc.example.jdbcTemplate.dao.ExecuteAsStatement;
import com.taosdata.jdbc.example.jdbcTemplate.dao.WeatherDao;
import com.taosdata.jdbc.example.jdbcTemplate.domain.Weather;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import static org.junit.Assert.assertEquals;
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration({"classpath:applicationContext.xml"})
public class BatcherInsertTest {
@Autowired
private WeatherDao weatherDao;
@Autowired
private ExecuteAsStatement executor;
private static final int numOfRecordsPerTable = 1000;
private static long ts = 1496732686000l;
private static Random random = new Random(System.currentTimeMillis());
@Before
public void before() {
// drop database
executor.doExecute("drop database if exists test");
// create database
executor.doExecute("create database if not exists test");
//use database
executor.doExecute("use test");
// create table
executor.doExecute("create table if not exists test.weather (ts timestamp, temperature int, humidity float)");
}
@Test
public void batchInsert() {
List<Weather> weatherList = new ArrayList<>();
for (int i = 0; i < numOfRecordsPerTable; i++) {
ts += 1000;
Weather weather = new Weather(new Timestamp(ts), random.nextFloat() * 50.0f, random.nextInt(100));
weatherList.add(weather);
}
long start = System.currentTimeMillis();
weatherDao.batchInsert(weatherList);
long end = System.currentTimeMillis();
System.out.println("batch insert(" + numOfRecordsPerTable + " rows) time cost ==========> " + (end - start) + " ms");
int count = weatherDao.count();
assertEquals(count, numOfRecordsPerTable);
}
}

View File

@ -63,7 +63,7 @@
<dependency> <dependency>
<groupId>com.taosdata.jdbc</groupId> <groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId> <artifactId>taos-jdbcdriver</artifactId>
<version>2.0.2</version> <version>2.0.4</version>
</dependency> </dependency>
<dependency> <dependency>
@ -76,6 +76,24 @@
</dependencies> </dependencies>
<build> <build>
<resources>
<resource>
<directory>src/main/resources</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
</includes>
<filtering>true</filtering>
</resource>
<resource>
<directory>src/main/java</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
</includes>
</resource>
</resources>
<plugins> <plugins>
<plugin> <plugin>
<groupId>org.springframework.boot</groupId> <groupId>org.springframework.boot</groupId>

View File

@ -0,0 +1,28 @@
package com.taosdata.jdbc.springbootdemo.controller;
import com.taosdata.jdbc.springbootdemo.domain.Rainfall;
import com.taosdata.jdbc.springbootdemo.service.RainStationService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
@RestController
@RequestMapping("/rainstation")
public class RainStationController {
@Autowired
private RainStationService service;
@GetMapping("/init")
public boolean init() {
service.init();
service.createTable();
return true;
}
@PostMapping("/insert")
public int insert(@RequestBody Rainfall rainfall){
return service.insert(rainfall);
}
}

View File

@ -16,43 +16,47 @@ public class WeatherController {
/** /**
* create database and table * create database and table
*
* @return * @return
*/ */
@GetMapping("/init") @GetMapping("/init")
public boolean init(){ public boolean init() {
return weatherService.init(); return weatherService.init();
} }
/** /**
* Pagination Query * Pagination Query
*
* @param limit * @param limit
* @param offset * @param offset
* @return * @return
*/ */
@GetMapping("/{limit}/{offset}") @GetMapping("/{limit}/{offset}")
public List<Weather> queryWeather(@PathVariable Long limit, @PathVariable Long offset){ public List<Weather> queryWeather(@PathVariable Long limit, @PathVariable Long offset) {
return weatherService.query(limit, offset); return weatherService.query(limit, offset);
} }
/** /**
* upload single weather info * upload single weather info
*
* @param temperature * @param temperature
* @param humidity * @param humidity
* @return * @return
*/ */
@PostMapping("/{temperature}/{humidity}") @PostMapping("/{temperature}/{humidity}")
public int saveWeather(@PathVariable int temperature, @PathVariable float humidity){ public int saveWeather(@PathVariable int temperature, @PathVariable float humidity) {
return weatherService.save(temperature, humidity); return weatherService.save(temperature, humidity);
} }
/** /**
* upload multi weather info * upload multi weather info
*
* @param weatherList * @param weatherList
* @return * @return
*/ */
@PostMapping("/batch") @PostMapping("/batch")
public int batchSaveWeather(@RequestBody List<Weather> weatherList){ public int batchSaveWeather(@RequestBody List<Weather> weatherList) {
return weatherService.save(weatherList); return weatherService.save(weatherList);
} }

View File

@ -0,0 +1,15 @@
package com.taosdata.jdbc.springbootdemo.dao;
import java.util.Map;
public interface DatabaseMapper {
int createDatabase(String dbname);
int dropDatabase(String dbname);
int creatDatabaseWithParameters(Map<String,String> map);
int useDatabase(String dbname);
}

View File

@ -0,0 +1,44 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.jdbc.springbootdemo.dao.DatabaseMapper">
<update id="createDatabase" parameterType="java.lang.String">
create database if not exists ${dbname}
</update>
<update id="dropDatabase" parameterType="java.lang.String">
DROP database if exists ${dbname}
</update>
<update id="creatDatabaseWithParameters" parameterType="map">
CREATE database if not EXISTS ${dbname}
<if test="keep != null">
KEEP ${keep}
</if>
<if test="days != null">
DAYS ${days}
</if>
<if test="replica != null">
REPLICA ${replica}
</if>
<if test="cache != null">
cache ${cache}
</if>
<if test="blocks != null">
blocks ${blocks}
</if>
<if test="minrows != null">
minrows ${minrows}
</if>
<if test="maxrows != null">
maxrows ${maxrows}
</if>
</update>
<update id="useDatabase" parameterType="java.lang.String">
use ${dbname}
</update>
</mapper>
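The creatDatabaseWithParameters mapping only renders the clauses whose keys are present in the parameter map, so the caller decides which database options get set. A minimal sketch of the map it expects (it mirrors what RainStationService.init() further down in this change passes; the wrapper method name is illustrative, and the usual java.util imports are assumed):

```
// Inside a Spring service with DatabaseMapper injected; mirrors RainStationService.init() below.
// Only the keys put into the map become clauses of the generated CREATE DATABASE statement.
public boolean createRainstationDb(DatabaseMapper databaseMapper) {
    Map<String, String> map = new HashMap<>();
    map.put("dbname", "rainstation");                 // mandatory: substituted into ${dbname}
    map.put("keep", "36500");                         // optional: rendered as "KEEP 36500"
    map.put("days", "30");                            // optional: rendered as "DAYS 30"
    map.put("blocks", "4");                           // optional: rendered as "blocks 4"
    databaseMapper.creatDatabaseWithParameters(map);  // CREATE database if not EXISTS rainstation KEEP ... DAYS ... blocks ...
    databaseMapper.useDatabase("rainstation");
    return true;
}
```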

View File

@ -0,0 +1,9 @@
package com.taosdata.jdbc.springbootdemo.dao;
import java.util.Map;
public interface RainfallMapper {
int save(Map<String, Object> map);
}

View File

@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.jdbc.springbootdemo.dao.RainfallMapper">
<insert id="save" parameterType="map">
INSERT INTO ${table} using ${dbname}.${stable} tags(#{values.station_code}, #{values.station_name}) (ts, name, code, rainfall) values (#{values.ts}, #{values.name}, #{values.code}, #{values.rainfall})
</insert>
</mapper>

View File

@ -0,0 +1,8 @@
package com.taosdata.jdbc.springbootdemo.dao;
import com.taosdata.jdbc.springbootdemo.domain.TableMetadata;
public interface TableMapper {
boolean createSTable(TableMetadata tableMetadata);
}

View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.jdbc.springbootdemo.dao.TableMapper">
<update id="createSTable" parameterType="com.taosdata.jdbc.springbootdemo.domain.TableMetadata">
create table if not exists ${dbname}.${tablename}
<foreach collection="fields" item="field" index="index" open="(" close=")" separator=",">
${field.name} ${field.type}
</foreach>
TAGS
<foreach collection="tags" item="tag" index="index" open="(" close=")" separator=",">
${tag.name} ${tag.type}
</foreach>
</update>
<update id="dropTable" parameterType="java.lang.String">
drop table ${tablename}
</update>
</mapper>

View File

@ -0,0 +1,28 @@
package com.taosdata.jdbc.springbootdemo.domain;
public class FieldMetadata {
private String name;
private String type;
public FieldMetadata(String name, String type) {
this.name = name;
this.type = type;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
}

View File

@ -0,0 +1,64 @@
package com.taosdata.jdbc.springbootdemo.domain;
import com.fasterxml.jackson.annotation.JsonFormat;
import java.sql.Timestamp;
public class Rainfall {
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8")
private Timestamp ts;
private String name;
private String code;
private float rainfall;
private String station_code;
private String station_name;
public Timestamp getTs() {
return ts;
}
public void setTs(Timestamp ts) {
this.ts = ts;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getCode() {
return code;
}
public void setCode(String code) {
this.code = code;
}
public float getRainfall() {
return rainfall;
}
public void setRainfall(float rainfall) {
this.rainfall = rainfall;
}
public String getStation_code() {
return station_code;
}
public void setStation_code(String station_code) {
this.station_code = station_code;
}
public String getStation_name() {
return station_name;
}
public void setStation_name(String station_name) {
this.station_name = station_name;
}
}

View File

@ -0,0 +1,43 @@
package com.taosdata.jdbc.springbootdemo.domain;
import java.util.List;
public class TableMetadata {
private String dbname;
private String tablename;
private List<FieldMetadata> fields;
private List<TagMetadata> tags;
public String getDbname() {
return dbname;
}
public void setDbname(String dbname) {
this.dbname = dbname;
}
public String getTablename() {
return tablename;
}
public void setTablename(String tablename) {
this.tablename = tablename;
}
public List<FieldMetadata> getFields() {
return fields;
}
public void setFields(List<FieldMetadata> fields) {
this.fields = fields;
}
public List<TagMetadata> getTags() {
return tags;
}
public void setTags(List<TagMetadata> tags) {
this.tags = tags;
}
}

View File

@ -0,0 +1,27 @@
package com.taosdata.jdbc.springbootdemo.domain;
public class TagMetadata {
private String name;
private String type;
public TagMetadata(String name, String type) {
this.name = name;
this.type = type;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
}

View File

@ -1,9 +1,12 @@
package com.taosdata.jdbc.springbootdemo.domain; package com.taosdata.jdbc.springbootdemo.domain;
import com.fasterxml.jackson.annotation.JsonFormat;
import java.sql.Timestamp; import java.sql.Timestamp;
public class Weather { public class Weather {
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8")
private Timestamp ts; private Timestamp ts;
private int temperature; private int temperature;

View File

@ -0,0 +1,72 @@
package com.taosdata.jdbc.springbootdemo.service;
import com.taosdata.jdbc.springbootdemo.dao.DatabaseMapper;
import com.taosdata.jdbc.springbootdemo.dao.RainfallMapper;
import com.taosdata.jdbc.springbootdemo.dao.TableMapper;
import com.taosdata.jdbc.springbootdemo.domain.FieldMetadata;
import com.taosdata.jdbc.springbootdemo.domain.Rainfall;
import com.taosdata.jdbc.springbootdemo.domain.TableMetadata;
import com.taosdata.jdbc.springbootdemo.domain.TagMetadata;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Service
public class RainStationService {
@Autowired
private DatabaseMapper databaseMapper;
@Autowired
private TableMapper tableMapper;
@Autowired
private RainfallMapper rainfallMapper;
public boolean init() {
databaseMapper.dropDatabase("rainstation");
Map<String, String> map = new HashMap<>();
map.put("dbname", "rainstation");
map.put("keep", "36500");
map.put("days", "30");
map.put("blocks", "4");
databaseMapper.creatDatabaseWithParameters(map);
databaseMapper.useDatabase("rainstation");
return true;
}
public boolean createTable() {
TableMetadata tableMetadata = new TableMetadata();
tableMetadata.setDbname("rainstation");
tableMetadata.setTablename("monitoring");
List<FieldMetadata> fields = new ArrayList<>();
fields.add(new FieldMetadata("ts", "timestamp"));
fields.add(new FieldMetadata("name", "NCHAR(10)"));
fields.add(new FieldMetadata("code", " BINARY(8)"));
fields.add(new FieldMetadata("rainfall", "float"));
tableMetadata.setFields(fields);
List<TagMetadata> tags = new ArrayList<>();
tags.add(new TagMetadata("station_code", "BINARY(8)"));
tags.add(new TagMetadata("station_name", "NCHAR(10)"));
tableMetadata.setTags(tags);
tableMapper.createSTable(tableMetadata);
return true;
}
public int insert(Rainfall rainfall) {
Map<String, Object> map = new HashMap<>();
map.put("dbname", "rainstation");
map.put("table", "S_53646");
map.put("stable", "monitoring");
map.put("values", rainfall);
return rainfallMapper.save(map);
}
}
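For completeness, a rough sketch of driving the /rainstation endpoints end to end with a plain-JDK HTTP client; the 8080 port is only the Spring Boot default and an assumption here, and the JSON field names simply follow the Rainfall domain class above. This client is not part of the demo:

```
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class RainStationClientSketch {
    public static void main(String[] args) throws Exception {
        // 1. create the rainstation database and the monitoring super table
        HttpURLConnection init = (HttpURLConnection) new URL(
                "http://localhost:8080/rainstation/init").openConnection();
        System.out.println("init -> " + init.getResponseCode());

        // 2. insert one measurement; the ts format matches @JsonFormat(yyyy-MM-dd HH:mm:ss.SSS)
        String body = "{\"ts\":\"2020-09-01 10:00:00.000\",\"name\":\"station1\",\"code\":\"53646\","
                + "\"rainfall\":12.5,\"station_code\":\"53646\",\"station_name\":\"station1\"}";
        HttpURLConnection insert = (HttpURLConnection) new URL(
                "http://localhost:8080/rainstation/insert").openConnection();
        insert.setRequestMethod("POST");
        insert.setRequestProperty("Content-Type", "application/json");
        insert.setDoOutput(true);
        try (OutputStream os = insert.getOutputStream()) {
            os.write(body.getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("insert -> " + insert.getResponseCode());
    }
}
```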

View File

@ -14,10 +14,8 @@ public class WeatherService {
private WeatherMapper weatherMapper; private WeatherMapper weatherMapper;
public boolean init() { public boolean init() {
weatherMapper.createDB(); weatherMapper.createDB();
weatherMapper.createTable(); weatherMapper.createTable();
return true; return true;
} }

View File

@ -1,6 +1,6 @@
# datasource config # datasource config
spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/log spring.datasource.url=jdbc:TAOS://localhost:6030/log
spring.datasource.username=root spring.datasource.username=root
spring.datasource.password=taosdata spring.datasource.password=taosdata

View File

@ -0,0 +1,49 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
tdSql.execute('create table xcxlog (ts timestamp, user_id int, path BINARY(40),scene int) tags(appid bigint, adzone_id int,ip bigint,session_id bigint)')
tdSql.error("insert into d1000004(user_id,path,scene,ts) using xcxlog tags(1000004,145,97160) values (97160,'pagex/goods/taoke',1086,now)")
tdSql.execute("insert into d1000004_145(user_id,path,scene,ts) using xcxlog(appid,adzone_id,session_id,ip) tags(1000004,145,97160,1717171445) values (97160,'pagex/goods/taoke',1086,now)")
tdSql.query("show tables")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 'd1000004_145')
tdSql.query("select * from xcxlog")
tdSql.checkRows(1)
tdSql.checkData(0, 4, 1000004)
tdSql.checkData(0, 5, 145)
tdSql.checkData(0, 6, 1717171445)
tdSql.checkData(0, 7, 97160)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -25,7 +25,7 @@ class TDTestCase:
tdLog.debug("start to execute %s" % __file__) tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql) tdSql.init(conn.cursor(), logSql)
self.numOfRecords = 10 self.types = ["tinyint", "smallint", "int", "bigint", "float", "double", "bool", "binary(10)", "nchar(10)"]
self.ts = 1537146000000 self.ts = 1537146000000
def checkNullValue(self, result): def checkNullValue(self, result):
@ -38,139 +38,41 @@ class TDTestCase:
return False return False
return True return True
def restartTaosd(self):
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("use db")
def run(self): def run(self):
tdSql.prepare() tdSql.prepare()
print("==============step1")
tdSql.execute(
"create table meters (ts timestamp, col1 int) tags(tgcol1 int)")
tdSql.execute("create table t0 using meters tags(NULL)")
for i in range (self.numOfRecords):
tdSql.execute("insert into t0 values (%d, %d)" % (self.ts + i, i));
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.execute("alter table meters add column col2 tinyint")
tdSql.execute("alter table meters drop column col1")
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.query("select col2 from meters")
tdSql.checkRows(10)
tdSql.execute("alter table meters add column col1 int")
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.query("select col1 from meters")
tdSql.checkRows(10)
tdSql.execute("alter table meters add column col3 smallint")
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.query("select col3 from meters")
tdSql.checkRows(10)
tdSql.execute("alter table meters add column col4 bigint")
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.query("select col4 from meters")
tdSql.checkRows(10)
tdSql.execute("alter table meters add column col5 float")
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.query("select col5 from meters")
tdSql.checkRows(10)
tdSql.execute("alter table meters add column col6 double")
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.query("select col6 from meters")
tdSql.checkRows(10)
tdSql.execute("alter table meters add column col7 bool")
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.query("select col7 from meters")
tdSql.checkRows(10)
tdSql.execute("alter table meters add column col8 binary(20)")
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.query("select col8 from meters")
tdSql.checkRows(10)
tdSql.execute("alter table meters add column col9 nchar(20)")
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.query("select col9 from meters")
tdSql.checkRows(10)
tdSql.execute("alter table meters add tag tgcol2 tinyint") for i in range(len(self.types)):
tdSql.query("select * from meters") print("======== checking type %s ==========" % self.types[i])
tdSql.checkRows(10) tdSql.execute("create table t0 (ts timestamp, col %s)" % self.types[i])
tdSql.query("select tgcol2 from meters") tdSql.execute("insert into t0 values (%d, NULL)" % (self.ts))
tdSql.checkRows(1)
tdDnodes.stop(1)
tdLog.sleep(10)
tdDnodes.start(1)
tdSql.execute("use db")
tdSql.query("select * from t0")
tdSql.checkRows(1)
tdSql.execute("alter table meters add tag tgcol3 smallint") if self.checkNullValue(tdSql.queryResult) is False:
tdSql.query("select * from meters") tdLog.exit("no None value is detected")
tdSql.checkRows(10)
tdSql.query("select tgcol3 from meters")
tdSql.checkRows(1)
tdSql.execute("alter table meters add tag tgcol4 bigint") tdSql.execute("create table t1 (ts timestamp, col %s)" % self.types[i])
tdSql.query("select * from meters") tdSql.execute("insert into t1 values (%d, NULL)" % (self.ts))
tdSql.checkRows(10) tdDnodes.stop(1)
tdSql.query("select tgcol4 from meters") tdLog.sleep(10)
tdSql.checkRows(1) tdDnodes.start(1)
tdSql.execute("use db")
tdSql.execute("alter table meters add tag tgcol5 float") for j in range(150):
tdSql.query("select * from meters") tdSql.execute("insert into t1 values (%d, NULL)" % (self.ts + j + 1));
tdSql.checkRows(10)
tdSql.query("select tgcol5 from meters") tdSql.query("select * from t1")
tdSql.checkRows(1) tdSql.checkRows(151)
tdSql.execute("alter table meters add tag tgcol6 double") if self.checkNullValue(tdSql.queryResult) is False:
tdSql.query("select * from meters") tdLog.exit("no None value is detected")
tdSql.checkRows(10)
tdSql.query("select tgcol6 from meters")
tdSql.checkRows(1)
tdSql.execute("alter table meters add tag tgcol7 bool") print("======== None value check for type %s is OK ==========" % self.types[i])
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.query("select tgcol7 from meters")
tdSql.checkRows(1)
tdSql.execute("alter table meters add tag tgcol8 binary(20)")
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.query("select tgcol8 from meters")
tdSql.checkRows(1)
tdSql.execute("alter table meters add tag tgcol9 nchar(20)")
tdSql.query("select * from meters")
tdSql.checkRows(10)
tdSql.query("select tgcol9 from meters")
tdSql.checkRows(1)
self.restartTaosd()
tdSql.query("select * from meters")
tdSql.checkRows(10)
if self.checkNullValue(tdSql.queryResult) is False:
tdLog.exit("non None value is detected")
def stop(self): def stop(self):
tdSql.close() tdSql.close()

View File

@ -52,6 +52,12 @@ if $data00 != $rowNum then
return -1 return -1
endi endi
sql select count(1) from $tb
print ===> select count(1) from $tb => $data00
if $data00 != $rowNum then
return -1
endi
sql select count(tbcol) from $tb sql select count(tbcol) from $tb
print ===> $data00 print ===> $data00
if $data00 != $rowNum then if $data00 != $rowNum then
@ -102,13 +108,20 @@ if $data00 != $totalNum then
return -1 return -1
endi endi
print =============== step8
sql select count(1) from $mt
print ===> $data00
if $data00 != $totalNum then
return -1
endi
sql select count(tbcol) from $mt sql select count(tbcol) from $mt
print ===> $data00 print ===> $data00
if $data00 != $totalNum then if $data00 != $totalNum then
return -1 return -1
endi endi
print =============== step8 print =============== step10
sql select count(tbcol) as c from $mt where ts < now + 4m sql select count(tbcol) as c from $mt where ts < now + 4m
print ===> $data00 print ===> $data00
if $data00 != 50 then if $data00 != 50 then
@ -171,4 +184,4 @@ if $rows != 0 then
return -1 return -1
endi endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -7,11 +7,10 @@ system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect
print ============================ dnode1 start
print ============= create database
sql create database db cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1 sql create database db cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1
sql show databases sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data00 != db then if $data00 != db then
return -1 return -1
endi endi
@ -37,27 +36,240 @@ if $data09 != 4 then
return -1 return -1
endi endi
print =============== step2 print ============== step name
system sh/exec.sh -n dnode1 -s stop -x SIGINT sql_error alter database db name d1
return sql_error alter database db name d2
sql_error alter database db cache 256
sql_error alter database db blocks 1 print ============== step ntables
sql_error alter database db days 10 sql_error alter database db ntables -1
sql_error alter database db keep 10 sql_error alter database db ntables 0
sql_error alter database db minRows 350 sql_error alter database db ntables 1
sql_error alter database db minRows 550 sql_error alter database db ntables 10
sql_error alter database db ctime 5000
sql_error alter database db precision "us" print ============== step vgroups
sql_error alter database db comp 3 sql_error alter database db vgroups -1
sql_error alter database db wal 1 sql_error alter database db vgroups 0
sql_error alter database db vgroups 1
sql_error alter database db vgroups 10
print ============== step replica
sql_error alter database db replica 2 sql_error alter database db replica 2
sql_error alter database db replica 3
sql_error alter database db replica 0
sql alter database db replica 1
sql show databases
print replica $data4_db
if $data4_db != 1 then
return -1
endi
print ============== step quorum
sql show databases
print quorum $data5_db
if $data5_db != 1 then
return -1
endi
sql alter database db quorum 1
sql show databases
print quorum $data5_db
if $data5_db != 1 then
return -1
endi
sql alter database db quorum 2
sql show databases
print quorum $data5_db
if $data5_db != 2 then
return -1
endi
sql alter database db quorum 3
sql show databases
print quorum $data5_db
if $data5_db != 3 then
return -1
endi
sql alter database db quorum 3
sql alter database db quorum 2
sql alter database db quorum 1
sql_error alter database db quorum 0
sql_error alter database db quorum 4
sql_error alter database db quorum 5
sql_error alter database db quorum -1
print ============== step days
sql_error alter database db days 0
sql_error alter database db days 1
sql_error alter database db days 2
sql_error alter database db days 10
sql_error alter database db days 50
sql_error alter database db days 100
print ============== step keep
sql show databases
print keep $data7_db
if $data7_db != 20,20,20 then
return -1
endi
sql alter database db keep 10
sql show databases
print keep $data7_db
if $data7_db != 20,20,10 then
return -1
endi
sql alter database db keep 20
sql show databases
print keep $data7_db
if $data7_db != 20,20,20 then
return -1
endi
print ============== step3
sql alter database db comp 1
sql alter database db blocks 40
sql alter database db keep 30 sql alter database db keep 30
sql show databases
print keep $data7_db
if $data7_db != 20,20,30 then
return -1
endi
sql alter database db keep 40
sql alter database db keep 30
sql alter database db keep 20
sql alter database db keep 10
sql_error alter database db keep 9
sql_error alter database db keep 1
sql alter database db keep 0
sql alter database db keep -1
sql_error alter database db keep 365001
print ============== step cache
sql_error alter database db cache 60
sql_error alter database db cache 50
sql_error alter database db cache 20
sql_error alter database db cache 3
sql_error alter database db cache 129
sql_error alter database db cache 300
sql_error alter database db cache 0
sql_error alter database db cache -1
print ============== step blocks
sql show databases
print blocks $data9_db
if $data9_db != 4 then
return -1
endi
sql alter database db blocks 10
sql show databases
print blocks $data9_db
if $data9_db != 10 then
return -1
endi
sql alter database db blocks 20
sql show databases
print blocks $data9_db
if $data9_db != 20 then
return -1
endi
sql alter database db blocks 30
sql show databases
print blocks $data9_db
if $data9_db != 30 then
return -1
endi
sql alter database db blocks 40
sql alter database db blocks 30
sql alter database db blocks 20
sql alter database db blocks 10
sql_error alter database db blocks 2
sql_error alter database db blocks 1
sql alter database db blocks 0
sql_error alter database db blocks -1
sql_error alter database db blocks 10001
print ============== step minrows
sql_error alter database db minrows 1
sql_error alter database db minrows 100
sql_error alter database db minrows 1000
print ============== step maxrows
sql_error alter database db maxrows 1
sql_error alter database db maxrows 100
sql_error alter database db maxrows 1000
print ============== step wallevel
sql show databases
print wallevel $data12_db
if $data12_db != 1 then
return -1
endi
sql alter database db wal 1
sql show databases
print wal $data12_db
if $data12_db != 1 then
return -1
endi
sql_error alter database db wal 2
sql_error alter database db wal 0
sql_error alter database db wal 3
sql_error alter database db wal 4
sql_error alter database db wal -1
sql_error alter database db wal 1000
print ============== step fsync
sql_error alter database db fsync 2
sql_error alter database db fsync 3
sql_error alter database db fsync 4
sql_error alter database db fsync -1
sql_error alter database db fsync 1000
print ============== step comp
sql show databases
print comp $data14_db
if $data14_db != 2 then
return -1
endi
sql alter database db comp 1
sql show databases
print comp $data14_db
if $data14_db != 1 then
return -1
endi
sql alter database db comp 2
sql show databases
print comp $data14_db
if $data14_db != 2 then
return -1
endi
sql alter database db comp 0
sql show databases
print comp $data14_db
if $data14_db != 0 then
return -1
endi
sql_error alter database db comp 3
sql_error alter database db comp 4
sql_error alter database db comp 5
sql_error alter database db comp -1
print ============== step precision
sql_error alter database db prec 'us'
#system sh/exec.sh -n dnode1 -s stop -x SIGINT print ============== step status
sql_error alter database db status 'delete'
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -253,6 +253,7 @@ cd ../../../debug; make
./test.sh -f unique/cluster/balance2.sim ./test.sh -f unique/cluster/balance2.sim
./test.sh -f unique/cluster/balance3.sim ./test.sh -f unique/cluster/balance3.sim
./test.sh -f unique/cluster/cache.sim ./test.sh -f unique/cluster/cache.sim
./test.sh -f unique/cluster/vgroup100.sim
./test.sh -f unique/column/replica3.sim ./test.sh -f unique/column/replica3.sim

View File

@ -14,10 +14,10 @@ print $data00 $data01 $data02 $data03 $data04 $data05 $data06
if $data00 != root then if $data00 != root then
return -1 return -1
endi endi
if $data02 != 3/10 then if $data02 != 3/128 then
return -1 return -1
endi endi
if $data03 != 0/64 then if $data03 != 0/128 then
return -1 return -1
endi endi
if $data04 != 0/2147483647 then if $data04 != 0/2147483647 then

View File

@ -53,7 +53,7 @@ system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname2 sql create dnode $hostname2
sql create dnode $hostname3 sql create dnode $hostname3
sleep 3000 sleep 5000
$sleepTimer = 3000 $sleepTimer = 3000
@ -225,6 +225,7 @@ if $data00 != $totalRows then
endi endi
print ============== step5: stop dnode2, and remove its vnode print ============== step5: stop dnode2, and remove its vnode
sleep 5000
system sh/exec.sh -n dnode2 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer sleep $sleepTimer

View File

@ -193,6 +193,7 @@ if $data00 != $totalRows then
endi endi
print ============== step5: stop dnode1 print ============== step5: stop dnode1
sleep 5000
system sh/exec.sh -n dnode1 -s stop system sh/exec.sh -n dnode1 -s stop
sleep 3000 sleep 3000

View File

@ -105,6 +105,15 @@ if $dnode4Vnodes != null then
goto show1 goto show1
endi endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
print ============================== step2 print ============================== step2
print ========= start dnode4 print ========= start dnode4
sql create dnode $hostname4 sql create dnode $hostname4
@ -132,6 +141,15 @@ if $dnode4Vnodes != 2 then
goto show2 goto show2
endi endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
print ============================== step3 print ============================== step3
print ========= drop dnode2 print ========= drop dnode2
sql drop dnode $hostname2 sql drop dnode $hostname2
@ -167,6 +185,15 @@ if $dnode4Vnodes != 3 then
goto show3 goto show3
endi endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
system sh/exec.sh -n dnode2 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT
print ============================== step4 print ============================== step4
@ -195,6 +222,15 @@ if $dnode5Vnodes != 2 then
goto show4 goto show4
endi endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
print ============================== step5 print ============================== step5
print ========= drop dnode3 print ========= drop dnode3
sql drop dnode $hostname3 sql drop dnode $hostname3
@ -232,6 +268,15 @@ endi
system sh/exec.sh -n dnode3 -s stop -x SIGINT system sh/exec.sh -n dnode3 -s stop -x SIGINT
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
print ============================== step6 print ============================== step6
sql create dnode $hostname6 sql create dnode $hostname6
system sh/exec.sh -n dnode6 -s start system sh/exec.sh -n dnode6 -s start
@ -258,6 +303,15 @@ if $dnode6Vnodes != 2 then
goto show6 goto show6
endi endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
print ============================== step7 print ============================== step7
print ========= drop dnode4 print ========= drop dnode4
sql drop dnode $hostname4 sql drop dnode $hostname4
@ -294,6 +348,14 @@ if $dnode4Vnodes != null then
endi endi
system sh/exec.sh -n dnode4 -s stop -x SIGINT system sh/exec.sh -n dnode4 -s stop -x SIGINT
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
print ============================== step8 print ============================== step8
sql create dnode $hostname7 sql create dnode $hostname7
@ -321,6 +383,15 @@ if $dnode7Vnodes != 2 then
goto show8 goto show8
endi endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
print ============================== step9 print ============================== step9
print ========= drop dnode1 print ========= drop dnode1
system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s stop -x SIGINT
@ -335,15 +406,20 @@ sql show mnodes
$dnode1Role = $data2_1 $dnode1Role = $data2_1
$dnode4Role = $data2_4 $dnode4Role = $data2_4
$dnode5Role = $data2_5 $dnode5Role = $data2_5
print dnode1 ==> $dnode1Role print dnode1 ==> $data2_1
print dnode4 ==> $dnode4Role print dnode2 ==> $data2_2
print dnode5 ==> $dnode5Role print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
if $dnode1Role != offline then if $dnode1Role != offline then
return -1 return -1
endi endi
print ============================== step9.1 print ============================== step9.1
sleep 2000
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
$x = 0 $x = 0
@ -353,6 +429,19 @@ show9:
if $x == 20 then if $x == 20 then
return -1 return -1
endi endi
sql show mnodes
$dnode1Role = $data2_1
$dnode4Role = $data2_4
$dnode5Role = $data2_5
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
sql show dnodes -x show9 sql show dnodes -x show9
$dnode5Vnodes = $data2_5 $dnode5Vnodes = $data2_5
print dnode5 $dnode5Vnodes print dnode5 $dnode5Vnodes
@ -374,6 +463,15 @@ endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 5000 sleep 5000
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
print ============================== step11 print ============================== step11
print ========= add db4 print ========= add db4

View File

@ -0,0 +1,127 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode1 -c maxTables -v 4
system sh/cfg.sh -n dnode2 -c maxTables -v 4
system sh/cfg.sh -n dnode3 -c maxTables -v 4
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0
print ============================== step1
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============================== step2
print ========= start dnode2
sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname3
system sh/exec.sh -n dnode3 -s start
sleep 5000
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print $dnode1Role
print $dnode2Role
print $dnode3Role
print ============================== step3
$count = 2
while $count < 102
$db = d . $count
$tb = $db . .t
sql create database $db replica 3 cache 1 blocks 3
sql create table $tb (ts timestamp, i int)
sql insert into $tb values(now, 1)
$count = $count + 1
print insert into $tb values(now, 1) ==> finished
endw
print ============================== step4
$count = 2
while $count < 102
$db = d . $count
$tb = $db . .t
sql select * from $tb
if $rows != 1 then
print select * from $tb
return -1
endi
$count = $count + 1
print select * from $tb ==> rows: $rows
endw
print ============================== step5
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
sleep 5000
print ============================== step6
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sleep 3000
print ============================== step7
$x = 0
show7:
$x = $x + 1
sleep 2000
if $x == 50 then
return -1
endi
sql show mnodes -x show7
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
if $dnode1Role != master then
goto show7
endi
if $dnode2Role != slave then
goto show7
endi
if $dnode3Role != slave then
goto show7
endi
print ============================== step8
$x = 0
show8:
$x = $x + 1
sleep 2000
if $x == 20 then
return -1
endi
$count = 2
while $count < 102
$db = d . $count
$tb = $db . .t
sql select * from $tb
if $rows != 1 then
print select * from $tb
goto show8
endi
$count = $count + 1
print select * from $tb ==> rows: $rows
endw
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT

View File

@ -21,9 +21,10 @@ system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect
sql create dnode $hostname2 sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname3 sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start
sleep 3000
print ======== step1 print ======== step1
sql create database db replica 3 blocks 3 sql create database db replica 3 blocks 3

View File

@ -18,13 +18,14 @@ system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
print ========== step1 print ========== step1
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect sql connect
sql create dnode $hostname2 sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode2 -s start
sleep 3000
sql create dnode $hostname3 sql create dnode $hostname3
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start
sleep 3000 sleep 5000
sql show dnodes sql show dnodes
print dnode1 $data5_1 print dnode1 $data5_1

View File

@ -31,8 +31,8 @@ sleep 3000
sql connect sql connect
sql create dnode $hostname2 sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname3 sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start

View File

@ -13,8 +13,8 @@ system sh/exec.sh -n dnode1 -s start
sql connect sql connect
sql create dnode $hostname2 sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname3 sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start
$x = 0 $x = 0

View File

@ -17,8 +17,8 @@ system sh/exec.sh -n dnode1 -s start
sql connect sql connect
sql create dnode $hostname2 sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname3 sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start
$x = 0 $x = 0
createDnode: createDnode:

View File

@ -21,10 +21,10 @@ system sh/exec.sh -n dnode1 -s start
sql connect sql connect
sql create dnode $hostname2 sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname3 sql create dnode $hostname3
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname4 sql create dnode $hostname4
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start system sh/exec.sh -n dnode4 -s start
$x = 0 $x = 0

View File

@ -18,8 +18,8 @@ system sh/exec.sh -n dnode1 -s start
sql connect sql connect
sql create dnode $hostname2 sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname3 sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start
$x = 0 $x = 0
createDnode: createDnode:

View File

@ -21,10 +21,10 @@ system sh/exec.sh -n dnode1 -s start
sql connect sql connect
sql create dnode $hostname2 sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname3 sql create dnode $hostname3
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname4 sql create dnode $hostname4
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start system sh/exec.sh -n dnode4 -s start
$x = 0 $x = 0
createDnode: createDnode:

View File

@ -25,8 +25,8 @@ system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect
sql create dnode $hostname2 sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname3 sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start
sleep 3000 sleep 3000

View File

@ -667,7 +667,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
TAOS_RES* pSql = NULL; TAOS_RES* pSql = NULL;
for (int attempt = 0; attempt < 3; ++attempt) { for (int attempt = 0; attempt < 10; ++attempt) {
simLogSql(rest, false); simLogSql(rest, false);
pSql = taos_query(script->taos, rest); pSql = taos_query(script->taos, rest);
ret = taos_errno(pSql); ret = taos_errno(pSql);