Merge branch 'develop' into feature/query
commit 5e8ff5103c
@ -121,7 +121,21 @@ func (alert *Alert) refresh(rule *Rule, values map[string]interface{}) {
|
|||
alert.Values = values
|
||||
res := rule.Expr.Eval(func(key string) interface{} {
|
||||
// ToLower is required as column name in result is in lower case
|
||||
return alert.Values[strings.ToLower(key)]
|
||||
i := alert.Values[strings.ToLower(key)]
|
||||
switch v := i.(type) {
|
||||
case int8:
|
||||
return int64(v)
|
||||
case int16:
|
||||
return int64(v)
|
||||
case int:
|
||||
return int64(v)
|
||||
case int32:
|
||||
return int64(v)
|
||||
case float32:
|
||||
return float64(v)
|
||||
default:
|
||||
return v
|
||||
}
|
||||
})
|
||||
|
||||
val, ok := res.(bool)
|
||||
|
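The hunk above widens narrow numeric types before handing values to the rule expression. A minimal, self-contained sketch of why that matters (illustrative code only, not the alert module's actual evaluator; the map contents and threshold are made up):

```go
package main

import (
	"fmt"
	"strings"
)

// toComparable mirrors the widening in the diff above: values coming back from
// the SQL driver may be int8, int16, int32, int or float32, and comparing them
// reliably through interface{} requires normalizing to int64 / float64 first.
func toComparable(i interface{}) interface{} {
	switch v := i.(type) {
	case int8:
		return int64(v)
	case int16:
		return int64(v)
	case int32:
		return int64(v)
	case int:
		return int64(v)
	case float32:
		return float64(v)
	default:
		return v
	}
}

func main() {
	// Column names in the result set are lower case, hence the ToLower lookup.
	values := map[string]interface{}{"temperature": int8(42)}
	lookup := func(key string) interface{} {
		return toComparable(values[strings.ToLower(key)])
	}
	// Without the widening, the int64 assertion below would panic on an int8 value.
	fmt.Println(lookup("Temperature").(int64) > 40) // true
}
```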
|
|
@ -119,7 +119,7 @@ WantedBy=multi-user.target
|
|||
return nil
|
||||
}
|
||||
|
||||
const version = "TDengine alert v2.0.0.0"
|
||||
const version = "TDengine alert v2.0.0.1"
|
||||
|
||||
func main() {
|
||||
var (
|
||||
|
|
|
@ -128,24 +128,84 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数
|
|||
- maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。
|
||||
- maxVgroupsPerDb: 每个数据库中能够使用的最大vnode个数。
|
||||
- arbitrator: 系统中裁决器的end point,缺省为空
|
||||
- timezone:时区。从系统中动态获取当前的时区设置。
|
||||
- locale:系统区位信息及编码格式。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。
|
||||
- charset:字符集编码。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。
|
||||
- timezone、locale、charset 的配置见客户端配置。
|
||||
|
||||
## 客户端配置
|
||||
|
||||
TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见[Shell命令行程序](#_TDengine_Shell命令行程序)。本节主要讲解taos客户端应用在配置文件taos.cfg文件中使用到的参数。
|
||||
TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。
|
||||
|
||||
客户端配置参数列表及解释
|
||||
|
||||
- firstEp: taos启动时,主动连接的集群中第一个taosd实例的end point, 缺省值为 localhost:6030。
|
||||
- secondEp: taos启动时,如果firstEp连接不上,尝试连接集群中第二个taosd实例的end point, 缺省值为空。
|
||||
- charset:字符集编码。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。
|
||||
- locale:系统区位信息及编码格式。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。
|
||||
|
||||
- locale
|
||||
|
||||
> 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置
|
||||
|
||||
TDengine为存储中文、日文、韩文等非ASCII编码的宽字符,提供一种专门的字段类型nchar。写入nchar字段的数据将统一采用UCS4-LE格式进行编码并发送到服务器。需要注意的是,编码正确性是客户端来保证。因此,如果用户想要正常使用nchar字段来存储诸如中文、日文、韩文等非ASCII字符,需要正确设置客户端的编码格式。
|
||||
|
||||
客户端的输入的字符均采用操作系统当前默认的编码格式,在Linux系统上多为UTF-8,部分中文系统编码则可能是GB18030或GBK等。在docker环境中默认的编码是POSIX。在中文版Windows系统中,编码则是CP936。客户端需要确保正确设置自己所使用的字符集,即客户端运行的操作系统当前编码字符集,才能保证nchar中的数据正确转换为UCS4-LE编码格式。
|
||||
|
||||
在 Linux 中 locale 的命名规则为: <语言>_<地区>.<字符集编码> 如:zh_CN.UTF-8,zh代表中文,CN代表大陆地区,UTF-8表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux系统与 Mac OSX 系统可以通过设置locale来确定系统的字符编码,由于Windows使用的locale中不是POSIX标准的locale格式,因此在Windows下需要采用另一个配置参数charset来指定字符编码。在Linux 系统中也可以使用charset来指定字符编码。
|
||||
|
||||
- charset
|
||||
|
||||
> 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置
|
||||
|
||||
如果配置文件中不设置charset,在Linux系统中,taos在启动时候,自动读取系统当前的locale信息,并从locale信息中解析提取charset编码格式。如果自动读取locale信息失败,则尝试读取charset配置,如果读取charset配置也失败,则中断启动过程。
|
||||
|
||||
在Linux系统中,locale信息包含了字符编码信息,因此正确设置了Linux系统locale以后可以不用再单独设置charset。例如:
|
||||
```
|
||||
locale zh_CN.UTF-8
|
||||
```
|
||||
在Windows系统中,无法从locale获取系统当前编码。如果无法从配置文件中读取字符串编码信息,taos默认设置为字符编码为CP936。其等效在配置文件中添加如下配置:
|
||||
```
|
||||
charset CP936
|
||||
```
|
||||
如果需要调整字符编码,请查阅当前操作系统使用的编码,并在配置文件中正确设置。
|
||||
|
||||
在Linux系统中,如果用户同时设置了locale和字符集编码charset,并且locale和charset的不一致,后设置的值将覆盖前面设置的值。
|
||||
```
|
||||
locale zh_CN.UTF-8
|
||||
charset GBK
|
||||
```
|
||||
则charset的有效值是GBK。
|
||||
```
|
||||
charset GBK
|
||||
locale zh_CN.UTF-8
|
||||
```
|
||||
charset的有效值是UTF-8。
|
||||
|
||||
日志的配置参数,与server 的配置参数完全一样。
|
||||
|
||||
启动taos时,也可以从命令行指定一个taosd实例的end point,否则就从taos.cfg读取。
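For example (hostname and port are illustrative; this assumes the taos shell's -h/-P host and port options):

```
taos -h h1.taosdata.com -P 6030
```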
|
||||
- timezone
|
||||
|
||||
默认值:从系统中动态获取当前的时区设置
|
||||
|
||||
客户端运行系统所在的时区。为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。
|
||||
|
||||
在Linux系统中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如:
|
||||
```
|
||||
timezone UTC-8
|
||||
timezone GMT-8
|
||||
timezone Asia/Shanghai
|
||||
```
|
||||
均是合法的设置东八区时区的格式。
|
||||
|
||||
时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词now的解析)产生影响。例如:
|
||||
```
|
||||
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
|
||||
```
|
||||
在东八区,SQL语句等效于
|
||||
```
|
||||
SELECT count(*) FROM table_name WHERE TS<1554955268000;
|
||||
```
|
||||
在UTC时区,SQL语句等效于
|
||||
```
|
||||
SELECT count(*) FROM table_name WHERE TS<1554984068000;
|
||||
```
|
||||
为了避免使用字符串时间格式带来的不确定性,也可以直接使用Unix时间戳。此外,还可以在SQL语句中使用带有时区的时间戳字符串,例如:RFC3339格式的时间戳字符串,2013-04-12T15:52:01.123+08:00或者ISO-8601格式时间戳字符串2013-04-12T15:52:01.123+0800。上述两个字符串转化为Unix时间戳不受系统所在时区的影响。
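As a concrete illustration of the paragraph above (reusing the placeholder table name from the earlier examples), a timezone-qualified RFC3339 string is parsed to the same instant regardless of the client's timezone setting:

```
SELECT count(*) FROM table_name WHERE TS < '2019-04-11T12:01:08.000+08:00';
```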
|
||||
|
||||
## 用户管理
|
||||
|
||||
|
|
|
@ -95,6 +95,8 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
|
|||
- [数据查询](https://www.taosdata.com/cn/documentation20/taos-sql/#数据查询):支持时间段、值过滤、排序、查询结果手动分页等
|
||||
- [SQL函数](https://www.taosdata.com/cn/documentation20/taos-sql/#SQL函数):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等
|
||||
- [时间维度聚合](https://www.taosdata.com/cn/documentation20/taos-sql/#时间维度聚合):将表中数据按照时间段进行切割后聚合,降维处理
|
||||
- [边界线制](https://www.taosdata.com/cn/documentation20/taos-sql/#TAOS-SQL-边界限制):TAOS SQL的边界限制
|
||||
- [错误码](https://www.taosdata.com/cn/documentation20/Taos-Error-Code):TDengine 2.0 错误码以及对应的十进制码
|
||||
|
||||
## TDengine的技术设计
|
||||
|
||||
|
|
|
@ -86,13 +86,29 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
```mysql
|
||||
ALTER DATABASE db_name COMP 2;
|
||||
```
|
||||
修改数据库文件压缩标志位,有效数字为0,1,2. 0表示不压缩,1表示一阶段压缩,2表示两阶段压缩。修改后可以使用show databases命令查看是否修改成功
|
||||
COMP参数是指修改数据库文件压缩标志位,取值范围为[0, 2]. 0表示不压缩,1表示一阶段压缩,2表示两阶段压缩。
|
||||
|
||||
```mysql
|
||||
ALTER DATABASE db_name REPLICA 2;
|
||||
```
|
||||
修改数据库副本数,有效副本数为1到3。在集群中使用,副本数必须小于dnode的数目。修改后可以使用show databases命令查看是否修改成功
|
||||
REPLICA参数是指修改数据库副本数,取值范围[1, 3]。在集群中使用,副本数必须小于dnode的数目。
|
||||
|
||||
```mysql
|
||||
ALTER DATABASE db_name KEEP 365;
|
||||
```
|
||||
KEEP参数是指修改数据文件保存的天数,缺省值为3650,取值范围[days, 365000],必须大于或等于days参数值。
|
||||
|
||||
```mysql
|
||||
ALTER DATABASE db_name QUORUM 2;
|
||||
```
|
||||
QUORUM参数是指数据写入成功所需要的确认数。取值范围[1, 3]。对于异步复制,quorum设为1,具有master角色的虚拟节点自己确认即可。对于同步复制,需要至少大于等于2。原则上,Quorum >=1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。
|
||||
|
||||
```mysql
|
||||
ALTER DATABASE db_name BLOCKS 365;
|
||||
```
|
||||
BLOCKS参数是每个VNODE (TSDB) 中有多少cache大小的内存块,因此一个VNODE的用的内存大小粗略为(cache * blocks)。取值范围[3, 1000]。
|
||||
|
||||
**Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。
|
||||
|
||||
- **显示系统所有数据库**
|
||||
```mysql
|
||||
|
|
|
@ -0,0 +1,173 @@
|
|||
# TDengine 2.0 error codes and their decimal equivalents
|
||||
|
||||
|
||||
| Code | bit | error code | error description | decimal error code |
|
||||
|-----------------------| :---: | :---------: | :------------------------ | ---------------- |
|
||||
|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647|
|
||||
|TSDB_CODE_RPC_AUTH_REQUIRED| 0 | 0x0002 | "Authentication required"| -2147483646|
|
||||
|TSDB_CODE_RPC_AUTH_FAILURE| 0| 0x0003 | "Authentication failure"| -2147483645|
|
||||
|TSDB_CODE_RPC_REDIRECT |0 | 0x0004| "Redirect"| -2147483644|
|
||||
|TSDB_CODE_RPC_NOT_READY| 0 | 0x0005 | "System not ready"| -2147483643|
|
||||
|TSDB_CODE_RPC_ALREADY_PROCESSED| 0 | 0x0006 |"Message already processed"| -2147483642|
|
||||
|TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED| 0 |0x0007| "Last session not finished"| -2147483641|
|
||||
|TSDB_CODE_RPC_MISMATCHED_LINK_ID| 0| 0x0008 | "Mismatched meter id"| -2147483640|
|
||||
|TSDB_CODE_RPC_TOO_SLOW| 0 | 0x0009 | "Processing of request timed out"| -2147483639|
|
||||
|TSDB_CODE_RPC_MAX_SESSIONS| 0 | 0x000A | "Number of sessions reached limit"| -2147483638|
|
||||
|TSDB_CODE_RPC_NETWORK_UNAVAIL| 0 |0x000B | "Unable to establish connection" |-2147483637|
|
||||
|TSDB_CODE_RPC_APP_ERROR| 0| 0x000C | "Unexpected generic error in RPC"| -2147483636|
|
||||
|TSDB_CODE_RPC_UNEXPECTED_RESPONSE| 0 |0x000D | "Unexpected response"| -2147483635|
|
||||
|TSDB_CODE_RPC_INVALID_VALUE| 0 | 0x000E | "Invalid value"| -2147483634|
|
||||
|TSDB_CODE_RPC_INVALID_TRAN_ID| 0 | 0x000F | "Invalid transaction id"| -2147483633|
|
||||
|TSDB_CODE_RPC_INVALID_SESSION_ID| 0| 0x0010 | "Invalid session id"| -2147483632|
|
||||
|TSDB_CODE_RPC_INVALID_MSG_TYPE| 0| 0x0011| "Invalid message type"| -2147483631|
|
||||
|TSDB_CODE_RPC_INVALID_RESPONSE_TYPE| 0 | 0x0012| "Invalid response type"| -2147483630|
|
||||
|TSDB_CODE_RPC_INVALID_TIME_STAMP| 0| 0x0013| "Invalid timestamp"| -2147483629|
|
||||
|TSDB_CODE_COM_OPS_NOT_SUPPORT| 0 | 0x0100| "Operation not supported"| -2147483392|
|
||||
|TSDB_CODE_COM_MEMORY_CORRUPTED |0| 0x0101 | "Memory corrupted"| -2147483391|
|
||||
|TSDB_CODE_COM_OUT_OF_MEMORY| 0| 0x0102| "Out of memory"| -2147483390|
|
||||
|TSDB_CODE_COM_INVALID_CFG_MSG| 0 | 0x0103| "Invalid config message"| -2147483389|
|
||||
|TSDB_CODE_COM_FILE_CORRUPTED| 0| 0x0104| "Data file corrupted" |-2147483388|
|
||||
|TSDB_CODE_TSC_INVALID_SQL| 0| 0x0200 | "Invalid SQL statement"| -2147483136|
|
||||
|TSDB_CODE_TSC_INVALID_QHANDLE| 0 | 0x0201 | "Invalid qhandle"| -2147483135|
|
||||
|TSDB_CODE_TSC_INVALID_TIME_STAMP| 0 | 0x0202 | "Invalid combination of client/service time"| -2147483134|
|
||||
|TSDB_CODE_TSC_INVALID_VALUE| 0 | 0x0203| "Invalid value in client"| -2147483133|
|
||||
|TSDB_CODE_TSC_INVALID_VERSION| 0 | 0x0204 | "Invalid client version" |-2147483132|
|
||||
|TSDB_CODE_TSC_INVALID_IE| 0 | 0x0205 | "Invalid client ie" |-2147483131|
|
||||
|TSDB_CODE_TSC_INVALID_FQDN| 0 | 0x0206| "Invalid host name"| -2147483130|
|
||||
|TSDB_CODE_TSC_INVALID_USER_LENGTH| 0 | 0x0207| "Invalid user name"| -2147483129|
|
||||
|TSDB_CODE_TSC_INVALID_PASS_LENGTH| 0 | 0x0208 | "Invalid password"| -2147483128|
|
||||
|TSDB_CODE_TSC_INVALID_DB_LENGTH| 0 | 0x0209| "Database name too long"| -2147483127|
|
||||
|TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH| 0 | 0x020A | "Table name too long"| -2147483126|
|
||||
|TSDB_CODE_TSC_INVALID_CONNECTION| 0 | 0x020B| "Invalid connection"| -2147483125|
|
||||
|TSDB_CODE_TSC_OUT_OF_MEMORY| 0 | 0x020C | "System out of memory" |-2147483124|
|
||||
|TSDB_CODE_TSC_NO_DISKSPACE| 0 | 0x020D | "System out of disk space"| -2147483123|
|
||||
|TSDB_CODE_TSC_QUERY_CACHE_ERASED| 0 | 0x020E| "Query cache erased"| -2147483122|
|
||||
|TSDB_CODE_TSC_QUERY_CANCELLED| 0 | 0x020F |"Query terminated"| -2147483121|
|
||||
|TSDB_CODE_TSC_SORTED_RES_TOO_MANY| 0 |0x0210 | "Result set too large to be sorted"| -2147483120|
|
||||
|TSDB_CODE_TSC_APP_ERROR| 0 | 0x0211 | "Application error"| -2147483119|
|
||||
|TSDB_CODE_TSC_ACTION_IN_PROGRESS| 0 |0x0212 | "Action in progress"| -2147483118|
|
||||
|TSDB_CODE_TSC_DISCONNECTED| 0 | 0x0213 |"Disconnected from service" |-2147483117|
|
||||
|TSDB_CODE_TSC_NO_WRITE_AUTH| 0 | 0x0214 | "No write permission" |-2147483116|
|
||||
|TSDB_CODE_MND_MSG_NOT_PROCESSED| 0| 0x0300| "Message not processed"| -2147482880|
|
||||
|TSDB_CODE_MND_ACTION_IN_PROGRESS| 0 | 0x0301 |"Message is progressing"| -2147482879|
|
||||
|TSDB_CODE_MND_ACTION_NEED_REPROCESSED| 0 | 0x0302 |"Messag need to be reprocessed"| -2147482878|
|
||||
|TSDB_CODE_MND_NO_RIGHTS| 0 | 0x0303| "Insufficient privilege for operation"| -2147482877|
|
||||
|TSDB_CODE_MND_APP_ERROR| 0 | 0x0304 | "Unexpected generic error in mnode"| -2147482876|
|
||||
|TSDB_CODE_MND_INVALID_CONNECTION| 0 | 0x0305 | "Invalid message connection"| -2147482875|
|
||||
|TSDB_CODE_MND_INVALID_MSG_VERSION| 0 | 0x0306 | "Incompatible protocol version"| -2147482874|
|
||||
|TSDB_CODE_MND_INVALID_MSG_LEN| 0| 0x0307 | "Invalid message length"| -2147482873|
|
||||
|TSDB_CODE_MND_INVALID_MSG_TYPE| 0 | 0x0308 | "Invalid message type" |-2147482872|
|
||||
|TSDB_CODE_MND_TOO_MANY_SHELL_CONNS| 0 |0x0309 | "Too many connections"| -2147482871|
|
||||
|TSDB_CODE_MND_OUT_OF_MEMORY| 0 |0x030A | "Out of memory in mnode"| -2147482870|
|
||||
|TSDB_CODE_MND_INVALID_SHOWOBJ| 0 | 0x030B |"Data expired"| -2147482869|
|
||||
|TSDB_CODE_MND_INVALID_QUERY_ID |0 | 0x030C |"Invalid query id" |-2147482868|
|
||||
|TSDB_CODE_MND_INVALID_STREAM_ID| 0 |0x030D | "Invalid stream id"| -2147482867|
|
||||
|TSDB_CODE_MND_INVALID_CONN_ID| 0| 0x030E | "Invalid connection id" |-2147482866|
|
||||
|TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE| 0 | 0x0320| "Object already there"| -2147482848|
|
||||
|TSDB_CODE_MND_SDB_ERROR| 0 |0x0321 | "Unexpected generic error in sdb" |-2147482847|
|
||||
|TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE| 0 | 0x0322| "Invalid table type" |-2147482846|
|
||||
|TSDB_CODE_MND_SDB_OBJ_NOT_THERE| 0 | 0x0323 |"Object not there" |-2147482845|
|
||||
|TSDB_CODE_MND_SDB_INVAID_META_ROW| 0 | 0x0324| "Invalid meta row" |-2147482844|
|
||||
|TSDB_CODE_MND_SDB_INVAID_KEY_TYPE| 0 | 0x0325 |"Invalid key type" |-2147482843|
|
||||
|TSDB_CODE_MND_DNODE_ALREADY_EXIST| 0 | 0x0330 | "DNode already exists"| -2147482832|
|
||||
|TSDB_CODE_MND_DNODE_NOT_EXIST| 0 | 0x0331| "DNode does not exist" |-2147482831|
|
||||
|TSDB_CODE_MND_VGROUP_NOT_EXIST| 0 | 0x0332 |"VGroup does not exist"| -2147482830|
|
||||
|TSDB_CODE_MND_NO_REMOVE_MASTER |0 | 0x0333 | "Master DNode cannot be removed"| -2147482829|
|
||||
|TSDB_CODE_MND_NO_ENOUGH_DNODES |0 | 0x0334| "Out of DNodes"| -2147482828|
|
||||
|TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT |0 | 0x0335 | "Cluster cfg inconsistent"| -2147482827|
|
||||
|TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION| 0 | 0x0336 | "Invalid dnode cfg option"| -2147482826|
|
||||
|TSDB_CODE_MND_BALANCE_ENABLED| 0 | 0x0337 | "Balance already enabled" |-2147482825|
|
||||
|TSDB_CODE_MND_VGROUP_NOT_IN_DNODE| 0 |0x0338 | "Vgroup not in dnode"| -2147482824|
|
||||
|TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE| 0 | 0x0339 | "Vgroup already in dnode"| -2147482823|
|
||||
|TSDB_CODE_MND_DNODE_NOT_FREE |0 | 0x033A |"Dnode not avaliable"| -2147482822|
|
||||
|TSDB_CODE_MND_INVALID_CLUSTER_ID |0 |0x033B | "Cluster id not match"| -2147482821|
|
||||
|TSDB_CODE_MND_NOT_READY| 0 | 0x033C |"Cluster not ready"| -2147482820|
|
||||
|TSDB_CODE_MND_ACCT_ALREADY_EXIST| 0 | 0x0340 | "Account already exists" |-2147482816|
|
||||
|TSDB_CODE_MND_INVALID_ACCT| 0 | 0x0341| "Invalid account"| -2147482815|
|
||||
|TSDB_CODE_MND_INVALID_ACCT_OPTION| 0 | 0x0342 | "Invalid account options"| -2147482814|
|
||||
|TSDB_CODE_MND_USER_ALREADY_EXIST| 0 | 0x0350 | "User already exists"| -2147482800|
|
||||
|TSDB_CODE_MND_INVALID_USER |0 | 0x0351 | "Invalid user" |-2147482799|
|
||||
|TSDB_CODE_MND_INVALID_USER_FORMAT| 0 |0x0352 |"Invalid user format" |-2147482798|
|
||||
|TSDB_CODE_MND_INVALID_PASS_FORMAT| 0| 0x0353 | "Invalid password format"| -2147482797|
|
||||
|TSDB_CODE_MND_NO_USER_FROM_CONN| 0 | 0x0354 | "Can not get user from conn"| -2147482796|
|
||||
|TSDB_CODE_MND_TOO_MANY_USERS| 0 | 0x0355| "Too many users"| -2147482795|
|
||||
|TSDB_CODE_MND_TABLE_ALREADY_EXIST| 0| 0x0360| "Table already exists"| -2147482784|
|
||||
|TSDB_CODE_MND_INVALID_TABLE_ID| 0| 0x0361| "Table name too long"| -2147482783|
|
||||
|TSDB_CODE_MND_INVALID_TABLE_NAME| 0| 0x0362 | "Table does not exist"| -2147482782|
|
||||
|TSDB_CODE_MND_INVALID_TABLE_TYPE| 0| 0x0363 | "Invalid table type in tsdb"| -2147482781|
|
||||
|TSDB_CODE_MND_TOO_MANY_TAGS| 0 | 0x0364| "Too many tags"| -2147482780|
|
||||
|TSDB_CODE_MND_TOO_MANY_TIMESERIES| 0| 0x0366| "Too many time series"| -2147482778|
|
||||
|TSDB_CODE_MND_NOT_SUPER_TABLE| 0 |0x0367| "Not super table"| -2147482777|
|
||||
|TSDB_CODE_MND_COL_NAME_TOO_LONG| 0| 0x0368| "Tag name too long"| -2147482776|
|
||||
|TSDB_CODE_MND_TAG_ALREAY_EXIST| 0| 0x0369| "Tag already exists"| -2147482775|
|
||||
|TSDB_CODE_MND_TAG_NOT_EXIST| 0 |0x036A | "Tag does not exist" |-2147482774|
|
||||
|TSDB_CODE_MND_FIELD_ALREAY_EXIST| 0 | 0x036B| "Field already exists"| -2147482773|
|
||||
|TSDB_CODE_MND_FIELD_NOT_EXIST| 0 | 0x036C | "Field does not exist"| -2147482772|
|
||||
|TSDB_CODE_MND_INVALID_STABLE_NAME |0 | 0x036D |"Super table does not exist" |-2147482771|
|
||||
|TSDB_CODE_MND_DB_NOT_SELECTED| 0 | 0x0380 | "Database not specified or available"| -2147482752|
|
||||
|TSDB_CODE_MND_DB_ALREADY_EXIST| 0 | 0x0381 | "Database already exists"| -2147482751|
|
||||
|TSDB_CODE_MND_INVALID_DB_OPTION| 0 | 0x0382 | "Invalid database options"| -2147482750|
|
||||
|TSDB_CODE_MND_INVALID_DB| 0 | 0x0383 | "Invalid database name"| -2147482749|
|
||||
|TSDB_CODE_MND_MONITOR_DB_FORBIDDEN| 0 | 0x0384 | "Cannot delete monitor database"| -2147482748|
|
||||
|TSDB_CODE_MND_TOO_MANY_DATABASES| 0| 0x0385 | "Too many databases for account"| -2147482747|
|
||||
|TSDB_CODE_MND_DB_IN_DROPPING| 0 | 0x0386| "Database not available" |-2147482746|
|
||||
|TSDB_CODE_DND_MSG_NOT_PROCESSED| 0| 0x0400 | "Message not processed"| -2147482624|
|
||||
|TSDB_CODE_DND_OUT_OF_MEMORY |0 | 0x0401 | "Dnode out of memory"| -2147482623|
|
||||
|TSDB_CODE_DND_NO_WRITE_ACCESS| 0 | 0x0402 | "No permission for disk files in dnode"| -2147482622|
|
||||
|TSDB_CODE_DND_INVALID_MSG_LEN| 0 | 0x0403 | "Invalid message length"| -2147482621|
|
||||
|TSDB_CODE_VND_ACTION_IN_PROGRESS |0 |0x0500| "Action in progress" |-2147482368|
|
||||
|TSDB_CODE_VND_MSG_NOT_PROCESSED| 0 |0x0501 | "Message not processed" |-2147482367|
|
||||
|TSDB_CODE_VND_ACTION_NEED_REPROCESSED |0 |0x0502| "Action need to be reprocessed"| -2147482366|
|
||||
|TSDB_CODE_VND_INVALID_VGROUP_ID |0 | 0x0503| "Invalid Vgroup ID"| -2147482365|
|
||||
|TSDB_CODE_VND_INIT_FAILED| 0 | 0x0504 | "Vnode initialization failed"| -2147482364|
|
||||
|TSDB_CODE_VND_NO_DISKSPACE| 0 |0x0505| "System out of disk space" |-2147482363|
|
||||
|TSDB_CODE_VND_NO_DISK_PERMISSIONS| 0 | 0x0506| "No write permission for disk files" |-2147482362|
|
||||
|TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR| 0 | 0x0507 | "Missing data file"| -2147482361|
|
||||
|TSDB_CODE_VND_OUT_OF_MEMORY |0| 0x0508 | "Out of memory"| -2147482360|
|
||||
|TSDB_CODE_VND_APP_ERROR| 0| 0x0509 | "Unexpected generic error in vnode"| -2147482359|
|
||||
|TSDB_CODE_VND_INVALID_STATUS |0| 0x0510 | "Database not ready"| -2147482352|
|
||||
|TSDB_CODE_VND_NOT_SYNCED| 0 | 0x0511 | "Database suspended"| -2147482351|
|
||||
|TSDB_CODE_VND_NO_WRITE_AUTH| 0 | 0x0512| "Write operation denied" |-2147482350|
|
||||
|TSDB_CODE_TDB_INVALID_TABLE_ID |0 | 0x0600 | "Invalid table ID"| -2147482112|
|
||||
|TSDB_CODE_TDB_INVALID_TABLE_TYPE| 0| 0x0601 |"Invalid table type"| -2147482111|
|
||||
|TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION| 0| 0x0602| "Invalid table schema version"| -2147482110|
|
||||
|TSDB_CODE_TDB_TABLE_ALREADY_EXIST| 0 | 0x0603| "Table already exists"| -2147482109|
|
||||
|TSDB_CODE_TDB_INVALID_CONFIG| 0 | 0x0604| "Invalid configuration"| -2147482108|
|
||||
|TSDB_CODE_TDB_INIT_FAILED| 0 | 0x0605| "Tsdb init failed"| -2147482107|
|
||||
|TSDB_CODE_TDB_NO_DISKSPACE| 0 | 0x0606| "No diskspace for tsdb"| -2147482106|
|
||||
|TSDB_CODE_TDB_NO_DISK_PERMISSIONS| 0 | 0x0607| "No permission for disk files"| -2147482105|
|
||||
|TSDB_CODE_TDB_FILE_CORRUPTED| 0 | 0x0608| "Data file(s) corrupted"| -2147482104|
|
||||
|TSDB_CODE_TDB_OUT_OF_MEMORY| 0 | 0x0609| "Out of memory"| -2147482103|
|
||||
|TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE| 0 | 0x060A| "Tag too old"| -2147482102|
|
||||
|TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE |0| 0x060B | "Timestamp data out of range"| -2147482101|
|
||||
|TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP| 0| 0x060C| "Submit message is messed up"| -2147482100|
|
||||
|TSDB_CODE_TDB_INVALID_ACTION| 0 | 0x060D | "Invalid operation"| -2147482099|
|
||||
|TSDB_CODE_TDB_INVALID_CREATE_TB_MSG| 0 | 0x060E| "Invalid creation of table"| -2147482098|
|
||||
|TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM| 0 | 0x060F| "No table data in memory skiplist" |-2147482097|
|
||||
|TSDB_CODE_TDB_FILE_ALREADY_EXISTS| 0 | 0x0610| "File already exists"| -2147482096|
|
||||
|TSDB_CODE_TDB_TABLE_RECONFIGURE| 0 | 0x0611| "Need to reconfigure table"| -2147482095|
|
||||
|TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO| 0 | 0x0612| "Invalid information to create table"| -2147482094|
|
||||
|TSDB_CODE_QRY_INVALID_QHANDLE| 0 | 0x0700| "Invalid handle"| -2147481856|
|
||||
|TSDB_CODE_QRY_INVALID_MSG| 0 | 0x0701| "Invalid message"| -2147481855|
|
||||
|TSDB_CODE_QRY_NO_DISKSPACE| 0 | 0x0702 | "No diskspace for query"| -2147481854|
|
||||
|TSDB_CODE_QRY_OUT_OF_MEMORY| 0 | 0x0703 | "System out of memory"| -2147481853|
|
||||
|TSDB_CODE_QRY_APP_ERROR| 0 | 0x0704 | "Unexpected generic error in query"| -2147481852|
|
||||
|TSDB_CODE_QRY_DUP_JOIN_KEY| 0 | 0x0705| "Duplicated join key"| -2147481851|
|
||||
|TSDB_CODE_QRY_EXCEED_TAGS_LIMIT| 0 | 0x0706 | "Tag conditon too many"| -2147481850|
|
||||
|TSDB_CODE_QRY_NOT_READY |0| 0x0707 | "Query not ready" |-2147481849|
|
||||
|TSDB_CODE_QRY_HAS_RSP| 0 | 0x0708| "Query should response"| -2147481848|
|
||||
|TSDB_CODE_GRANT_EXPIRED| 0 | 0x0800| "License expired"| -2147481600|
|
||||
|TSDB_CODE_GRANT_DNODE_LIMITED| 0 | 0x0801 | "DNode creation limited by licence"| -2147481599|
|
||||
|TSDB_CODE_GRANT_ACCT_LIMITED |0| 0x0802 |"Account creation limited by license"| -2147481598|
|
||||
|TSDB_CODE_GRANT_TIMESERIES_LIMITED| 0 | 0x0803 | "Table creation limited by license"| -2147481597|
|
||||
|TSDB_CODE_GRANT_DB_LIMITED| 0 | 0x0804 | "DB creation limited by license"| -2147481596|
|
||||
|TSDB_CODE_GRANT_USER_LIMITED| 0 | 0x0805 | "User creation limited by license"| -2147481595|
|
||||
|TSDB_CODE_GRANT_CONN_LIMITED| 0| 0x0806 | "Conn creation limited by license" |-2147481594|
|
||||
|TSDB_CODE_GRANT_STREAM_LIMITED| 0 | 0x0807 | "Stream creation limited by license"| -2147481593|
|
||||
|TSDB_CODE_GRANT_SPEED_LIMITED| 0 | 0x0808 | "Write speed limited by license" |-2147481592|
|
||||
|TSDB_CODE_GRANT_STORAGE_LIMITED| 0 |0x0809 | "Storage capacity limited by license"| -2147481591|
|
||||
|TSDB_CODE_GRANT_QUERYTIME_LIMITED| 0 | 0x080A | "Query time limited by license" |-2147481590|
|
||||
|TSDB_CODE_GRANT_CPU_LIMITED| 0 |0x080B |"CPU cores limited by license"| -2147481589|
|
||||
|TSDB_CODE_SYN_INVALID_CONFIG| 0 | 0x0900| "Invalid Sync Configuration"| -2147481344|
|
||||
|TSDB_CODE_SYN_NOT_ENABLED| 0 | 0x0901 | "Sync module not enabled" |-2147481343|
|
||||
|TSDB_CODE_WAL_APP_ERROR| 0| 0x1000 | "Unexpected generic error in wal" |-2147479552|
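The decimal codes in the last column are simply the hex codes with bit 31 set, interpreted as a signed 32-bit integer. A small illustrative sketch of that relationship (not part of the TDengine source):

```go
package main

import "fmt"

func main() {
	// Every code in the table has bit 31 set; interpreting the result as a
	// signed 32-bit integer gives the decimal value in the last column.
	hex := uint32(0x0001)              // e.g. TSDB_CODE_RPC_ACTION_IN_PROGRESS
	decimal := int32(0x80000000 | hex) // prints -2147483647
	fmt.Println(decimal)
}
```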
|
|
@ -93,6 +93,7 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修
|
|||
- role:dnode的可选角色。0-any; 既可作为mnode,也可分配vnode;1-mgmt;只能作为mnode,不能分配vnode;2-dnode;不能作为mnode,只能分配vnode
|
||||
- debugFlag:运行日志开关。131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志)。默认值:131或135(不同模块有不同的默认值)。
|
||||
- numOfLogLines:单个日志文件允许的最大行数。默认值:10,000,000行。
|
||||
- logKeepDays:日志文件的最长保存时间。大于0时,日志文件会被重命名为taosdlog.xxx,其中xxx为日志文件最后修改的时间戳,单位为秒。默认值:0天。
|
||||
- maxSQLLength:单条SQL语句允许最长限制。默认值:65380字节。
|
||||
- telemetryReporting: 是否允许 TDengine 采集和上报基本使用信息,0表示不允许,1表示允许。 默认值:1。
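A hedged taos.cfg fragment combining the logging parameters above (the values are illustrative, not recommendations):

```
debugFlag      135
numOfLogLines  10000000
logKeepDays    7
```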
|
||||
|
||||
|
@ -130,24 +131,84 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数
|
|||
- maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。
|
||||
- maxVgroupsPerDb: 每个数据库中能够使用的最大vnode个数。
|
||||
- arbitrator: 系统中裁决器的end point,缺省为空
|
||||
- timezone:时区。从系统中动态获取当前的时区设置。
|
||||
- locale:系统区位信息及编码格式。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。
|
||||
- charset:字符集编码。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。
|
||||
- timezone、locale、charset 的配置见客户端配置。
|
||||
|
||||
## 客户端配置
|
||||
|
||||
TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见[Shell命令行程序](#_TDengine_Shell命令行程序)。本节主要讲解taos客户端应用在配置文件taos.cfg文件中使用到的参数。
|
||||
TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。
|
||||
|
||||
客户端配置参数列表及解释
|
||||
客户端配置参数
|
||||
|
||||
- firstEp: taos启动时,主动连接的集群中第一个taosd实例的end point, 缺省值为 localhost:6030。
|
||||
- secondEp: taos启动时,如果firstEp连接不上,尝试连接集群中第二个taosd实例的end point, 缺省值为空。
|
||||
- charset:字符集编码。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。
|
||||
- locale:系统区位信息及编码格式。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。
|
||||
- maxBinaryDisplayWidth:Shell中binary 和 nchar字段的显示宽度上限,超过此限制的部分将被隐藏。默认值:30。可在 shell 中通过命令 set max_binary_display_width *nn* 动态修改此选项。
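For example, inside the taos shell (the width value is illustrative):

```
taos> set max_binary_display_width 60
```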
|
||||
- locale
|
||||
|
||||
> 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置
|
||||
|
||||
TDengine为存储中文、日文、韩文等非ASCII编码的宽字符,提供一种专门的字段类型nchar。写入nchar字段的数据将统一采用UCS4-LE格式进行编码并发送到服务器。需要注意的是,编码正确性是客户端来保证。因此,如果用户想要正常使用nchar字段来存储诸如中文、日文、韩文等非ASCII字符,需要正确设置客户端的编码格式。
|
||||
|
||||
客户端的输入的字符均采用操作系统当前默认的编码格式,在Linux系统上多为UTF-8,部分中文系统编码则可能是GB18030或GBK等。在docker环境中默认的编码是POSIX。在中文版Windows系统中,编码则是CP936。客户端需要确保正确设置自己所使用的字符集,即客户端运行的操作系统当前编码字符集,才能保证nchar中的数据正确转换为UCS4-LE编码格式。
|
||||
|
||||
在 Linux 中 locale 的命名规则为: <语言>_<地区>.<字符集编码> 如:zh_CN.UTF-8,zh代表中文,CN代表大陆地区,UTF-8表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux系统与 Mac OSX 系统可以通过设置locale来确定系统的字符编码,由于Windows使用的locale中不是POSIX标准的locale格式,因此在Windows下需要采用另一个配置参数charset来指定字符编码。在Linux 系统中也可以使用charset来指定字符编码。
|
||||
|
||||
- charset
|
||||
|
||||
> 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置
|
||||
|
||||
如果配置文件中不设置charset,在Linux系统中,taos在启动时候,自动读取系统当前的locale信息,并从locale信息中解析提取charset编码格式。如果自动读取locale信息失败,则尝试读取charset配置,如果读取charset配置也失败,则中断启动过程。
|
||||
|
||||
在Linux系统中,locale信息包含了字符编码信息,因此正确设置了Linux系统locale以后可以不用再单独设置charset。例如:
|
||||
```
|
||||
locale zh_CN.UTF-8
|
||||
```
|
||||
在Windows系统中,无法从locale获取系统当前编码。如果无法从配置文件中读取字符串编码信息,taos默认设置为字符编码为CP936。其等效在配置文件中添加如下配置:
|
||||
```
|
||||
charset CP936
|
||||
```
|
||||
如果需要调整字符编码,请查阅当前操作系统使用的编码,并在配置文件中正确设置。
|
||||
|
||||
在Linux系统中,如果用户同时设置了locale和字符集编码charset,并且locale和charset的不一致,后设置的值将覆盖前面设置的值。
|
||||
```
|
||||
locale zh_CN.UTF-8
|
||||
charset GBK
|
||||
```
|
||||
则charset的有效值是GBK。
|
||||
```
|
||||
charset GBK
|
||||
locale zh_CN.UTF-8
|
||||
```
|
||||
charset的有效值是UTF-8。
|
||||
|
||||
日志的配置参数,与server 的配置参数完全一样。
|
||||
|
||||
- timezone
|
||||
|
||||
默认值:从系统中动态获取当前的时区设置
|
||||
|
||||
客户端运行系统所在的时区。为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。
|
||||
|
||||
在Linux系统中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如:
|
||||
```
|
||||
timezone UTC-8
|
||||
timezone GMT-8
|
||||
timezone Asia/Shanghai
|
||||
```
|
||||
均是合法的设置东八区时区的格式。
|
||||
|
||||
时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词now的解析)产生影响。例如:
|
||||
```
|
||||
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
|
||||
```
|
||||
在东八区,SQL语句等效于
|
||||
```
|
||||
SELECT count(*) FROM table_name WHERE TS<1554955268000;
|
||||
```
|
||||
在UTC时区,SQL语句等效于
|
||||
```
|
||||
SELECT count(*) FROM table_name WHERE TS<1554984068000;
|
||||
```
|
||||
为了避免使用字符串时间格式带来的不确定性,也可以直接使用Unix时间戳。此外,还可以在SQL语句中使用带有时区的时间戳字符串,例如:RFC3339格式的时间戳字符串,2013-04-12T15:52:01.123+08:00或者ISO-8601格式时间戳字符串2013-04-12T15:52:01.123+0800。上述两个字符串转化为Unix时间戳不受系统所在时区的影响。
|
||||
|
||||
启动taos时,也可以从命令行指定一个taosd实例的end point,否则就从taos.cfg读取。
|
||||
|
||||
## 用户管理
|
||||
|
|
|
@ -2,13 +2,15 @@
|
|||
|
||||
多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看TDengine 2.0整体架构一章。而且在安装集群之前,请按照[《立即开始》](https://www.taosdata.com/cn/getting-started20/)一章安装并体验过单节点功能。
|
||||
|
||||
集群的每个节点是由End Point来唯一标识的,End Point是由FQDN(Fully Qualified Domain Name)外加Port组成,比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname,可通过Linux命令`hostname -f`获取。端口是这个节点对外服务的端口号,缺省是6030,但可以通过taos.cfg里配置参数serverPort进行修改。一个节点可能配置了多个hostname, TDengine会自动获取第一个,但也可以通过taos.cfg里配置参数fqdn进行指定。如果习惯IP地址直接访问,可以将参数fqdn设置为本节点的IP地址。
|
||||
集群的每个节点是由End Point来唯一标识的,End Point是由FQDN(Fully Qualified Domain Name)外加Port组成,比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname,可通过Linux命令`hostname -f`获取,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。端口是这个节点对外服务的端口号,缺省是6030,但可以通过taos.cfg里配置参数serverPort进行修改。一个节点可能配置了多个hostname, TDengine会自动获取第一个,但也可以通过taos.cfg里配置参数fqdn进行指定。如果习惯IP地址直接访问,可以将参数fqdn设置为本节点的IP地址。
|
||||
|
||||
TDengine的集群管理极其简单,除添加和删除节点需要人工干预之外,其他全部是自动完成,最大程度的降低了运维的工作量。本章对集群管理的操作做详细的描述。
|
||||
|
||||
## 准备工作
|
||||
|
||||
**第一步**:如果搭建集群的节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据,具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html )
|
||||
**注意1:**因为FQDN的信息会写进文件,如果之前没有配置或者更改FQDN,且启动了TDengine。请一定在确保数据无用或者备份的前提下,清理一下之前的数据(rm -rf /var/lib/taos/);
|
||||
**注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是 Host 文件。
|
||||
|
||||
**第二步**:建议关闭防火墙,至少保证端口:6030 - 6042的TCP和UDP端口都是开放的。**强烈建议**先关闭防火墙,集群搭建完毕之后,再来配置端口;
|
||||
|
||||
|
@ -23,7 +25,7 @@ TDengine的集群管理极其简单,除添加和删除节点需要人工干预
|
|||
**第五步**:修改TDengine的配置文件(所有节点的文件/etc/taos/taos.cfg都需要修改)。假设准备启动的第一个节点End Point为 h1.taosdata.com:6030, 那么以下几个参数与集群相关:
|
||||
|
||||
```
|
||||
// firstEp 是每个节点启动后连接的第一个节点
|
||||
// firstEp 集群中所有节点的配置都是一致的,对其第一次访问后,就获得了整个集群的信息
|
||||
firstEp h1.taosdata.com:6030
|
||||
|
||||
// 配置本节点的FQDN,如果本机只有一个hostname, 无需配置
|
||||
|
@ -32,7 +34,7 @@ fqdn h1.taosdata.com
|
|||
// 配置本节点的端口号,缺省是6030
|
||||
serverPort 6030
|
||||
|
||||
// 副本数为偶数的时候,需要配置,请参考《Arbitrator的使用》的部分
|
||||
// 服务端节点数为偶数的时候,需要配置,请参考《Arbitrator的使用》的部分
|
||||
arbitrator ha.taosdata.com:6042
|
||||
```
|
||||
|
||||
|
|
|
@ -32,7 +32,7 @@
|
|||
|
||||
3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
|
||||
|
||||
4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)
|
||||
4. 确认客户端连接时指定了正确的服务器FQDN(Fully Qualified Domain Name,可在服务器上执行Linux命令 hostname -f 获得),FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
|
||||
|
||||
5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件
|
||||
|
||||
|
@ -47,69 +47,14 @@
|
|||
检查服务器侧TCP端口连接是否工作:`nc -l {port}`
|
||||
检查客户端侧TCP端口连接是否工作:`nc {hostIP} {port}`
|
||||
|
||||
10. 可以使用taos程序内嵌的网络连通检测功能:验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP)。
|
||||
|
||||
taos通过参数 -n 来确定运行服务端功能,还是客户端功能。-n server:表示运行检测服务端功能;-n client:表示运行检测客户端功能。
|
||||
|
||||
1)首先在服务器上停止taosd服务;
|
||||
|
||||
2)在服务器上运行taos内嵌的网络连通检测的服务端功能:taos -n server -P 6030 -e 6042 -l 1000;
|
||||
|
||||
3)在客户端运行taos内嵌的网络连通检测的客户端功能:taos -n client -h host -P 6030 -e 6042 -l 1000;
|
||||
|
||||
-n :指示运行网络连通检测的服务端功能,或客户端功能,缺省值为空,表示不启动网络连通检测;
|
||||
|
||||
-h:指示服务端名称,可以是ip地址或fqdn格式。如:192.168.1.160,或 192.168.1.160:6030,或 hostname1,或hostname1:6030。缺省值是127.0.0.1。
|
||||
|
||||
-P :检测的起始端口号,缺省值是6030;
|
||||
|
||||
-e:检测的结束端口号,必须大于等于起始端口号,缺省值是6042;
|
||||
|
||||
-l:指定检测端口连通的报文长度,最大64000字节,缺省值是1000字节,测试时服务端和客户端必须指定相同;
|
||||
|
||||
服务端设置的起始端口和结束端口号,必须包含客户端设置的起始端口和结束端口号;
|
||||
|
||||
对于起始端口号有三种设置方式:缺省值、-h指定、-P指定,优先级是:-P指定 > -h指定 > 缺省值。
|
||||
|
||||
客户端运行的输出样例:
|
||||
|
||||
`sum@sum-virtualBox /home/sum $ taos -n client -h ubuntu-vbox6`
|
||||
|
||||
`host: ubuntu-vbox6 start port: 6030 end port: 6042 packet len: 1000`
|
||||
|
||||
`tcp port:6030 test ok. udp port:6030 test ok.`
|
||||
|
||||
`tcp port:6031 test ok. udp port:6031 test ok.`
|
||||
|
||||
`tcp port:6032 test ok. udp port:6032 test ok.`
|
||||
|
||||
`tcp port:6033 test ok. udp port:6033 test ok.`
|
||||
|
||||
`tcp port:6034 test ok. udp port:6034 test ok.`
|
||||
|
||||
`tcp port:6035 test ok. udp port:6035 test ok.`
|
||||
|
||||
`tcp port:6036 test ok. udp port:6036 test ok.`
|
||||
|
||||
`tcp port:6037 test ok. udp port:6037 test ok.`
|
||||
|
||||
`tcp port:6038 test ok. udp port:6038 test ok.`
|
||||
|
||||
`tcp port:6039 test ok. udp port:6039 test ok.`
|
||||
|
||||
`tcp port:6040 test ok. udp port:6040 test ok.`
|
||||
|
||||
`tcp port:6041 test ok. udp port:6041 test ok.`
|
||||
|
||||
`tcp port:6042 test ok. udp port:6042 test ok.`
|
||||
|
||||
如果某个端口不通,会输出 `port:xxxx test fail`的信息。
|
||||
10. 也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)。
|
||||
|
||||
|
||||
## 6. 遇到错误“Unexpected generic error in RPC”, 我怎么办?
|
||||
|
||||
## 6. 遇到错误“Unexpected generic error in RPC”或者"TDengine Error: Unable to resolve FQDN", 我怎么办?
|
||||
产生这个错误,是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用,请做如下检查:
|
||||
|
||||
1. 请检查连接的服务器的FQDN是否正确
|
||||
1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
|
||||
2. 如果网络配置有DNS server, 请检查是否正常工作
|
||||
3. 如果网络没有配置DNS server, 请检查客户端所在机器的hosts文件,查看该FQDN是否配置,并是否有正确的IP地址。
|
||||
4. 如果网络配置OK,从客户端所在机器,你需要能Ping该连接的FQDN,否则客户端是无法链接服务器的
|
||||
|
|
|
@ -189,6 +189,9 @@
|
|||
# max number of rows per log filters
|
||||
# numOfLogLines 10000000
|
||||
|
||||
# time of keeping log files, days
|
||||
# logKeepDays 0
|
||||
|
||||
# enable/disable async log
|
||||
# asyncLog 1
|
||||
|
||||
|
|
|
@ -232,8 +232,9 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
} else if (pInfo->type == TSDB_SQL_DROP_TABLE) {
|
||||
assert(pInfo->pDCLInfo->nTokens == 1);
|
||||
|
||||
if (tscSetTableFullName(pTableMetaInfo, pzName, pSql) != TSDB_CODE_SUCCESS) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
code = tscSetTableFullName(pTableMetaInfo, pzName, pSql);
|
||||
if(code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
} else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
|
||||
pzName->n = strdequote(pzName->z);
|
||||
|
@ -348,8 +349,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
|
||||
case TSDB_SQL_DESCRIBE_TABLE: {
|
||||
SStrToken* pToken = &pInfo->pDCLInfo->a[0];
|
||||
const char* msg2 = "table name is too long";
|
||||
const char* msg1 = "invalid table name";
|
||||
const char* msg2 = "table name is too long";
|
||||
|
||||
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||
|
@ -710,7 +711,9 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
|
|||
}
|
||||
|
||||
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) {
|
||||
const char* msg = "name too long";
|
||||
const char* msg1 = "name too long";
|
||||
const char* msg2 = "invalid db name";
|
||||
const char *msg = msg1;
|
||||
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
@ -728,15 +731,13 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableNa
|
|||
} else { // get current DB name first, then set it into path
|
||||
SStrToken t = {0};
|
||||
getCurrentDBName(pSql, &t);
|
||||
|
||||
if (t.n == 0) {
|
||||
msg = msg2;
|
||||
}
|
||||
code = setObjFullName(pTableMetaInfo->name, NULL, &t, pzTableName, NULL);
|
||||
}
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
|
||||
}
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
free(oldName);
|
||||
return code;
|
||||
}
|
||||
|
@ -1072,7 +1073,7 @@ int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStr
|
|||
|
||||
/* db name is not specified, the tableName does not include db name */
|
||||
if (pDB != NULL) {
|
||||
if (pDB->n >= TSDB_ACCT_LEN + TSDB_DB_NAME_LEN) {
|
||||
if (pDB->n >= TSDB_ACCT_LEN + TSDB_DB_NAME_LEN || pDB->n == 0) {
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
|
@ -1596,13 +1597,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
|
||||
|
||||
if (pItem->pNode->pParam != NULL) {
|
||||
SStrToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo;
|
||||
if (pToken->z == NULL || pToken->n == 0) {
|
||||
tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0];
|
||||
SStrToken* pToken = &pParamElem->pNode->colInfo;
|
||||
short sqlOptr = pParamElem->pNode->nSQLOptr;
|
||||
if ((pToken->z == NULL || pToken->n == 0)
|
||||
&& (TK_INTEGER != sqlOptr)) /*select count(1) from table*/ {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
}
|
||||
|
||||
tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0];
|
||||
if (pParamElem->pNode->nSQLOptr == TK_ALL) {
|
||||
if (sqlOptr == TK_ALL) {
|
||||
// select table.*
|
||||
// check if the table name is valid or not
|
||||
SStrToken tmpToken = pParamElem->pNode->colInfo;
|
||||
|
@ -1614,6 +1616,21 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
|
||||
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false);
|
||||
} else if (sqlOptr == TK_INTEGER) { // select count(1) from table1
|
||||
char buf[8] = {0};
|
||||
int64_t val = -1;
|
||||
tVariant* pVariant = &pParamElem->pNode->val;
|
||||
if (pVariant->nType == TSDB_DATA_TYPE_BIGINT) {
|
||||
tVariantDump(pVariant, buf, TSDB_DATA_TYPE_BIGINT, true);
|
||||
val = GET_INT64_VAL(buf);
|
||||
}
|
||||
if (val == 1) {
|
||||
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
|
||||
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false);
|
||||
} else {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
}
|
||||
} else {
|
||||
// count the number of meters created according to the super table
|
||||
if (getColumnIndexByName(pCmd, pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
|
||||
|
@ -2741,25 +2758,29 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
|
|||
}
|
||||
}
|
||||
|
||||
int32_t retVal = TSDB_CODE_SUCCESS;
|
||||
if (pExpr->nSQLOptr == TK_LE || pExpr->nSQLOptr == TK_LT) {
|
||||
tVariantDump(&pRight->val, (char*)&pColumnFilter->upperBndd, colType, false);
|
||||
} else { // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd
|
||||
if (colType == TSDB_DATA_TYPE_BINARY) {
|
||||
retVal = tVariantDump(&pRight->val, (char*)&pColumnFilter->upperBndd, colType, false);
|
||||
|
||||
// TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd
|
||||
} else if (colType == TSDB_DATA_TYPE_BINARY) {
|
||||
pColumnFilter->pz = (int64_t)calloc(1, pRight->val.nLen + TSDB_NCHAR_SIZE);
|
||||
pColumnFilter->len = pRight->val.nLen;
|
||||
retVal = tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false);
|
||||
|
||||
tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false);
|
||||
} else if (colType == TSDB_DATA_TYPE_NCHAR) {
|
||||
// pRight->val.nLen + 1 is larger than the actual nchar string length
|
||||
pColumnFilter->pz = (int64_t)calloc(1, (pRight->val.nLen + 1) * TSDB_NCHAR_SIZE);
|
||||
|
||||
tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false);
|
||||
|
||||
retVal = tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false);
|
||||
size_t len = twcslen((wchar_t*)pColumnFilter->pz);
|
||||
pColumnFilter->len = len * TSDB_NCHAR_SIZE;
|
||||
|
||||
} else {
|
||||
tVariantDump(&pRight->val, (char*)&pColumnFilter->lowerBndd, colType, false);
|
||||
retVal = tVariantDump(&pRight->val, (char*)&pColumnFilter->lowerBndd, colType, false);
|
||||
}
|
||||
|
||||
if (retVal != TSDB_CODE_SUCCESS) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
|
||||
}
|
||||
|
||||
switch (pExpr->nSQLOptr) {
|
||||
|
@ -4424,7 +4445,6 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
const int32_t DEFAULT_TABLE_INDEX = 0;
|
||||
|
||||
const char* msg1 = "invalid table name";
|
||||
const char* msg2 = "table name too long";
|
||||
const char* msg3 = "manipulation of tag available for super table";
|
||||
const char* msg4 = "set tag value only available for table";
|
||||
const char* msg5 = "only support add one tag";
|
||||
|
@ -4457,7 +4477,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
}
|
||||
|
||||
if (tscSetTableFullName(pTableMetaInfo, &(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
int32_t ret = tscGetTableMeta(pSql, pTableMetaInfo);
|
||||
|
@ -5734,7 +5754,6 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) {
|
|||
|
||||
int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo) {
|
||||
const char* msg1 = "invalid table name";
|
||||
const char* msg2 = "table name too long";
|
||||
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);
|
||||
|
@ -5755,7 +5774,7 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p
|
|||
}
|
||||
|
||||
if (tscSetTableFullName(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
if (!validateTableColumnInfo(pFieldList, pCmd) ||
|
||||
|
@ -5810,7 +5829,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
|
|||
}
|
||||
|
||||
if (tscSetTableFullName(pStableMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
// get meter meta from mnode
|
||||
|
@ -6002,7 +6021,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
|
|||
assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0));
|
||||
|
||||
const char* msg0 = "invalid table name";
|
||||
const char* msg1 = "table name too long";
|
||||
//const char* msg1 = "table name too long";
|
||||
const char* msg2 = "point interpolation query needs timestamp";
|
||||
const char* msg5 = "fill only available for interval query";
|
||||
const char* msg6 = "start(end) time of query range required or time range too large";
|
||||
|
@ -6075,7 +6094,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
|
|||
|
||||
SStrToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz};
|
||||
if (tscSetTableFullName(pTableMetaInfo1, &t, pSql) != TSDB_CODE_SUCCESS) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
tVariant* pTableItem1 = &pQuerySql->from->a[i + 1].pVar;
|
||||
|
|
|
@ -2019,7 +2019,8 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) {
|
||||
int tscProcessDropDbRsp(SSqlObj *pSql) {
|
||||
pSql->pTscObj->db[0] = 0;
|
||||
taosCacheEmpty(tscCacheHandle);
|
||||
return 0;
|
||||
}
|
||||
|
@ -2095,6 +2096,10 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
|
|||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
|
||||
SRetrieveTableRsp *pRetrieve = (SRetrieveTableRsp *)pRes->pRsp;
|
||||
if (pRetrieve == NULL) {
|
||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return pRes->code;
|
||||
}
|
||||
|
||||
pRes->numOfRows = htonl(pRetrieve->numOfRows);
|
||||
pRes->precision = htons(pRetrieve->precision);
|
||||
|
|
|
@ -325,8 +325,6 @@ void tdResetKVRowBuilder(SKVRowBuilder *pBuilder);
|
|||
SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
|
||||
|
||||
static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) {
|
||||
ASSERT(pBuilder->nCols == 0 || colId > pBuilder->pColIdx[pBuilder->nCols - 1].colId);
|
||||
|
||||
if (pBuilder->nCols >= pBuilder->tCols) {
|
||||
pBuilder->tCols *= 2;
|
||||
pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
|
||||
|
|
|
@ -158,6 +158,7 @@ extern char buildinfo[];
|
|||
// log
|
||||
extern int32_t tsAsyncLog;
|
||||
extern int32_t tsNumOfLogLines;
|
||||
extern int32_t tsLogKeepDays;
|
||||
extern int32_t dDebugFlag;
|
||||
extern int32_t vDebugFlag;
|
||||
extern int32_t mDebugFlag;
|
||||
|
|
|
@ -254,7 +254,7 @@ bool taosCfgDynamicOptions(char *msg) {
|
|||
//if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
|
||||
if (cfg->valType != TAOS_CFG_VTYPE_INT32) continue;
|
||||
|
||||
int32_t cfgLen = strlen(cfg->option);
|
||||
int32_t cfgLen = (int32_t)strlen(cfg->option);
|
||||
if (cfgLen != olen) continue;
|
||||
if (strncasecmp(option, cfg->option, olen) != 0) continue;
|
||||
*((int32_t *)cfg->ptr) = vint;
|
||||
|
@ -1013,12 +1013,22 @@ static void doInitGlobalConfig(void) {
|
|||
cfg.ptr = &tsNumOfLogLines;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT;
|
||||
cfg.minValue = 10000;
|
||||
cfg.minValue = 1000;
|
||||
cfg.maxValue = 2000000000;
|
||||
cfg.ptrLength = 0;
|
||||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
cfg.option = "logKeepDays";
|
||||
cfg.ptr = &tsLogKeepDays;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 365000;
|
||||
cfg.ptrLength = 0;
|
||||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
cfg.option = "asyncLog";
|
||||
cfg.ptr = &tsAsyncLog;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT16;
|
||||
|
|
|
@ -93,14 +93,13 @@
|
|||
<version>3.6.1</version>
|
||||
<configuration>
|
||||
<encoding>UTF-8</encoding>
|
||||
<source>11</source>
|
||||
<target>11</target>
|
||||
<source>8</source>
|
||||
<target>8</target>
|
||||
<debug>true</debug>
|
||||
<showDeprecation>true</showDeprecation>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-source-plugin</artifactId>
|
||||
|
|
|
@ -91,7 +91,7 @@ public class TSDBConnection implements Connection {
|
|||
/**
|
||||
* @param cfgDirPath
|
||||
* @return return the config dir
|
||||
* **/
|
||||
**/
|
||||
private File loadConfigDir(String cfgDirPath) {
|
||||
if (cfgDirPath == null)
|
||||
return loadDefaultConfigDir();
|
||||
|
@ -103,7 +103,7 @@ public class TSDBConnection implements Connection {
|
|||
|
||||
/**
|
||||
* @return search the default config dir, if the config dir is not exist will return null
|
||||
* */
|
||||
*/
|
||||
private File loadDefaultConfigDir() {
|
||||
File cfgDir;
|
||||
File cfgDir_linux = new File("/etc/taos");
|
||||
|
@ -132,7 +132,9 @@ public class TSDBConnection implements Connection {
|
|||
|
||||
public Statement createStatement() throws SQLException {
|
||||
if (!this.connector.isClosed()) {
|
||||
return new TSDBStatement(this.connector);
|
||||
TSDBStatement statement = new TSDBStatement(this, this.connector);
|
||||
statement.setConnection(this);
|
||||
return statement;
|
||||
} else {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
|
@ -153,7 +155,7 @@ public class TSDBConnection implements Connection {
|
|||
|
||||
public PreparedStatement prepareStatement(String sql) throws SQLException {
|
||||
if (!this.connector.isClosed()) {
|
||||
return new TSDBPreparedStatement(this.connector, sql);
|
||||
return new TSDBPreparedStatement(this, this.connector, sql);
|
||||
} else {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
|
|
|
@ -42,8 +42,8 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
|||
|
||||
private SavedPreparedStatement savedPreparedStatement;
|
||||
|
||||
TSDBPreparedStatement(TSDBJNIConnector connecter, String sql) {
|
||||
super(connecter);
|
||||
TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) {
|
||||
super(connection, connecter);
|
||||
init(sql);
|
||||
}
|
||||
|
||||
|
|
|
@ -21,10 +21,14 @@ import java.util.List;
|
|||
public class TSDBStatement implements Statement {
|
||||
private TSDBJNIConnector connecter = null;
|
||||
|
||||
/** To store batched commands */
|
||||
/**
|
||||
* To store batched commands
|
||||
*/
|
||||
protected List<String> batchedArgs;
|
||||
|
||||
/** Timeout for a query */
|
||||
/**
|
||||
* Timeout for a query
|
||||
*/
|
||||
protected int queryTimeout = 0;
|
||||
|
||||
private Long pSql = 0l;
|
||||
|
@ -35,7 +39,14 @@ public class TSDBStatement implements Statement {
|
|||
private boolean isClosed = true;
|
||||
private int affectedRows = 0;
|
||||
|
||||
TSDBStatement(TSDBJNIConnector connecter) {
|
||||
private TSDBConnection connection;
|
||||
|
||||
public void setConnection(TSDBConnection connection) {
|
||||
this.connection = connection;
|
||||
}
|
||||
|
||||
TSDBStatement(TSDBConnection connection, TSDBJNIConnector connecter) {
|
||||
this.connection = connection;
|
||||
this.connecter = connecter;
|
||||
this.isClosed = false;
|
||||
}
|
||||
|
@ -256,6 +267,8 @@ public class TSDBStatement implements Statement {
|
|||
}
|
||||
|
||||
public Connection getConnection() throws SQLException {
|
||||
if (this.connecter != null)
|
||||
return this.connection;
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
|
|
|
@ -23,6 +23,7 @@ import java.sql.SQLException;
|
|||
public class SqlSyntaxValidator {
|
||||
|
||||
private TSDBConnection tsdbConnection;
|
||||
|
||||
public SqlSyntaxValidator(Connection connection) {
|
||||
this.tsdbConnection = (TSDBConnection) connection;
|
||||
}
|
||||
|
|
|
@ -10,7 +10,6 @@ public class BaseTest {
|
|||
private static boolean testCluster = false;
|
||||
private static TDNodes nodes = new TDNodes();
|
||||
|
||||
|
||||
@BeforeClass
|
||||
public static void setupEnv() {
|
||||
try{
|
||||
|
@ -19,11 +18,9 @@ public class BaseTest {
|
|||
nodes.getTDNode(1).setRunning(1);
|
||||
nodes.stop(1);
|
||||
}
|
||||
|
||||
nodes.setTestCluster(testCluster);
|
||||
nodes.deploy(1);
|
||||
nodes.start(1);
|
||||
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
|
|
|
@ -7,13 +7,11 @@ import org.junit.Test;
|
|||
import java.sql.*;
|
||||
import java.util.Properties;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.*;
|
||||
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
public class BatchInsertTest extends BaseTest {
|
||||
|
||||
|
|
|
@ -0,0 +1,81 @@
|
|||
package com.taosdata.jdbc;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.sql.*;
|
||||
import java.util.Properties;
|
||||
import java.util.Random;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.*;
|
||||
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
public class QueryDataTest extends BaseTest {
|
||||
|
||||
static Connection connection = null;
|
||||
static Statement statement = null;
|
||||
static String dbName = "test";
|
||||
static String stbName = "meters";
|
||||
static String host = "localhost";
|
||||
static int numOfTables = 30;
|
||||
final static int numOfRecordsPerTable = 1000;
|
||||
static long ts = 1496732686000L;
|
||||
final static String tablePrefix = "t";
|
||||
|
||||
@Before
|
||||
public void createDatabase() throws SQLException {
|
||||
try {
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
} catch (ClassNotFoundException e) {
|
||||
return;
|
||||
}
|
||||
|
||||
Properties properties = new Properties();
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
|
||||
|
||||
statement = connection.createStatement();
|
||||
statement.executeUpdate("drop database if exists " + dbName);
|
||||
statement.executeUpdate("create database if not exists " + dbName);
|
||||
statement.executeUpdate("use " + dbName);
|
||||
|
||||
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(6))";
|
||||
statement.executeUpdate(createTableSql);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testQueryBinaryData() throws SQLException{
|
||||
|
||||
String insertSql = "insert into " + stbName + " values(now, 'taosda')";
|
||||
System.out.println(insertSql);
|
||||
|
||||
statement.executeUpdate(insertSql);
|
||||
|
||||
String querySql = "select * from " + stbName;
|
||||
ResultSet rs = statement.executeQuery(querySql);
|
||||
|
||||
while(rs.next()) {
|
||||
String name = rs.getString(2) + "001";
|
||||
System.out.println("name = " + name);
|
||||
assertEquals(name, "taosda001");
|
||||
}
|
||||
rs.close();
|
||||
}
|
||||
|
||||
|
||||
@After
|
||||
public void close() throws Exception {
|
||||
statement.close();
|
||||
connection.close();
|
||||
Thread.sleep(10);
|
||||
}
|
||||
|
||||
}
|
|
@ -34,7 +34,6 @@ public class SelectTest extends BaseTest {
|
|||
statement.executeUpdate("drop database if exists " + dbName);
|
||||
statement.executeUpdate("create database if not exists " + dbName);
|
||||
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -66,6 +65,5 @@ public class SelectTest extends BaseTest {
|
|||
statement.close();
|
||||
connection.close();
|
||||
Thread.sleep(10);
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,108 @@
|
|||
package com.taosdata.jdbc.cases;
|
||||
|
||||
import com.taosdata.jdbc.lib.TSDBCommon;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class BatchInsertTest {
|
||||
|
||||
static String host = "localhost";
|
||||
static String dbName = "test";
|
||||
static String stbName = "meters";
|
||||
static int numOfTables = 30;
|
||||
final static int numOfRecordsPerTable = 1000;
|
||||
static long ts = 1496732686000L;
|
||||
final static String tablePrefix = "t";
|
||||
|
||||
private Connection connection;
|
||||
|
||||
@Before
|
||||
public void before() {
|
||||
try {
|
||||
connection = TSDBCommon.getConn(host);
|
||||
TSDBCommon.createDatabase(connection, dbName);
|
||||
TSDBCommon.createStable(connection, stbName);
|
||||
TSDBCommon.createTables(connection, numOfTables, stbName, tablePrefix);
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBatchInsert(){
|
||||
ExecutorService executorService = Executors.newFixedThreadPool(numOfTables);
|
||||
for (int i = 0; i < numOfTables; i++) {
|
||||
final int index = i;
|
||||
executorService.execute(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
long startTime = System.currentTimeMillis();
|
||||
Statement statement = connection.createStatement(); // get statement
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("INSERT INTO " + tablePrefix + index + " VALUES");
|
||||
Random rand = new Random();
|
||||
for (int j = 1; j <= numOfRecordsPerTable; j++) {
|
||||
sb.append("(" + (ts + j) + ", ");
|
||||
sb.append(rand.nextInt(100) + ", ");
|
||||
sb.append(rand.nextInt(100) + ", ");
|
||||
sb.append(rand.nextInt(100) + ")");
|
||||
}
|
||||
statement.addBatch(sb.toString());
|
||||
statement.executeBatch();
|
||||
long endTime = System.currentTimeMillis();
|
||||
System.out.println("Thread " + index + " takes " + (endTime - startTime) + " microseconds");
|
||||
connection.commit();
|
||||
statement.close();
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
executorService.shutdown();
|
||||
try {
|
||||
executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
|
||||
try{
|
||||
Statement statement = connection.createStatement();
|
||||
ResultSet rs = statement.executeQuery("select * from meters");
|
||||
int num = 0;
|
||||
while (rs.next()) {
|
||||
num++;
|
||||
}
|
||||
assertEquals(num, numOfTables * numOfRecordsPerTable);
|
||||
rs.close();
|
||||
}catch (Exception e){
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
@After
|
||||
public void after() {
|
||||
try {
|
||||
if (connection != null)
|
||||
connection.close();
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
package com.taosdata.jdbc.lib;
|
||||
|
||||
import com.taosdata.jdbc.TSDBDriver;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
import java.util.Properties;
|
||||
|
||||
public class TSDBCommon {
|
||||
|
||||
public static Connection getConn(String host) throws SQLException, ClassNotFoundException {
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
Properties properties = new Properties();
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
return DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
|
||||
}
|
||||
|
||||
public static void createDatabase(Connection connection, String dbName) throws SQLException {
|
||||
Statement statement = connection.createStatement();
|
||||
statement.executeUpdate("drop database if exists " + dbName);
|
||||
statement.executeUpdate("create database if not exists " + dbName);
|
||||
statement.executeUpdate("use " + dbName);
|
||||
statement.close();
|
||||
}
|
||||
|
||||
public static void createStable(Connection connection, String stbName) throws SQLException {
|
||||
Statement statement = connection.createStatement();
|
||||
String createTableSql = "create table " + stbName + "(ts timestamp, f1 int, f2 int, f3 int) tags(areaid int, loc binary(20))";
|
||||
statement.executeUpdate(createTableSql);
|
||||
statement.close();
|
||||
}
|
||||
|
||||
public static void createTables(Connection connection, int numOfTables, String stbName,String tablePrefix) throws SQLException {
|
||||
Statement statement = connection.createStatement();
|
||||
for(int i = 0; i < numOfTables; i++) {
|
||||
String loc = i % 2 == 0 ? "beijing" : "shanghai";
|
||||
String createSubTalbesSql = "create table " + tablePrefix + i + " using " + stbName + " tags(" + i + ", '" + loc + "')";
|
||||
statement.executeUpdate(createSubTalbesSql);
|
||||
}
|
||||
statement.close();
|
||||
}
|
||||
}
|
|
@ -16,11 +16,13 @@
|
|||
#define _DEFAULT_SOURCE
|
||||
#include "os.h"
|
||||
#include "taosdef.h"
|
||||
#include "taosmsg.h"
|
||||
#include "tglobal.h"
|
||||
#include "mnode.h"
|
||||
#include "http.h"
|
||||
#include "tmqtt.h"
|
||||
#include "monitor.h"
|
||||
#include "dnode.h"
|
||||
#include "dnodeInt.h"
|
||||
#include "dnodeModule.h"
|
||||
|
||||
|
@ -129,17 +131,34 @@ void dnodeProcessModuleStatus(uint32_t moduleStatus) {
|
|||
for (int32_t module = TSDB_MOD_MNODE; module < TSDB_MOD_HTTP; ++module) {
|
||||
bool enableModule = moduleStatus & (1 << module);
|
||||
if (!tsModule[module].enable && enableModule) {
|
||||
dInfo("module status:%u is received, start %s module", tsModuleStatus, tsModule[module].name);
|
||||
dInfo("module status:%u is set, start %s module", moduleStatus, tsModule[module].name);
|
||||
tsModule[module].enable = true;
|
||||
dnodeSetModuleStatus(module);
|
||||
(*tsModule[module].startFp)();
|
||||
}
|
||||
|
||||
if (tsModule[module].enable && !enableModule) {
|
||||
dInfo("module status:%u is received, stop %s module", tsModuleStatus, tsModule[module].name);
|
||||
dInfo("module status:%u is set, stop %s module", moduleStatus, tsModule[module].name);
|
||||
tsModule[module].enable = false;
|
||||
dnodeUnSetModuleStatus(module);
|
||||
(*tsModule[module].stopFp)();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool dnodeCheckMnodeStarting() {
|
||||
if (tsModuleStatus & TSDB_MOD_MNODE) return false;
|
||||
|
||||
SDMMnodeInfos *mnodes = dnodeGetMnodeInfos();
|
||||
for (int32_t i = 0; i < mnodes->nodeNum; ++i) {
|
||||
SDMMnodeInfo *node = &mnodes->nodeInfos[i];
|
||||
if (node->nodeId == dnodeGetDnodeId()) {
|
||||
uint32_t moduleStatus = tsModuleStatus | (1 << TSDB_MOD_MNODE);;
|
||||
dInfo("start mnode module, module status:%d, new status:%d", tsModuleStatus, moduleStatus);
|
||||
dnodeProcessModuleStatus(moduleStatus);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
|
|
@ -154,15 +154,15 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char
|
|||
rpcMsg.contLen = sizeof(SDMAuthMsg);
|
||||
rpcMsg.msgType = TSDB_MSG_TYPE_DM_AUTH;
|
||||
|
||||
dDebug("user:%s, send auth msg to mnode", user);
|
||||
dDebug("user:%s, send auth msg to mnodes", user);
|
||||
SRpcMsg rpcRsp = {0};
|
||||
dnodeSendMsgToDnodeRecv(&rpcMsg, &rpcRsp);
|
||||
|
||||
if (rpcRsp.code != 0) {
|
||||
dError("user:%s, auth msg received from mnode, error:%s", user, tstrerror(rpcRsp.code));
|
||||
dError("user:%s, auth msg received from mnodes, error:%s", user, tstrerror(rpcRsp.code));
|
||||
} else {
|
||||
SDMAuthRsp *pRsp = rpcRsp.pCont;
|
||||
dDebug("user:%s, auth msg received from mnode", user);
|
||||
dDebug("user:%s, auth msg received from mnodes", user);
|
||||
memcpy(secret, pRsp->secret, TSDB_KEY_LEN);
|
||||
memcpy(ckey, pRsp->ckey, TSDB_KEY_LEN);
|
||||
*spi = pRsp->spi;
|
||||
|
|
|
@ -43,6 +43,7 @@ void dnodeGetMnodeEpSetForPeer(void *epSet);
|
|||
void dnodeGetMnodeEpSetForShell(void *epSet);
|
||||
void * dnodeGetMnodeInfos();
|
||||
int32_t dnodeGetDnodeId();
|
||||
bool dnodeCheckMnodeStarting();
|
||||
|
||||
void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg));
|
||||
void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg);
|
||||
|
|
|
@ -103,6 +103,9 @@ typedef struct {
|
|||
|
||||
typedef void* tsync_h;
|
||||
|
||||
int32_t syncInit();
|
||||
void syncCleanUp();
|
||||
|
||||
tsync_h syncStart(const SSyncInfo *);
|
||||
void syncStop(tsync_h shandle);
|
||||
int32_t syncReconfig(tsync_h shandle, const SSyncCfg *);
|
||||
|
|
|
@ -786,7 +786,7 @@ void read_history() {
|
|||
}
|
||||
|
||||
void write_history() {
|
||||
char f_history[128];
|
||||
char f_history[TSDB_FILENAME_LEN];
|
||||
get_history_path(f_history);
|
||||
|
||||
FILE *f = fopen(f_history, "w");
|
||||
|
|
|
@ -409,7 +409,7 @@ void set_terminal_mode() {
|
|||
}
|
||||
}
|
||||
|
||||
void get_history_path(char *history) { sprintf(history, "%s/%s", getenv("HOME"), HISTORY_FILE); }
|
||||
void get_history_path(char *history) { snprintf(history, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), HISTORY_FILE); }
|
||||
|
||||
void clearScreen(int ecmd_pos, int cursor_pos) {
|
||||
struct winsize w;
|
||||
|
|
|
@ -211,8 +211,8 @@ static int32_t mnodeCreateRootAcct() {
|
|||
strcpy(pAcct->user, TSDB_DEFAULT_USER);
|
||||
taosEncryptPass((uint8_t *)TSDB_DEFAULT_PASS, strlen(TSDB_DEFAULT_PASS), pAcct->pass);
|
||||
pAcct->cfg = (SAcctCfg){
|
||||
.maxUsers = 10,
|
||||
.maxDbs = 64,
|
||||
.maxUsers = 128,
|
||||
.maxDbs = 128,
|
||||
.maxTimeSeries = INT32_MAX,
|
||||
.maxConnections = 1024,
|
||||
.maxStreams = 1000,
|
||||
|
|
|
@ -242,6 +242,7 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
|
|||
return TSDB_CODE_MND_INVALID_DB_OPTION;
|
||||
}
|
||||
|
||||
#if 0
|
||||
if (pCfg->daysToKeep2 < TSDB_MIN_KEEP || pCfg->daysToKeep2 > pCfg->daysToKeep) {
|
||||
mError("invalid db option daysToKeep2:%d valid range: [%d, %d]", pCfg->daysToKeep, TSDB_MIN_KEEP, pCfg->daysToKeep);
|
||||
return TSDB_CODE_MND_INVALID_DB_OPTION;
|
||||
|
@ -251,6 +252,7 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
|
|||
mError("invalid db option daysToKeep1:%d valid range: [%d, %d]", pCfg->daysToKeep1, TSDB_MIN_KEEP, pCfg->daysToKeep2);
|
||||
return TSDB_CODE_MND_INVALID_DB_OPTION;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (pCfg->maxRowsPerFileBlock < TSDB_MIN_MAX_ROW_FBLOCK || pCfg->maxRowsPerFileBlock > TSDB_MAX_MAX_ROW_FBLOCK) {
|
||||
mError("invalid db option maxRowsPerFileBlock:%d valid range: [%d, %d]", pCfg->maxRowsPerFileBlock,
|
||||
|
|
|
@ -72,6 +72,7 @@ static int32_t mnodeDnodeActionInsert(SSdbOper *pOper) {
|
|||
pDnode->lastAccess = tsAccessSquence;
|
||||
}
|
||||
|
||||
mInfo("dnode:%d, fqdn:%s ep:%s port:%d, do insert action", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@ -68,6 +68,7 @@ static int32_t mnodeMnodeActionInsert(SSdbOper *pOper) {
|
|||
pDnode->isMgmt = true;
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
|
||||
mInfo("mnode:%d, fqdn:%s ep:%s port:%d, do insert action", pMnode->mnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include "tqueue.h"
|
||||
#include "twal.h"
|
||||
#include "tsync.h"
|
||||
#include "ttimer.h"
|
||||
#include "tglobal.h"
|
||||
#include "dnode.h"
|
||||
#include "mnode.h"
|
||||
|
@ -64,6 +65,7 @@ typedef struct _SSdbTable {
|
|||
int32_t (*encodeFp)(SSdbOper *pOper);
|
||||
int32_t (*destroyFp)(SSdbOper *pOper);
|
||||
int32_t (*restoredFp)();
|
||||
pthread_mutex_t mutex;
|
||||
} SSdbTable;
|
||||
|
||||
typedef struct {
|
||||
|
@ -88,6 +90,8 @@ typedef struct {
|
|||
SSdbWriteWorker *writeWorker;
|
||||
} SSdbWriteWorkerPool;
|
||||
|
||||
extern void * tsMnodeTmr;
|
||||
static void * tsUpdateSyncTmr;
|
||||
static SSdbObject tsSdbObj = {0};
|
||||
static taos_qset tsSdbWriteQset;
|
||||
static taos_qall tsSdbWriteQall;
|
||||
|
@ -290,11 +294,17 @@ static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
|
|||
taosFreeQitem(pOper);
|
||||
}
|
||||
|
||||
static void sdbUpdateSyncTmrFp(void *param, void *tmrId) { sdbUpdateSync(); }
|
||||
|
||||
void sdbUpdateSync() {
|
||||
if (!mnodeIsRunning()) {
|
||||
mDebug("mnode not start yet, update sync info later");
|
||||
if (dnodeCheckMnodeStarting()) {
|
||||
taosTmrReset(sdbUpdateSyncTmrFp, 1000, NULL, tsMnodeTmr, &tsUpdateSyncTmr);
|
||||
}
|
||||
return;
|
||||
}
|
||||
mDebug("update sync info in sdb");
|
||||
|
||||
SSyncCfg syncCfg = {0};
|
||||
int32_t index = 0;
|
||||
|
@ -387,8 +397,6 @@ int32_t sdbInit() {
|
|||
tsSdbObj.role = TAOS_SYNC_ROLE_MASTER;
|
||||
}
|
||||
|
||||
sdbUpdateSync();
|
||||
|
||||
tsSdbObj.status = SDB_STATUS_SERVING;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -448,8 +456,9 @@ static void *sdbGetRowMeta(SSdbTable *pTable, void *key) {
|
|||
}
|
||||
|
||||
void **ppRow = (void **)taosHashGet(pTable->iHandle, key, keySize);
|
||||
if (ppRow == NULL) return NULL;
|
||||
return *ppRow;
|
||||
if (ppRow != NULL) return *ppRow;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *sdbGetRowMetaFromObj(SSdbTable *pTable, void *key) {
|
||||
|
@ -457,13 +466,14 @@ static void *sdbGetRowMetaFromObj(SSdbTable *pTable, void *key) {
|
|||
}
|
||||
|
||||
void *sdbGetRow(void *handle, void *key) {
|
||||
SSdbTable *pTable = handle;
|
||||
|
||||
pthread_mutex_lock(&pTable->mutex);
|
||||
void *pRow = sdbGetRowMeta(handle, key);
|
||||
if (pRow) {
|
||||
sdbIncRef(handle, pRow);
|
||||
if (pRow) sdbIncRef(handle, pRow);
|
||||
pthread_mutex_unlock(&pTable->mutex);
|
||||
|
||||
return pRow;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void *sdbGetRowFromObj(SSdbTable *pTable, void *key) {
|
||||
|
@ -478,7 +488,9 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
|
|||
keySize = strlen((char *)key);
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&pTable->mutex);
|
||||
taosHashPut(pTable->iHandle, key, keySize, &pOper->pObj, sizeof(int64_t));
|
||||
pthread_mutex_unlock(&pTable->mutex);
|
||||
|
||||
sdbIncRef(pTable, pOper->pObj);
|
||||
atomic_add_fetch_32(&pTable->numOfRows, 1);
|
||||
|
@ -519,7 +531,10 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
|
|||
keySize = strlen((char *)key);
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&pTable->mutex);
|
||||
taosHashRemove(pTable->iHandle, key, keySize);
|
||||
pthread_mutex_unlock(&pTable->mutex);
|
||||
|
||||
atomic_sub_fetch_32(&pTable->numOfRows, 1);
|
||||
|
||||
sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName,
|
||||
|
@ -612,8 +627,8 @@ static int sdbWrite(void *param, void *data, int type) {
|
|||
} else if (action == SDB_ACTION_DELETE) {
|
||||
void *pRow = sdbGetRowMeta(pTable, pHead->cont);
|
||||
if (pRow == NULL) {
|
||||
sdbError("table:%s, failed to get object:%s from wal while dispose delete action", pTable->tableName,
|
||||
pHead->cont);
|
||||
sdbDebug("table:%s, object:%s not exist in hash, ignore delete action", pTable->tableName,
|
||||
sdbGetKeyStr(pTable, pHead->cont));
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
SSdbOper oper = {.table = pTable, .pObj = pRow};
|
||||
|
@ -621,8 +636,8 @@ static int sdbWrite(void *param, void *data, int type) {
|
|||
} else if (action == SDB_ACTION_UPDATE) {
|
||||
void *pRow = sdbGetRowMeta(pTable, pHead->cont);
|
||||
if (pRow == NULL) {
|
||||
sdbError("table:%s, failed to get object:%s from wal while dispose update action", pTable->tableName,
|
||||
pHead->cont);
|
||||
sdbDebug("table:%s, object:%s not exist in hash, ignore update action", pTable->tableName,
|
||||
sdbGetKeyStr(pTable, pHead->cont));
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable};
|
||||
|
@ -861,6 +876,7 @@ void *sdbOpenTable(SSdbTableDesc *pDesc) {
|
|||
|
||||
if (pTable == NULL) return NULL;
|
||||
|
||||
pthread_mutex_init(&pTable->mutex, NULL);
|
||||
tstrncpy(pTable->tableName, pDesc->tableName, SDB_TABLE_LEN);
|
||||
pTable->keyType = pDesc->keyType;
|
||||
pTable->tableId = pDesc->tableId;
|
||||
|
@ -908,6 +924,7 @@ void sdbCloseTable(void *handle) {
|
|||
|
||||
taosHashDestroyIter(pIter);
|
||||
taosHashCleanup(pTable->iHandle);
|
||||
pthread_mutex_destroy(&pTable->mutex);
|
||||
|
||||
sdbDebug("table:%s, is closed, numOfTables:%d", pTable->tableName, tsSdbObj.numOfTables);
|
||||
free(pTable);
|
||||
|
|
|
@ -24,6 +24,7 @@ extern "C" {
|
|||
void taosRemoveDir(char *rootDir);
|
||||
int taosMkDir(const char *pathname, mode_t mode);
|
||||
void taosRename(char* oldName, char *newName);
|
||||
void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -39,6 +39,7 @@
|
|||
#include <Winsock2.h>
|
||||
#include <time.h>
|
||||
#include <inttypes.h>
|
||||
#include <conio.h>
|
||||
#include "msvcProcess.h"
|
||||
#include "msvcDirect.h"
|
||||
#include "msvcFcntl.h"
|
||||
|
@ -58,8 +59,6 @@ extern "C" {
|
|||
int32_t BUILDIN_CTZL(uint64_t val);
|
||||
int32_t BUILDIN_CTZ(uint32_t val);
|
||||
|
||||
#define TAOS_OS_FUNC_DIR
|
||||
|
||||
#define TAOS_OS_FUNC_FILE
|
||||
#define TAOS_OS_FUNC_FILE_ISREG
|
||||
#define TAOS_OS_FUNC_FILE_ISDIR
|
||||
|
|
|
@ -18,8 +18,6 @@
|
|||
#include "tglobal.h"
|
||||
#include "tulog.h"
|
||||
|
||||
#ifndef TAOS_OS_FUNC_DIR
|
||||
|
||||
void taosRemoveDir(char *rootDir) {
|
||||
DIR *dir = opendir(rootDir);
|
||||
if (dir == NULL) return;
|
||||
|
@ -51,18 +49,54 @@ int taosMkDir(const char *path, mode_t mode) {
|
|||
}
|
||||
|
||||
void taosRename(char* oldName, char *newName) {
|
||||
if (0 == tsEnableVnodeBak) {
|
||||
uInfo("vnode backup not enabled");
|
||||
return;
|
||||
}
|
||||
|
||||
// if newName in not empty, rename return fail.
|
||||
// the newName must be empty or does not exist
|
||||
if (rename(oldName, newName)) {
|
||||
uError("%s is modify to %s fail, reason:%s", oldName, newName, strerror(errno));
|
||||
uError("failed to rename file %s to %s, reason:%s", oldName, newName, strerror(errno));
|
||||
} else {
|
||||
uInfo("%s is modify to %s success!", oldName, newName);
|
||||
uInfo("successfully to rename file %s to %s", oldName, newName);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays) {
|
||||
DIR *dir = opendir(rootDir);
|
||||
if (dir == NULL) return;
|
||||
|
||||
int64_t sec = taosGetTimestampSec();
|
||||
struct dirent *de = NULL;
|
||||
|
||||
while ((de = readdir(dir)) != NULL) {
|
||||
if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue;
|
||||
|
||||
char filename[1024];
|
||||
snprintf(filename, 1023, "%s/%s", rootDir, de->d_name);
|
||||
if (de->d_type & DT_DIR) {
|
||||
continue;
|
||||
} else {
|
||||
// struct stat fState;
|
||||
// if (stat(fname, &fState) < 0) {
|
||||
// continue;
|
||||
// }
|
||||
int32_t len = (int32_t)strlen(filename);
|
||||
int64_t fileSec = 0;
|
||||
for (int i = len - 1; i >= 0; i--) {
|
||||
if (filename[i] == '.') {
|
||||
fileSec = atoll(filename + i + 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (fileSec <= 100) continue;
|
||||
int32_t days = (int32_t)(ABS(sec - fileSec) / 86400 + 1);
|
||||
if (days > keepDays) {
|
||||
(void)remove(filename);
|
||||
uInfo("file:%s is removed, days:%d keepDays:%d", filename, days, keepDays);
|
||||
} else {
|
||||
uTrace("file:%s won't be removed, days:%d keepDays:%d", filename, days, keepDays);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
closedir(dir);
|
||||
rmdir(rootDir);
|
||||
}
|
||||
|
|
|
@ -1,31 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "os.h"
|
||||
#include "tulog.h"
|
||||
|
||||
void taosRemoveDir(char *rootDir) {
|
||||
uError("%s not implemented yet", __FUNCTION__);
|
||||
}
|
||||
|
||||
int taosMkDir(const char *path, mode_t mode) {
|
||||
uError("%s not implemented yet", __FUNCTION__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void taosMvDir(char* destDir, char *srcDir) {
|
||||
uError("%s not implemented yet", __FUNCTION__);
|
||||
}
|
|
@ -58,11 +58,20 @@ char *strsep(char **stringp, const char *delim) {
|
|||
|
||||
char *getpass(const char *prefix) {
|
||||
static char passwd[TSDB_KEY_LEN] = {0};
|
||||
|
||||
memset(passwd, 0, TSDB_KEY_LEN);
|
||||
printf("%s", prefix);
|
||||
scanf("%s", passwd);
|
||||
|
||||
char n = getchar();
|
||||
int32_t index = 0;
|
||||
char ch;
|
||||
while (index < TSDB_KEY_LEN) {
|
||||
ch = getch();
|
||||
if (ch == '\n' || ch == '\r') {
|
||||
break;
|
||||
} else {
|
||||
passwd[index++] = ch;
|
||||
}
|
||||
}
|
||||
|
||||
return passwd;
|
||||
}
|
||||
|
||||
|
|
|
@ -115,6 +115,10 @@ static void *monitorThreadFunc(void *param) {
|
|||
monitorDebug("starting to initialize monitor module ...");
|
||||
|
||||
while (1) {
|
||||
static int32_t accessTimes = 0;
|
||||
accessTimes++;
|
||||
taosMsleep(1000);
|
||||
|
||||
if (tsMonitor.quiting) {
|
||||
tsMonitor.state = MON_STATE_NOT_INIT;
|
||||
monitorInfo("monitor thread will quit, for taosd is quiting");
|
||||
|
@ -127,10 +131,6 @@ static void *monitorThreadFunc(void *param) {
|
|||
continue;
|
||||
}
|
||||
|
||||
static int32_t accessTimes = 0;
|
||||
accessTimes++;
|
||||
taosMsleep(1000);
|
||||
|
||||
if (dnodeGetDnodeId() <= 0) {
|
||||
monitorDebug("dnode not initialized, waiting for 3000 ms to start monitor module");
|
||||
continue;
|
||||
|
|
|
@ -723,7 +723,7 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
|
|||
}
|
||||
|
||||
taosHashPut(pRpc->hash, hashstr, size, (char *)&pConn, POINTER_BYTES);
|
||||
tDebug("%s %p server connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid);
|
||||
tDebug("%s %p server connection is allocated, uid:0x%x sid:%d key:%s", pRpc->label, pConn, pConn->linkUid, sid, hashstr);
|
||||
}
|
||||
|
||||
return pConn;
|
||||
|
|
|
@ -35,18 +35,14 @@ int tsSyncTcpThreads = 2;
|
|||
int tsMaxWatchFiles = 500;
|
||||
int tsMaxFwdInfo = 200;
|
||||
int tsSyncTimer = 1;
|
||||
//int sDebugFlag = 135;
|
||||
//char tsArbitrator[TSDB_FQDN_LEN] = {0};
|
||||
|
||||
// module global, not configurable
|
||||
int tsSyncNum; // number of sync in process in whole system
|
||||
char tsNodeFqdn[TSDB_FQDN_LEN];
|
||||
|
||||
static int tsNodeNum; // number of nodes in system
|
||||
static ttpool_h tsTcpPool;
|
||||
static void * syncTmrCtrl = NULL;
|
||||
static void * vgIdHash;
|
||||
static pthread_once_t syncModuleInit = PTHREAD_ONCE_INIT;
|
||||
|
||||
// local functions
|
||||
static void syncProcessSyncRequest(char *pMsg, SSyncPeer *pPeer);
|
||||
|
@ -75,7 +71,7 @@ char* syncRole[] = {
|
|||
"master"
|
||||
};
|
||||
|
||||
static void syncModuleInitFunc() {
|
||||
int32_t syncInit() {
|
||||
SPoolInfo info;
|
||||
|
||||
info.numOfThreads = tsSyncTcpThreads;
|
||||
|
@ -87,25 +83,52 @@ static void syncModuleInitFunc() {
|
|||
info.processIncomingConn = syncProcessIncommingConnection;
|
||||
|
||||
tsTcpPool = taosOpenTcpThreadPool(&info);
|
||||
if (tsTcpPool == NULL) return;
|
||||
if (tsTcpPool == NULL) {
|
||||
sError("failed to init tcpPool");
|
||||
return -1;
|
||||
}
|
||||
|
||||
syncTmrCtrl = taosTmrInit(1000, 50, 10000, "SYNC");
|
||||
if (syncTmrCtrl == NULL) {
|
||||
sError("failed to init tmrCtrl");
|
||||
taosCloseTcpThreadPool(tsTcpPool);
|
||||
tsTcpPool = NULL;
|
||||
return;
|
||||
return -1;
|
||||
}
|
||||
|
||||
vgIdHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true);
|
||||
if (vgIdHash == NULL) {
|
||||
sError("failed to init vgIdHash");
|
||||
taosTmrCleanUp(syncTmrCtrl);
|
||||
taosCloseTcpThreadPool(tsTcpPool);
|
||||
tsTcpPool = NULL;
|
||||
syncTmrCtrl = NULL;
|
||||
return;
|
||||
return -1;
|
||||
}
|
||||
|
||||
tstrncpy(tsNodeFqdn, tsLocalFqdn, sizeof(tsNodeFqdn));
|
||||
sInfo("sync module initialized successfully");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void syncCleanUp() {
|
||||
if (tsTcpPool) {
|
||||
taosCloseTcpThreadPool(tsTcpPool);
|
||||
tsTcpPool = NULL;
|
||||
}
|
||||
|
||||
if (syncTmrCtrl) {
|
||||
taosTmrCleanUp(syncTmrCtrl);
|
||||
syncTmrCtrl = NULL;
|
||||
}
|
||||
|
||||
if (vgIdHash) {
|
||||
taosHashCleanup(vgIdHash);
|
||||
vgIdHash = NULL;
|
||||
}
|
||||
|
||||
sInfo("sync module is cleaned up");
|
||||
}
|
||||
|
||||
void *syncStart(const SSyncInfo *pInfo) {
|
||||
|
@ -118,15 +141,6 @@ void *syncStart(const SSyncInfo *pInfo) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
pthread_once(&syncModuleInit, syncModuleInitFunc);
|
||||
if (tsTcpPool == NULL) {
|
||||
free(pNode);
|
||||
syncModuleInit = PTHREAD_ONCE_INIT;
|
||||
sError("failed to init sync module(%s)", tstrerror(errno));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
atomic_add_fetch_32(&tsNodeNum, 1);
|
||||
tstrncpy(pNode->path, pInfo->path, sizeof(pNode->path));
|
||||
pthread_mutex_init(&pNode->mutex, NULL);
|
||||
|
||||
|
@ -148,9 +162,10 @@ void *syncStart(const SSyncInfo *pInfo) {
|
|||
for (int i = 0; i < pCfg->replica; ++i) {
|
||||
const SNodeInfo *pNodeInfo = pCfg->nodeInfo + i;
|
||||
pNode->peerInfo[i] = syncAddPeer(pNode, pNodeInfo);
|
||||
if ((strcmp(pNodeInfo->nodeFqdn, tsNodeFqdn) == 0) && (pNodeInfo->nodePort == tsSyncPort))
|
||||
if ((strcmp(pNodeInfo->nodeFqdn, tsNodeFqdn) == 0) && (pNodeInfo->nodePort == tsSyncPort)) {
|
||||
pNode->selfIndex = i;
|
||||
}
|
||||
}
|
||||
|
||||
if (pNode->selfIndex < 0) {
|
||||
sInfo("vgId:%d, this node is not configured", pNode->vgId);
|
||||
|
@ -182,8 +197,9 @@ void *syncStart(const SSyncInfo *pInfo) {
|
|||
syncAddNodeRef(pNode);
|
||||
taosHashPut(vgIdHash, (const char *)&pNode->vgId, sizeof(int32_t), (char *)(&pNode), sizeof(SSyncNode *));
|
||||
|
||||
if (pNode->notifyRole)
|
||||
if (pNode->notifyRole) {
|
||||
(*pNode->notifyRole)(pNode->ahandle, nodeRole);
|
||||
}
|
||||
|
||||
return pNode;
|
||||
}
|
||||
|
@ -218,8 +234,8 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) {
|
|||
int i, j;
|
||||
|
||||
if (pNode == NULL) return TSDB_CODE_SYN_INVALID_CONFIG;
|
||||
sInfo("vgId:%d, reconfig, role:%s replica:%d old:%d", pNode->vgId, syncRole[nodeRole],
|
||||
pNewCfg->replica, pNode->replica);
|
||||
sInfo("vgId:%d, reconfig, role:%s replica:%d old:%d", pNode->vgId, syncRole[nodeRole], pNewCfg->replica,
|
||||
pNode->replica);
|
||||
|
||||
pthread_mutex_lock(&(pNode->mutex));
|
||||
|
||||
|
@ -252,17 +268,19 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) {
|
|||
newPeers[i] = pNode->peerInfo[j];
|
||||
}
|
||||
|
||||
if ((strcmp(pNewNode->nodeFqdn, tsNodeFqdn) == 0) && (pNewNode->nodePort == tsSyncPort))
|
||||
if ((strcmp(pNewNode->nodeFqdn, tsNodeFqdn) == 0) && (pNewNode->nodePort == tsSyncPort)) {
|
||||
pNode->selfIndex = i;
|
||||
}
|
||||
}
|
||||
|
||||
pNode->replica = pNewCfg->replica;
|
||||
pNode->quorum = pNewCfg->quorum;
|
||||
if (pNode->quorum > pNode->replica) pNode->quorum = pNode->replica;
|
||||
memcpy(pNode->peerInfo, newPeers, sizeof(SSyncPeer *) * pNewCfg->replica);
|
||||
|
||||
for (i = pNewCfg->replica; i < TAOS_SYNC_MAX_REPLICA; ++i)
|
||||
for (i = pNewCfg->replica; i < TAOS_SYNC_MAX_REPLICA; ++i) {
|
||||
pNode->peerInfo[i] = NULL;
|
||||
}
|
||||
|
||||
syncAddArbitrator(pNode);
|
||||
|
||||
|
@ -274,7 +292,8 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) {
|
|||
|
||||
pthread_mutex_unlock(&(pNode->mutex));
|
||||
|
||||
sInfo("vgId:%d, %d replicas are configured, quorum:%d role:%s", pNode->vgId, pNode->replica, pNode->quorum, syncRole[nodeRole]);
|
||||
sInfo("vgId:%d, %d replicas are configured, quorum:%d role:%s", pNode->vgId, pNode->replica, pNode->quorum,
|
||||
syncRole[nodeRole]);
|
||||
syncBroadcastStatus(pNode);
|
||||
|
||||
return 0;
|
||||
|
@ -423,29 +442,16 @@ static void syncAddArbitrator(SSyncNode *pNode) {
|
|||
pNode->peerInfo[TAOS_SYNC_MAX_REPLICA] = syncAddPeer(pNode, &nodeInfo);
|
||||
}
|
||||
|
||||
static void syncAddNodeRef(SSyncNode *pNode)
|
||||
{
|
||||
static void syncAddNodeRef(SSyncNode *pNode) {
|
||||
atomic_add_fetch_8(&pNode->refCount, 1);
|
||||
}
|
||||
|
||||
static void syncDecNodeRef(SSyncNode *pNode)
|
||||
{
|
||||
static void syncDecNodeRef(SSyncNode *pNode) {
|
||||
if (atomic_sub_fetch_8(&pNode->refCount, 1) == 0) {
|
||||
pthread_mutex_destroy(&pNode->mutex);
|
||||
taosTFree(pNode->pRecv);
|
||||
taosTFree(pNode->pSyncFwds);
|
||||
taosTFree(pNode);
|
||||
|
||||
if (atomic_sub_fetch_32(&tsNodeNum, 1) == 0) {
|
||||
if (tsTcpPool) taosCloseTcpThreadPool(tsTcpPool);
|
||||
if (syncTmrCtrl) taosTmrCleanUp(syncTmrCtrl);
|
||||
if (vgIdHash) taosHashCleanup(vgIdHash);
|
||||
syncTmrCtrl = NULL;
|
||||
tsTcpPool = NULL;
|
||||
vgIdHash = NULL;
|
||||
syncModuleInit = PTHREAD_ONCE_INIT;
|
||||
sDebug("sync module is cleaned up");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -506,7 +512,9 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
|
|||
int ret = strcmp(pPeer->fqdn, tsNodeFqdn);
|
||||
if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) {
|
||||
sDebug("%s, start to check peer connection", pPeer->id);
|
||||
taosTmrReset(syncCheckPeerConnection, 100 + (pNode->vgId*10)%100, pPeer, syncTmrCtrl, &pPeer->timer);
|
||||
int32_t checkMs = 100 + (pNode->vgId * 10) % 100;
|
||||
if (pNode->vgId) checkMs = tsStatusInterval * 2000 + 100;
|
||||
taosTmrReset(syncCheckPeerConnection, checkMs, pPeer, syncTmrCtrl, &pPeer->timer);
|
||||
}
|
||||
|
||||
syncAddNodeRef(pNode);
|
||||
|
@ -542,18 +550,20 @@ static void syncChooseMaster(SSyncNode *pNode) {
|
|||
sDebug("vgId:%d, choose master", pNode->vgId);
|
||||
|
||||
for (int i = 0; i < pNode->replica; ++i) {
|
||||
if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE)
|
||||
if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE) {
|
||||
onlineNum++;
|
||||
}
|
||||
}
|
||||
|
||||
if (onlineNum == pNode->replica) {
|
||||
// if all peers are online, peer with highest version shall be master
|
||||
index = 0;
|
||||
for (int i = 1; i < pNode->replica; ++i) {
|
||||
if (pNode->peerInfo[i]->version > pNode->peerInfo[index]->version)
|
||||
if (pNode->peerInfo[i]->version > pNode->peerInfo[index]->version) {
|
||||
index = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add arbitrator connection
|
||||
SSyncPeer *pArb = pNode->peerInfo[TAOS_SYNC_MAX_REPLICA];
|
||||
|
@ -568,11 +578,12 @@ static void syncChooseMaster(SSyncNode *pNode) {
|
|||
//slave with highest version shall be master
|
||||
pPeer = pNode->peerInfo[i];
|
||||
if (pPeer->role == TAOS_SYNC_ROLE_SLAVE || pPeer->role == TAOS_SYNC_ROLE_MASTER) {
|
||||
if (index < 0 || pPeer->version > pNode->peerInfo[index]->version)
|
||||
if (index < 0 || pPeer->version > pNode->peerInfo[index]->version) {
|
||||
index = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (index >= 0) {
|
||||
if (index == pNode->selfIndex) {
|
||||
|
@ -595,9 +606,10 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
|
|||
int replica = pNode->replica;
|
||||
|
||||
for (int i = 0; i < pNode->replica; ++i) {
|
||||
if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE)
|
||||
if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE) {
|
||||
onlineNum++;
|
||||
}
|
||||
}
|
||||
|
||||
// add arbitrator connection
|
||||
SSyncPeer *pArb = pNode->peerInfo[TAOS_SYNC_MAX_REPLICA];
|
||||
|
@ -661,8 +673,7 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus peersStatus[], int8_t ne
|
|||
pNode->peerInfo[pNode->selfIndex]->version = nodeVersion;
|
||||
pPeer->role = newRole;
|
||||
|
||||
sDebug("%s, own role:%s, new peer role:%s", pPeer->id,
|
||||
syncRole[nodeRole], syncRole[pPeer->role]);
|
||||
sDebug("%s, own role:%s, new peer role:%s", pPeer->id, syncRole[nodeRole], syncRole[pPeer->role]);
|
||||
|
||||
SSyncPeer *pMaster = syncCheckMaster(pNode);
|
||||
|
||||
|
@ -699,20 +710,23 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus peersStatus[], int8_t ne
|
|||
if (pNode->replica == 2) consistent = 1;
|
||||
}
|
||||
|
||||
if (consistent)
|
||||
if (consistent) {
|
||||
syncChooseMaster(pNode);
|
||||
}
|
||||
}
|
||||
|
||||
if (syncRequired) {
|
||||
syncRecoverFromMaster(pMaster);
|
||||
}
|
||||
|
||||
if (peerOldRole != newRole || nodeRole != selfOldRole)
|
||||
if (peerOldRole != newRole || nodeRole != selfOldRole) {
|
||||
syncBroadcastStatus(pNode);
|
||||
}
|
||||
|
||||
if (nodeRole != TAOS_SYNC_ROLE_MASTER)
|
||||
if (nodeRole != TAOS_SYNC_ROLE_MASTER) {
|
||||
syncResetFlowCtrl(pNode);
|
||||
}
|
||||
}
|
||||
|
||||
static void syncRestartPeer(SSyncPeer *pPeer) {
|
||||
sDebug("%s, restart connection", pPeer->id);
|
||||
|
@ -722,9 +736,10 @@ static void syncRestartPeer(SSyncPeer *pPeer) {
|
|||
pPeer->sstatus = TAOS_SYNC_STATUS_INIT;
|
||||
|
||||
int ret = strcmp(pPeer->fqdn, tsNodeFqdn);
|
||||
if (ret > 0 || (ret == 0 && pPeer->port > tsSyncPort))
|
||||
if (ret > 0 || (ret == 0 && pPeer->port > tsSyncPort)) {
|
||||
taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer);
|
||||
}
|
||||
}
|
||||
|
||||
void syncRestartConnection(SSyncPeer *pPeer) {
|
||||
if (pPeer->ip == 0) return;
|
||||
|
@ -797,13 +812,15 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) {
|
|||
}
|
||||
|
||||
taosTmrStopA(&pPeer->timer);
|
||||
if (tsSyncNum >= tsMaxSyncNum) {
|
||||
|
||||
// Ensure the sync of mnode not interrupted
|
||||
if (pNode->vgId != 1 && tsSyncNum >= tsMaxSyncNum) {
|
||||
sInfo("%s, %d syncs are in process, try later", pPeer->id, tsSyncNum);
|
||||
taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId * 10) % 200, pPeer, syncTmrCtrl, &pPeer->timer);
|
||||
return;
|
||||
}
|
||||
|
||||
sDebug("%s, try to sync", pPeer->id)
|
||||
sDebug("%s, try to sync", pPeer->id);
|
||||
|
||||
SFirstPkt firstPkt;
|
||||
memset(&firstPkt, 0, sizeof(firstPkt));
|
||||
|
@ -820,8 +837,6 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) {
|
|||
nodeSStatus = TAOS_SYNC_STATUS_START;
|
||||
sInfo("%s, sync-req is sent", pPeer->id);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static void syncProcessFwdResponse(char *cont, SSyncPeer *pPeer) {
|
||||
|
@ -875,9 +890,10 @@ static void syncProcessPeersStatusMsg(char *cont, SSyncPeer *pPeer) {
|
|||
pPeer->version = pPeersStatus->version;
|
||||
syncCheckRole(pPeer, pPeersStatus->peersStatus, pPeersStatus->role);
|
||||
|
||||
if (pPeersStatus->ack)
|
||||
if (pPeersStatus->ack) {
|
||||
syncSendPeersStatusMsgToPeer(pPeer, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static int syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead, char *cont) {
|
||||
if (pPeer->peerFd < 0) return -1;
|
||||
|
@ -1063,8 +1079,7 @@ static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp) {
|
|||
SSyncPeer *pPeer;
|
||||
for (i = 0; i < pNode->replica; ++i) {
|
||||
pPeer = pNode->peerInfo[i];
|
||||
if (pPeer && (strcmp(pPeer->fqdn, firstPkt.fqdn) == 0) && (pPeer->port == firstPkt.port))
|
||||
break;
|
||||
if (pPeer && (strcmp(pPeer->fqdn, firstPkt.fqdn) == 0) && (pPeer->port == firstPkt.port)) break;
|
||||
}
|
||||
|
||||
pPeer = (i < pNode->replica) ? pNode->peerInfo[i] : NULL;
|
||||
|
@ -1089,8 +1104,6 @@ static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp) {
|
|||
}
|
||||
|
||||
pthread_mutex_unlock(&(pNode->mutex));
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static void syncProcessBrokenLink(void *param) {
|
||||
|
@ -1121,8 +1134,10 @@ static void syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle) {
|
|||
pSyncFwds->fwds--;
|
||||
}
|
||||
|
||||
if (pSyncFwds->fwds > 0)
|
||||
if (pSyncFwds->fwds > 0) {
|
||||
pSyncFwds->last = (pSyncFwds->last + 1) % tsMaxFwdInfo;
|
||||
}
|
||||
|
||||
SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->last;
|
||||
pFwdInfo->version = version;
|
||||
pFwdInfo->mhandle = mhandle;
|
||||
|
@ -1157,13 +1172,15 @@ static void syncProcessFwdAck(SSyncNode *pNode, SFwdInfo *pFwdInfo, int32_t code
|
|||
|
||||
if (code == 0) {
|
||||
pFwdInfo->acks++;
|
||||
if (pFwdInfo->acks >= pNode->quorum-1)
|
||||
if (pFwdInfo->acks >= pNode->quorum - 1) {
|
||||
confirm = 1;
|
||||
}
|
||||
} else {
|
||||
pFwdInfo->nacks++;
|
||||
if (pFwdInfo->nacks > pNode->replica-pNode->quorum)
|
||||
if (pFwdInfo->nacks > pNode->replica - pNode->quorum) {
|
||||
confirm = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (confirm && pFwdInfo->confirmed == 0) {
|
||||
sDebug("vgId:%d, forward is confirmed, ver:%" PRIu64 " code:%x", pNode->vgId, pFwdInfo->version, pFwdInfo->code);
|
||||
|
|
|
@ -31,6 +31,8 @@ static void tsdbDestroyFile(SFile *pFile);
|
|||
static int compFGroup(const void *arg1, const void *arg2);
|
||||
static int keyFGroupCompFunc(const void *key, const void *fgroup);
|
||||
static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo);
|
||||
static TSKEY tsdbGetCurrMinKey(int8_t precision, int32_t keep);
|
||||
static int tsdbGetCurrMinFid(int8_t precision, int32_t keep, int32_t days);
|
||||
|
||||
// ---------------- INTERNAL FUNCTIONS ----------------
|
||||
STsdbFileH *tsdbNewFileH(STsdbCfg *pCfg) {
|
||||
|
@ -79,9 +81,11 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
|
|||
int vid = 0;
|
||||
regex_t regex1, regex2;
|
||||
int code = 0;
|
||||
char fname[TSDB_FILENAME_LEN] = "\0";
|
||||
|
||||
SFileGroup fileGroup = {0};
|
||||
STsdbFileH *pFileH = pRepo->tsdbFileH;
|
||||
STsdbCfg * pCfg = &(pRepo->config);
|
||||
|
||||
tDataDir = tsdbGetDataDirName(pRepo->rootDir);
|
||||
if (tDataDir == NULL) {
|
||||
|
@ -108,6 +112,8 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
|
|||
goto _err;
|
||||
}
|
||||
|
||||
int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile);
|
||||
|
||||
struct dirent *dp = NULL;
|
||||
while ((dp = readdir(dir)) != NULL) {
|
||||
if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) continue;
|
||||
|
@ -120,6 +126,14 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
|
|||
continue;
|
||||
}
|
||||
|
||||
if (fid < mfid) {
|
||||
for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) {
|
||||
tsdbGetDataFileName(pRepo->rootDir, pCfg->tsdbId, fid, type, fname);
|
||||
(void)remove(fname);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (tsdbSearchFGroup(pFileH, fid, TD_EQ) != NULL) continue;
|
||||
memset((void *)(&fileGroup), 0, sizeof(SFileGroup));
|
||||
fileGroup.fileId = fid;
|
||||
|
@ -128,12 +142,30 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
|
|||
} else if (code == REG_NOMATCH) {
|
||||
code = regexec(®ex2, dp->d_name, 0, NULL, 0);
|
||||
if (code == 0) {
|
||||
tsdbDebug("vgId:%d invalid file %s exists, remove it", REPO_ID(pRepo), dp->d_name);
|
||||
char *fname = malloc(strlen(tDataDir) + strlen(dp->d_name) + 2);
|
||||
if (fname == NULL) goto _err;
|
||||
sprintf(fname, "%s/%s", tDataDir, dp->d_name);
|
||||
(void)remove(fname);
|
||||
free(fname);
|
||||
size_t tsize = strlen(tDataDir) + strlen(dp->d_name) + 2;
|
||||
char * fname1 = malloc(tsize);
|
||||
if (fname1 == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
sprintf(fname1, "%s/%s", tDataDir, dp->d_name);
|
||||
|
||||
tsize = tsize + 64;
|
||||
char *fname2 = malloc(tsize);
|
||||
if (fname2 == NULL) {
|
||||
free(fname1);
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
sprintf(fname2, "%s/%s_back_%" PRId64, tDataDir, dp->d_name, taosGetTimestamp(TSDB_TIME_PRECISION_MILLI));
|
||||
|
||||
(void)rename(fname1, fname2);
|
||||
|
||||
tsdbDebug("vgId:%d file %s exists, backup it as %s", REPO_ID(pRepo), fname1, fname2);
|
||||
|
||||
free(fname1);
|
||||
free(fname2);
|
||||
continue;
|
||||
} else if (code == REG_NOMATCH) {
|
||||
tsdbError("vgId:%d invalid file %s exists, ignore it", REPO_ID(pRepo), dp->d_name);
|
||||
continue;
|
||||
|
@ -146,6 +178,7 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
|
|||
|
||||
pFileH->pFGroup[pFileH->nFGroups++] = fileGroup;
|
||||
qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup);
|
||||
tsdbDebug("vgId:%d file group %d is restored, nFGroups %d", REPO_ID(pRepo), fileGroup.fileId, pFileH->nFGroups);
|
||||
}
|
||||
|
||||
regfree(®ex1);
|
||||
|
@ -179,8 +212,18 @@ void tsdbCloseFileH(STsdbRepo *pRepo) {
|
|||
|
||||
SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) {
|
||||
STsdbFileH *pFileH = pRepo->tsdbFileH;
|
||||
STsdbCfg * pCfg = &(pRepo->config);
|
||||
|
||||
if (pFileH->nFGroups >= pFileH->maxFGroups) return NULL;
|
||||
if (pFileH->nFGroups >= pFileH->maxFGroups) {
|
||||
int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile);
|
||||
if (pFileH->pFGroup[0].fileId < mfid) {
|
||||
pthread_rwlock_wrlock(&pFileH->fhlock);
|
||||
tsdbRemoveFileGroup(pRepo, &(pFileH->pFGroup[0]));
|
||||
pthread_rwlock_unlock(&pFileH->fhlock);
|
||||
}
|
||||
}
|
||||
|
||||
ASSERT(pFileH->nFGroups < pFileH->maxFGroups);
|
||||
|
||||
SFileGroup fGroup;
|
||||
SFileGroup *pFGroup = &fGroup;
|
||||
|
@ -342,8 +385,7 @@ void tsdbFitRetention(STsdbRepo *pRepo) {
|
|||
STsdbFileH *pFileH = pRepo->tsdbFileH;
|
||||
SFileGroup *pGroup = pFileH->pFGroup;
|
||||
|
||||
int mfid = (int)(TSDB_KEY_FILEID(taosGetTimestamp(pCfg->precision), pCfg->daysPerFile, pCfg->precision) -
|
||||
TSDB_MAX_FILE(pCfg->keep, pCfg->daysPerFile));
|
||||
int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile);
|
||||
|
||||
pthread_rwlock_wrlock(&(pFileH->fhlock));
|
||||
|
||||
|
@ -547,3 +589,11 @@ static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
static TSKEY tsdbGetCurrMinKey(int8_t precision, int32_t keep) {
|
||||
return (TSKEY)(taosGetTimestamp(precision) - keep * tsMsPerDay[precision]);
|
||||
}
|
||||
|
||||
static int tsdbGetCurrMinFid(int8_t precision, int32_t keep, int32_t days) {
|
||||
return (int)(TSDB_KEY_FILEID(tsdbGetCurrMinKey(precision, keep), days, precision));
|
||||
}
|
|
@ -767,7 +767,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
|
|||
int32_t lsize = tsize;
|
||||
int32_t keyLen = 0;
|
||||
for (int ncol = 0; ncol < pDataCols->numOfCols; ncol++) {
|
||||
if (tcol >= nColsNotAllNull) break;
|
||||
if (ncol != 0 && tcol >= nColsNotAllNull) break;
|
||||
|
||||
SDataCol *pDataCol = pDataCols->cols + ncol;
|
||||
SCompCol *pCompCol = pCompData->cols + tcol;
|
||||
|
|
|
@ -270,7 +270,7 @@ void taosReadGlobalLogCfg() {
|
|||
}
|
||||
wordfree(&full_path);
|
||||
|
||||
taosReadLogOption("tsLogDir", tsLogDir);
|
||||
taosReadLogOption("logDir", tsLogDir);
|
||||
|
||||
sprintf(fileName, "%s/taos.cfg", configDir);
|
||||
fp = fopen(fileName, "r");
|
||||
|
|
|
@ -62,6 +62,7 @@ typedef struct {
|
|||
pthread_mutex_t logMutex;
|
||||
} SLogObj;
|
||||
|
||||
int32_t tsLogKeepDays = 0;
|
||||
int32_t tsAsyncLog = 1;
|
||||
float tsTotalLogDirGB = 0;
|
||||
float tsAvailLogDirGB = 0;
|
||||
|
@ -78,6 +79,7 @@ static int32_t taosPushLogBuffer(SLogBuff *tLogBuff, char *msg, int32_t msgLen
|
|||
static SLogBuff *taosLogBuffNew(int32_t bufSize);
|
||||
static void taosCloseLogByFd(int32_t oldFd);
|
||||
static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum);
|
||||
extern void taosPrintGlobalCfg();
|
||||
|
||||
static int32_t taosStartLog() {
|
||||
pthread_attr_t threadAttr;
|
||||
|
@ -136,11 +138,24 @@ static void taosUnLockFile(int32_t fd) {
|
|||
}
|
||||
}
|
||||
|
||||
static void taosKeepOldLog(char *oldName) {
|
||||
if (tsLogKeepDays <= 0) return;
|
||||
|
||||
int64_t fileSec = taosGetTimestampSec();
|
||||
char fileName[LOG_FILE_NAME_LEN + 20];
|
||||
snprintf(fileName, LOG_FILE_NAME_LEN + 20, "%s.%" PRId64, tsLogObj.logName, fileSec);
|
||||
|
||||
taosRename(oldName, fileName);
|
||||
taosRemoveOldLogFiles(tsLogDir, tsLogKeepDays);
|
||||
}
|
||||
|
||||
static void *taosThreadToOpenNewFile(void *param) {
|
||||
char name[LOG_FILE_NAME_LEN + 20];
|
||||
char keepName[LOG_FILE_NAME_LEN + 20];
|
||||
sprintf(keepName, "%s.%d", tsLogObj.logName, tsLogObj.flag);
|
||||
|
||||
tsLogObj.flag ^= 1;
|
||||
tsLogObj.lines = 0;
|
||||
char name[LOG_FILE_NAME_LEN + 20];
|
||||
sprintf(name, "%s.%d", tsLogObj.logName, tsLogObj.flag);
|
||||
|
||||
umask(0);
|
||||
|
@ -150,6 +165,7 @@ static void *taosThreadToOpenNewFile(void *param) {
|
|||
uError("open new log file fail! fd:%d reason:%s", fd, strerror(errno));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
taosLockFile(fd);
|
||||
(void)lseek(fd, 0, SEEK_SET);
|
||||
|
||||
|
@ -157,9 +173,13 @@ static void *taosThreadToOpenNewFile(void *param) {
|
|||
tsLogObj.logHandle->fd = fd;
|
||||
tsLogObj.lines = 0;
|
||||
tsLogObj.openInProgress = 0;
|
||||
uInfo("new log file is opened!!!");
|
||||
|
||||
taosCloseLogByFd(oldFd);
|
||||
|
||||
uInfo(" new log file:%d is opened", tsLogObj.flag);
|
||||
uInfo("==================================");
|
||||
taosPrintGlobalCfg();
|
||||
taosKeepOldLog(keepName);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -264,21 +284,24 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) {
|
|||
strcat(name, ".0");
|
||||
}
|
||||
|
||||
// if none of the log files exist, open 0, if both exists, open the old one
|
||||
if (stat(name, &logstat0) < 0) {
|
||||
tsLogObj.flag = 0;
|
||||
} else {
|
||||
if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) {
|
||||
strcpy(name, fn);
|
||||
strcat(name, ".1");
|
||||
}
|
||||
|
||||
if (stat(name, &logstat1) < 0) {
|
||||
bool log0Exist = stat(name, &logstat0) >= 0;
|
||||
bool log1Exist = stat(name, &logstat1) >= 0;
|
||||
|
||||
// if none of the log files exist, open 0, if both exists, open the old one
|
||||
if (!log0Exist && !log1Exist) {
|
||||
tsLogObj.flag = 0;
|
||||
} else if (!log1Exist) {
|
||||
tsLogObj.flag = 0;
|
||||
} else if (!log0Exist) {
|
||||
tsLogObj.flag = 1;
|
||||
} else {
|
||||
tsLogObj.flag = (logstat0.st_mtime > logstat1.st_mtime) ? 0 : 1;
|
||||
}
|
||||
}
|
||||
|
||||
char fileName[LOG_FILE_NAME_LEN + 50] = "\0";
|
||||
sprintf(fileName, "%s.%d", tsLogObj.logName, tsLogObj.flag);
|
||||
|
|
|
@ -57,6 +57,9 @@ void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code) {}
|
|||
#endif
|
||||
|
||||
int32_t vnodeInitResources() {
|
||||
int code = syncInit();
|
||||
if (code != 0) return code;
|
||||
|
||||
vnodeInitWriteFp();
|
||||
vnodeInitReadFp();
|
||||
|
||||
|
@ -70,11 +73,12 @@ int32_t vnodeInitResources() {
|
|||
}
|
||||
|
||||
void vnodeCleanupResources() {
|
||||
|
||||
if (tsDnodeVnodesHash != NULL) {
|
||||
taosHashCleanup(tsDnodeVnodesHash);
|
||||
tsDnodeVnodesHash = NULL;
|
||||
}
|
||||
|
||||
syncCleanUp();
|
||||
}
|
||||
|
||||
int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
|
||||
|
@ -382,7 +386,13 @@ void vnodeRelease(void *pVnodeRaw) {
|
|||
char newDir[TSDB_FILENAME_LEN] = {0};
|
||||
sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId);
|
||||
sprintf(newDir, "%s/vnode%d", tsVnodeBakDir, vgId);
|
||||
|
||||
if (0 == tsEnableVnodeBak) {
|
||||
vInfo("vgId:%d, vnode backup not enabled", pVnode->vgId);
|
||||
} else {
|
||||
taosRename(rootDir, newDir);
|
||||
}
|
||||
|
||||
taosRemoveDir(rootDir);
|
||||
dnodeSendStatusMsgToMnode();
|
||||
}
|
||||
|
@ -671,9 +681,13 @@ static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
|
|||
len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pVnodeCfg->cfg.quorum);
|
||||
|
||||
len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n");
|
||||
|
||||
vInfo("vgId:%d, save vnode cfg, replica:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.replications);
|
||||
for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) {
|
||||
len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", pVnodeCfg->nodes[i].nodeId);
|
||||
len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", pVnodeCfg->nodes[i].nodeEp);
|
||||
vInfo("vgId:%d, save vnode cfg, nodeId:%d nodeEp:%s", pVnodeCfg->cfg.vgId, pVnodeCfg->nodes[i].nodeId,
|
||||
pVnodeCfg->nodes[i].nodeEp);
|
||||
|
||||
if (i < pVnodeCfg->cfg.replications - 1) {
|
||||
len += snprintf(content + len, maxLen - len, " },{\n");
|
||||
|
|
|
@ -98,6 +98,7 @@ static int32_t vnodeDumpQueryResult(SRspRet *pRet, void* pVnode, void** handle,
|
|||
pRsp->completed = true;
|
||||
|
||||
pRet->rsp = pRsp;
|
||||
pRet->len = sizeof(SRetrieveTableRsp);
|
||||
*freeHandle = true;
|
||||
}
|
||||
|
||||
|
@ -270,6 +271,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
|
|||
if (code != TSDB_CODE_SUCCESS) {
|
||||
//TODO handle malloc failure
|
||||
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
|
||||
pRet->len = sizeof(SRetrieveTableRsp);
|
||||
memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
|
||||
freeHandle = true;
|
||||
} else { // result is not ready, return immediately
|
||||
|
|
|
@ -22,26 +22,32 @@
|
|||
<dependency>
|
||||
<groupId>org.springframework</groupId>
|
||||
<artifactId>spring-context</artifactId>
|
||||
<version>4.3.2.RELEASE</version>
|
||||
<version>5.2.8.RELEASE</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.springframework</groupId>
|
||||
<artifactId>spring-jdbc</artifactId>
|
||||
<version>4.3.2.RELEASE</version>
|
||||
<version>5.1.9.RELEASE</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.springframework</groupId>
|
||||
<artifactId>spring-test</artifactId>
|
||||
<version>5.1.9.RELEASE</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
<version>4.11</version>
|
||||
<version>4.13</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>2.0.2</version>
|
||||
<version>2.0.4</version>
|
||||
</dependency>
|
||||
|
||||
</dependencies>
|
||||
|
@ -63,7 +69,7 @@
|
|||
<configuration>
|
||||
<archive>
|
||||
<manifest>
|
||||
<mainClass>com.taosdata.jdbc.App</mainClass>
|
||||
<mainClass>com.taosdata.jdbc.example.jdbcTemplate.App</mainClass>
|
||||
</manifest>
|
||||
</archive>
|
||||
<descriptorRefs>
|
||||
|
|
|
@ -1,44 +0,0 @@
|
|||
package com.taosdata.jdbc;
|
||||
|
||||
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.support.ClassPathXmlApplicationContext;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
public class App {
|
||||
|
||||
public static void main( String[] args ) {
|
||||
|
||||
ApplicationContext ctx = new ClassPathXmlApplicationContext("applicationContext.xml");
|
||||
|
||||
JdbcTemplate jdbcTemplate = (JdbcTemplate) ctx.getBean("jdbcTemplate");
|
||||
|
||||
// create database
|
||||
jdbcTemplate.execute("create database if not exists db ");
|
||||
|
||||
// create table
|
||||
jdbcTemplate.execute("create table if not exists db.tb (ts timestamp, temperature int, humidity float)");
|
||||
|
||||
String insertSql = "insert into db.tb values(now, 23, 10.3) (now + 1s, 20, 9.3)";
|
||||
|
||||
// insert rows
|
||||
int affectedRows = jdbcTemplate.update(insertSql);
|
||||
|
||||
System.out.println("insert success " + affectedRows + " rows.");
|
||||
|
||||
// query for list
|
||||
List<Map<String, Object>> resultList = jdbcTemplate.queryForList("select * from db.tb");
|
||||
|
||||
if(!CollectionUtils.isEmpty(resultList)){
|
||||
for (Map<String, Object> row : resultList){
|
||||
System.out.printf("%s, %d, %s\n", row.get("ts"), row.get("temperature"), row.get("humidity"));
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,48 @@
|
|||
package com.taosdata.jdbc.example.jdbcTemplate;
|
||||
|
||||
|
||||
import com.taosdata.jdbc.example.jdbcTemplate.dao.ExecuteAsStatement;
|
||||
import com.taosdata.jdbc.example.jdbcTemplate.dao.WeatherDao;
|
||||
import com.taosdata.jdbc.example.jdbcTemplate.domain.Weather;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.support.ClassPathXmlApplicationContext;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
|
||||
public class App {
|
||||
|
||||
private static Random random = new Random(System.currentTimeMillis());
|
||||
|
||||
public static void main(String[] args) {
|
||||
|
||||
ApplicationContext ctx = new ClassPathXmlApplicationContext("applicationContext.xml");
|
||||
|
||||
ExecuteAsStatement executor = ctx.getBean(ExecuteAsStatement.class);
|
||||
// drop database
|
||||
executor.doExecute("drop database if exists test");
|
||||
// create database
|
||||
executor.doExecute("create database if not exists test");
|
||||
//use database
|
||||
executor.doExecute("use test");
|
||||
// create table
|
||||
executor.doExecute("create table if not exists test.weather (ts timestamp, temperature int, humidity float)");
|
||||
|
||||
WeatherDao weatherDao = ctx.getBean(WeatherDao.class);
|
||||
Weather weather = new Weather(new Timestamp(new Date().getTime()), random.nextFloat() * 50.0f, random.nextInt(100));
|
||||
// insert rows
|
||||
int affectedRows = weatherDao.add(weather);
|
||||
System.out.println("insert success " + affectedRows + " rows.");
|
||||
|
||||
// query for list
|
||||
int limit = 10, offset = 0;
|
||||
List<Weather> weatherList = weatherDao.queryForList(limit, offset);
|
||||
for (Weather w : weatherList) {
|
||||
System.out.println(w);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
package com.taosdata.jdbc.example.jdbcTemplate.dao;
|
||||
|
||||
public interface ExecuteAsStatement{
|
||||
|
||||
void doExecute(String sql);
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
package com.taosdata.jdbc.example.jdbcTemplate.dao;
|
||||
|
||||
import com.taosdata.jdbc.example.jdbcTemplate.domain.Weather;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public interface WeatherDao {
|
||||
|
||||
|
||||
int add(Weather weather);
|
||||
|
||||
int[] batchInsert(List<Weather> weatherList);
|
||||
|
||||
List<Weather> queryForList(int limit, int offset);
|
||||
|
||||
int count();
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
package com.taosdata.jdbc.example.jdbcTemplate.dao.impl;
|
||||
|
||||
import com.taosdata.jdbc.example.jdbcTemplate.dao.ExecuteAsStatement;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.springframework.stereotype.Repository;
|
||||
|
||||
|
||||
@Repository
|
||||
public class ExecuteAsStatementImpl implements ExecuteAsStatement {
|
||||
|
||||
@Autowired
|
||||
private JdbcTemplate jdbcTemplate;
|
||||
|
||||
@Override
|
||||
public void doExecute(String sql) {
|
||||
jdbcTemplate.execute(sql);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,64 @@
|
|||
package com.taosdata.jdbc.example.jdbcTemplate.dao.impl;
|
||||
|
||||
import com.taosdata.jdbc.example.jdbcTemplate.dao.WeatherDao;
|
||||
import com.taosdata.jdbc.example.jdbcTemplate.domain.Weather;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.springframework.jdbc.core.namedparam.SqlParameterSourceUtils;
|
||||
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
|
||||
import org.springframework.stereotype.Repository;
|
||||
|
||||
import java.sql.PreparedStatement;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Timestamp;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
@Repository
|
||||
public class WeatherDaoImpl implements WeatherDao {
|
||||
|
||||
@Autowired
|
||||
private JdbcTemplate jdbcTemplate;
|
||||
|
||||
@Override
|
||||
public int add(Weather weather) {
|
||||
return jdbcTemplate.update(
|
||||
"insert into test.weather(ts, temperature, humidity) VALUES(?,?,?)",
|
||||
weather.getTs(), weather.getTemperature(), weather.getHumidity()
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int[] batchInsert(List<Weather> weatherList) {
|
||||
return jdbcTemplate.batchUpdate("insert into test.weather(ts, temperature, humidity) values( ?, ?, ?)", new BatchPreparedStatementSetter() {
|
||||
@Override
|
||||
public void setValues(PreparedStatement ps, int i) throws SQLException {
|
||||
ps.setTimestamp(1, weatherList.get(i).getTs());
|
||||
ps.setFloat(2, weatherList.get(i).getTemperature());
|
||||
ps.setInt(3, weatherList.get(i).getHumidity());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getBatchSize() {
|
||||
return weatherList.size();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Weather> queryForList(int limit, int offset) {
|
||||
return jdbcTemplate.query("select * from test.weather limit ? offset ?", (rs, rowNum) -> {
|
||||
Timestamp ts = rs.getTimestamp("ts");
|
||||
float temperature = rs.getFloat("temperature");
|
||||
int humidity = rs.getInt("humidity");
|
||||
return new Weather(ts, temperature, humidity);
|
||||
}, limit, offset);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int count() {
|
||||
return jdbcTemplate.queryForObject("select count(*) from test.weather", Integer.class);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,54 @@
|
|||
package com.taosdata.jdbc.example.jdbcTemplate.domain;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
|
||||
public class Weather {
|
||||
|
||||
private Timestamp ts;
|
||||
private float temperature;
|
||||
private int humidity;
|
||||
|
||||
public Weather() {
|
||||
}
|
||||
|
||||
public Weather(Timestamp ts, float temperature, int humidity) {
|
||||
this.ts = ts;
|
||||
this.temperature = temperature;
|
||||
this.humidity = humidity;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Weather{" +
|
||||
"ts=" + ts +
|
||||
", temperature=" + temperature +
|
||||
", humidity=" + humidity +
|
||||
'}';
|
||||
}
|
||||
|
||||
public Timestamp getTs() {
|
||||
return ts;
|
||||
}
|
||||
|
||||
public void setTs(Timestamp ts) {
|
||||
this.ts = ts;
|
||||
}
|
||||
|
||||
public float getTemperature() {
|
||||
return temperature;
|
||||
}
|
||||
|
||||
public void setTemperature(float temperature) {
|
||||
this.temperature = temperature;
|
||||
}
|
||||
|
||||
public int getHumidity() {
|
||||
return humidity;
|
||||
}
|
||||
|
||||
public void setHumidity(int humidity) {
|
||||
this.humidity = humidity;
|
||||
}
|
||||
|
||||
|
||||
}
|
|
@ -5,13 +5,12 @@
       xsi:schemaLocation="
       http://www.springframework.org/schema/beans
       http://www.springframework.org/schema/beans/spring-beans.xsd
       http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd
       "
       http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd"
       default-autowire="byName">

    <bean id="dataSource" class="org.springframework.jdbc.datasource.DriverManagerDataSource">
        <property name="driverClassName" value="com.taosdata.jdbc.TSDBDriver"></property>
        <property name="url" value="jdbc:TAOS://127.0.0.1:6030/log"></property>
        <property name="url" value="jdbc:TAOS://192.168.236.137:6030/"></property>
        <property name="username" value="root"></property>
        <property name="password" value="taosdata"></property>
    </bean>

@ -21,4 +20,6 @@
        <property name="dataSource" ref="dataSource"></property>
    </bean>

    <context:component-scan base-package="com.taosdata.jdbc.example.jdbcTemplate"/>

</beans>
@ -7,14 +7,12 @@ import org.junit.Test;
/**
 * Unit test for simple App.
 */
public class AppTest
{
public class AppTest {
    /**
     * Rigorous Test :-)
     */
    @Test
    public void shouldAnswerWithTrue()
    {
    public void shouldAnswerWithTrue() {
        assertTrue(true);
    }
}
@ -0,0 +1,64 @@
|
|||
package com.taosdata.jdbc.example.jdbcTemplate;
|
||||
|
||||
|
||||
import com.taosdata.jdbc.example.jdbcTemplate.dao.ExecuteAsStatement;
|
||||
import com.taosdata.jdbc.example.jdbcTemplate.dao.WeatherDao;
|
||||
import com.taosdata.jdbc.example.jdbcTemplate.domain.Weather;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.test.context.ContextConfiguration;
|
||||
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
@RunWith(SpringJUnit4ClassRunner.class)
|
||||
@ContextConfiguration({"classpath:applicationContext.xml"})
|
||||
public class BatcherInsertTest {
|
||||
|
||||
|
||||
@Autowired
|
||||
private WeatherDao weatherDao;
|
||||
@Autowired
|
||||
private ExecuteAsStatement executor;
|
||||
|
||||
private static final int numOfRecordsPerTable = 1000;
|
||||
private static long ts = 1496732686000l;
|
||||
private static Random random = new Random(System.currentTimeMillis());
|
||||
|
||||
@Before
|
||||
public void before() {
|
||||
// drop database
|
||||
executor.doExecute("drop database if exists test");
|
||||
// create database
|
||||
executor.doExecute("create database if not exists test");
|
||||
//use database
|
||||
executor.doExecute("use test");
|
||||
// create table
|
||||
executor.doExecute("create table if not exists test.weather (ts timestamp, temperature int, humidity float)");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void batchInsert() {
|
||||
List<Weather> weatherList = new ArrayList<>();
|
||||
for (int i = 0; i < numOfRecordsPerTable; i++) {
|
||||
ts += 1000;
|
||||
Weather weather = new Weather(new Timestamp(ts), random.nextFloat() * 50.0f, random.nextInt(100));
|
||||
weatherList.add(weather);
|
||||
}
|
||||
long start = System.currentTimeMillis();
|
||||
weatherDao.batchInsert(weatherList);
|
||||
long end = System.currentTimeMillis();
|
||||
System.out.println("batch insert(" + numOfRecordsPerTable + " rows) time cost ==========> " + (end - start) + " ms");
|
||||
|
||||
int count = weatherDao.count();
|
||||
assertEquals(count, numOfRecordsPerTable);
|
||||
}
|
||||
|
||||
}
|
|
@ -63,7 +63,7 @@
        <dependency>
            <groupId>com.taosdata.jdbc</groupId>
            <artifactId>taos-jdbcdriver</artifactId>
            <version>2.0.2</version>
            <version>2.0.4</version>
        </dependency>

        <dependency>
@ -76,6 +76,24 @@
    </dependencies>

    <build>
        <resources>
            <resource>
                <directory>src/main/resources</directory>
                <includes>
                    <include>**/*.properties</include>
                    <include>**/*.xml</include>
                </includes>
                <filtering>true</filtering>
            </resource>
            <resource>
                <directory>src/main/java</directory>
                <includes>
                    <include>**/*.properties</include>
                    <include>**/*.xml</include>
                </includes>
            </resource>

        </resources>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
@ -0,0 +1,28 @@
package com.taosdata.jdbc.springbootdemo.controller;


import com.taosdata.jdbc.springbootdemo.domain.Rainfall;
import com.taosdata.jdbc.springbootdemo.service.RainStationService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;

@RestController
@RequestMapping("/rainstation")
public class RainStationController {

    @Autowired
    private RainStationService service;

    @GetMapping("/init")
    public boolean init() {
        service.init();
        service.createTable();
        return true;
    }

    @PostMapping("/insert")
    public int insert(@RequestBody Rainfall rainfall) {
        return service.insert(rainfall);
    }

}
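Assuming the Spring Boot demo runs on its default port 8080, a hypothetical client for the two endpoints above might look like the sketch below (Java 11 HttpClient). The JSON field names follow the Rainfall domain class added later in this commit; the host, port and field values are assumptions.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RainStationClientSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // create the rainstation database and the monitoring super table first
        HttpRequest init = HttpRequest.newBuilder(URI.create("http://localhost:8080/rainstation/init"))
                .GET().build();
        client.send(init, HttpResponse.BodyHandlers.ofString());

        // ts follows the @JsonFormat pattern on Rainfall; all values here are made up
        String body = "{\"ts\":\"2020-01-01 00:00:00.000\",\"name\":\"station1\",\"code\":\"S_53646\","
                + "\"rainfall\":12.5,\"station_code\":\"53646\",\"station_name\":\"station1\"}";
        HttpRequest insert = HttpRequest.newBuilder(URI.create("http://localhost:8080/rainstation/insert"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
        HttpResponse<String> resp = client.send(insert, HttpResponse.BodyHandlers.ofString());
        System.out.println(resp.body()); // number of rows inserted
    }
}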
@ -16,6 +16,7 @@ public class WeatherController {
|
|||
|
||||
/**
|
||||
* create database and table
|
||||
*
|
||||
* @return
|
||||
*/
|
||||
@GetMapping("/init")
|
||||
|
@ -25,6 +26,7 @@ public class WeatherController {
|
|||
|
||||
/**
|
||||
* Pagination Query
|
||||
*
|
||||
* @param limit
|
||||
* @param offset
|
||||
* @return
|
||||
|
@ -36,6 +38,7 @@ public class WeatherController {
|
|||
|
||||
/**
|
||||
* upload single weather info
|
||||
*
|
||||
* @param temperature
|
||||
* @param humidity
|
||||
* @return
|
||||
|
@ -48,6 +51,7 @@ public class WeatherController {
|
|||
|
||||
/**
|
||||
* upload multi weather info
|
||||
*
|
||||
* @param weatherList
|
||||
* @return
|
||||
*/
|
||||
|
|
|
@ -0,0 +1,15 @@
package com.taosdata.jdbc.springbootdemo.dao;

import java.util.Map;

public interface DatabaseMapper {

    int createDatabase(String dbname);

    int dropDatabase(String dbname);

    int creatDatabaseWithParameters(Map<String,String> map);

    int useDatabase(String dbname);

}

@ -0,0 +1,44 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">

<mapper namespace="com.taosdata.jdbc.springbootdemo.dao.DatabaseMapper">

    <update id="createDatabase" parameterType="java.lang.String">
        create database if not exists ${dbname}
    </update>

    <update id="dropDatabase" parameterType="java.lang.String">
        DROP database if exists ${dbname}
    </update>


    <update id="creatDatabaseWithParameters" parameterType="map">
        CREATE database if not EXISTS ${dbname}
        <if test="keep != null">
            KEEP ${keep}
        </if>
        <if test="days != null">
            DAYS ${days}
        </if>
        <if test="replica != null">
            REPLICA ${replica}
        </if>
        <if test="cache != null">
            cache ${cache}
        </if>
        <if test="blocks != null">
            blocks ${blocks}
        </if>
        <if test="minrows != null">
            minrows ${minrows}
        </if>
        <if test="maxrows != null">
            maxrows ${maxrows}
        </if>
    </update>

    <update id="useDatabase" parameterType="java.lang.String">
        use ${dbname}
    </update>

</mapper>
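The optional clauses above are appended only when the matching key is present in the parameter map. A small caller sketch follows, mirroring RainStationService.init() later in this commit; the mapper is assumed to be injected by MyBatis/Spring and is passed in here only for illustration.

import java.util.HashMap;
import java.util.Map;

import com.taosdata.jdbc.springbootdemo.dao.DatabaseMapper;

public class CreateDatabaseSketch {
    // databaseMapper would normally be @Autowired; passed as a parameter for this sketch
    static void recreate(DatabaseMapper databaseMapper) {
        databaseMapper.dropDatabase("rainstation");

        Map<String, String> params = new HashMap<>();
        params.put("dbname", "rainstation");
        // each optional key below enables the matching <if> clause in the mapper XML
        params.put("keep", "36500");
        params.put("days", "30");
        params.put("blocks", "4");
        databaseMapper.creatDatabaseWithParameters(params);

        databaseMapper.useDatabase("rainstation");
    }
}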
@ -0,0 +1,9 @@
package com.taosdata.jdbc.springbootdemo.dao;

import java.util.Map;

public interface RainfallMapper {


    int save(Map<String, Object> map);
}

@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">

<mapper namespace="com.taosdata.jdbc.springbootdemo.dao.RainfallMapper">

    <insert id="save" parameterType="map">
        INSERT INTO ${table} using ${dbname}.${stable} tags(#{values.station_code}, #{values.station_name}) (ts, name, code, rainfall) values (#{values.ts}, #{values.name}, #{values.code}, #{values.rainfall})
    </insert>


</mapper>
@ -0,0 +1,8 @@
package com.taosdata.jdbc.springbootdemo.dao;

import com.taosdata.jdbc.springbootdemo.domain.TableMetadata;

public interface TableMapper {

    boolean createSTable(TableMetadata tableMetadata);
}

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">

<mapper namespace="com.taosdata.jdbc.springbootdemo.dao.TableMapper">

    <update id="createSTable" parameterType="com.taosdata.jdbc.springbootdemo.domain.TableMetadata">
        create table if not exists ${dbname}.${tablename}
        <foreach collection="fields" item="field" index="index" open="(" close=")" separator=",">
            ${field.name} ${field.type}
        </foreach>
        TAGS
        <foreach collection="tags" item="tag" index="index" open="(" close=")" separator=",">
            ${tag.name} ${tag.type}
        </foreach>
    </update>

    <update id="dropTable" parameterType="java.lang.String">
        drop ${tablename}
    </update>

</mapper>
@ -0,0 +1,28 @@
|
|||
package com.taosdata.jdbc.springbootdemo.domain;
|
||||
|
||||
public class FieldMetadata {
|
||||
|
||||
private String name;
|
||||
private String type;
|
||||
|
||||
public FieldMetadata(String name, String type) {
|
||||
this.name = name;
|
||||
this.type = type;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public String getType() {
|
||||
return type;
|
||||
}
|
||||
|
||||
public void setType(String type) {
|
||||
this.type = type;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,64 @@
|
|||
package com.taosdata.jdbc.springbootdemo.domain;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonFormat;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
|
||||
public class Rainfall {
|
||||
|
||||
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8")
|
||||
private Timestamp ts;
|
||||
private String name;
|
||||
private String code;
|
||||
private float rainfall;
|
||||
private String station_code;
|
||||
private String station_name;
|
||||
|
||||
public Timestamp getTs() {
|
||||
return ts;
|
||||
}
|
||||
|
||||
public void setTs(Timestamp ts) {
|
||||
this.ts = ts;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public String getCode() {
|
||||
return code;
|
||||
}
|
||||
|
||||
public void setCode(String code) {
|
||||
this.code = code;
|
||||
}
|
||||
|
||||
public float getRainfall() {
|
||||
return rainfall;
|
||||
}
|
||||
|
||||
public void setRainfall(float rainfall) {
|
||||
this.rainfall = rainfall;
|
||||
}
|
||||
|
||||
public String getStation_code() {
|
||||
return station_code;
|
||||
}
|
||||
|
||||
public void setStation_code(String station_code) {
|
||||
this.station_code = station_code;
|
||||
}
|
||||
|
||||
public String getStation_name() {
|
||||
return station_name;
|
||||
}
|
||||
|
||||
public void setStation_name(String station_name) {
|
||||
this.station_name = station_name;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,43 @@
|
|||
package com.taosdata.jdbc.springbootdemo.domain;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public class TableMetadata {
|
||||
|
||||
private String dbname;
|
||||
private String tablename;
|
||||
private List<FieldMetadata> fields;
|
||||
private List<TagMetadata> tags;
|
||||
|
||||
public String getDbname() {
|
||||
return dbname;
|
||||
}
|
||||
|
||||
public void setDbname(String dbname) {
|
||||
this.dbname = dbname;
|
||||
}
|
||||
|
||||
public String getTablename() {
|
||||
return tablename;
|
||||
}
|
||||
|
||||
public void setTablename(String tablename) {
|
||||
this.tablename = tablename;
|
||||
}
|
||||
|
||||
public List<FieldMetadata> getFields() {
|
||||
return fields;
|
||||
}
|
||||
|
||||
public void setFields(List<FieldMetadata> fields) {
|
||||
this.fields = fields;
|
||||
}
|
||||
|
||||
public List<TagMetadata> getTags() {
|
||||
return tags;
|
||||
}
|
||||
|
||||
public void setTags(List<TagMetadata> tags) {
|
||||
this.tags = tags;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
package com.taosdata.jdbc.springbootdemo.domain;
|
||||
|
||||
public class TagMetadata {
|
||||
private String name;
|
||||
private String type;
|
||||
|
||||
public TagMetadata(String name, String type) {
|
||||
this.name = name;
|
||||
this.type = type;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public String getType() {
|
||||
return type;
|
||||
}
|
||||
|
||||
public void setType(String type) {
|
||||
this.type = type;
|
||||
}
|
||||
}
|
|
@ -1,9 +1,12 @@
|
|||
package com.taosdata.jdbc.springbootdemo.domain;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonFormat;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
|
||||
public class Weather {
|
||||
|
||||
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8")
|
||||
private Timestamp ts;
|
||||
|
||||
private int temperature;
|
||||
|
|
|
@ -0,0 +1,72 @@
|
|||
package com.taosdata.jdbc.springbootdemo.service;
|
||||
|
||||
import com.taosdata.jdbc.springbootdemo.dao.DatabaseMapper;
|
||||
import com.taosdata.jdbc.springbootdemo.dao.RainfallMapper;
|
||||
import com.taosdata.jdbc.springbootdemo.dao.TableMapper;
|
||||
import com.taosdata.jdbc.springbootdemo.domain.FieldMetadata;
|
||||
import com.taosdata.jdbc.springbootdemo.domain.Rainfall;
|
||||
import com.taosdata.jdbc.springbootdemo.domain.TableMetadata;
|
||||
import com.taosdata.jdbc.springbootdemo.domain.TagMetadata;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
@Service
|
||||
public class RainStationService {
|
||||
|
||||
@Autowired
|
||||
private DatabaseMapper databaseMapper;
|
||||
@Autowired
|
||||
private TableMapper tableMapper;
|
||||
@Autowired
|
||||
private RainfallMapper rainfallMapper;
|
||||
|
||||
public boolean init() {
|
||||
databaseMapper.dropDatabase("rainstation");
|
||||
|
||||
Map<String, String> map = new HashMap<>();
|
||||
map.put("dbname", "rainstation");
|
||||
map.put("keep", "36500");
|
||||
map.put("days", "30");
|
||||
map.put("blocks", "4");
|
||||
databaseMapper.creatDatabaseWithParameters(map);
|
||||
|
||||
databaseMapper.useDatabase("rainstation");
|
||||
return true;
|
||||
}
|
||||
|
||||
public boolean createTable() {
|
||||
TableMetadata tableMetadata = new TableMetadata();
|
||||
tableMetadata.setDbname("rainstation");
|
||||
tableMetadata.setTablename("monitoring");
|
||||
|
||||
List<FieldMetadata> fields = new ArrayList<>();
|
||||
fields.add(new FieldMetadata("ts", "timestamp"));
|
||||
fields.add(new FieldMetadata("name", "NCHAR(10)"));
|
||||
fields.add(new FieldMetadata("code", " BINARY(8)"));
|
||||
fields.add(new FieldMetadata("rainfall", "float"));
|
||||
tableMetadata.setFields(fields);
|
||||
|
||||
List<TagMetadata> tags = new ArrayList<>();
|
||||
tags.add(new TagMetadata("station_code", "BINARY(8)"));
|
||||
tags.add(new TagMetadata("station_name", "NCHAR(10)"));
|
||||
tableMetadata.setTags(tags);
|
||||
|
||||
tableMapper.createSTable(tableMetadata);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
public int insert(Rainfall rainfall) {
|
||||
Map<String, Object> map = new HashMap<>();
|
||||
map.put("dbname", "rainstation");
|
||||
map.put("table", "S_53646");
|
||||
map.put("stable", "monitoring");
|
||||
map.put("values", rainfall);
|
||||
return rainfallMapper.save(map);
|
||||
}
|
||||
}
|
|
@ -14,10 +14,8 @@ public class WeatherService {
|
|||
private WeatherMapper weatherMapper;
|
||||
|
||||
public boolean init() {
|
||||
|
||||
weatherMapper.createDB();
|
||||
weatherMapper.createTable();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
# datasource config
spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/log
spring.datasource.url=jdbc:TAOS://localhost:6030/log
spring.datasource.username=root
spring.datasource.password=taosdata
@ -0,0 +1,49 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute('create table xcxlog (ts timestamp, user_id int, path BINARY(40),scene int) tags(appid bigint, adzone_id int,ip bigint,session_id bigint)')
|
||||
tdSql.error("insert into d1000004(user_id,path,scene,ts) using xcxlog tags(1000004,145,97160) values (97160,'pagex/goods/taoke',1086,now)")
|
||||
tdSql.execute("insert into d1000004_145(user_id,path,scene,ts) using xcxlog(appid,adzone_id,session_id,ip) tags(1000004,145,97160,1717171445) values (97160,'pagex/goods/taoke',1086,now)")
|
||||
|
||||
tdSql.query("show tables")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 'd1000004_145')
|
||||
|
||||
tdSql.query("select * from xcxlog")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 4, 1000004)
|
||||
tdSql.checkData(0, 5, 145)
|
||||
tdSql.checkData(0, 6, 1717171445)
|
||||
tdSql.checkData(0, 7, 97160)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -25,7 +25,7 @@ class TDTestCase:
|
|||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
self.numOfRecords = 10
|
||||
self.types = ["tinyint", "smallint", "int", "bigint", "float", "double", "bool", "binary(10)", "nchar(10)"]
|
||||
self.ts = 1537146000000
|
||||
|
||||
def checkNullValue(self, result):
|
||||
|
@ -38,139 +38,41 @@ class TDTestCase:
|
|||
return False
|
||||
return True
|
||||
|
||||
def restartTaosd(self):
|
||||
tdDnodes.stop(1)
|
||||
tdDnodes.start(1)
|
||||
tdSql.execute("use db")
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
print("==============step1")
|
||||
for i in range(len(self.types)):
|
||||
print("======== checking type %s ==========" % self.types[i])
|
||||
tdSql.execute("create table t0 (ts timestamp, col %s)" % self.types[i])
|
||||
tdSql.execute("insert into t0 values (%d, NULL)" % (self.ts))
|
||||
|
||||
tdSql.execute(
|
||||
"create table meters (ts timestamp, col1 int) tags(tgcol1 int)")
|
||||
tdSql.execute("create table t0 using meters tags(NULL)")
|
||||
|
||||
for i in range (self.numOfRecords):
|
||||
tdSql.execute("insert into t0 values (%d, %d)" % (self.ts + i, i));
|
||||
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.execute("alter table meters add column col2 tinyint")
|
||||
tdSql.execute("alter table meters drop column col1")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select col2 from meters")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.execute("alter table meters add column col1 int")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select col1 from meters")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.execute("alter table meters add column col3 smallint")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select col3 from meters")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.execute("alter table meters add column col4 bigint")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select col4 from meters")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.execute("alter table meters add column col5 float")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select col5 from meters")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.execute("alter table meters add column col6 double")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select col6 from meters")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.execute("alter table meters add column col7 bool")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select col7 from meters")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.execute("alter table meters add column col8 binary(20)")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select col8 from meters")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.execute("alter table meters add column col9 nchar(20)")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select col9 from meters")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.execute("alter table meters add tag tgcol2 tinyint")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select tgcol2 from meters")
|
||||
tdDnodes.stop(1)
|
||||
tdLog.sleep(10)
|
||||
tdDnodes.start(1)
|
||||
tdSql.execute("use db")
|
||||
tdSql.query("select * from t0")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
|
||||
tdSql.execute("alter table meters add tag tgcol3 smallint")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select tgcol3 from meters")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
|
||||
tdSql.execute("alter table meters add tag tgcol4 bigint")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select tgcol4 from meters")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
tdSql.execute("alter table meters add tag tgcol5 float")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select tgcol5 from meters")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
tdSql.execute("alter table meters add tag tgcol6 double")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select tgcol6 from meters")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
tdSql.execute("alter table meters add tag tgcol7 bool")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select tgcol7 from meters")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
tdSql.execute("alter table meters add tag tgcol8 binary(20)")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select tgcol8 from meters")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
tdSql.execute("alter table meters add tag tgcol9 nchar(20)")
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select tgcol9 from meters")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
self.restartTaosd()
|
||||
tdSql.query("select * from meters")
|
||||
tdSql.checkRows(10)
|
||||
if self.checkNullValue(tdSql.queryResult) is False:
|
||||
tdLog.exit("non None value is detected")
|
||||
tdLog.exit("no None value is detected")
|
||||
|
||||
tdSql.execute("create table t1 (ts timestamp, col %s)" % self.types[i])
|
||||
tdSql.execute("insert into t1 values (%d, NULL)" % (self.ts))
|
||||
tdDnodes.stop(1)
|
||||
tdLog.sleep(10)
|
||||
tdDnodes.start(1)
|
||||
tdSql.execute("use db")
|
||||
|
||||
for j in range(150):
|
||||
tdSql.execute("insert into t1 values (%d, NULL)" % (self.ts + j + 1));
|
||||
|
||||
tdSql.query("select * from t1")
|
||||
tdSql.checkRows(151)
|
||||
|
||||
if self.checkNullValue(tdSql.queryResult) is False:
|
||||
tdLog.exit("no None value is detected")
|
||||
|
||||
print("======== None value check for type %s is OK ==========" % self.types[i])
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
|
|
|
@ -52,6 +52,12 @@ if $data00 != $rowNum then
|
|||
return -1
|
||||
endi
|
||||
|
||||
sql select count(1) from $tb
|
||||
print ===> select count(1) from $tb => $data00
|
||||
if $data00 != $rowNum then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select count(tbcol) from $tb
|
||||
print ===> $data00
|
||||
if $data00 != $rowNum then
|
||||
|
@ -102,13 +108,20 @@ if $data00 != $totalNum then
|
|||
return -1
|
||||
endi
|
||||
|
||||
print =============== step8
|
||||
sql select count(1) from $mt
|
||||
print ===> $data00
|
||||
if $data00 != $totalNum then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select count(tbcol) from $mt
|
||||
print ===> $data00
|
||||
if $data00 != $totalNum then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step8
|
||||
print =============== step10
|
||||
sql select count(tbcol) as c from $mt where ts < now + 4m
|
||||
print ===> $data00
|
||||
if $data00 != 50 then
|
||||
|
|
|
@ -7,11 +7,10 @@ system sh/exec.sh -n dnode1 -s start
|
|||
|
||||
sleep 3000
|
||||
sql connect
|
||||
print ============================ dnode1 start
|
||||
|
||||
print ============= create database
|
||||
sql create database db cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1
|
||||
sql show databases
|
||||
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
|
||||
if $data00 != db then
|
||||
return -1
|
||||
endi
|
||||
|
@ -37,27 +36,240 @@ if $data09 != 4 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
print =============== step2
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
return
|
||||
sql_error alter database db cache 256
|
||||
sql_error alter database db blocks 1
|
||||
sql_error alter database db days 10
|
||||
sql_error alter database db keep 10
|
||||
sql_error alter database db minRows 350
|
||||
sql_error alter database db minRows 550
|
||||
sql_error alter database db ctime 5000
|
||||
sql_error alter database db precision "us"
|
||||
sql_error alter database db comp 3
|
||||
sql_error alter database db wal 1
|
||||
print ============== step name
|
||||
sql_error alter database db name d1
|
||||
sql_error alter database db name d2
|
||||
|
||||
print ============== step ntables
|
||||
sql_error alter database db ntables -1
|
||||
sql_error alter database db ntables 0
|
||||
sql_error alter database db ntables 1
|
||||
sql_error alter database db ntables 10
|
||||
|
||||
print ============== step vgroups
|
||||
sql_error alter database db vgroups -1
|
||||
sql_error alter database db vgroups 0
|
||||
sql_error alter database db vgroups 1
|
||||
sql_error alter database db vgroups 10
|
||||
|
||||
print ============== step replica
|
||||
sql_error alter database db replica 2
|
||||
sql_error alter database db replica 3
|
||||
sql_error alter database db replica 0
|
||||
|
||||
sql alter database db replica 1
|
||||
sql show databases
|
||||
print replica $data4_db
|
||||
if $data4_db != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ============== step quorum
|
||||
sql show databases
|
||||
print quorum $data5_db
|
||||
if $data5_db != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db quorum 1
|
||||
sql show databases
|
||||
print quorum $data5_db
|
||||
if $data5_db != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db quorum 2
|
||||
sql show databases
|
||||
print quorum $data5_db
|
||||
if $data5_db != 2 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db quorum 3
|
||||
sql show databases
|
||||
print quorum $data5_db
|
||||
if $data5_db != 3 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db quorum 3
|
||||
sql alter database db quorum 2
|
||||
sql alter database db quorum 1
|
||||
sql_error alter database db quorum 0
|
||||
sql_error alter database db quorum 4
|
||||
sql_error alter database db quorum 5
|
||||
sql_error alter database db quorum -1
|
||||
|
||||
print ============== step days
|
||||
sql_error alter database db days 0
|
||||
sql_error alter database db days 1
|
||||
sql_error alter database db days 2
|
||||
sql_error alter database db days 10
|
||||
sql_error alter database db days 50
|
||||
sql_error alter database db days 100
|
||||
|
||||
print ============== step keep
|
||||
sql show databases
|
||||
print keep $data7_db
|
||||
if $data7_db != 20,20,20 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db keep 10
|
||||
sql show databases
|
||||
print keep $data7_db
|
||||
if $data7_db != 20,20,10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db keep 20
|
||||
sql show databases
|
||||
print keep $data7_db
|
||||
if $data7_db != 20,20,20 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ============== step3
|
||||
sql alter database db comp 1
|
||||
sql alter database db blocks 40
|
||||
sql alter database db keep 30
|
||||
sql show databases
|
||||
print keep $data7_db
|
||||
if $data7_db != 20,20,30 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db keep 40
|
||||
sql alter database db keep 30
|
||||
sql alter database db keep 20
|
||||
sql alter database db keep 10
|
||||
sql_error alter database db keep 9
|
||||
sql_error alter database db keep 1
|
||||
sql alter database db keep 0
|
||||
sql alter database db keep -1
|
||||
sql_error alter database db keep 365001
|
||||
|
||||
print ============== step cache
|
||||
sql_error alter database db cache 60
|
||||
sql_error alter database db cache 50
|
||||
sql_error alter database db cache 20
|
||||
sql_error alter database db cache 3
|
||||
sql_error alter database db cache 129
|
||||
sql_error alter database db cache 300
|
||||
sql_error alter database db cache 0
|
||||
sql_error alter database db cache -1
|
||||
|
||||
print ============== step blocks
|
||||
sql show databases
|
||||
print blocks $data9_db
|
||||
if $data9_db != 4 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db blocks 10
|
||||
sql show databases
|
||||
print blocks $data9_db
|
||||
if $data9_db != 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db blocks 20
|
||||
sql show databases
|
||||
print blocks $data9_db
|
||||
if $data9_db != 20 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db blocks 30
|
||||
sql show databases
|
||||
print blocks $data9_db
|
||||
if $data9_db != 30 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db blocks 40
|
||||
sql alter database db blocks 30
|
||||
sql alter database db blocks 20
|
||||
sql alter database db blocks 10
|
||||
sql_error alter database db blocks 2
|
||||
sql_error alter database db blocks 1
|
||||
sql alter database db blocks 0
|
||||
sql_error alter database db blocks -1
|
||||
sql_error alter database db blocks 10001
|
||||
|
||||
print ============== step minrows
|
||||
sql_error alter database db minrows 1
|
||||
sql_error alter database db minrows 100
|
||||
sql_error alter database db minrows 1000
|
||||
|
||||
print ============== step maxrows
|
||||
sql_error alter database db maxrows 1
|
||||
sql_error alter database db maxrows 100
|
||||
sql_error alter database db maxrows 1000
|
||||
|
||||
print ============== step wallevel
|
||||
sql show databases
|
||||
print wallevel $data12_db
|
||||
if $data12_db != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db wal 1
|
||||
sql show databases
|
||||
print wal $data12_db
|
||||
if $data12_db != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql_error alter database db wal 2
|
||||
sql_error alter database db wal 0
|
||||
sql_error alter database db wal 3
|
||||
sql_error alter database db wal 4
|
||||
sql_error alter database db wal -1
|
||||
sql_error alter database db wal 1000
|
||||
|
||||
print ============== step fsync
|
||||
sql_error alter database db fsync 2
|
||||
sql_error alter database db fsync 3
|
||||
sql_error alter database db fsync 4
|
||||
sql_error alter database db fsync -1
|
||||
sql_error alter database db fsync 1000
|
||||
|
||||
print ============== step comp
|
||||
sql show databases
|
||||
print comp $data14_db
|
||||
if $data14_db != 2 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db comp 1
|
||||
sql show databases
|
||||
print comp $data14_db
|
||||
if $data14_db != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db comp 2
|
||||
sql show databases
|
||||
print comp $data14_db
|
||||
if $data14_db != 2 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql alter database db comp 0
|
||||
sql show databases
|
||||
print comp $data14_db
|
||||
if $data14_db != 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql_error alter database db comp 3
|
||||
sql_error alter database db comp 4
|
||||
sql_error alter database db comp 5
|
||||
sql_error alter database db comp -1
|
||||
|
||||
|
||||
print ============== step precision
|
||||
sql_error alter database db prec 'us'
|
||||
|
||||
#system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
print ============== step status
|
||||
sql_error alter database db status 'delete'
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -253,6 +253,7 @@ cd ../../../debug; make
|
|||
./test.sh -f unique/cluster/balance2.sim
|
||||
./test.sh -f unique/cluster/balance3.sim
|
||||
./test.sh -f unique/cluster/cache.sim
|
||||
./test.sh -f unique/cluster/vgroup100.sim
|
||||
|
||||
./test.sh -f unique/column/replica3.sim
|
||||
|
||||
|
|
|
@ -14,10 +14,10 @@ print $data00 $data01 $data02 $data03 $data04 $data05 $data06
|
|||
if $data00 != root then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != 3/10 then
|
||||
if $data02 != 3/128 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != 0/64 then
|
||||
if $data03 != 0/128 then
|
||||
return -1
|
||||
endi
|
||||
if $data04 != 0/2147483647 then
|
||||
|
|
|
@ -53,7 +53,7 @@ system sh/exec.sh -n dnode2 -s start
|
|||
system sh/exec.sh -n dnode3 -s start
|
||||
sql create dnode $hostname2
|
||||
sql create dnode $hostname3
|
||||
sleep 3000
|
||||
sleep 5000
|
||||
|
||||
$sleepTimer = 3000
|
||||
|
||||
|
@ -225,6 +225,7 @@ if $data00 != $totalRows then
|
|||
endi
|
||||
|
||||
print ============== step5: stop dnode2, and remove its vnode
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
sleep $sleepTimer
|
||||
|
||||
|
|
|
@ -193,6 +193,7 @@ if $data00 != $totalRows then
|
|||
endi
|
||||
|
||||
print ============== step5: stop dnode1
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s stop
|
||||
sleep 3000
|
||||
|
||||
|
|
|
@ -105,6 +105,15 @@ if $dnode4Vnodes != null then
|
|||
goto show1
|
||||
endi
|
||||
|
||||
sql show mnodes
|
||||
print dnode1 ==> $data2_1
|
||||
print dnode2 ==> $data2_2
|
||||
print dnode3 ==> $data2_3
|
||||
print dnode4 ==> $data2_4
|
||||
print dnode5 ==> $data2_5
|
||||
print dnode6 ==> $data2_6
|
||||
print dnode7 ==> $data2_7
|
||||
|
||||
print ============================== step2
|
||||
print ========= start dnode4
|
||||
sql create dnode $hostname4
|
||||
|
@ -132,6 +141,15 @@ if $dnode4Vnodes != 2 then
|
|||
goto show2
|
||||
endi
|
||||
|
||||
sql show mnodes
|
||||
print dnode1 ==> $data2_1
|
||||
print dnode2 ==> $data2_2
|
||||
print dnode3 ==> $data2_3
|
||||
print dnode4 ==> $data2_4
|
||||
print dnode5 ==> $data2_5
|
||||
print dnode6 ==> $data2_6
|
||||
print dnode7 ==> $data2_7
|
||||
|
||||
print ============================== step3
|
||||
print ========= drop dnode2
|
||||
sql drop dnode $hostname2
|
||||
|
@ -167,6 +185,15 @@ if $dnode4Vnodes != 3 then
|
|||
goto show3
|
||||
endi
|
||||
|
||||
sql show mnodes
|
||||
print dnode1 ==> $data2_1
|
||||
print dnode2 ==> $data2_2
|
||||
print dnode3 ==> $data2_3
|
||||
print dnode4 ==> $data2_4
|
||||
print dnode5 ==> $data2_5
|
||||
print dnode6 ==> $data2_6
|
||||
print dnode7 ==> $data2_7
|
||||
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
|
||||
print ============================== step4
|
||||
|
@ -195,6 +222,15 @@ if $dnode5Vnodes != 2 then
|
|||
goto show4
|
||||
endi
|
||||
|
||||
sql show mnodes
|
||||
print dnode1 ==> $data2_1
|
||||
print dnode2 ==> $data2_2
|
||||
print dnode3 ==> $data2_3
|
||||
print dnode4 ==> $data2_4
|
||||
print dnode5 ==> $data2_5
|
||||
print dnode6 ==> $data2_6
|
||||
print dnode7 ==> $data2_7
|
||||
|
||||
print ============================== step5
|
||||
print ========= drop dnode3
|
||||
sql drop dnode $hostname3
|
||||
|
@ -232,6 +268,15 @@ endi
|
|||
|
||||
system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
|
||||
sql show mnodes
|
||||
print dnode1 ==> $data2_1
|
||||
print dnode2 ==> $data2_2
|
||||
print dnode3 ==> $data2_3
|
||||
print dnode4 ==> $data2_4
|
||||
print dnode5 ==> $data2_5
|
||||
print dnode6 ==> $data2_6
|
||||
print dnode7 ==> $data2_7
|
||||
|
||||
print ============================== step6
|
||||
sql create dnode $hostname6
|
||||
system sh/exec.sh -n dnode6 -s start
|
||||
|
@ -258,6 +303,15 @@ if $dnode6Vnodes != 2 then
|
|||
goto show6
|
||||
endi
|
||||
|
||||
sql show mnodes
|
||||
print dnode1 ==> $data2_1
|
||||
print dnode2 ==> $data2_2
|
||||
print dnode3 ==> $data2_3
|
||||
print dnode4 ==> $data2_4
|
||||
print dnode5 ==> $data2_5
|
||||
print dnode6 ==> $data2_6
|
||||
print dnode7 ==> $data2_7
|
||||
|
||||
print ============================== step7
|
||||
print ========= drop dnode4
|
||||
sql drop dnode $hostname4
|
||||
|
@ -294,6 +348,14 @@ if $dnode4Vnodes != null then
|
|||
endi
|
||||
|
||||
system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
||||
sql show mnodes
|
||||
print dnode1 ==> $data2_1
|
||||
print dnode2 ==> $data2_2
|
||||
print dnode3 ==> $data2_3
|
||||
print dnode4 ==> $data2_4
|
||||
print dnode5 ==> $data2_5
|
||||
print dnode6 ==> $data2_6
|
||||
print dnode7 ==> $data2_7
|
||||
|
||||
print ============================== step8
|
||||
sql create dnode $hostname7
|
||||
|
@ -321,6 +383,15 @@ if $dnode7Vnodes != 2 then
|
|||
goto show8
|
||||
endi
|
||||
|
||||
sql show mnodes
|
||||
print dnode1 ==> $data2_1
|
||||
print dnode2 ==> $data2_2
|
||||
print dnode3 ==> $data2_3
|
||||
print dnode4 ==> $data2_4
|
||||
print dnode5 ==> $data2_5
|
||||
print dnode6 ==> $data2_6
|
||||
print dnode7 ==> $data2_7
|
||||
|
||||
print ============================== step9
|
||||
print ========= drop dnode1
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
|
@ -335,15 +406,20 @@ sql show mnodes
|
|||
$dnode1Role = $data2_1
|
||||
$dnode4Role = $data2_4
|
||||
$dnode5Role = $data2_5
|
||||
print dnode1 ==> $dnode1Role
|
||||
print dnode4 ==> $dnode4Role
|
||||
print dnode5 ==> $dnode5Role
|
||||
print dnode1 ==> $data2_1
|
||||
print dnode2 ==> $data2_2
|
||||
print dnode3 ==> $data2_3
|
||||
print dnode4 ==> $data2_4
|
||||
print dnode5 ==> $data2_5
|
||||
print dnode6 ==> $data2_6
|
||||
print dnode7 ==> $data2_7
|
||||
|
||||
if $dnode1Role != offline then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ============================== step9.1
|
||||
sleep 2000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
$x = 0
|
||||
|
@ -353,6 +429,19 @@ show9:
|
|||
if $x == 20 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql show mnodes
|
||||
$dnode1Role = $data2_1
|
||||
$dnode4Role = $data2_4
|
||||
$dnode5Role = $data2_5
|
||||
print dnode1 ==> $data2_1
|
||||
print dnode2 ==> $data2_2
|
||||
print dnode3 ==> $data2_3
|
||||
print dnode4 ==> $data2_4
|
||||
print dnode5 ==> $data2_5
|
||||
print dnode6 ==> $data2_6
|
||||
print dnode7 ==> $data2_7
|
||||
|
||||
sql show dnodes -x show9
|
||||
$dnode5Vnodes = $data2_5
|
||||
print dnode5 $dnode5Vnodes
|
||||
|
@ -374,6 +463,15 @@ endi
|
|||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
sleep 5000
|
||||
|
||||
sql show mnodes
|
||||
print dnode1 ==> $data2_1
|
||||
print dnode2 ==> $data2_2
|
||||
print dnode3 ==> $data2_3
|
||||
print dnode4 ==> $data2_4
|
||||
print dnode5 ==> $data2_5
|
||||
print dnode6 ==> $data2_6
|
||||
print dnode7 ==> $data2_7
|
||||
|
||||
print ============================== step11
|
||||
print ========= add db4
|
||||
|
||||
|
|
|
@ -0,0 +1,127 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/deploy.sh -n dnode2 -i 2
|
||||
system sh/deploy.sh -n dnode3 -i 3
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
|
||||
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
|
||||
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c maxTables -v 4
|
||||
system sh/cfg.sh -n dnode2 -c maxTables -v 4
|
||||
system sh/cfg.sh -n dnode3 -c maxTables -v 4
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
|
||||
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
|
||||
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0
|
||||
|
||||
print ============================== step1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print ============================== step2
|
||||
print ========= start dnode2
|
||||
sql create dnode $hostname2
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sql create dnode $hostname3
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
|
||||
sleep 5000
|
||||
sql show mnodes
|
||||
$dnode1Role = $data2_1
|
||||
$dnode2Role = $data2_2
|
||||
$dnode3Role = $data2_3
|
||||
|
||||
print $dnode1Role
|
||||
print $dnode2Role
|
||||
print $dnode3Role
|
||||
|
||||
print ============================== step3
|
||||
$count = 2
|
||||
while $count < 102
|
||||
$db = d . $count
|
||||
$tb = $db . .t
|
||||
sql create database $db replica 3 cache 1 blocks 3
|
||||
sql create table $tb (ts timestamp, i int)
|
||||
sql insert into $tb values(now, 1)
|
||||
$count = $count + 1
|
||||
print insert into $tb values(now, 1) ==> finished
|
||||
endw
|
||||
|
||||
print ============================== step4
|
||||
|
||||
$count = 2
|
||||
while $count < 102
|
||||
$db = d . $count
|
||||
$tb = $db . .t
|
||||
sql select * from $tb
|
||||
if $rows != 1 then
|
||||
print select * from $tb
|
||||
return -1
|
||||
endi
|
||||
$count = $count + 1
|
||||
print select * from $tb ==> rows: $rows
|
||||
endw
|
||||
|
||||
print ============================== step5
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
sleep 5000
|
||||
|
||||
print ============================== step6
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
sleep 3000
|
||||
|
||||
print ============================== step7
|
||||
|
||||
$x = 0
|
||||
show7:
|
||||
$x = $x + 1
|
||||
sleep 2000
|
||||
if $x == 50 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql show mnodes -x show7
|
||||
$dnode1Role = $data2_1
|
||||
$dnode2Role = $data2_2
|
||||
$dnode3Role = $data2_3
|
||||
if $dnode1Role != master then
|
||||
goto show7
|
||||
endi
|
||||
if $dnode2Role != slave then
|
||||
goto show7
|
||||
endi
|
||||
if $dnode3Role != slave then
|
||||
goto show7
|
||||
endi
|
||||
|
||||
print ============================== step8
|
||||
$x = 0
|
||||
show8:
|
||||
$x = $x + 1
|
||||
sleep 2000
|
||||
if $x == 20 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$count = 2
|
||||
while $count < 102
|
||||
$db = d . $count
|
||||
$tb = $db . .t
|
||||
sql select * from $tb
|
||||
if $rows != 1 then
|
||||
print select * from $tb
|
||||
goto show8
|
||||
endi
|
||||
$count = $count + 1
|
||||
print select * from $tb ==> rows: $rows
|
||||
endw
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
|
@ -21,9 +21,10 @@ system sh/exec.sh -n dnode1 -s start
|
|||
sleep 3000
|
||||
sql connect
|
||||
sql create dnode $hostname2
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sql create dnode $hostname3
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
sleep 3000
|
||||
|
||||
print ======== step1
|
||||
sql create database db replica 3 blocks 3
|
||||
|
|
|
@ -18,13 +18,14 @@ system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
|
|||
|
||||
print ========== step1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 3000
|
||||
sql connect
|
||||
|
||||
sql create dnode $hostname2
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sleep 3000
|
||||
sql create dnode $hostname3
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
sleep 3000
|
||||
sleep 5000
|
||||
|
||||
sql show dnodes
|
||||
print dnode1 $data5_1
|
||||
|
|
|
@ -31,8 +31,8 @@ sleep 3000
|
|||
sql connect
|
||||
|
||||
sql create dnode $hostname2
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sql create dnode $hostname3
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
|
||||
|
||||
|
|
|
@ -13,8 +13,8 @@ system sh/exec.sh -n dnode1 -s start
|
|||
sql connect
|
||||
|
||||
sql create dnode $hostname2
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sql create dnode $hostname3
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
|
||||
$x = 0
|
||||
|
|
|
@ -17,8 +17,8 @@ system sh/exec.sh -n dnode1 -s start
|
|||
sql connect
|
||||
|
||||
sql create dnode $hostname2
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sql create dnode $hostname3
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
$x = 0
|
||||
createDnode:
|
||||
|
|
|
@ -21,10 +21,10 @@ system sh/exec.sh -n dnode1 -s start
|
|||
sql connect
|
||||
|
||||
sql create dnode $hostname2
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sql create dnode $hostname3
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
sql create dnode $hostname4
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
system sh/exec.sh -n dnode4 -s start
|
||||
|
||||
$x = 0
|
||||
|
|
|
@ -18,8 +18,8 @@ system sh/exec.sh -n dnode1 -s start
|
|||
sql connect
|
||||
|
||||
sql create dnode $hostname2
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sql create dnode $hostname3
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
$x = 0
|
||||
createDnode:
|
||||
|
|
|
@ -21,10 +21,10 @@ system sh/exec.sh -n dnode1 -s start
|
|||
sql connect
|
||||
|
||||
sql create dnode $hostname2
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sql create dnode $hostname3
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
sql create dnode $hostname4
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
system sh/exec.sh -n dnode4 -s start
|
||||
$x = 0
|
||||
createDnode:
|
||||
|
|
|
@ -25,8 +25,8 @@ system sh/exec.sh -n dnode1 -s start
|
|||
sleep 3000
|
||||
sql connect
|
||||
sql create dnode $hostname2
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sql create dnode $hostname3
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
sleep 3000
|
||||
|
||||
|
|
|
@ -667,7 +667,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {

  TAOS_RES* pSql = NULL;

  for (int attempt = 0; attempt < 3; ++attempt) {
  for (int attempt = 0; attempt < 10; ++attempt) {
    simLogSql(rest, false);
    pSql = taos_query(script->taos, rest);
    ret = taos_errno(pSql);