diff --git a/alert/app/rule.go b/alert/app/rule.go index d1ab9776fe..44596ca26d 100644 --- a/alert/app/rule.go +++ b/alert/app/rule.go @@ -121,7 +121,21 @@ func (alert *Alert) refresh(rule *Rule, values map[string]interface{}) { alert.Values = values res := rule.Expr.Eval(func(key string) interface{} { // ToLower is required as column name in result is in lower case - return alert.Values[strings.ToLower(key)] + i := alert.Values[strings.ToLower(key)] + switch v := i.(type) { + case int8: + return int64(v) + case int16: + return int64(v) + case int: + return int64(v) + case int32: + return int64(v) + case float32: + return float64(v) + default: + return v + } }) val, ok := res.(bool) diff --git a/alert/cmd/alert/main.go b/alert/cmd/alert/main.go index f934a11d42..f4c30e156a 100644 --- a/alert/cmd/alert/main.go +++ b/alert/cmd/alert/main.go @@ -119,7 +119,7 @@ WantedBy=multi-user.target return nil } -const version = "TDengine alert v2.0.0.0" +const version = "TDengine alert v2.0.0.1" func main() { var ( diff --git a/documentation/webdocs/markdowndocs/administrator-ch.md b/documentation/webdocs/markdowndocs/administrator-ch.md index 45a3501a0f..44b3ad4671 100644 --- a/documentation/webdocs/markdowndocs/administrator-ch.md +++ b/documentation/webdocs/markdowndocs/administrator-ch.md @@ -128,24 +128,84 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数 - maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。 - maxVgroupsPerDb: 每个数据库中能够使用的最大vnode个数。 - arbitrator: 系统中裁决器的end point,缺省为空 -- timezone:时区。从系统中动态获取当前的时区设置。 -- locale:系统区位信息及编码格式。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 -- charset:字符集编码。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 +- timezone、locale、charset 的配置见客户端配置。 ## 客户端配置 -TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见[Shell命令行程序](#_TDengine_Shell命令行程序)。本节主要讲解taos客户端应用在配置文件taos.cfg文件中使用到的参数。 +TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。 客户端配置参数列表及解释 - firstEp: taos启动时,主动连接的集群中第一个taosd实例的end point, 缺省值为 localhost:6030。 - secondEp: taos启动时,如果first连接不上,尝试连接集群中第二个taosd实例的end point, 缺省值为空。 -- charset:字符集编码。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 -- locale:系统区位信息及编码格式。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 -日志的配置参数,与server的配置参数完全一样。 +- locale -启动taos时,也可以从命令行指定一个taosd实例的end point,否则就从taos.cfg读取。 + > 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 + +TDengine为存储中文、日文、韩文等非ASCII编码的宽字符,提供一种专门的字段类型nchar。写入nchar字段的数据将统一采用UCS4-LE格式进行编码并发送到服务器。需要注意的是,编码正确性是客户端来保证。因此,如果用户想要正常使用nchar字段来存储诸如中文、日文、韩文等非ASCII字符,需要正确设置客户端的编码格式。 + +客户端的输入的字符均采用操作系统当前默认的编码格式,在Linux系统上多为UTF-8,部分中文系统编码则可能是GB18030或GBK等。在docker环境中默认的编码是POSIX。在中文版Windows系统中,编码则是CP936。客户端需要确保正确设置自己所使用的字符集,即客户端运行的操作系统当前编码字符集,才能保证nchar中的数据正确转换为UCS4-LE编码格式。 + +在 Linux 中 locale 的命名规则为: <语言>_<地区>.<字符集编码> 如:zh_CN.UTF-8,zh代表中文,CN代表大陆地区,UTF-8表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux系统与 Mac OSX 系统可以通过设置locale来确定系统的字符编码,由于Windows使用的locale中不是POSIX标准的locale格式,因此在Windows下需要采用另一个配置参数charset来指定字符编码。在Linux 系统中也可以使用charset来指定字符编码。 + +- charset + + > 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 + +如果配置文件中不设置charset,在Linux系统中,taos在启动时候,自动读取系统当前的locale信息,并从locale信息中解析提取charset编码格式。如果自动读取locale信息失败,则尝试读取charset配置,如果读取charset配置也失败,则中断启动过程。 + +在Linux系统中,locale信息包含了字符编码信息,因此正确设置了Linux系统locale以后可以不用再单独设置charset。例如: +``` + locale zh_CN.UTF-8 +``` +在Windows系统中,无法从locale获取系统当前编码。如果无法从配置文件中读取字符串编码信息,taos默认设置为字符编码为CP936。其等效在配置文件中添加如下配置: +``` + charset CP936 +``` 
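The section above notes that locale, charset, and timezone can be supplied either in taos.cfg or "through the API". As a cross-reference, the sketch below shows how a JDBC client could pass those same settings as connection properties. This is a minimal illustration only, not something this patch adds: the property keys ("locale", "charset", "timezone") and the jdbc:TAOS:// URL form are assumptions based on the taos-jdbcdriver conventions.

```java
// Hedged sketch: assumes taos-jdbcdriver is on the classpath and that the
// driver accepts "locale", "charset" and "timezone" connection properties,
// mirroring the taos.cfg options discussed above.
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class ClientLocaleExample {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");

        Properties props = new Properties();
        props.setProperty("user", "root");
        props.setProperty("password", "taosdata");
        // Equivalent to the taos.cfg entries discussed above: locale / charset / timezone
        props.setProperty("locale", "zh_CN.UTF-8");
        props.setProperty("charset", "UTF-8");
        props.setProperty("timezone", "Asia/Shanghai");

        try (Connection conn =
                 DriverManager.getConnection("jdbc:TAOS://localhost:6030/log", props)) {
            System.out.println("connected with explicit locale/charset/timezone");
        }
    }
}
```

Setting these per connection can be handier than editing taos.cfg when one client host talks to clusters in different time zones or character sets.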
+如果需要调整字符编码,请查阅当前操作系统使用的编码,并在配置文件中正确设置。 + +在Linux系统中,如果用户同时设置了locale和字符集编码charset,并且locale和charset的不一致,后设置的值将覆盖前面设置的值。 +``` + locale zh_CN.UTF-8 + charset GBK +``` +则charset的有效值是GBK。 +``` + charset GBK + locale zh_CN.UTF-8 +``` +charset的有效值是UTF-8。 + +日志的配置参数,与server 的配置参数完全一样。 + +- timezone + + 默认值:从系统中动态获取当前的时区设置 + +客户端运行系统所在的时区。为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。 + +在Linux系统中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如: +``` + timezone UTC-8 + timezone GMT-8 + timezone Asia/Shanghai +``` +均是合法的设置东八区时区的格式。 + +时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词now的解析)产生影响。例如: +``` + SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08'; +``` +在东八区,SQL语句等效于 +``` + SELECT count(*) FROM table_name WHERE TS<1554955268000; +``` +在UTC时区,SQL语句等效于 +``` + SELECT count(*) FROM table_name WHERE TS<1554984068000; +``` +为了避免使用字符串时间格式带来的不确定性,也可以直接使用Unix时间戳。此外,还可以在SQL语句中使用带有时区的时间戳字符串,例如:RFC3339格式的时间戳字符串,2013-04-12T15:52:01.123+08:00或者ISO-8601格式时间戳字符串2013-04-12T15:52:01.123+0800。上述两个字符串转化为Unix时间戳不受系统所在时区的影响。 ## 用户管理 diff --git a/documentation20/webdocs/markdowndocs/Documentation-ch.md b/documentation20/webdocs/markdowndocs/Documentation-ch.md index 4d593cec90..077a043138 100644 --- a/documentation20/webdocs/markdowndocs/Documentation-ch.md +++ b/documentation20/webdocs/markdowndocs/Documentation-ch.md @@ -95,6 +95,8 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 - [数据查询](https://www.taosdata.com/cn/documentation20/taos-sql/#数据查询):支持时间段、值过滤、排序、查询结果手动分页等 - [SQL函数](https://www.taosdata.com/cn/documentation20/taos-sql/#SQL函数):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等 - [时间维度聚合](https://www.taosdata.com/cn/documentation20/taos-sql/#时间维度聚合):将表中数据按照时间段进行切割后聚合,降维处理 +- [边界线制](https://www.taosdata.com/cn/documentation20/taos-sql/#TAOS-SQL-边界限制):TAOS SQL的边界限制 +- [错误码](https://www.taosdata.com/cn/documentation20/Taos-Error-Code):TDengine 2.0 错误码以及对应的十进制码 ## TDengine的技术设计 diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md index 29f6e5ee57..ae42582687 100644 --- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md @@ -86,13 +86,29 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ```mysql ALTER DATABASE db_name COMP 2; ``` - 修改数据库文件压缩标志位,有效数字为0,1,2. 0表示不压缩,1表示一阶段压缩,2表示两阶段压缩。修改后可以使用show databases命令查看是否修改成功 + COMP参数是指修改数据库文件压缩标志位,取值范围为[0, 2]. 
0表示不压缩,1表示一阶段压缩,2表示两阶段压缩。 ```mysql ALTER DATABASE db_name REPLICA 2; ``` - 修改数据库副本数,有效副本数为1到3。在集群中使用,副本数必须小于dnode的数目。修改后可以使用show databases命令查看是否修改成功 + REPLICA参数是指修改数据库副本数,取值范围[1, 3]。在集群中使用,副本数必须小于dnode的数目。 + ```mysql + ALTER DATABASE db_name KEEP 365; + ``` + KEEP参数是指修改数据文件保存的天数,缺省值为3650,取值范围[days, 365000],必须大于或等于days参数值。 + + ```mysql + ALTER DATABASE db_name QUORUM 365; + ``` + QUORUM参数是指数据写入成功所需要的确认数。取值范围[1, 3]。对于异步复制,quorum设为1,具有master角色的虚拟节点自己确认即可。对于同步复制,需要至少大于等于2。原则上,Quorum >=1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。 + + ```mysql + ALTER DATABASE db_name BLOCKS 365; + ``` + BLOCKS参数是每个VNODE (TSDB) 中有多少cache大小的内存块,因此一个VNODE的用的内存大小粗略为(cache * blocks)。取值范围[3, 1000]。 + + **Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。 - **显示系统所有数据库** ```mysql diff --git a/documentation20/webdocs/markdowndocs/Taos Error Code-ch.md b/documentation20/webdocs/markdowndocs/Taos Error Code-ch.md new file mode 100644 index 0000000000..6ba0decfd3 --- /dev/null +++ b/documentation20/webdocs/markdowndocs/Taos Error Code-ch.md @@ -0,0 +1,173 @@ +# TDengine 2.0 错误码以及对应的十进制码 + + +| Code | bit | error code | 错误描述 | 十进制错误码 | +|-----------------------| :---: | :---------: | :------------------------ | ---------------- | +|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647| +|TSDB_CODE_RPC_AUTH_REQUIRED| 0 | 0x0002 | "Authentication required"| -2147483646| +|TSDB_CODE_RPC_AUTH_FAILURE| 0| 0x0003 | "Authentication failure"| -2147483645| +|TSDB_CODE_RPC_REDIRECT |0 | 0x0004| "Redirect"| -2147483644| +|TSDB_CODE_RPC_NOT_READY| 0 | 0x0005 | "System not ready"| -2147483643| +|TSDB_CODE_RPC_ALREADY_PROCESSED| 0 | 0x0006 |"Message already processed"| -2147483642| +|TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED| 0 |0x0007| "Last session not finished"| -2147483641| +|TSDB_CODE_RPC_MISMATCHED_LINK_ID| 0| 0x0008 | "Mismatched meter id"| -2147483640| +|TSDB_CODE_RPC_TOO_SLOW| 0 | 0x0009 | "Processing of request timed out"| -2147483639| +|TSDB_CODE_RPC_MAX_SESSIONS| 0 | 0x000A | "Number of sessions reached limit"| -2147483638| +|TSDB_CODE_RPC_NETWORK_UNAVAIL| 0 |0x000B | "Unable to establish connection" |-2147483637| +|TSDB_CODE_RPC_APP_ERROR| 0| 0x000C | "Unexpected generic error in RPC"| -2147483636| +|TSDB_CODE_RPC_UNEXPECTED_RESPONSE| 0 |0x000D | "Unexpected response"| -2147483635| +|TSDB_CODE_RPC_INVALID_VALUE| 0 | 0x000E | "Invalid value"| -2147483634| +|TSDB_CODE_RPC_INVALID_TRAN_ID| 0 | 0x000F | "Invalid transaction id"| -2147483633| +|TSDB_CODE_RPC_INVALID_SESSION_ID| 0| 0x0010 | "Invalid session id"| -2147483632| +|TSDB_CODE_RPC_INVALID_MSG_TYPE| 0| 0x0011| "Invalid message type"| -2147483631| +|TSDB_CODE_RPC_INVALID_RESPONSE_TYPE| 0 | 0x0012| "Invalid response type"| -2147483630| +|TSDB_CODE_RPC_INVALID_TIME_STAMP| 0| 0x0013| "Invalid timestamp"| -2147483629| +|TSDB_CODE_COM_OPS_NOT_SUPPORT| 0 | 0x0100| "Operation not supported"| -2147483392| +|TSDB_CODE_COM_MEMORY_CORRUPTED |0| 0x0101 | "Memory corrupted"| -2147483391| +|TSDB_CODE_COM_OUT_OF_MEMORY| 0| 0x0102| "Out of memory"| -2147483390| +|TSDB_CODE_COM_INVALID_CFG_MSG| 0 | 0x0103| "Invalid config message"| -2147483389| +|TSDB_CODE_COM_FILE_CORRUPTED| 0| 0x0104| "Data file corrupted" |-2147483388| +|TSDB_CODE_TSC_INVALID_SQL| 0| 0x0200 | "Invalid SQL statement"| -2147483136| +|TSDB_CODE_TSC_INVALID_QHANDLE| 0 | 0x0201 | "Invalid qhandle"| -2147483135| +|TSDB_CODE_TSC_INVALID_TIME_STAMP| 0 | 0x0202 | "Invalid combination of client/service time"| -2147483134| +|TSDB_CODE_TSC_INVALID_VALUE| 0 | 0x0203| "Invalid value in client"| -2147483133| 
+|TSDB_CODE_TSC_INVALID_VERSION| 0 | 0x0204 | "Invalid client version" |-2147483132| +|TSDB_CODE_TSC_INVALID_IE| 0 | 0x0205 | "Invalid client ie" |-2147483131| +|TSDB_CODE_TSC_INVALID_FQDN| 0 | 0x0206| "Invalid host name"| -2147483130| +|TSDB_CODE_TSC_INVALID_USER_LENGTH| 0 | 0x0207| "Invalid user name"| -2147483129| +|TSDB_CODE_TSC_INVALID_PASS_LENGTH| 0 | 0x0208 | "Invalid password"| -2147483128| +|TSDB_CODE_TSC_INVALID_DB_LENGTH| 0 | 0x0209| "Database name too long"| -2147483127| +|TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH| 0 | 0x020A | "Table name too long"| -2147483126| +|TSDB_CODE_TSC_INVALID_CONNECTION| 0 | 0x020B| "Invalid connection"| -2147483125| +|TSDB_CODE_TSC_OUT_OF_MEMORY| 0 | 0x020C | "System out of memory" |-2147483124| +|TSDB_CODE_TSC_NO_DISKSPACE| 0 | 0x020D | "System out of disk space"| -2147483123| +|TSDB_CODE_TSC_QUERY_CACHE_ERASED| 0 | 0x020E| "Query cache erased"| -2147483122| +|TSDB_CODE_TSC_QUERY_CANCELLED| 0 | 0x020F |"Query terminated"| -2147483121| +|TSDB_CODE_TSC_SORTED_RES_TOO_MANY| 0 |0x0210 | "Result set too large to be sorted"| -2147483120| +|TSDB_CODE_TSC_APP_ERROR| 0 | 0x0211 | "Application error"| -2147483119| +|TSDB_CODE_TSC_ACTION_IN_PROGRESS| 0 |0x0212 | "Action in progress"| -2147483118| +|TSDB_CODE_TSC_DISCONNECTED| 0 | 0x0213 |"Disconnected from service" |-2147483117| +|TSDB_CODE_TSC_NO_WRITE_AUTH| 0 | 0x0214 | "No write permission" |-2147483116| +|TSDB_CODE_MND_MSG_NOT_PROCESSED| 0| 0x0300| "Message not processed"| -2147482880| +|TSDB_CODE_MND_ACTION_IN_PROGRESS| 0 | 0x0301 |"Message is progressing"| -2147482879| +|TSDB_CODE_MND_ACTION_NEED_REPROCESSED| 0 | 0x0302 |"Messag need to be reprocessed"| -2147482878| +|TSDB_CODE_MND_NO_RIGHTS| 0 | 0x0303| "Insufficient privilege for operation"| -2147482877| +|TSDB_CODE_MND_APP_ERROR| 0 | 0x0304 | "Unexpected generic error in mnode"| -2147482876| +|TSDB_CODE_MND_INVALID_CONNECTION| 0 | 0x0305 | "Invalid message connection"| -2147482875| +|TSDB_CODE_MND_INVALID_MSG_VERSION| 0 | 0x0306 | "Incompatible protocol version"| -2147482874| +|TSDB_CODE_MND_INVALID_MSG_LEN| 0| 0x0307 | "Invalid message length"| -2147482873| +|TSDB_CODE_MND_INVALID_MSG_TYPE| 0 | 0x0308 | "Invalid message type" |-2147482872| +|TSDB_CODE_MND_TOO_MANY_SHELL_CONNS| 0 |0x0309 | "Too many connections"| -2147482871| +|TSDB_CODE_MND_OUT_OF_MEMORY| 0 |0x030A | "Out of memory in mnode"| -2147482870| +|TSDB_CODE_MND_INVALID_SHOWOBJ| 0 | 0x030B |"Data expired"| -2147482869| +|TSDB_CODE_MND_INVALID_QUERY_ID |0 | 0x030C |"Invalid query id" |-2147482868| +|TSDB_CODE_MND_INVALID_STREAM_ID| 0 |0x030D | "Invalid stream id"| -2147482867| +|TSDB_CODE_MND_INVALID_CONN_ID| 0| 0x030E | "Invalid connection id" |-2147482866| +|TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE| 0 | 0x0320| "Object already there"| -2147482848| +|TSDB_CODE_MND_SDB_ERROR| 0 |0x0321 | "Unexpected generic error in sdb" |-2147482847| +|TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE| 0 | 0x0322| "Invalid table type" |-2147482846| +|TSDB_CODE_MND_SDB_OBJ_NOT_THERE| 0 | 0x0323 |"Object not there" |-2147482845| +|TSDB_CODE_MND_SDB_INVAID_META_ROW| 0 | 0x0324| "Invalid meta row" |-2147482844| +|TSDB_CODE_MND_SDB_INVAID_KEY_TYPE| 0 | 0x0325 |"Invalid key type" |-2147482843| +|TSDB_CODE_MND_DNODE_ALREADY_EXIST| 0 | 0x0330 | "DNode already exists"| -2147482832| +|TSDB_CODE_MND_DNODE_NOT_EXIST| 0 | 0x0331| "DNode does not exist" |-2147482831| +|TSDB_CODE_MND_VGROUP_NOT_EXIST| 0 | 0x0332 |"VGroup does not exist"| -2147482830| +|TSDB_CODE_MND_NO_REMOVE_MASTER |0 | 0x0333 | "Master DNode cannot be removed"| -2147482829| 
+|TSDB_CODE_MND_NO_ENOUGH_DNODES |0 | 0x0334| "Out of DNodes"| -2147482828| +|TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT |0 | 0x0335 | "Cluster cfg inconsistent"| -2147482827| +|TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION| 0 | 0x0336 | "Invalid dnode cfg option"| -2147482826| +|TSDB_CODE_MND_BALANCE_ENABLED| 0 | 0x0337 | "Balance already enabled" |-2147482825| +|TSDB_CODE_MND_VGROUP_NOT_IN_DNODE| 0 |0x0338 | "Vgroup not in dnode"| -2147482824| +|TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE| 0 | 0x0339 | "Vgroup already in dnode"| -2147482823| +|TSDB_CODE_MND_DNODE_NOT_FREE |0 | 0x033A |"Dnode not avaliable"| -2147482822| +|TSDB_CODE_MND_INVALID_CLUSTER_ID |0 |0x033B | "Cluster id not match"| -2147482821| +|TSDB_CODE_MND_NOT_READY| 0 | 0x033C |"Cluster not ready"| -2147482820| +|TSDB_CODE_MND_ACCT_ALREADY_EXIST| 0 | 0x0340 | "Account already exists" |-2147482816| +|TSDB_CODE_MND_INVALID_ACCT| 0 | 0x0341| "Invalid account"| -2147482815| +|TSDB_CODE_MND_INVALID_ACCT_OPTION| 0 | 0x0342 | "Invalid account options"| -2147482814| +|TSDB_CODE_MND_USER_ALREADY_EXIST| 0 | 0x0350 | "User already exists"| -2147482800| +|TSDB_CODE_MND_INVALID_USER |0 | 0x0351 | "Invalid user" |-2147482799| +|TSDB_CODE_MND_INVALID_USER_FORMAT| |0 |0x0352 |"Invalid user format" |-2147482798| +|TSDB_CODE_MND_INVALID_PASS_FORMAT| 0| 0x0353 | "Invalid password format"| -2147482797| +|TSDB_CODE_MND_NO_USER_FROM_CONN| 0 | 0x0354 | "Can not get user from conn"| -2147482796| +|TSDB_CODE_MND_TOO_MANY_USERS| 0 | 0x0355| "Too many users"| -2147482795| +|TSDB_CODE_MND_TABLE_ALREADY_EXIST| 0| 0x0360| "Table already exists"| -2147482784| +|TSDB_CODE_MND_INVALID_TABLE_ID| 0| 0x0361| "Table name too long"| -2147482783| +|TSDB_CODE_MND_INVALID_TABLE_NAME| 0| 0x0362 | "Table does not exist"| -2147482782| +|TSDB_CODE_MND_INVALID_TABLE_TYPE| 0| 0x0363 | "Invalid table type in tsdb"| -2147482781| +|TSDB_CODE_MND_TOO_MANY_TAGS| 0 | 0x0364| "Too many tags"| -2147482780| +|TSDB_CODE_MND_TOO_MANY_TIMESERIES| 0| 0x0366| "Too many time series"| -2147482778| +|TSDB_CODE_MND_NOT_SUPER_TABLE| 0 |0x0367| "Not super table"| -2147482777| +|TSDB_CODE_MND_COL_NAME_TOO_LONG| 0| 0x0368| "Tag name too long"| -2147482776| +|TSDB_CODE_MND_TAG_ALREAY_EXIST| 0| 0x0369| "Tag already exists"| -2147482775| +|TSDB_CODE_MND_TAG_NOT_EXIST| 0 |0x036A | "Tag does not exist" |-2147482774| +|TSDB_CODE_MND_FIELD_ALREAY_EXIST| 0 | 0x036B| "Field already exists"| -2147482773| +|TSDB_CODE_MND_FIELD_NOT_EXIST| 0 | 0x036C | "Field does not exist"| -2147482772| +|TSDB_CODE_MND_INVALID_STABLE_NAME |0 | 0x036D |"Super table does not exist" |-2147482771| +|TSDB_CODE_MND_DB_NOT_SELECTED| 0 | 0x0380 | "Database not specified or available"| -2147482752| +|TSDB_CODE_MND_DB_ALREADY_EXIST| 0 | 0x0381 | "Database already exists"| -2147482751| +|TSDB_CODE_MND_INVALID_DB_OPTION| 0 | 0x0382 | "Invalid database options"| -2147482750| +|TSDB_CODE_MND_INVALID_DB| |0 | 0x0383 | "Invalid database name"| -2147482749| +|TSDB_CODE_MND_MONITOR_DB_FORBIDDEN| 0 | 0x0384 | "Cannot delete monitor database"| -2147482748| +|TSDB_CODE_MND_TOO_MANY_DATABASES| 0| 0x0385 | "Too many databases for account"| -2147482747| +|TSDB_CODE_MND_DB_IN_DROPPING| 0 | 0x0386| "Database not available" |-2147482746| +|TSDB_CODE_DND_MSG_NOT_PROCESSED| 0| 0x0400 | "Message not processed"| -2147482624| +|TSDB_CODE_DND_OUT_OF_MEMORY |0 | 0x0401 | "Dnode out of memory"| -2147482623| +|TSDB_CODE_DND_NO_WRITE_ACCESS| 0 | 0x0402 | "No permission for disk files in dnode"| -2147482622| +|TSDB_CODE_DND_INVALID_MSG_LEN| 0 | 0x0403 | "Invalid message 
length"| -2147482621| +|TSDB_CODE_VND_ACTION_IN_PROGRESS |0 |0x0500| "Action in progress" |-2147482368| +|TSDB_CODE_VND_MSG_NOT_PROCESSED| 0 |0x0501 | "Message not processed" |-2147482367| +|TSDB_CODE_VND_ACTION_NEED_REPROCESSED |0 |0x0502| "Action need to be reprocessed"| -2147482366| +|TSDB_CODE_VND_INVALID_VGROUP_ID |0 | 0x0503| "Invalid Vgroup ID"| -2147482365| +|TSDB_CODE_VND_INIT_FAILED| 0 | 0x0504 | "Vnode initialization failed"| -2147482364| +|TSDB_CODE_VND_NO_DISKSPACE| 0 |0x0505| "System out of disk space" |-2147482363| +|TSDB_CODE_VND_NO_DISK_PERMISSIONS| 0 | 0x0506| "No write permission for disk files" |-2147482362| +|TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR| 0 | 0x0507 | "Missing data file"| -2147482361| +|TSDB_CODE_VND_OUT_OF_MEMORY |0| 0x0508 | "Out of memory"| -2147482360| +|TSDB_CODE_VND_APP_ERROR| 0| 0x0509 | "Unexpected generic error in vnode"| -2147482359| +|TSDB_CODE_VND_INVALID_STATUS |0| 0x0510 | "Database not ready"| -2147482352| +|TSDB_CODE_VND_NOT_SYNCED| 0 | 0x0511 | "Database suspended"| -2147482351| +|TSDB_CODE_VND_NO_WRITE_AUTH| 0 | 0x0512| "Write operation denied" |-2147482350| +|TSDB_CODE_TDB_INVALID_TABLE_ID |0 | 0x0600 | "Invalid table ID"| -2147482112| +|TSDB_CODE_TDB_INVALID_TABLE_TYPE| 0| 0x0601 |"Invalid table type"| -2147482111| +|TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION| 0| 0x0602| "Invalid table schema version"| -2147482110| +|TSDB_CODE_TDB_TABLE_ALREADY_EXIST| 0 | 0x0603| "Table already exists"| -2147482109| +|TSDB_CODE_TDB_INVALID_CONFIG| 0 | 0x0604| "Invalid configuration"| -2147482108| +|TSDB_CODE_TDB_INIT_FAILED| 0 | 0x0605| "Tsdb init failed"| -2147482107| +|TSDB_CODE_TDB_NO_DISKSPACE| 0 | 0x0606| "No diskspace for tsdb"| -2147482106| +|TSDB_CODE_TDB_NO_DISK_PERMISSIONS| 0 | 0x0607| "No permission for disk files"| -2147482105| +|TSDB_CODE_TDB_FILE_CORRUPTED| 0 | 0x0608| "Data file(s) corrupted"| -2147482104| +|TSDB_CODE_TDB_OUT_OF_MEMORY| 0 | 0x0609| "Out of memory"| -2147482103| +|TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE| 0 | 0x060A| "Tag too old"| -2147482102| +|TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE |0| 0x060B | "Timestamp data out of range"| -2147482101| +|TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP| 0| 0x060C| "Submit message is messed up"| -2147482100| +|TSDB_CODE_TDB_INVALID_ACTION| 0 | 0x060D | "Invalid operation"| -2147482099| +|TSDB_CODE_TDB_INVALID_CREATE_TB_MSG| 0 | 0x060E| "Invalid creation of table"| -2147482098| +|TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM| 0 | 0x060F| "No table data in memory skiplist" |-2147482097| +|TSDB_CODE_TDB_FILE_ALREADY_EXISTS| 0 | 0x0610| "File already exists"| -2147482096| +|TSDB_CODE_TDB_TABLE_RECONFIGURE| 0 | 0x0611| "Need to reconfigure table"| -2147482095| +|TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO| 0 | 0x0612| "Invalid information to create table"| -2147482094| +|TSDB_CODE_QRY_INVALID_QHANDLE| 0 | 0x0700| "Invalid handle"| -2147481856| +|TSDB_CODE_QRY_INVALID_MSG| 0 | 0x0701| "Invalid message"| -2147481855| +|TSDB_CODE_QRY_NO_DISKSPACE| 0 | 0x0702 | "No diskspace for query"| -2147481854| +|TSDB_CODE_QRY_OUT_OF_MEMORY| 0 | 0x0703 | "System out of memory"| -2147481853| +|TSDB_CODE_QRY_APP_ERROR| 0 | 0x0704 | "Unexpected generic error in query"| -2147481852| +|TSDB_CODE_QRY_DUP_JOIN_KEY| 0 | 0x0705| "Duplicated join key"| -2147481851| +|TSDB_CODE_QRY_EXCEED_TAGS_LIMIT| 0 | 0x0706 | "Tag conditon too many"| -2147481850| +|TSDB_CODE_QRY_NOT_READY |0| 0x0707 | "Query not ready" |-2147481849| +|TSDB_CODE_QRY_HAS_RSP| 0 | 0x0708| "Query should response"| -2147481848| +|TSDB_CODE_GRANT_EXPIRED| 0 | 0x0800| "License expired"| -2147481600| 
+|TSDB_CODE_GRANT_DNODE_LIMITED| 0 | 0x0801 | "DNode creation limited by licence"| -2147481599| +|TSDB_CODE_GRANT_ACCT_LIMITED |0| 0x0802 |"Account creation limited by license"| -2147481598| +|TSDB_CODE_GRANT_TIMESERIES_LIMITED| 0 | 0x0803 | "Table creation limited by license"| -2147481597| +|TSDB_CODE_GRANT_DB_LIMITED| 0 | 0x0804 | "DB creation limited by license"| -2147481596| +|TSDB_CODE_GRANT_USER_LIMITED| 0 | 0x0805 | "User creation limited by license"| -2147481595| +|TSDB_CODE_GRANT_CONN_LIMITED| 0| 0x0806 | "Conn creation limited by license" |-2147481594| +|TSDB_CODE_GRANT_STREAM_LIMITED| 0 | 0x0807 | "Stream creation limited by license"| -2147481593| +|TSDB_CODE_GRANT_SPEED_LIMITED| 0 | 0x0808 | "Write speed limited by license" |-2147481592| +|TSDB_CODE_GRANT_STORAGE_LIMITED| 0 |0x0809 | "Storage capacity limited by license"| -2147481591| +|TSDB_CODE_GRANT_QUERYTIME_LIMITED| 0 | 0x080A | "Query time limited by license" |-2147481590| +|TSDB_CODE_GRANT_CPU_LIMITED| 0 |0x080B |"CPU cores limited by license"| -2147481589| +|TSDB_CODE_SYN_INVALID_CONFIG| 0 | 0x0900| "Invalid Sync Configuration"| -2147481344| +|TSDB_CODE_SYN_NOT_ENABLED| 0 | 0x0901 | "Sync module not enabled" |-2147481343| +|TSDB_CODE_WAL_APP_ERROR| 0| 0x1000 | "Unexpected generic error in wal" |-2147479552| \ No newline at end of file diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/webdocs/markdowndocs/administrator-ch.md index ff5c0d5713..50b388650b 100644 --- a/documentation20/webdocs/markdowndocs/administrator-ch.md +++ b/documentation20/webdocs/markdowndocs/administrator-ch.md @@ -93,6 +93,7 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修 - role:dnode的可选角色。0-any; 既可作为mnode,也可分配vnode;1-mgmt;只能作为mnode,不能分配vnode;2-dnode;不能作为mnode,只能分配vnode - debugFlag:运行日志开关。131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志)。默认值:131或135(不同模块有不同的默认值)。 - numOfLogLines:单个日志文件允许的最大行数。默认值:10,000,000行。 +- logKeepDays:日志文件的最长保存时间。大于0时,日志文件会被重命名为taosdlog.xxx,其中xxx为日志文件最后修改的时间戳,单位为秒。默认值:0天。 - maxSQLLength:单条SQL语句允许最长限制。默认值:65380字节。 - telemetryReporting: 是否允许 TDengine 采集和上报基本使用信息,0表示不允许,1表示允许。 默认值:1。 @@ -130,23 +131,83 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数 - maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。 - maxVgroupsPerDb: 每个数据库中能够使用的最大vnode个数。 - arbitrator: 系统中裁决器的end point,缺省为空 -- timezone:时区。从系统中动态获取当前的时区设置。 -- locale:系统区位信息及编码格式。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 -- charset:字符集编码。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 +- timezone、locale、charset 的配置见客户端配置。 ## 客户端配置 -TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见[Shell命令行程序](#_TDengine_Shell命令行程序)。本节主要讲解taos客户端应用在配置文件taos.cfg文件中使用到的参数。 +TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。 -客户端配置参数列表及解释 +客户端配置参数 - firstEp: taos启动时,主动连接的集群中第一个taosd实例的end point, 缺省值为 localhost:6030。 - secondEp: taos启动时,如果first连接不上,尝试连接集群中第二个taosd实例的end point, 缺省值为空。 -- charset:字符集编码。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 -- locale:系统区位信息及编码格式。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 -- maxBinaryDisplayWidth:Shell中binary 和 nchar字段的显示宽度上限,超过此限制的部分将被隐藏。默认值:30。可在 shell 中通过命令 set max_binary_display_width *nn* 动态修改此选项。 +- locale -日志的配置参数,与server的配置参数完全一样。 + > 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 + 
+TDengine为存储中文、日文、韩文等非ASCII编码的宽字符,提供一种专门的字段类型nchar。写入nchar字段的数据将统一采用UCS4-LE格式进行编码并发送到服务器。需要注意的是,编码正确性是客户端来保证。因此,如果用户想要正常使用nchar字段来存储诸如中文、日文、韩文等非ASCII字符,需要正确设置客户端的编码格式。 + +客户端的输入的字符均采用操作系统当前默认的编码格式,在Linux系统上多为UTF-8,部分中文系统编码则可能是GB18030或GBK等。在docker环境中默认的编码是POSIX。在中文版Windows系统中,编码则是CP936。客户端需要确保正确设置自己所使用的字符集,即客户端运行的操作系统当前编码字符集,才能保证nchar中的数据正确转换为UCS4-LE编码格式。 + +在 Linux 中 locale 的命名规则为: <语言>_<地区>.<字符集编码> 如:zh_CN.UTF-8,zh代表中文,CN代表大陆地区,UTF-8表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux系统与 Mac OSX 系统可以通过设置locale来确定系统的字符编码,由于Windows使用的locale中不是POSIX标准的locale格式,因此在Windows下需要采用另一个配置参数charset来指定字符编码。在Linux 系统中也可以使用charset来指定字符编码。 + +- charset + + > 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 + +如果配置文件中不设置charset,在Linux系统中,taos在启动时候,自动读取系统当前的locale信息,并从locale信息中解析提取charset编码格式。如果自动读取locale信息失败,则尝试读取charset配置,如果读取charset配置也失败,则中断启动过程。 + +在Linux系统中,locale信息包含了字符编码信息,因此正确设置了Linux系统locale以后可以不用再单独设置charset。例如: +``` + locale zh_CN.UTF-8 +``` +在Windows系统中,无法从locale获取系统当前编码。如果无法从配置文件中读取字符串编码信息,taos默认设置为字符编码为CP936。其等效在配置文件中添加如下配置: +``` + charset CP936 +``` +如果需要调整字符编码,请查阅当前操作系统使用的编码,并在配置文件中正确设置。 + +在Linux系统中,如果用户同时设置了locale和字符集编码charset,并且locale和charset的不一致,后设置的值将覆盖前面设置的值。 +``` + locale zh_CN.UTF-8 + charset GBK +``` +则charset的有效值是GBK。 +``` + charset GBK + locale zh_CN.UTF-8 +``` +charset的有效值是UTF-8。 + +日志的配置参数,与server 的配置参数完全一样。 + +- timezone + + 默认值:从系统中动态获取当前的时区设置 + +客户端运行系统所在的时区。为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。 + +在Linux系统中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如: +``` + timezone UTC-8 + timezone GMT-8 + timezone Asia/Shanghai +``` +均是合法的设置东八区时区的格式。 + +时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词now的解析)产生影响。例如: +``` + SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08'; +``` +在东八区,SQL语句等效于 +``` + SELECT count(*) FROM table_name WHERE TS<1554955268000; +``` +在UTC时区,SQL语句等效于 +``` + SELECT count(*) FROM table_name WHERE TS<1554984068000; +``` +为了避免使用字符串时间格式带来的不确定性,也可以直接使用Unix时间戳。此外,还可以在SQL语句中使用带有时区的时间戳字符串,例如:RFC3339格式的时间戳字符串,2013-04-12T15:52:01.123+08:00或者ISO-8601格式时间戳字符串2013-04-12T15:52:01.123+0800。上述两个字符串转化为Unix时间戳不受系统所在时区的影响。 启动taos时,也可以从命令行指定一个taosd实例的end point,否则就从taos.cfg读取。 diff --git a/documentation20/webdocs/markdowndocs/cluster-ch.md b/documentation20/webdocs/markdowndocs/cluster-ch.md index 097433a18a..a1ac1d6fd6 100644 --- a/documentation20/webdocs/markdowndocs/cluster-ch.md +++ b/documentation20/webdocs/markdowndocs/cluster-ch.md @@ -2,13 +2,15 @@ 多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看TDengine 2.0整体架构一章。而且在安装集群之前,请按照[《立即开始》](https://www.taosdata.com/cn/getting-started20/)一章安装并体验过单节点功能。 -集群的每个节点是由End Point来唯一标识的,End Point是由FQDN(Fully Qualified Domain Name)外加Port组成,比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname,可通过Linux命令`hostname -f`获取。端口是这个节点对外服务的端口号,缺省是6030,但可以通过taos.cfg里配置参数serverPort进行修改。一个节点可能配置了多个hostname, TDengine会自动获取第一个,但也可以通过taos.cfg里配置参数fqdn进行指定。如果习惯IP地址直接访问,可以将参数fqdn设置为本节点的IP地址。 +集群的每个节点是由End Point来唯一标识的,End Point是由FQDN(Fully Qualified Domain Name)外加Port组成,比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname,可通过Linux命令`hostname -f`获取,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。端口是这个节点对外服务的端口号,缺省是6030,但可以通过taos.cfg里配置参数serverPort进行修改。一个节点可能配置了多个hostname, TDengine会自动获取第一个,但也可以通过taos.cfg里配置参数fqdn进行指定。如果习惯IP地址直接访问,可以将参数fqdn设置为本节点的IP地址。 TDengine的集群管理极其简单,除添加和删除节点需要人工干预之外,其他全部是自动完成,最大程度的降低了运维的工作量。本章对集群管理的操作做详细的描述。 ## 准备工作 
**第一步**:如果搭建集群的节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据,具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html ) +**注意1:**因为FQDN的信息会写进文件,如果之前没有配置或者更改FQDN,且启动了TDengine。请一定在确保数据无用或者备份的前提下,清理一下之前的数据(rm -rf /var/lib/taos/); +**注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是 Host 文件。 **第二步**:建议关闭防火墙,至少保证端口:6030 - 6042的TCP和UDP端口都是开放的。**强烈建议**先关闭防火墙,集群搭建完毕之后,再来配置端口; @@ -23,7 +25,7 @@ TDengine的集群管理极其简单,除添加和删除节点需要人工干预 **第五步**:修改TDengine的配置文件(所有节点的文件/etc/taos/taos.cfg都需要修改)。假设准备启动的第一个节点End Point为 h1.taosdata.com:6030, 那么以下几个参数与集群相关: ``` -// firstEp 是每个节点启动后连接的第一个节点 +// firstEp 集群中所有节点的配置都是一致的,对其第一次访问后,就获得了整个集群的信息 firstEp h1.taosdata.com:6030 // 配置本节点的FQDN,如果本机只有一个hostname, 无需配置 @@ -32,7 +34,7 @@ fqdn h1.taosdata.com // 配置本节点的端口号,缺省是6030 serverPort 6030 -// 副本数为偶数的时候,需要配置,请参考《Arbitrator的使用》的部分 +// 服务端节点数为偶数的时候,需要配置,请参考《Arbitrator的使用》的部分 arbitrator ha.taosdata.com:6042 ``` diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md index 27c1054dc8..7bbf7531c8 100644 --- a/documentation20/webdocs/markdowndocs/faq-ch.md +++ b/documentation20/webdocs/markdowndocs/faq-ch.md @@ -32,7 +32,7 @@ 3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd* -4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得) +4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)),FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件 @@ -47,69 +47,14 @@ 检查服务器侧TCP端口连接是否工作:`nc -l {port}` 检查客户端侧TCP端口链接是否工作:`nc {hostIP} {port}` -10. 可以使用taos程序内嵌的网络连通检测功能:验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP)。 - - taos通过参数 -n 来确定运行服务端功能,还是客户端功能。-n server:表示运行检测服务端功能;-n client:表示运行检测客户端功能。 - - 1)首先在服务器上停止taosd服务; - - 2)在服务器上运行taos内嵌的网络连通检测的服务端功能:taos -n server -P 6030 -e 6042 -l 1000; - - 3)在客户端运行taos内嵌的网络连通检测的客户端功能:taos -n client -h host -P 6030 -e 6042 -l 1000; - - -n :指示运行网络连通检测的服务端功能,或客户端功能,缺省值为空,表示不启动网络连通检测; - - -h:指示服务端名称,可以是ip地址或fqdn格式。如:192.168.1.160,或 192.168.1.160:6030,或 hostname1,或hostname1:6030。缺省值是127.0.0.1。 - - -P :检测的起始端口号,缺省值是6030; - - -e:检测的结束端口号,必须大于等于起始端口号,缺省值是6042; - - -l:指定检测端口连通的报文长度,最大64000字节,缺省值是1000字节,测试时服务端和客户端必须指定相同; - - 服务端设置的起始端口和结束端口号,必须包含客户端设置的起始端口和结束端口号; - - 对于起始端口号有三种设置方式:缺省值、-h指定、-P指定,优先级是:-P指定 > -h指定 > 缺省值。 - - 客户端运行的输出样例: - - `sum@sum-virtualBox /home/sum $ taos -n client -h ubuntu-vbox6` - - `host: ubuntu-vbox6 start port: 6030 end port: 6042 packet len: 1000` - - `tcp port:6030 test ok. udp port:6030 test ok.` - - `tcp port:6031 test ok. udp port:6031 test ok.` - - `tcp port:6032 test ok. udp port:6032 test ok.` - - `tcp port:6033 test ok. udp port:6033 test ok.` - - `tcp port:6034 test ok. udp port:6034 test ok.` - - `tcp port:6035 test ok. udp port:6035 test ok.` - - `tcp port:6036 test ok. udp port:6036 test ok.` - - `tcp port:6037 test ok. udp port:6037 test ok.` - - `tcp port:6038 test ok. udp port:6038 test ok.` - - `tcp port:6039 test ok. udp port:6039 test ok.` - - `tcp port:6040 test ok. udp port:6040 test ok.` - - `tcp port:6041 test ok. udp port:6041 test ok.` - - `tcp port:6042 test ok. udp port:6042 test ok.` - - 如果某个端口不通,会输出 `port:xxxx test fail`的信息。 +10. 也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)。 -## 6. 遇到错误“Unexpected generic error in RPC”, 我怎么办? + +## 6. 遇到错误“Unexpected generic error in RPC”或者"TDengine Error: Unable to resolve FQDN", 我怎么办? 
产生这个错误,是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用,请做如下检查: -1. 请检查连接的服务器的FQDN是否正确 +1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 2. 如果网络配置有DNS server, 请检查是否正常工作 3. 如果网络没有配置DNS server, 请检查客户端所在机器的hosts文件,查看该FQDN是否配置,并是否有正确的IP地址。 4. 如果网络配置OK,从客户端所在机器,你需要能Ping该连接的FQDN,否则客户端是无法链接服务器的 diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index c12726d292..fdb8466706 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -189,6 +189,9 @@ # max number of rows per log filters # numOfLogLines 10000000 +# time of keeping log files, days +# logKeepDays 0 + # enable/disable async log # asyncLog 1 diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e4654efe1c..de00827c3a 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -232,8 +232,9 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } else if (pInfo->type == TSDB_SQL_DROP_TABLE) { assert(pInfo->pDCLInfo->nTokens == 1); - if (tscSetTableFullName(pTableMetaInfo, pzName, pSql) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + code = tscSetTableFullName(pTableMetaInfo, pzName, pSql); + if(code != TSDB_CODE_SUCCESS) { + return code; } } else if (pInfo->type == TSDB_SQL_DROP_DNODE) { pzName->n = strdequote(pzName->z); @@ -348,8 +349,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { case TSDB_SQL_DESCRIBE_TABLE: { SStrToken* pToken = &pInfo->pDCLInfo->a[0]; - const char* msg2 = "table name is too long"; const char* msg1 = "invalid table name"; + const char* msg2 = "table name is too long"; if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -710,7 +711,9 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu } int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) { - const char* msg = "name too long"; + const char* msg1 = "name too long"; + const char* msg2 = "invalid db name"; + const char *msg = msg1; SSqlCmd* pCmd = &pSql->cmd; int32_t code = TSDB_CODE_SUCCESS; @@ -728,16 +731,14 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableNa } else { // get current DB name first, then set it into path SStrToken t = {0}; getCurrentDBName(pSql, &t); - + if (t.n == 0) { + msg = msg2; + } code = setObjFullName(pTableMetaInfo->name, NULL, &t, pzTableName, NULL); } - if (code != TSDB_CODE_SUCCESS) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); - } - - if (code != TSDB_CODE_SUCCESS) { - free(oldName); + free(oldName); return code; } @@ -1072,7 +1073,7 @@ int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStr /* db name is not specified, the tableName dose not include db name */ if (pDB != NULL) { - if (pDB->n >= TSDB_ACCT_LEN + TSDB_DB_NAME_LEN) { + if (pDB->n >= TSDB_ACCT_LEN + TSDB_DB_NAME_LEN || pDB->n == 0) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -1596,13 +1597,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SColumnIndex index = COLUMN_INDEX_INITIALIZER; if (pItem->pNode->pParam != NULL) { - SStrToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo; - if (pToken->z == NULL || pToken->n == 0) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); - } - tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0]; - if (pParamElem->pNode->nSQLOptr == TK_ALL) { + SStrToken* pToken = 
&pParamElem->pNode->colInfo; + short sqlOptr = pParamElem->pNode->nSQLOptr; + if ((pToken->z == NULL || pToken->n == 0) + && (TK_INTEGER != sqlOptr)) /*select count(1) from table*/ { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + if (sqlOptr == TK_ALL) { // select table.* // check if the table name is valid or not SStrToken tmpToken = pParamElem->pNode->colInfo; @@ -1614,6 +1616,21 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false); + } else if (sqlOptr == TK_INTEGER) { // select count(1) from table1 + char buf[8] = {0}; + int64_t val = -1; + tVariant* pVariant = &pParamElem->pNode->val; + if (pVariant->nType == TSDB_DATA_TYPE_BIGINT) { + tVariantDump(pVariant, buf, TSDB_DATA_TYPE_BIGINT, true); + val = GET_INT64_VAL(buf); + } + if (val == 1) { + index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; + int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; + pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false); + } else { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } } else { // count the number of meters created according to the super table if (getColumnIndexByName(pCmd, pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { @@ -2741,27 +2758,31 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, } } + int32_t retVal = TSDB_CODE_SUCCESS; if (pExpr->nSQLOptr == TK_LE || pExpr->nSQLOptr == TK_LT) { - tVariantDump(&pRight->val, (char*)&pColumnFilter->upperBndd, colType, false); - } else { // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd - if (colType == TSDB_DATA_TYPE_BINARY) { - pColumnFilter->pz = (int64_t)calloc(1, pRight->val.nLen + TSDB_NCHAR_SIZE); - pColumnFilter->len = pRight->val.nLen; + retVal = tVariantDump(&pRight->val, (char*)&pColumnFilter->upperBndd, colType, false); - tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false); - } else if (colType == TSDB_DATA_TYPE_NCHAR) { - // pRight->val.nLen + 1 is larger than the actual nchar string length - pColumnFilter->pz = (int64_t)calloc(1, (pRight->val.nLen + 1) * TSDB_NCHAR_SIZE); + // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd + } else if (colType == TSDB_DATA_TYPE_BINARY) { + pColumnFilter->pz = (int64_t)calloc(1, pRight->val.nLen + TSDB_NCHAR_SIZE); + pColumnFilter->len = pRight->val.nLen; + retVal = tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false); - tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false); + } else if (colType == TSDB_DATA_TYPE_NCHAR) { + // pRight->val.nLen + 1 is larger than the actual nchar string length + pColumnFilter->pz = (int64_t)calloc(1, (pRight->val.nLen + 1) * TSDB_NCHAR_SIZE); + retVal = tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false); + size_t len = twcslen((wchar_t*)pColumnFilter->pz); + pColumnFilter->len = len * TSDB_NCHAR_SIZE; - size_t len = twcslen((wchar_t*)pColumnFilter->pz); - pColumnFilter->len = len * TSDB_NCHAR_SIZE; - } else { - tVariantDump(&pRight->val, (char*)&pColumnFilter->lowerBndd, colType, false); - } + } else { + retVal = tVariantDump(&pRight->val, (char*)&pColumnFilter->lowerBndd, colType, false); } + if (retVal != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); + } + switch 
(pExpr->nSQLOptr) { case TK_LE: pColumnFilter->upperRelOptr = TSDB_RELATION_LESS_EQUAL; @@ -4424,7 +4445,6 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const int32_t DEFAULT_TABLE_INDEX = 0; const char* msg1 = "invalid table name"; - const char* msg2 = "table name too long"; const char* msg3 = "manipulation of tag available for super table"; const char* msg4 = "set tag value only available for table"; const char* msg5 = "only support add one tag"; @@ -4457,7 +4477,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } if (tscSetTableFullName(pTableMetaInfo, &(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + return TSDB_CODE_TSC_INVALID_SQL; } int32_t ret = tscGetTableMeta(pSql, pTableMetaInfo); @@ -5734,7 +5754,6 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) { int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo) { const char* msg1 = "invalid table name"; - const char* msg2 = "table name too long"; SSqlCmd* pCmd = &pSql->cmd; SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex); @@ -5755,7 +5774,7 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p } if (tscSetTableFullName(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + return TSDB_CODE_TSC_INVALID_SQL; } if (!validateTableColumnInfo(pFieldList, pCmd) || @@ -5810,7 +5829,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { } if (tscSetTableFullName(pStableMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + return TSDB_CODE_TSC_INVALID_SQL; } // get meter meta from mnode @@ -6002,7 +6021,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0)); const char* msg0 = "invalid table name"; - const char* msg1 = "table name too long"; + //const char* msg1 = "table name too long"; const char* msg2 = "point interpolation query needs timestamp"; const char* msg5 = "fill only available for interval query"; const char* msg6 = "start(end) time of query range required or time range too large"; @@ -6075,7 +6094,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { SStrToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz}; if (tscSetTableFullName(pTableMetaInfo1, &t, pSql) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + return TSDB_CODE_TSC_INVALID_SQL; } tVariant* pTableItem1 = &pQuerySql->from->a[i + 1].pVar; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 0733690e3f..eda6db8a2b 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2019,7 +2019,8 @@ int tscProcessUseDbRsp(SSqlObj *pSql) { return 0; } -int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) { +int tscProcessDropDbRsp(SSqlObj *pSql) { + pSql->pTscObj->db[0] = 0; taosCacheEmpty(tscCacheHandle); return 0; } @@ -2095,6 +2096,10 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SRetrieveTableRsp *pRetrieve = (SRetrieveTableRsp *)pRes->pRsp; + if (pRetrieve == NULL) { + pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; + return pRes->code; + } pRes->numOfRows = htonl(pRetrieve->numOfRows); pRes->precision = htons(pRetrieve->precision); diff --git 
a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 6c6e5e35d8..cc4afeb3f8 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -325,8 +325,6 @@ void tdResetKVRowBuilder(SKVRowBuilder *pBuilder); SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder); static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) { - ASSERT(pBuilder->nCols == 0 || colId > pBuilder->pColIdx[pBuilder->nCols - 1].colId); - if (pBuilder->nCols >= pBuilder->tCols) { pBuilder->tCols *= 2; pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index fedafe5b02..77e8b76456 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -158,6 +158,7 @@ extern char buildinfo[]; // log extern int32_t tsAsyncLog; extern int32_t tsNumOfLogLines; +extern int32_t tsLogKeepDays; extern int32_t dDebugFlag; extern int32_t vDebugFlag; extern int32_t mDebugFlag; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 9683a63503..96e8fb26c6 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -254,7 +254,7 @@ bool taosCfgDynamicOptions(char *msg) { //if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; if (cfg->valType != TAOS_CFG_VTYPE_INT32) continue; - int32_t cfgLen = strlen(cfg->option); + int32_t cfgLen = (int32_t)strlen(cfg->option); if (cfgLen != olen) continue; if (strncasecmp(option, cfg->option, olen) != 0) continue; *((int32_t *)cfg->ptr) = vint; @@ -1013,12 +1013,22 @@ static void doInitGlobalConfig(void) { cfg.ptr = &tsNumOfLogLines; cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT; - cfg.minValue = 10000; + cfg.minValue = 1000; cfg.maxValue = 2000000000; cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "logKeepDays"; + cfg.ptr = &tsLogKeepDays; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = 365000; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + cfg.option = "asyncLog"; cfg.ptr = &tsAsyncLog; cfg.valType = TAOS_CFG_VTYPE_INT16; diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index b73bb010e0..766a58f9ba 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -93,14 +93,13 @@ 3.6.1 UTF-8 - 11 - 11 + 8 + 8 true true - org.apache.maven.plugins maven-source-plugin diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 86d179eae4..d8df2fc0d3 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -57,9 +57,9 @@ public class TSDBConnection implements Connection { File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR)); File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0]; List endpoints = loadConfigEndpoints(cfgFile); - if (!endpoints.isEmpty()){ - info.setProperty(TSDBDriver.PROPERTY_KEY_HOST,endpoints.get(0).split(":")[0]); - info.setProperty(TSDBDriver.PROPERTY_KEY_PORT,endpoints.get(0).split(":")[1]); + if (!endpoints.isEmpty()) { + 
info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]); + info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]); } //load taos.cfg end @@ -69,15 +69,15 @@ public class TSDBConnection implements Connection { info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD)); } - private List loadConfigEndpoints(File cfgFile){ + private List loadConfigEndpoints(File cfgFile) { List endpoints = new ArrayList<>(); - try(BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) { + try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) { String line = null; - while ((line = reader.readLine())!=null){ - if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")){ - endpoints.add(line.substring(line.indexOf('p')+1).trim()); + while ((line = reader.readLine()) != null) { + if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) { + endpoints.add(line.substring(line.indexOf('p') + 1).trim()); } - if (endpoints.size()>1) + if (endpoints.size() > 1) break; } } catch (FileNotFoundException e) { @@ -91,7 +91,7 @@ public class TSDBConnection implements Connection { /** * @param cfgDirPath * @return return the config dir - * **/ + **/ private File loadConfigDir(String cfgDirPath) { if (cfgDirPath == null) return loadDefaultConfigDir(); @@ -103,8 +103,8 @@ public class TSDBConnection implements Connection { /** * @return search the default config dir, if the config dir is not exist will return null - * */ - private File loadDefaultConfigDir(){ + */ + private File loadDefaultConfigDir() { File cfgDir; File cfgDir_linux = new File("/etc/taos"); cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null; @@ -132,7 +132,9 @@ public class TSDBConnection implements Connection { public Statement createStatement() throws SQLException { if (!this.connector.isClosed()) { - return new TSDBStatement(this.connector); + TSDBStatement statement = new TSDBStatement(this, this.connector); + statement.setConnection(this); + return statement; } else { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } @@ -153,7 +155,7 @@ public class TSDBConnection implements Connection { public PreparedStatement prepareStatement(String sql) throws SQLException { if (!this.connector.isClosed()) { - return new TSDBPreparedStatement(this.connector, sql); + return new TSDBPreparedStatement(this, this.connector, sql); } else { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index 6097ad9c43..230943fd53 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -42,8 +42,8 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat private SavedPreparedStatement savedPreparedStatement; - TSDBPreparedStatement(TSDBJNIConnector connecter, String sql) { - super(connecter); + TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) { + super(connection, connecter); init(sql); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java index 06ae449596..a8d6ceb713 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java +++ 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java @@ -19,153 +19,164 @@ import java.util.ArrayList; import java.util.List; public class TSDBStatement implements Statement { - private TSDBJNIConnector connecter = null; + private TSDBJNIConnector connecter = null; - /** To store batched commands */ - protected List batchedArgs; + /** + * To store batched commands + */ + protected List batchedArgs; - /** Timeout for a query */ - protected int queryTimeout = 0; + /** + * Timeout for a query + */ + protected int queryTimeout = 0; - private Long pSql = 0l; + private Long pSql = 0l; /** * Status of current statement */ - private boolean isClosed = true; - private int affectedRows = 0; + private boolean isClosed = true; + private int affectedRows = 0; - TSDBStatement(TSDBJNIConnector connecter) { - this.connecter = connecter; - this.isClosed = false; - } + private TSDBConnection connection; - public T unwrap(Class iface) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public void setConnection(TSDBConnection connection) { + this.connection = connection; + } - public boolean isWrapperFor(Class iface) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + TSDBStatement(TSDBConnection connection, TSDBJNIConnector connecter) { + this.connection = connection; + this.connecter = connecter; + this.isClosed = false; + } - public ResultSet executeQuery(String sql) throws SQLException { + public T unwrap(Class iface) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isWrapperFor(Class iface) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public ResultSet executeQuery(String sql) throws SQLException { if (isClosed) { throw new SQLException("Invalid method call on a closed statement."); } // TODO make sure it is not a update query - pSql = this.connecter.executeQuery(sql); + pSql = this.connecter.executeQuery(sql); - long resultSetPointer = this.connecter.getResultSet(); + long resultSetPointer = this.connecter.getResultSet(); - if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { - this.connecter.freeResultSet(pSql); - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } + if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { + this.connecter.freeResultSet(pSql); + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } // create/insert/update/delete/alter - if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { - this.connecter.freeResultSet(pSql); - return null; - } + if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { + this.connecter.freeResultSet(pSql); + return null; + } - if (!this.connecter.isUpdateQuery(pSql)) { - return new TSDBResultSet(this.connecter, resultSetPointer); - } else { - this.connecter.freeResultSet(pSql); - return null; - } + if (!this.connecter.isUpdateQuery(pSql)) { + return new TSDBResultSet(this.connecter, resultSetPointer); + } else { + this.connecter.freeResultSet(pSql); + return null; + } - } + } - public int executeUpdate(String sql) throws SQLException { + public int executeUpdate(String sql) throws SQLException { if (isClosed) { throw new SQLException("Invalid method call on a closed statement."); } // TODO check if current query is update query - pSql = this.connecter.executeQuery(sql); - long resultSetPointer = this.connecter.getResultSet(); 
+ pSql = this.connecter.executeQuery(sql); + long resultSetPointer = this.connecter.getResultSet(); - if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { - this.connecter.freeResultSet(pSql); - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } + if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { + this.connecter.freeResultSet(pSql); + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } - this.affectedRows = this.connecter.getAffectedRows(pSql); - this.connecter.freeResultSet(pSql); + this.affectedRows = this.connecter.getAffectedRows(pSql); + this.connecter.freeResultSet(pSql); - return this.affectedRows; - } + return this.affectedRows; + } - public String getErrorMsg(long pSql) { - return this.connecter.getErrMsg(pSql); - } + public String getErrorMsg(long pSql) { + return this.connecter.getErrMsg(pSql); + } - public void close() throws SQLException { + public void close() throws SQLException { if (!isClosed) { if (!this.connecter.isResultsetClosed()) { this.connecter.freeResultSet(); } isClosed = true; } - } + } - public int getMaxFieldSize() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public int getMaxFieldSize() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public void setMaxFieldSize(int max) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public void setMaxFieldSize(int max) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public int getMaxRows() throws SQLException { - // always set maxRows to zero, meaning unlimitted rows in a resultSet - return 0; - } + public int getMaxRows() throws SQLException { + // always set maxRows to zero, meaning unlimitted rows in a resultSet + return 0; + } - public void setMaxRows(int max) throws SQLException { - // always set maxRows to zero, meaning unlimitted rows in a resultSet - } + public void setMaxRows(int max) throws SQLException { + // always set maxRows to zero, meaning unlimitted rows in a resultSet + } - public void setEscapeProcessing(boolean enable) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public void setEscapeProcessing(boolean enable) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public int getQueryTimeout() throws SQLException { - return queryTimeout; - } + public int getQueryTimeout() throws SQLException { + return queryTimeout; + } - public void setQueryTimeout(int seconds) throws SQLException { - this.queryTimeout = seconds; - } + public void setQueryTimeout(int seconds) throws SQLException { + this.queryTimeout = seconds; + } - public void cancel() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public void cancel() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public SQLWarning getWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public SQLWarning getWarnings() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public void clearWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public void clearWarnings() throws SQLException { + throw 
new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public void setCursorName(String name) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public void setCursorName(String name) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public boolean execute(String sql) throws SQLException { + public boolean execute(String sql) throws SQLException { if (isClosed) { throw new SQLException("Invalid method call on a closed statement."); } - boolean res = true; - pSql = this.connecter.executeQuery(sql); - long resultSetPointer = this.connecter.getResultSet(); + boolean res = true; + pSql = this.connecter.executeQuery(sql); + long resultSetPointer = this.connecter.getResultSet(); if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { - this.connecter.freeResultSet(pSql); + this.connecter.freeResultSet(pSql); throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { // no result set is retrieved @@ -173,145 +184,147 @@ public class TSDBStatement implements Statement { res = false; } - return res; - } + return res; + } - public ResultSet getResultSet() throws SQLException { + public ResultSet getResultSet() throws SQLException { if (isClosed) { throw new SQLException("Invalid method call on a closed statement."); } - long resultSetPointer = connecter.getResultSet(); - TSDBResultSet resSet = null; + long resultSetPointer = connecter.getResultSet(); + TSDBResultSet resSet = null; if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) { resSet = new TSDBResultSet(connecter, resultSetPointer); } - return resSet; - } + return resSet; + } - public int getUpdateCount() throws SQLException { + public int getUpdateCount() throws SQLException { if (isClosed) { throw new SQLException("Invalid method call on a closed statement."); } - return this.affectedRows; - } + return this.affectedRows; + } - public boolean getMoreResults() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public boolean getMoreResults() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public void setFetchDirection(int direction) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public void setFetchDirection(int direction) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public int getFetchDirection() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public int getFetchDirection() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - /* - * used by spark - */ - public void setFetchSize(int rows) throws SQLException { - } + /* + * used by spark + */ + public void setFetchSize(int rows) throws SQLException { + } - /* - * used by spark - */ - public int getFetchSize() throws SQLException { - return 4096; - } + /* + * used by spark + */ + public int getFetchSize() throws SQLException { + return 4096; + } - public int getResultSetConcurrency() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public int getResultSetConcurrency() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public int getResultSetType() throws SQLException { - 
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public int getResultSetType() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public void addBatch(String sql) throws SQLException { - if (batchedArgs == null) { - batchedArgs = new ArrayList(); - } - batchedArgs.add(sql); - } + public void addBatch(String sql) throws SQLException { + if (batchedArgs == null) { + batchedArgs = new ArrayList(); + } + batchedArgs.add(sql); + } - public void clearBatch() throws SQLException { - batchedArgs.clear(); - } + public void clearBatch() throws SQLException { + batchedArgs.clear(); + } - public int[] executeBatch() throws SQLException { + public int[] executeBatch() throws SQLException { if (isClosed) { throw new SQLException("Invalid method call on a closed statement."); } - if (batchedArgs == null) { - throw new SQLException(TSDBConstants.WrapErrMsg("Batch is empty!")); - } else { - int[] res = new int[batchedArgs.size()]; - for (int i = 0; i < batchedArgs.size(); i++) { - res[i] = executeUpdate(batchedArgs.get(i)); - } - return res; - } - } + if (batchedArgs == null) { + throw new SQLException(TSDBConstants.WrapErrMsg("Batch is empty!")); + } else { + int[] res = new int[batchedArgs.size()]; + for (int i = 0; i < batchedArgs.size(); i++) { + res[i] = executeUpdate(batchedArgs.get(i)); + } + return res; + } + } - public Connection getConnection() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public Connection getConnection() throws SQLException { + if (this.connecter != null) + return this.connection; + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public boolean getMoreResults(int current) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public boolean getMoreResults(int current) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public ResultSet getGeneratedKeys() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public ResultSet getGeneratedKeys() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public boolean execute(String sql, String[] columnNames) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public boolean execute(String sql, String[] columnNames) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public int getResultSetHoldability() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public int getResultSetHoldability() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public boolean isClosed() throws SQLException { - return isClosed; - } + public boolean isClosed() throws SQLException { + return isClosed; + } - public void setPoolable(boolean poolable) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public void setPoolable(boolean poolable) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public boolean isPoolable() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public boolean isPoolable() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public void closeOnCompletion() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public void closeOnCompletion() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } - public boolean isCloseOnCompletion() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + public boolean isCloseOnCompletion() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java index 7e144cbe0f..066dfad5d5 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java @@ -23,6 +23,7 @@ import java.sql.SQLException; public class SqlSyntaxValidator { private TSDBConnection tsdbConnection; + public SqlSyntaxValidator(Connection connection) { this.tsdbConnection = (TSDBConnection) connection; } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java index 6c3437186f..b793a47c99 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java @@ -8,8 +8,7 @@ import org.junit.BeforeClass; public class BaseTest { private static boolean testCluster = false; - private static TDNodes nodes = new TDNodes(); - + private static TDNodes nodes = new TDNodes(); @BeforeClass public static void setupEnv() { @@ -19,11 +18,9 @@ public class BaseTest { nodes.getTDNode(1).setRunning(1); nodes.stop(1); } - nodes.setTestCluster(testCluster); nodes.deploy(1); - 
nodes.start(1); - + nodes.start(1); } catch (Exception e) { e.printStackTrace(); } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BatchInsertTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BatchInsertTest.java index c49293c96b..7d96cbb538 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BatchInsertTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BatchInsertTest.java @@ -7,13 +7,11 @@ import org.junit.Test; import java.sql.*; import java.util.Properties; import java.util.Random; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; import static org.junit.Assert.assertEquals; -import java.util.Properties; -import java.util.concurrent.Executors; -import java.util.concurrent.*; - -import static org.junit.Assert.assertTrue; public class BatchInsertTest extends BaseTest { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/QueryDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/QueryDataTest.java new file mode 100644 index 0000000000..611e21e887 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/QueryDataTest.java @@ -0,0 +1,81 @@ +package com.taosdata.jdbc; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; +import java.util.Random; + +import static org.junit.Assert.assertEquals; +import java.util.Properties; +import java.util.concurrent.Executors; +import java.util.concurrent.*; + +import static org.junit.Assert.assertTrue; + +public class QueryDataTest extends BaseTest { + + static Connection connection = null; + static Statement statement = null; + static String dbName = "test"; + static String stbName = "meters"; + static String host = "localhost"; + static int numOfTables = 30; + final static int numOfRecordsPerTable = 1000; + static long ts = 1496732686000l; + final static String tablePrefix = "t"; + + @Before + public void createDatabase() throws SQLException { + try { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + } catch (ClassNotFoundException e) { + return; + } + + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); + + statement = connection.createStatement(); + statement.executeUpdate("drop database if exists " + dbName); + statement.executeUpdate("create database if not exists " + dbName); + statement.executeUpdate("use " + dbName); + + String createTableSql = "create table " + stbName + "(ts timestamp, name binary(6))"; + statement.executeUpdate(createTableSql); + } + + @Test + public void testQueryBinaryData() throws SQLException{ + + String insertSql = "insert into " + stbName + " values(now, 'taosda')"; + System.out.println(insertSql); + + statement.executeUpdate(insertSql); + + String querySql = "select * from " + stbName; + ResultSet rs = statement.executeQuery(querySql); + + while(rs.next()) { + String name = rs.getString(2) + "001"; + System.out.println("name = " + name); + assertEquals(name, "taosda001"); + } + rs.close(); + } + + + @After + public void close() throws Exception { + statement.close(); + connection.close(); + Thread.sleep(10); + } + +} \ No newline at 
end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java index 1844a92b47..6c75860e0f 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java @@ -34,7 +34,6 @@ public class SelectTest extends BaseTest { statement.executeUpdate("drop database if exists " + dbName); statement.executeUpdate("create database if not exists " + dbName); statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)"); - } @Test @@ -66,6 +65,5 @@ public class SelectTest extends BaseTest { statement.close(); connection.close(); Thread.sleep(10); - } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java new file mode 100644 index 0000000000..9608c4985d --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java @@ -0,0 +1,108 @@ +package com.taosdata.jdbc.cases; + +import com.taosdata.jdbc.lib.TSDBCommon; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Random; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertEquals; + +public class BatchInsertTest { + + static String host = "localhost"; + static String dbName = "test"; + static String stbName = "meters"; + static int numOfTables = 30; + final static int numOfRecordsPerTable = 1000; + static long ts = 1496732686000l; + final static String tablePrefix = "t"; + + private Connection connection; + + @Before + public void before() { + try { + connection = TSDBCommon.getConn(host); + TSDBCommon.createDatabase(connection, dbName); + TSDBCommon.createStable(connection, stbName); + TSDBCommon.createTables(connection, numOfTables, stbName, tablePrefix); + } catch (Exception e) { + e.printStackTrace(); + } + } + + @Test + public void testBatchInsert(){ + ExecutorService executorService = Executors.newFixedThreadPool(numOfTables); + for (int i = 0; i < numOfTables; i++) { + final int index = i; + executorService.execute(new Runnable() { + @Override + public void run() { + try { + long startTime = System.currentTimeMillis(); + Statement statement = connection.createStatement(); // get statement + StringBuilder sb = new StringBuilder(); + sb.append("INSERT INTO " + tablePrefix + index + " VALUES"); + Random rand = new Random(); + for (int j = 1; j <= numOfRecordsPerTable; j++) { + sb.append("(" + (ts + j) + ", "); + sb.append(rand.nextInt(100) + ", "); + sb.append(rand.nextInt(100) + ", "); + sb.append(rand.nextInt(100) + ")"); + } + statement.addBatch(sb.toString()); + statement.executeBatch(); + long endTime = System.currentTimeMillis(); + System.out.println("Thread " + index + " takes " + (endTime - startTime) + " microseconds"); + connection.commit(); + statement.close(); + } catch (Exception e) { + e.printStackTrace(); + } + } + }); + } + + executorService.shutdown(); + try { + executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + try{ + Statement statement = connection.createStatement(); + ResultSet rs = 
statement.executeQuery("select * from meters"); + int num = 0; + while (rs.next()) { + num++; + } + assertEquals(num, numOfTables * numOfRecordsPerTable); + rs.close(); + }catch (Exception e){ + e.printStackTrace(); + } + } + + @After + public void after() { + try { + if (connection != null) + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/lib/TSDBCommon.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/lib/TSDBCommon.java new file mode 100644 index 0000000000..0e2613d617 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/lib/TSDBCommon.java @@ -0,0 +1,47 @@ +package com.taosdata.jdbc.lib; + +import com.taosdata.jdbc.TSDBDriver; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +public class TSDBCommon { + + public static Connection getConn(String host) throws SQLException, ClassNotFoundException { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + return DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); + } + + public static void createDatabase(Connection connection, String dbName) throws SQLException { + Statement statement = connection.createStatement(); + statement.executeUpdate("drop database if exists " + dbName); + statement.executeUpdate("create database if not exists " + dbName); + statement.executeUpdate("use " + dbName); + statement.close(); + } + + public static void createStable(Connection connection, String stbName) throws SQLException { + Statement statement = connection.createStatement(); + String createTableSql = "create table " + stbName + "(ts timestamp, f1 int, f2 int, f3 int) tags(areaid int, loc binary(20))"; + statement.executeUpdate(createTableSql); + statement.close(); + } + + public static void createTables(Connection connection, int numOfTables, String stbName,String tablePrefix) throws SQLException { + Statement statement = connection.createStatement(); + for(int i = 0; i < numOfTables; i++) { + String loc = i % 2 == 0 ? 
"beijing" : "shanghai"; + String createSubTalbesSql = "create table " + tablePrefix + i + " using " + stbName + " tags(" + i + ", '" + loc + "')"; + statement.executeUpdate(createSubTalbesSql); + } + statement.close(); + } +} diff --git a/src/dnode/src/dnodeModule.c b/src/dnode/src/dnodeModule.c index e1d298089c..0a5b9b550c 100644 --- a/src/dnode/src/dnodeModule.c +++ b/src/dnode/src/dnodeModule.c @@ -16,11 +16,13 @@ #define _DEFAULT_SOURCE #include "os.h" #include "taosdef.h" +#include "taosmsg.h" #include "tglobal.h" #include "mnode.h" #include "http.h" #include "tmqtt.h" #include "monitor.h" +#include "dnode.h" #include "dnodeInt.h" #include "dnodeModule.h" @@ -129,17 +131,34 @@ void dnodeProcessModuleStatus(uint32_t moduleStatus) { for (int32_t module = TSDB_MOD_MNODE; module < TSDB_MOD_HTTP; ++module) { bool enableModule = moduleStatus & (1 << module); if (!tsModule[module].enable && enableModule) { - dInfo("module status:%u is received, start %s module", tsModuleStatus, tsModule[module].name); + dInfo("module status:%u is set, start %s module", moduleStatus, tsModule[module].name); tsModule[module].enable = true; dnodeSetModuleStatus(module); (*tsModule[module].startFp)(); } if (tsModule[module].enable && !enableModule) { - dInfo("module status:%u is received, stop %s module", tsModuleStatus, tsModule[module].name); + dInfo("module status:%u is set, stop %s module", moduleStatus, tsModule[module].name); tsModule[module].enable = false; dnodeUnSetModuleStatus(module); (*tsModule[module].stopFp)(); } } } + +bool dnodeCheckMnodeStarting() { + if (tsModuleStatus & TSDB_MOD_MNODE) return false; + + SDMMnodeInfos *mnodes = dnodeGetMnodeInfos(); + for (int32_t i = 0; i < mnodes->nodeNum; ++i) { + SDMMnodeInfo *node = &mnodes->nodeInfos[i]; + if (node->nodeId == dnodeGetDnodeId()) { + uint32_t moduleStatus = tsModuleStatus | (1 << TSDB_MOD_MNODE);; + dInfo("start mnode module, module status:%d, new status:%d", tsModuleStatus, moduleStatus); + dnodeProcessModuleStatus(moduleStatus); + return true; + } + } + + return false; +} diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c index 5daf616706..4a1d337824 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -154,15 +154,15 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char rpcMsg.contLen = sizeof(SDMAuthMsg); rpcMsg.msgType = TSDB_MSG_TYPE_DM_AUTH; - dDebug("user:%s, send auth msg to mnode", user); + dDebug("user:%s, send auth msg to mnodes", user); SRpcMsg rpcRsp = {0}; dnodeSendMsgToDnodeRecv(&rpcMsg, &rpcRsp); if (rpcRsp.code != 0) { - dError("user:%s, auth msg received from mnode, error:%s", user, tstrerror(rpcRsp.code)); + dError("user:%s, auth msg received from mnodes, error:%s", user, tstrerror(rpcRsp.code)); } else { SDMAuthRsp *pRsp = rpcRsp.pCont; - dDebug("user:%s, auth msg received from mnode", user); + dDebug("user:%s, auth msg received from mnodes", user); memcpy(secret, pRsp->secret, TSDB_KEY_LEN); memcpy(ckey, pRsp->ckey, TSDB_KEY_LEN); *spi = pRsp->spi; diff --git a/src/inc/dnode.h b/src/inc/dnode.h index 093ce93205..fda9c1c1dd 100644 --- a/src/inc/dnode.h +++ b/src/inc/dnode.h @@ -43,6 +43,7 @@ void dnodeGetMnodeEpSetForPeer(void *epSet); void dnodeGetMnodeEpSetForShell(void *epSet); void * dnodeGetMnodeInfos(); int32_t dnodeGetDnodeId(); +bool dnodeCheckMnodeStarting(); void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg)); void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg); diff --git a/src/inc/tsync.h b/src/inc/tsync.h 
index ff9c9901bd..ca0f70d104 100644 --- a/src/inc/tsync.h +++ b/src/inc/tsync.h @@ -103,6 +103,9 @@ typedef struct { typedef void* tsync_h; +int32_t syncInit(); +void syncCleanUp(); + tsync_h syncStart(const SSyncInfo *); void syncStop(tsync_h shandle); int32_t syncReconfig(tsync_h shandle, const SSyncCfg *); diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index c8df17d6ae..277dc45f8e 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -786,7 +786,7 @@ void read_history() { } void write_history() { - char f_history[128]; + char f_history[TSDB_FILENAME_LEN]; get_history_path(f_history); FILE *f = fopen(f_history, "w"); diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 69bab44985..6f5ea33d79 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -409,7 +409,7 @@ void set_terminal_mode() { } } -void get_history_path(char *history) { sprintf(history, "%s/%s", getenv("HOME"), HISTORY_FILE); } +void get_history_path(char *history) { snprintf(history, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), HISTORY_FILE); } void clearScreen(int ecmd_pos, int cursor_pos) { struct winsize w; diff --git a/src/mnode/src/mnodeAcct.c b/src/mnode/src/mnodeAcct.c index b0a12979cd..e161940a2b 100644 --- a/src/mnode/src/mnodeAcct.c +++ b/src/mnode/src/mnodeAcct.c @@ -211,8 +211,8 @@ static int32_t mnodeCreateRootAcct() { strcpy(pAcct->user, TSDB_DEFAULT_USER); taosEncryptPass((uint8_t *)TSDB_DEFAULT_PASS, strlen(TSDB_DEFAULT_PASS), pAcct->pass); pAcct->cfg = (SAcctCfg){ - .maxUsers = 10, - .maxDbs = 64, + .maxUsers = 128, + .maxDbs = 128, .maxTimeSeries = INT32_MAX, .maxConnections = 1024, .maxStreams = 1000, diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index 8187cfd265..54c049d242 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -242,6 +242,7 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) { return TSDB_CODE_MND_INVALID_DB_OPTION; } +#if 0 if (pCfg->daysToKeep2 < TSDB_MIN_KEEP || pCfg->daysToKeep2 > pCfg->daysToKeep) { mError("invalid db option daysToKeep2:%d valid range: [%d, %d]", pCfg->daysToKeep, TSDB_MIN_KEEP, pCfg->daysToKeep); return TSDB_CODE_MND_INVALID_DB_OPTION; @@ -251,6 +252,7 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) { mError("invalid db option daysToKeep1:%d valid range: [%d, %d]", pCfg->daysToKeep1, TSDB_MIN_KEEP, pCfg->daysToKeep2); return TSDB_CODE_MND_INVALID_DB_OPTION; } +#endif if (pCfg->maxRowsPerFileBlock < TSDB_MIN_MAX_ROW_FBLOCK || pCfg->maxRowsPerFileBlock > TSDB_MAX_MAX_ROW_FBLOCK) { mError("invalid db option maxRowsPerFileBlock:%d valid range: [%d, %d]", pCfg->maxRowsPerFileBlock, diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index aaad2462ea..ac8730b0cc 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -72,6 +72,7 @@ static int32_t mnodeDnodeActionInsert(SSdbOper *pOper) { pDnode->lastAccess = tsAccessSquence; } + mInfo("dnode:%d, fqdn:%s ep:%s port:%d, do insert action", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort); return TSDB_CODE_SUCCESS; } diff --git a/src/mnode/src/mnodeMnode.c b/src/mnode/src/mnodeMnode.c index c762403525..bff84d8041 100644 --- a/src/mnode/src/mnodeMnode.c +++ b/src/mnode/src/mnodeMnode.c @@ -68,6 +68,7 @@ static int32_t mnodeMnodeActionInsert(SSdbOper *pOper) { pDnode->isMgmt = true; mnodeDecDnodeRef(pDnode); + mInfo("mnode:%d, fqdn:%s ep:%s port:%d, do insert action", pMnode->mnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, 
pDnode->dnodePort); return TSDB_CODE_SUCCESS; } diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index e3ed7daf8c..646c17b2b8 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -22,6 +22,7 @@ #include "tqueue.h" #include "twal.h" #include "tsync.h" +#include "ttimer.h" #include "tglobal.h" #include "dnode.h" #include "mnode.h" @@ -64,6 +65,7 @@ typedef struct _SSdbTable { int32_t (*encodeFp)(SSdbOper *pOper); int32_t (*destroyFp)(SSdbOper *pOper); int32_t (*restoredFp)(); + pthread_mutex_t mutex; } SSdbTable; typedef struct { @@ -88,6 +90,8 @@ typedef struct { SSdbWriteWorker *writeWorker; } SSdbWriteWorkerPool; +extern void * tsMnodeTmr; +static void * tsUpdateSyncTmr; static SSdbObject tsSdbObj = {0}; static taos_qset tsSdbWriteQset; static taos_qall tsSdbWriteQall; @@ -290,11 +294,17 @@ static void sdbConfirmForward(void *ahandle, void *param, int32_t code) { taosFreeQitem(pOper); } +static void sdbUpdateSyncTmrFp(void *param, void *tmrId) { sdbUpdateSync(); } + void sdbUpdateSync() { if (!mnodeIsRunning()) { mDebug("mnode not start yet, update sync info later"); + if (dnodeCheckMnodeStarting()) { + taosTmrReset(sdbUpdateSyncTmrFp, 1000, NULL, tsMnodeTmr, &tsUpdateSyncTmr); + } return; } + mDebug("update sync info in sdb"); SSyncCfg syncCfg = {0}; int32_t index = 0; @@ -387,8 +397,6 @@ int32_t sdbInit() { tsSdbObj.role = TAOS_SYNC_ROLE_MASTER; } - sdbUpdateSync(); - tsSdbObj.status = SDB_STATUS_SERVING; return TSDB_CODE_SUCCESS; } @@ -448,8 +456,9 @@ static void *sdbGetRowMeta(SSdbTable *pTable, void *key) { } void **ppRow = (void **)taosHashGet(pTable->iHandle, key, keySize); - if (ppRow == NULL) return NULL; - return *ppRow; + if (ppRow != NULL) return *ppRow; + + return NULL; } static void *sdbGetRowMetaFromObj(SSdbTable *pTable, void *key) { @@ -457,13 +466,14 @@ static void *sdbGetRowMetaFromObj(SSdbTable *pTable, void *key) { } void *sdbGetRow(void *handle, void *key) { + SSdbTable *pTable = handle; + + pthread_mutex_lock(&pTable->mutex); void *pRow = sdbGetRowMeta(handle, key); - if (pRow) { - sdbIncRef(handle, pRow); - return pRow; - } else { - return NULL; - } + if (pRow) sdbIncRef(handle, pRow); + pthread_mutex_unlock(&pTable->mutex); + + return pRow; } static void *sdbGetRowFromObj(SSdbTable *pTable, void *key) { @@ -478,7 +488,9 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) { keySize = strlen((char *)key); } + pthread_mutex_lock(&pTable->mutex); taosHashPut(pTable->iHandle, key, keySize, &pOper->pObj, sizeof(int64_t)); + pthread_mutex_unlock(&pTable->mutex); sdbIncRef(pTable, pOper->pObj); atomic_add_fetch_32(&pTable->numOfRows, 1); @@ -519,7 +531,10 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) { keySize = strlen((char *)key); } + pthread_mutex_lock(&pTable->mutex); taosHashRemove(pTable->iHandle, key, keySize); + pthread_mutex_unlock(&pTable->mutex); + atomic_sub_fetch_32(&pTable->numOfRows, 1); sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName, @@ -612,8 +627,8 @@ static int sdbWrite(void *param, void *data, int type) { } else if (action == SDB_ACTION_DELETE) { void *pRow = sdbGetRowMeta(pTable, pHead->cont); if (pRow == NULL) { - sdbError("table:%s, failed to get object:%s from wal while dispose delete action", pTable->tableName, - pHead->cont); + sdbDebug("table:%s, object:%s not exist in hash, ignore delete action", pTable->tableName, + sdbGetKeyStr(pTable, pHead->cont)); return TSDB_CODE_SUCCESS; } SSdbOper oper = {.table = pTable, .pObj = 
pRow}; @@ -621,8 +636,8 @@ static int sdbWrite(void *param, void *data, int type) { } else if (action == SDB_ACTION_UPDATE) { void *pRow = sdbGetRowMeta(pTable, pHead->cont); if (pRow == NULL) { - sdbError("table:%s, failed to get object:%s from wal while dispose update action", pTable->tableName, - pHead->cont); + sdbDebug("table:%s, object:%s not exist in hash, ignore update action", pTable->tableName, + sdbGetKeyStr(pTable, pHead->cont)); return TSDB_CODE_SUCCESS; } SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable}; @@ -861,6 +876,7 @@ void *sdbOpenTable(SSdbTableDesc *pDesc) { if (pTable == NULL) return NULL; + pthread_mutex_init(&pTable->mutex, NULL); tstrncpy(pTable->tableName, pDesc->tableName, SDB_TABLE_LEN); pTable->keyType = pDesc->keyType; pTable->tableId = pDesc->tableId; @@ -908,6 +924,7 @@ void sdbCloseTable(void *handle) { taosHashDestroyIter(pIter); taosHashCleanup(pTable->iHandle); + pthread_mutex_destroy(&pTable->mutex); sdbDebug("table:%s, is closed, numOfTables:%d", pTable->tableName, tsSdbObj.numOfTables); free(pTable); diff --git a/src/os/inc/osDir.h b/src/os/inc/osDir.h index e7dc04fd15..17683743e3 100644 --- a/src/os/inc/osDir.h +++ b/src/os/inc/osDir.h @@ -24,6 +24,7 @@ extern "C" { void taosRemoveDir(char *rootDir); int taosMkDir(const char *pathname, mode_t mode); void taosRename(char* oldName, char *newName); +void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays); #ifdef __cplusplus } diff --git a/src/os/inc/osWindows.h b/src/os/inc/osWindows.h index 224e41593d..d4f3d6d2af 100644 --- a/src/os/inc/osWindows.h +++ b/src/os/inc/osWindows.h @@ -39,6 +39,7 @@ #include #include #include +#include #include "msvcProcess.h" #include "msvcDirect.h" #include "msvcFcntl.h" @@ -58,8 +59,6 @@ extern "C" { int32_t BUILDIN_CTZL(uint64_t val); int32_t BUILDIN_CTZ(uint32_t val); -#define TAOS_OS_FUNC_DIR - #define TAOS_OS_FUNC_FILE #define TAOS_OS_FUNC_FILE_ISREG #define TAOS_OS_FUNC_FILE_ISDIR diff --git a/src/os/src/detail/osDir.c b/src/os/src/detail/osDir.c index 7a537cdfea..93651c78ef 100644 --- a/src/os/src/detail/osDir.c +++ b/src/os/src/detail/osDir.c @@ -18,8 +18,6 @@ #include "tglobal.h" #include "tulog.h" -#ifndef TAOS_OS_FUNC_DIR - void taosRemoveDir(char *rootDir) { DIR *dir = opendir(rootDir); if (dir == NULL) return; @@ -51,18 +49,54 @@ int taosMkDir(const char *path, mode_t mode) { } void taosRename(char* oldName, char *newName) { - if (0 == tsEnableVnodeBak) { - uInfo("vnode backup not enabled"); - return; - } - // if newName in not empty, rename return fail. 
// the newName must be empty or does not exist if (rename(oldName, newName)) { - uError("%s is modify to %s fail, reason:%s", oldName, newName, strerror(errno)); + uError("failed to rename file %s to %s, reason:%s", oldName, newName, strerror(errno)); } else { - uInfo("%s is modify to %s success!", oldName, newName); + uInfo("successfully to rename file %s to %s", oldName, newName); } } -#endif +void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays) { + DIR *dir = opendir(rootDir); + if (dir == NULL) return; + + int64_t sec = taosGetTimestampSec(); + struct dirent *de = NULL; + + while ((de = readdir(dir)) != NULL) { + if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue; + + char filename[1024]; + snprintf(filename, 1023, "%s/%s", rootDir, de->d_name); + if (de->d_type & DT_DIR) { + continue; + } else { + // struct stat fState; + // if (stat(fname, &fState) < 0) { + // continue; + // } + int32_t len = (int32_t)strlen(filename); + int64_t fileSec = 0; + for (int i = len - 1; i >= 0; i--) { + if (filename[i] == '.') { + fileSec = atoll(filename + i + 1); + break; + } + } + + if (fileSec <= 100) continue; + int32_t days = (int32_t)(ABS(sec - fileSec) / 86400 + 1); + if (days > keepDays) { + (void)remove(filename); + uInfo("file:%s is removed, days:%d keepDays:%d", filename, days, keepDays); + } else { + uTrace("file:%s won't be removed, days:%d keepDays:%d", filename, days, keepDays); + } + } + } + + closedir(dir); + rmdir(rootDir); +} diff --git a/src/os/src/windows/wDir.c b/src/os/src/windows/wDir.c deleted file mode 100644 index c486cd0d40..0000000000 --- a/src/os/src/windows/wDir.c +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#define _DEFAULT_SOURCE -#include "os.h" -#include "tulog.h" - -void taosRemoveDir(char *rootDir) { - uError("%s not implemented yet", __FUNCTION__); -} - -int taosMkDir(const char *path, mode_t mode) { - uError("%s not implemented yet", __FUNCTION__); - return 0; -} - -void taosMvDir(char* destDir, char *srcDir) { - uError("%s not implemented yet", __FUNCTION__); -} diff --git a/src/os/src/windows/wString.c b/src/os/src/windows/wString.c index 0d9a28e288..1fb235a005 100644 --- a/src/os/src/windows/wString.c +++ b/src/os/src/windows/wString.c @@ -58,11 +58,20 @@ char *strsep(char **stringp, const char *delim) { char *getpass(const char *prefix) { static char passwd[TSDB_KEY_LEN] = {0}; - + memset(passwd, 0, TSDB_KEY_LEN); printf("%s", prefix); - scanf("%s", passwd); - char n = getchar(); + int32_t index = 0; + char ch; + while (index < TSDB_KEY_LEN) { + ch = getch(); + if (ch == '\n' || ch == '\r') { + break; + } else { + passwd[index++] = ch; + } + } + return passwd; } @@ -131,11 +140,11 @@ int tasoUcs4Compare(void *f1_ucs4, void *f2_ucs4, int bytes) { } -/* Copy memory to memory until the specified number of bytes -has been copied, return pointer to following byte. -Overlap is NOT handled correctly. 
*/ -void *mempcpy(void *dest, const void *src, size_t len) { - return (char*)memcpy(dest, src, len) + len; +/* Copy memory to memory until the specified number of bytes +has been copied, return pointer to following byte. +Overlap is NOT handled correctly. */ +void *mempcpy(void *dest, const void *src, size_t len) { + return (char*)memcpy(dest, src, len) + len; } /* Copy SRC to DEST, returning the address of the terminating '\0' in DEST. */ diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c index 9c94bdc1fa..048f839b72 100644 --- a/src/plugins/monitor/src/monitorMain.c +++ b/src/plugins/monitor/src/monitorMain.c @@ -115,6 +115,10 @@ static void *monitorThreadFunc(void *param) { monitorDebug("starting to initialize monitor module ..."); while (1) { + static int32_t accessTimes = 0; + accessTimes++; + taosMsleep(1000); + if (tsMonitor.quiting) { tsMonitor.state = MON_STATE_NOT_INIT; monitorInfo("monitor thread will quit, for taosd is quiting"); @@ -126,11 +130,7 @@ static void *monitorThreadFunc(void *param) { if (tsMonitor.start == 0) { continue; } - - static int32_t accessTimes = 0; - accessTimes++; - taosMsleep(1000); - + if (dnodeGetDnodeId() <= 0) { monitorDebug("dnode not initialized, waiting for 3000 ms to start monitor module"); continue; diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 3aecd127ef..f59bf62ec5 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -709,21 +709,21 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) { } if (terrno != 0) { - taosFreeId(pRpc->idPool, sid); // sid shall be released + taosFreeId(pRpc->idPool, sid); // sid shall be released pConn = NULL; } } - } + } if (pConn) { if (pRecv->connType == RPC_CONN_UDPS && pRpc->numOfThreads > 1) { // UDP server, assign to new connection - pRpc->index = (pRpc->index+1) % pRpc->numOfThreads; + pRpc->index = (pRpc->index + 1) % pRpc->numOfThreads; pConn->localPort = (pRpc->localPort + pRpc->index); } - + taosHashPut(pRpc->hash, hashstr, size, (char *)&pConn, POINTER_BYTES); - tDebug("%s %p server connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid); + tDebug("%s %p server connection is allocated, uid:0x%x sid:%d key:%s", pRpc->label, pConn, pConn->linkUid, sid, hashstr); } return pConn; diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index b92ef4cf26..f96b902efd 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -30,23 +30,19 @@ #include "syncInt.h" // global configurable -int tsMaxSyncNum = 2; -int tsSyncTcpThreads = 2; -int tsMaxWatchFiles = 500; -int tsMaxFwdInfo = 200; -int tsSyncTimer = 1; -//int sDebugFlag = 135; -//char tsArbitrator[TSDB_FQDN_LEN] = {0}; +int tsMaxSyncNum = 2; +int tsSyncTcpThreads = 2; +int tsMaxWatchFiles = 500; +int tsMaxFwdInfo = 200; +int tsSyncTimer = 1; // module global, not configurable -int tsSyncNum; // number of sync in process in whole system -char tsNodeFqdn[TSDB_FQDN_LEN]; +int tsSyncNum; // number of sync in process in whole system +char tsNodeFqdn[TSDB_FQDN_LEN]; -static int tsNodeNum; // number of nodes in system -static ttpool_h tsTcpPool; -static void *syncTmrCtrl = NULL; -static void *vgIdHash; -static pthread_once_t syncModuleInit = PTHREAD_ONCE_INIT; +static ttpool_h tsTcpPool; +static void * syncTmrCtrl = NULL; +static void * vgIdHash; // local functions static void syncProcessSyncRequest(char *pMsg, SSyncPeer *pPeer); @@ -75,7 +71,7 @@ char* syncRole[] = { "master" }; -static void syncModuleInitFunc() { +int32_t syncInit() { 
SPoolInfo info; info.numOfThreads = tsSyncTcpThreads; @@ -87,25 +83,52 @@ static void syncModuleInitFunc() { info.processIncomingConn = syncProcessIncommingConnection; tsTcpPool = taosOpenTcpThreadPool(&info); - if (tsTcpPool == NULL) return; + if (tsTcpPool == NULL) { + sError("failed to init tcpPool"); + return -1; + } syncTmrCtrl = taosTmrInit(1000, 50, 10000, "SYNC"); if (syncTmrCtrl == NULL) { + sError("failed to init tmrCtrl"); taosCloseTcpThreadPool(tsTcpPool); tsTcpPool = NULL; - return; + return -1; } - + vgIdHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true); if (vgIdHash == NULL) { + sError("failed to init vgIdHash"); taosTmrCleanUp(syncTmrCtrl); taosCloseTcpThreadPool(tsTcpPool); tsTcpPool = NULL; syncTmrCtrl = NULL; - return; - } + return -1; + } tstrncpy(tsNodeFqdn, tsLocalFqdn, sizeof(tsNodeFqdn)); + sInfo("sync module initialized successfully"); + + return 0; +} + +void syncCleanUp() { + if (tsTcpPool) { + taosCloseTcpThreadPool(tsTcpPool); + tsTcpPool = NULL; + } + + if (syncTmrCtrl) { + taosTmrCleanUp(syncTmrCtrl); + syncTmrCtrl = NULL; + } + + if (vgIdHash) { + taosHashCleanup(vgIdHash); + vgIdHash = NULL; + } + + sInfo("sync module is cleaned up"); } void *syncStart(const SSyncInfo *pInfo) { @@ -118,15 +141,6 @@ void *syncStart(const SSyncInfo *pInfo) { return NULL; } - pthread_once(&syncModuleInit, syncModuleInitFunc); - if (tsTcpPool == NULL) { - free(pNode); - syncModuleInit = PTHREAD_ONCE_INIT; - sError("failed to init sync module(%s)", tstrerror(errno)); - return NULL; - } - - atomic_add_fetch_32(&tsNodeNum, 1); tstrncpy(pNode->path, pInfo->path, sizeof(pNode->path)); pthread_mutex_init(&pNode->mutex, NULL); @@ -138,7 +152,7 @@ void *syncStart(const SSyncInfo *pInfo) { pNode->confirmForward = pInfo->confirmForward; pNode->notifyFlowCtrl = pInfo->notifyFlowCtrl; pNode->notifyFileSynced = pInfo->notifyFileSynced; - + pNode->selfIndex = -1; pNode->vgId = pInfo->vgId; pNode->replica = pCfg->replica; @@ -148,8 +162,9 @@ void *syncStart(const SSyncInfo *pInfo) { for (int i = 0; i < pCfg->replica; ++i) { const SNodeInfo *pNodeInfo = pCfg->nodeInfo + i; pNode->peerInfo[i] = syncAddPeer(pNode, pNodeInfo); - if ((strcmp(pNodeInfo->nodeFqdn, tsNodeFqdn) == 0) && (pNodeInfo->nodePort == tsSyncPort)) + if ((strcmp(pNodeInfo->nodeFqdn, tsNodeFqdn) == 0) && (pNodeInfo->nodePort == tsSyncPort)) { pNode->selfIndex = i; + } } if (pNode->selfIndex < 0) { @@ -181,16 +196,17 @@ void *syncStart(const SSyncInfo *pInfo) { syncAddArbitrator(pNode); syncAddNodeRef(pNode); taosHashPut(vgIdHash, (const char *)&pNode->vgId, sizeof(int32_t), (char *)(&pNode), sizeof(SSyncNode *)); - - if (pNode->notifyRole) - (*pNode->notifyRole)(pNode->ahandle, nodeRole); + + if (pNode->notifyRole) { + (*pNode->notifyRole)(pNode->ahandle, nodeRole); + } return pNode; } void syncStop(void *param) { - SSyncNode * pNode = param; - SSyncPeer *pPeer; + SSyncNode *pNode = param; + SSyncPeer *pPeer; if (pNode == NULL) return; sInfo("vgId:%d, cleanup sync", pNode->vgId); @@ -199,7 +215,7 @@ void syncStop(void *param) { for (int i = 0; i < pNode->replica; ++i) { pPeer = pNode->peerInfo[i]; - if (pPeer) syncRemovePeer(pPeer); + if (pPeer) syncRemovePeer(pPeer); } pPeer = pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]; @@ -214,19 +230,19 @@ void syncStop(void *param) { } int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) { - SSyncNode * pNode = param; - int i, j; + SSyncNode *pNode = param; + int i, j; if (pNode == NULL) return TSDB_CODE_SYN_INVALID_CONFIG; - sInfo("vgId:%d, 
reconfig, role:%s replica:%d old:%d", pNode->vgId, syncRole[nodeRole], - pNewCfg->replica, pNode->replica); + sInfo("vgId:%d, reconfig, role:%s replica:%d old:%d", pNode->vgId, syncRole[nodeRole], pNewCfg->replica, + pNode->replica); pthread_mutex_lock(&(pNode->mutex)); for (i = 0; i < pNode->replica; ++i) { for (j = 0; j < pNewCfg->replica; ++j) { - if ((strcmp(pNode->peerInfo[i]->fqdn, pNewCfg->nodeInfo[j].nodeFqdn) == 0) && - (pNode->peerInfo[i]->port == pNewCfg->nodeInfo[j].nodePort)) + if ((strcmp(pNode->peerInfo[i]->fqdn, pNewCfg->nodeInfo[j].nodeFqdn) == 0) && + (pNode->peerInfo[i]->port == pNewCfg->nodeInfo[j].nodePort)) break; } @@ -241,8 +257,8 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) { const SNodeInfo *pNewNode = &pNewCfg->nodeInfo[i]; for (j = 0; j < pNode->replica; ++j) { - if (pNode->peerInfo[j] && (strcmp(pNode->peerInfo[j]->fqdn, pNewNode->nodeFqdn) == 0) && - (pNode->peerInfo[j]->port == pNewNode->nodePort)) + if (pNode->peerInfo[j] && (strcmp(pNode->peerInfo[j]->fqdn, pNewNode->nodeFqdn) == 0) && + (pNode->peerInfo[j]->port == pNewNode->nodePort)) break; } @@ -252,8 +268,9 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) { newPeers[i] = pNode->peerInfo[j]; } - if ((strcmp(pNewNode->nodeFqdn, tsNodeFqdn) == 0) && (pNewNode->nodePort == tsSyncPort)) + if ((strcmp(pNewNode->nodeFqdn, tsNodeFqdn) == 0) && (pNewNode->nodePort == tsSyncPort)) { pNode->selfIndex = i; + } } pNode->replica = pNewCfg->replica; @@ -261,8 +278,9 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) { if (pNode->quorum > pNode->replica) pNode->quorum = pNode->replica; memcpy(pNode->peerInfo, newPeers, sizeof(SSyncPeer *) * pNewCfg->replica); - for (i = pNewCfg->replica; i < TAOS_SYNC_MAX_REPLICA; ++i) + for (i = pNewCfg->replica; i < TAOS_SYNC_MAX_REPLICA; ++i) { pNode->peerInfo[i] = NULL; + } syncAddArbitrator(pNode); @@ -274,43 +292,44 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) { pthread_mutex_unlock(&(pNode->mutex)); - sInfo("vgId:%d, %d replicas are configured, quorum:%d role:%s", pNode->vgId, pNode->replica, pNode->quorum, syncRole[nodeRole]); + sInfo("vgId:%d, %d replicas are configured, quorum:%d role:%s", pNode->vgId, pNode->replica, pNode->quorum, + syncRole[nodeRole]); syncBroadcastStatus(pNode); return 0; } int32_t syncForwardToPeer(void *param, void *data, void *mhandle, int qtype) { - SSyncNode * pNode = param; - SSyncPeer * pPeer; - SSyncHead *pSyncHead; - SWalHead *pWalHead = data; - int fwdLen; - int code = 0; + SSyncNode *pNode = param; + SSyncPeer *pPeer; + SSyncHead *pSyncHead; + SWalHead * pWalHead = data; + int fwdLen; + int code = 0; if (pNode == NULL) return 0; // always update version nodeVersion = pWalHead->version; - if (pNode->replica == 1 || nodeRole != TAOS_SYNC_ROLE_MASTER ) return 0; + if (pNode->replica == 1 || nodeRole != TAOS_SYNC_ROLE_MASTER) return 0; // only pkt from RPC or CQ can be forwarded if (qtype != TAOS_QTYPE_RPC && qtype != TAOS_QTYPE_CQ) return 0; // a hacker way to improve the performance - pSyncHead = (SSyncHead *) ( ((char *)pWalHead) - sizeof(SSyncHead)); + pSyncHead = (SSyncHead *)(((char *)pWalHead) - sizeof(SSyncHead)); pSyncHead->type = TAOS_SMSG_FORWARD; pSyncHead->pversion = 0; - pSyncHead->len = sizeof(SWalHead) + pWalHead->len; - fwdLen = pSyncHead->len + sizeof(SSyncHead); //include the WAL and SYNC head + pSyncHead->len = sizeof(SWalHead) + pWalHead->len; + fwdLen = pSyncHead->len + sizeof(SSyncHead); // include the WAL and SYNC head pthread_mutex_lock(&(pNode->mutex)); for (int i 
= 0; i < pNode->replica; ++i) { pPeer = pNode->peerInfo[i]; - if (pPeer == NULL || pPeer->peerFd <0) continue; - if (pPeer->role != TAOS_SYNC_ROLE_SLAVE && pPeer->sstatus != TAOS_SYNC_STATUS_CACHE) continue; - + if (pPeer == NULL || pPeer->peerFd < 0) continue; + if (pPeer->role != TAOS_SYNC_ROLE_SLAVE && pPeer->sstatus != TAOS_SYNC_STATUS_CACHE) continue; + if (pNode->quorum > 1 && code == 0) { syncSaveFwdInfo(pNode, pWalHead->version, mhandle); code = 1; @@ -335,12 +354,12 @@ void syncConfirmForward(void *param, uint64_t version, int32_t code) { if (pNode == NULL) return; if (pNode->quorum <= 1) return; - SSyncPeer *pPeer = pNode->pMaster; + SSyncPeer *pPeer = pNode->pMaster; if (pPeer == NULL) return; char msg[sizeof(SSyncHead) + sizeof(SFwdRsp)] = {0}; - SSyncHead *pHead = (SSyncHead *) msg; + SSyncHead *pHead = (SSyncHead *)msg; pHead->type = TAOS_SMSG_FORWARD_RSP; pHead->len = sizeof(SFwdRsp); @@ -363,7 +382,7 @@ void syncRecover(void *param) { SSyncNode *pNode = param; SSyncPeer *pPeer; - // to do: add a few lines to check if recover is OK + // to do: add a few lines to check if recover is OK // if take this node to unsync state, the whole system may not work nodeRole = TAOS_SYNC_ROLE_UNSYNCED; @@ -373,7 +392,7 @@ void syncRecover(void *param) { pthread_mutex_lock(&(pNode->mutex)); for (int i = 0; i < pNode->replica; ++i) { - pPeer = (SSyncPeer *) pNode->peerInfo[i]; + pPeer = (SSyncPeer *)pNode->peerInfo[i]; if (pPeer->peerFd >= 0) { syncRestartConnection(pPeer); } @@ -386,7 +405,7 @@ int syncGetNodesRole(void *param, SNodesRole *pNodesRole) { SSyncNode *pNode = param; pNodesRole->selfIndex = pNode->selfIndex; - for (int i=0; ireplica; ++i) { + for (int i = 0; i < pNode->replica; ++i) { pNodesRole->nodeId[i] = pNode->peerInfo[i]->nodeId; pNodesRole->role[i] = pNode->peerInfo[i]->role; } @@ -410,7 +429,7 @@ static void syncAddArbitrator(SSyncNode *pNode) { if (-1 == ret) { nodeInfo.nodePort = tsArbitratorPort; } - + if (pPeer) { if ((strcmp(nodeInfo.nodeFqdn, pPeer->fqdn) == 0) && (nodeInfo.nodePort == pPeer->port)) { return; @@ -418,39 +437,26 @@ static void syncAddArbitrator(SSyncNode *pNode) { syncRemovePeer(pPeer); pNode->peerInfo[TAOS_SYNC_MAX_REPLICA] = NULL; } - } + } pNode->peerInfo[TAOS_SYNC_MAX_REPLICA] = syncAddPeer(pNode, &nodeInfo); } -static void syncAddNodeRef(SSyncNode *pNode) -{ - atomic_add_fetch_8(&pNode->refCount, 1); +static void syncAddNodeRef(SSyncNode *pNode) { + atomic_add_fetch_8(&pNode->refCount, 1); } -static void syncDecNodeRef(SSyncNode *pNode) -{ +static void syncDecNodeRef(SSyncNode *pNode) { if (atomic_sub_fetch_8(&pNode->refCount, 1) == 0) { pthread_mutex_destroy(&pNode->mutex); taosTFree(pNode->pRecv); taosTFree(pNode->pSyncFwds); taosTFree(pNode); - - if (atomic_sub_fetch_32(&tsNodeNum, 1) == 0) { - if (tsTcpPool) taosCloseTcpThreadPool(tsTcpPool); - if (syncTmrCtrl) taosTmrCleanUp(syncTmrCtrl); - if (vgIdHash) taosHashCleanup(vgIdHash); - syncTmrCtrl = NULL; - tsTcpPool = NULL; - vgIdHash = NULL; - syncModuleInit = PTHREAD_ONCE_INIT; - sDebug("sync module is cleaned up"); - } } } void syncAddPeerRef(SSyncPeer *pPeer) { - atomic_add_fetch_8(&pPeer->refCount, 1); + atomic_add_fetch_8(&pPeer->refCount, 1); } int syncDecPeerRef(SSyncPeer *pPeer) { @@ -486,8 +492,8 @@ static void syncRemovePeer(SSyncPeer *pPeer) { static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) { uint32_t ip = taosGetIpFromFqdn(pInfo->nodeFqdn); if (ip == -1) return NULL; - - SSyncPeer *pPeer = (SSyncPeer *) calloc(1, sizeof(SSyncPeer)); + + SSyncPeer *pPeer = 
(SSyncPeer *)calloc(1, sizeof(SSyncPeer)); if (pPeer == NULL) return NULL; pPeer->nodeId = pInfo->nodeId; @@ -506,9 +512,11 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) { int ret = strcmp(pPeer->fqdn, tsNodeFqdn); if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) { sDebug("%s, start to check peer connection", pPeer->id); - taosTmrReset(syncCheckPeerConnection, 100 + (pNode->vgId*10)%100, pPeer, syncTmrCtrl, &pPeer->timer); + int32_t checkMs = 100 + (pNode->vgId * 10) % 100; + if (pNode->vgId) checkMs = tsStatusInterval * 2000 + 100; + taosTmrReset(syncCheckPeerConnection, checkMs, pPeer, syncTmrCtrl, &pPeer->timer); } - + syncAddNodeRef(pNode); return pPeer; } @@ -542,16 +550,18 @@ static void syncChooseMaster(SSyncNode *pNode) { sDebug("vgId:%d, choose master", pNode->vgId); for (int i = 0; i < pNode->replica; ++i) { - if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE) + if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE) { onlineNum++; + } } if (onlineNum == pNode->replica) { // if all peers are online, peer with highest version shall be master index = 0; for (int i = 1; i < pNode->replica; ++i) { - if (pNode->peerInfo[i]->version > pNode->peerInfo[index]->version) + if (pNode->peerInfo[i]->version > pNode->peerInfo[index]->version) { index = i; + } } } @@ -568,8 +578,9 @@ static void syncChooseMaster(SSyncNode *pNode) { //slave with highest version shall be master pPeer = pNode->peerInfo[i]; if (pPeer->role == TAOS_SYNC_ROLE_SLAVE || pPeer->role == TAOS_SYNC_ROLE_MASTER) { - if (index < 0 || pPeer->version > pNode->peerInfo[index]->version) + if (index < 0 || pPeer->version > pNode->peerInfo[index]->version) { index = i; + } } } } @@ -595,8 +606,9 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) { int replica = pNode->replica; for (int i = 0; i < pNode->replica; ++i) { - if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE) + if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE) { onlineNum++; + } } // add arbitrator connection @@ -644,7 +656,7 @@ static int syncValidateMaster(SSyncPeer *pPeer) { code = -1; for (int i = 0; i < pNode->replica; ++i) { - if ( i == pNode->selfIndex ) continue; + if (i == pNode->selfIndex) continue; syncRestartPeer(pNode->peerInfo[i]); } } @@ -661,12 +673,11 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus peersStatus[], int8_t ne pNode->peerInfo[pNode->selfIndex]->version = nodeVersion; pPeer->role = newRole; - sDebug("%s, own role:%s, new peer role:%s", pPeer->id, - syncRole[nodeRole], syncRole[pPeer->role]); + sDebug("%s, own role:%s, new peer role:%s", pPeer->id, syncRole[nodeRole], syncRole[pPeer->role]); SSyncPeer *pMaster = syncCheckMaster(pNode); - if ( pMaster ) { + if (pMaster) { // master is there pNode->pMaster = pMaster; sDebug("%s, it is the master, ver:%" PRIu64, pMaster->id, pMaster->version); @@ -691,27 +702,30 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus peersStatus[], int8_t ne for (i = 0; i < pNode->replica; ++i) { SSyncPeer *pTemp = pNode->peerInfo[i]; if (pTemp->role != peersStatus[i].role) break; - if ((pTemp->role != TAOS_SYNC_ROLE_OFFLINE) && (pTemp->version != peersStatus[i].version)) break; + if ((pTemp->role != TAOS_SYNC_ROLE_OFFLINE) && (pTemp->version != peersStatus[i].version)) break; } - + if (i >= pNode->replica) consistent = 1; } else { if (pNode->replica == 2) consistent = 1; } - if (consistent) + if (consistent) { syncChooseMaster(pNode); + } } if (syncRequired) { syncRecoverFromMaster(pMaster); } - if (peerOldRole != 
newRole || nodeRole != selfOldRole) + if (peerOldRole != newRole || nodeRole != selfOldRole) { syncBroadcastStatus(pNode); + } - if (nodeRole != TAOS_SYNC_ROLE_MASTER) + if (nodeRole != TAOS_SYNC_ROLE_MASTER) { syncResetFlowCtrl(pNode); + } } static void syncRestartPeer(SSyncPeer *pPeer) { @@ -722,8 +736,9 @@ static void syncRestartPeer(SSyncPeer *pPeer) { pPeer->sstatus = TAOS_SYNC_STATUS_INIT; int ret = strcmp(pPeer->fqdn, tsNodeFqdn); - if (ret > 0 || (ret == 0 && pPeer->port > tsSyncPort)) + if (ret > 0 || (ret == 0 && pPeer->port > tsSyncPort)) { taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer); + } } void syncRestartConnection(SSyncPeer *pPeer) { @@ -747,13 +762,13 @@ static void syncProcessSyncRequest(char *msg, SSyncPeer *pPeer) { if (pPeer->sstatus != TAOS_SYNC_STATUS_INIT) { sDebug("%s, sync is already started", pPeer->id); - return; // already started + return; // already started } // start a new thread to retrieve the data syncAddPeerRef(pPeer); - pthread_attr_t thattr; - pthread_t thread; + pthread_attr_t thattr; + pthread_t thread; pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED); int ret = pthread_create(&thread, &thattr, syncRetrieveData, pPeer); @@ -780,8 +795,8 @@ static void syncNotStarted(void *param, void *tmrId) { } static void syncTryRecoverFromMaster(void *param, void *tmrId) { - SSyncPeer *pPeer = param; - SSyncNode *pNode = pPeer->pSyncNode; + SSyncPeer *pPeer = param; + SSyncNode *pNode = pPeer->pSyncNode; pthread_mutex_lock(&(pNode->mutex)); syncRecoverFromMaster(pPeer); @@ -797,13 +812,15 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) { } taosTmrStopA(&pPeer->timer); - if (tsSyncNum >= tsMaxSyncNum) { + + // Ensure the sync of mnode not interrupted + if (pNode->vgId != 1 && tsSyncNum >= tsMaxSyncNum) { sInfo("%s, %d syncs are in process, try later", pPeer->id, tsSyncNum); - taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId*10)%200, pPeer, syncTmrCtrl, &pPeer->timer); + taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId * 10) % 200, pPeer, syncTmrCtrl, &pPeer->timer); return; } - sDebug("%s, try to sync", pPeer->id) + sDebug("%s, try to sync", pPeer->id); SFirstPkt firstPkt; memset(&firstPkt, 0, sizeof(firstPkt)); @@ -812,49 +829,47 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) { firstPkt.syncHead.len = sizeof(firstPkt) - sizeof(SSyncHead); tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn)); firstPkt.port = tsSyncPort; - taosTmrReset(syncNotStarted, tsSyncTimer*1000, pPeer, syncTmrCtrl, &pPeer->timer); + taosTmrReset(syncNotStarted, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer); - if (write(pPeer->peerFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt) ) { + if (write(pPeer->peerFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) { sError("%s, failed to send sync-req to peer", pPeer->id); } else { nodeSStatus = TAOS_SYNC_STATUS_START; sInfo("%s, sync-req is sent", pPeer->id); } - - return; } static void syncProcessFwdResponse(char *cont, SSyncPeer *pPeer) { - SSyncNode * pNode = pPeer->pSyncNode; - SFwdRsp *pFwdRsp = (SFwdRsp *) cont; - SSyncFwds *pSyncFwds = pNode->pSyncFwds; - SFwdInfo *pFwdInfo; + SSyncNode *pNode = pPeer->pSyncNode; + SFwdRsp * pFwdRsp = (SFwdRsp *)cont; + SSyncFwds *pSyncFwds = pNode->pSyncFwds; + SFwdInfo * pFwdInfo; sDebug("%s, forward-rsp is received, ver:%" PRIu64, pPeer->id, pFwdRsp->version); SFwdInfo *pFirst = pSyncFwds->fwdInfo + pSyncFwds->first; if (pFirst->version <= pFwdRsp->version && 
pSyncFwds->fwds > 0) { // find the forwardInfo from first - for (int i=0; ifwds; ++i) { - pFwdInfo = pSyncFwds->fwdInfo + (i+pSyncFwds->first)%tsMaxFwdInfo; + for (int i = 0; i < pSyncFwds->fwds; ++i) { + pFwdInfo = pSyncFwds->fwdInfo + (i + pSyncFwds->first) % tsMaxFwdInfo; if (pFwdRsp->version == pFwdInfo->version) break; } - + syncProcessFwdAck(pNode, pFwdInfo, pFwdRsp->code); syncRemoveConfirmedFwdInfo(pNode); } } static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) { - SSyncNode * pNode = pPeer->pSyncNode; - SWalHead *pHead = (SWalHead *)cont; + SSyncNode *pNode = pPeer->pSyncNode; + SWalHead * pHead = (SWalHead *)cont; sDebug("%s, forward is received, ver:%" PRIu64, pPeer->id, pHead->version); if (nodeRole == TAOS_SYNC_ROLE_SLAVE) { - //nodeVersion = pHead->version; + // nodeVersion = pHead->version; (*pNode->writeToCache)(pNode->ahandle, pHead, TAOS_QTYPE_FWD); - } else { + } else { if (nodeSStatus != TAOS_SYNC_STATUS_INIT) { syncSaveIntoBuffer(pPeer, pHead); } else { @@ -875,12 +890,13 @@ static void syncProcessPeersStatusMsg(char *cont, SSyncPeer *pPeer) { pPeer->version = pPeersStatus->version; syncCheckRole(pPeer, pPeersStatus->peersStatus, pPeersStatus->role); - if (pPeersStatus->ack) + if (pPeersStatus->ack) { syncSendPeersStatusMsgToPeer(pPeer, 0); + } } static int syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead, char *cont) { - if (pPeer->peerFd <0) return -1; + if (pPeer->peerFd < 0) return -1; int hlen = taosReadMsg(pPeer->peerFd, pHead, sizeof(SSyncHead)); if (hlen != sizeof(SSyncHead)) { @@ -904,9 +920,9 @@ static int syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead, char *cont) { } static int syncProcessPeerMsg(void *param, void *buffer) { - SSyncPeer * pPeer = param; - SSyncHead head; - char *cont = (char *)buffer; + SSyncPeer *pPeer = param; + SSyncHead head; + char * cont = (char *)buffer; SSyncNode *pNode = pPeer->pSyncNode; pthread_mutex_lock(&(pNode->mutex)); @@ -930,16 +946,16 @@ static int syncProcessPeerMsg(void *param, void *buffer) { return code; } -#define statusMsgLen sizeof(SSyncHead)+sizeof(SPeersStatus)+sizeof(SPeerStatus)*TAOS_SYNC_MAX_REPLICA +#define statusMsgLen sizeof(SSyncHead) + sizeof(SPeersStatus) + sizeof(SPeerStatus) * TAOS_SYNC_MAX_REPLICA static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack) { SSyncNode *pNode = pPeer->pSyncNode; char msg[statusMsgLen] = {0}; - if (pPeer->peerFd <0 || pPeer->ip ==0) return; + if (pPeer->peerFd < 0 || pPeer->ip == 0) return; - SSyncHead *pHead = (SSyncHead *) msg; - SPeersStatus *pPeersStatus = (SPeersStatus *) (msg + sizeof(SSyncHead)); + SSyncHead * pHead = (SSyncHead *)msg; + SPeersStatus *pPeersStatus = (SPeersStatus *)(msg + sizeof(SSyncHead)); pHead->type = TAOS_SMSG_STATUS; pHead->len = statusMsgLen - sizeof(SSyncHead); @@ -977,28 +993,28 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) { int connFd = taosOpenTcpClientSocket(pPeer->ip, pPeer->port, 0); if (connFd < 0) { sDebug("%s, failed to open tcp socket(%s)", pPeer->id, strerror(errno)); - taosTmrReset(syncCheckPeerConnection, tsSyncTimer *1000, pPeer, syncTmrCtrl, &pPeer->timer); + taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer); return; } SFirstPkt firstPkt; memset(&firstPkt, 0, sizeof(firstPkt)); - firstPkt.syncHead.vgId = pPeer->nodeId ? pNode->vgId:0; + firstPkt.syncHead.vgId = pPeer->nodeId ? 
pNode->vgId : 0; firstPkt.syncHead.type = TAOS_SMSG_STATUS; - tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn)); + tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn)); firstPkt.port = tsSyncPort; firstPkt.sourceId = pNode->vgId; // tell arbitrator its vgId if (write(connFd, &firstPkt, sizeof(firstPkt)) == sizeof(firstPkt)) { sDebug("%s, connection to peer server is setup", pPeer->id); - pPeer->peerFd = connFd; + pPeer->peerFd = connFd; pPeer->role = TAOS_SYNC_ROLE_UNSYNCED; pPeer->pConn = taosAllocateTcpConn(tsTcpPool, pPeer, connFd); syncAddPeerRef(pPeer); } else { sDebug("try later"); close(connFd); - taosTmrReset(syncCheckPeerConnection, tsSyncTimer *1000, pPeer, syncTmrCtrl, &pPeer->timer); + taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer); } } @@ -1009,7 +1025,7 @@ static void syncCheckPeerConnection(void *param, void *tmrId) { pthread_mutex_lock(&(pNode->mutex)); sDebug("%s, check peer connection", pPeer->id); - syncSetupPeerConnection(pPeer); + syncSetupPeerConnection(pPeer); pthread_mutex_unlock(&(pNode->mutex)); } @@ -1018,7 +1034,7 @@ static void syncCreateRestoreDataThread(SSyncPeer *pPeer) { taosTmrStopA(&pPeer->timer); pthread_attr_t thattr; - pthread_t thread; + pthread_t thread; pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED); @@ -1030,15 +1046,15 @@ static void syncCreateRestoreDataThread(SSyncPeer *pPeer) { sError("%s, failed to create sync thread", pPeer->id); taosClose(pPeer->syncFd); syncDecPeerRef(pPeer); - } else { + } else { sInfo("%s, sync connection is up", pPeer->id); } } static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp) { - char ipstr[24]; - int i; - + char ipstr[24]; + int i; + tinet_ntoa(ipstr, sourceIp); sDebug("peer TCP connection from ip:%s", ipstr); @@ -1063,8 +1079,7 @@ static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp) { SSyncPeer *pPeer; for (i = 0; i < pNode->replica; ++i) { pPeer = pNode->peerInfo[i]; - if (pPeer && (strcmp(pPeer->fqdn, firstPkt.fqdn) == 0) && (pPeer->port == firstPkt.port)) - break; + if (pPeer && (strcmp(pPeer->fqdn, firstPkt.fqdn) == 0) && (pPeer->port == firstPkt.port)) break; } pPeer = (i < pNode->replica) ? 
pNode->peerInfo[i] : NULL; @@ -1089,8 +1104,6 @@ static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp) { } pthread_mutex_unlock(&(pNode->mutex)); - - return; } static void syncProcessBrokenLink(void *param) { @@ -1119,10 +1132,12 @@ static void syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle) { if (pSyncFwds->fwds >= tsMaxFwdInfo) { pSyncFwds->first = (pSyncFwds->first + 1) % tsMaxFwdInfo; pSyncFwds->fwds--; - } + } + + if (pSyncFwds->fwds > 0) { + pSyncFwds->last = (pSyncFwds->last + 1) % tsMaxFwdInfo; + } - if (pSyncFwds->fwds > 0) - pSyncFwds->last = (pSyncFwds->last+1) % tsMaxFwdInfo; SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->last; pFwdInfo->version = version; pFwdInfo->mhandle = mhandle; @@ -1138,14 +1153,14 @@ static void syncRemoveConfirmedFwdInfo(SSyncNode *pNode) { SSyncFwds *pSyncFwds = pNode->pSyncFwds; int fwds = pSyncFwds->fwds; - for (int i=0; i<fwds; ++i) { - SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->first; + for (int i = 0; i < fwds; ++i) { + SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->first; if (pFwdInfo->confirmed == 0) break; - pSyncFwds->first = (pSyncFwds->first+1) % tsMaxFwdInfo; + pSyncFwds->first = (pSyncFwds->first + 1) % tsMaxFwdInfo; pSyncFwds->fwds--; if (pSyncFwds->fwds == 0) pSyncFwds->first = pSyncFwds->last; - //sDebug("vgId:%d, fwd info is removed, ver:%d, fwds:%d", + // sDebug("vgId:%d, fwd info is removed, ver:%d, fwds:%d", // pNode->vgId, pFwdInfo->version, pSyncFwds->fwds); memset(pFwdInfo, 0, sizeof(SFwdInfo)); } @@ -1157,12 +1172,14 @@ static void syncProcessFwdAck(SSyncNode *pNode, SFwdInfo *pFwdInfo, int32_t code if (code == 0) { pFwdInfo->acks++; - if (pFwdInfo->acks >= pNode->quorum-1) + if (pFwdInfo->acks >= pNode->quorum - 1) { confirm = 1; + } } else { pFwdInfo->nacks++; - if (pFwdInfo->nacks > pNode->replica-pNode->quorum) + if (pFwdInfo->nacks > pNode->replica - pNode->quorum) { confirm = 1; + } } if (confirm && pFwdInfo->confirmed == 0) { @@ -1179,15 +1196,15 @@ static void syncMonitorFwdInfos(void *param, void *tmrId) { if (pSyncFwds->fwds > 0) { pthread_mutex_lock(&(pNode->mutex)); - for (int i=0; i<pSyncFwds->fwds; ++i) { - SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first+i) % tsMaxFwdInfo; + for (int i = 0; i < pSyncFwds->fwds; ++i) { + SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % tsMaxFwdInfo; if (time - pFwdInfo->time < 2000) break; syncProcessFwdAck(pNode, pFwdInfo, TSDB_CODE_RPC_NETWORK_UNAVAIL); } syncRemoveConfirmedFwdInfo(pNode); pthread_mutex_unlock(&(pNode->mutex)); - } - + } + pNode->pFwdTimer = taosTmrStart(syncMonitorFwdInfos, 300, pNode, syncTmrCtrl); } diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index 51c3a625e8..626ad77da2 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -26,11 +26,13 @@ const char *tsdbFileSuffix[] = {".head", ".data", ".last", ".stat", ".h", ".d", ".l", ".s"}; -static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type); -static void tsdbDestroyFile(SFile *pFile); -static int compFGroup(const void *arg1, const void *arg2); -static int keyFGroupCompFunc(const void *key, const void *fgroup); -static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo); +static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type); +static void tsdbDestroyFile(SFile *pFile); +static int compFGroup(const void *arg1, const void *arg2); +static int keyFGroupCompFunc(const void *key, const void *fgroup); +static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo); +static TSKEY 
tsdbGetCurrMinKey(int8_t precision, int32_t keep); +static int tsdbGetCurrMinFid(int8_t precision, int32_t keep, int32_t days); // ---------------- INTERNAL FUNCTIONS ---------------- STsdbFileH *tsdbNewFileH(STsdbCfg *pCfg) { @@ -79,9 +81,11 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { int vid = 0; regex_t regex1, regex2; int code = 0; + char fname[TSDB_FILENAME_LEN] = "\0"; SFileGroup fileGroup = {0}; STsdbFileH *pFileH = pRepo->tsdbFileH; + STsdbCfg * pCfg = &(pRepo->config); tDataDir = tsdbGetDataDirName(pRepo->rootDir); if (tDataDir == NULL) { @@ -108,6 +112,8 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { goto _err; } + int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile); + struct dirent *dp = NULL; while ((dp = readdir(dir)) != NULL) { if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) continue; @@ -120,6 +126,14 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { continue; } + if (fid < mfid) { + for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) { + tsdbGetDataFileName(pRepo->rootDir, pCfg->tsdbId, fid, type, fname); + (void)remove(fname); + } + continue; + } + if (tsdbSearchFGroup(pFileH, fid, TD_EQ) != NULL) continue; memset((void *)(&fileGroup), 0, sizeof(SFileGroup)); fileGroup.fileId = fid; @@ -128,12 +142,30 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { } else if (code == REG_NOMATCH) { code = regexec(®ex2, dp->d_name, 0, NULL, 0); if (code == 0) { - tsdbDebug("vgId:%d invalid file %s exists, remove it", REPO_ID(pRepo), dp->d_name); - char *fname = malloc(strlen(tDataDir) + strlen(dp->d_name) + 2); - if (fname == NULL) goto _err; - sprintf(fname, "%s/%s", tDataDir, dp->d_name); - (void)remove(fname); - free(fname); + size_t tsize = strlen(tDataDir) + strlen(dp->d_name) + 2; + char * fname1 = malloc(tsize); + if (fname1 == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _err; + } + sprintf(fname1, "%s/%s", tDataDir, dp->d_name); + + tsize = tsize + 64; + char *fname2 = malloc(tsize); + if (fname2 == NULL) { + free(fname1); + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _err; + } + sprintf(fname2, "%s/%s_back_%" PRId64, tDataDir, dp->d_name, taosGetTimestamp(TSDB_TIME_PRECISION_MILLI)); + + (void)rename(fname1, fname2); + + tsdbDebug("vgId:%d file %s exists, backup it as %s", REPO_ID(pRepo), fname1, fname2); + + free(fname1); + free(fname2); + continue; } else if (code == REG_NOMATCH) { tsdbError("vgId:%d invalid file %s exists, ignore it", REPO_ID(pRepo), dp->d_name); continue; @@ -146,6 +178,7 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { pFileH->pFGroup[pFileH->nFGroups++] = fileGroup; qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup); + tsdbDebug("vgId:%d file group %d is restored, nFGroups %d", REPO_ID(pRepo), fileGroup.fileId, pFileH->nFGroups); } regfree(®ex1); @@ -179,8 +212,18 @@ void tsdbCloseFileH(STsdbRepo *pRepo) { SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) { STsdbFileH *pFileH = pRepo->tsdbFileH; + STsdbCfg * pCfg = &(pRepo->config); - if (pFileH->nFGroups >= pFileH->maxFGroups) return NULL; + if (pFileH->nFGroups >= pFileH->maxFGroups) { + int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile); + if (pFileH->pFGroup[0].fileId < mfid) { + pthread_rwlock_wrlock(&pFileH->fhlock); + tsdbRemoveFileGroup(pRepo, &(pFileH->pFGroup[0])); + pthread_rwlock_unlock(&pFileH->fhlock); + } + } + + ASSERT(pFileH->nFGroups < pFileH->maxFGroups); SFileGroup fGroup; SFileGroup *pFGroup = &fGroup; @@ -342,8 +385,7 @@ void tsdbFitRetention(STsdbRepo *pRepo) { STsdbFileH 
*pFileH = pRepo->tsdbFileH; SFileGroup *pGroup = pFileH->pFGroup; - int mfid = (int)(TSDB_KEY_FILEID(taosGetTimestamp(pCfg->precision), pCfg->daysPerFile, pCfg->precision) - - TSDB_MAX_FILE(pCfg->keep, pCfg->daysPerFile)); + int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile); pthread_rwlock_wrlock(&(pFileH->fhlock)); @@ -547,3 +589,11 @@ static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo) { } } } + +static TSKEY tsdbGetCurrMinKey(int8_t precision, int32_t keep) { + return (TSKEY)(taosGetTimestamp(precision) - keep * tsMsPerDay[precision]); +} + +static int tsdbGetCurrMinFid(int8_t precision, int32_t keep, int32_t days) { + return (int)(TSDB_KEY_FILEID(tsdbGetCurrMinKey(precision, keep), days, precision)); +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index db24eae148..bb0f00ef53 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -767,7 +767,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa int32_t lsize = tsize; int32_t keyLen = 0; for (int ncol = 0; ncol < pDataCols->numOfCols; ncol++) { - if (tcol >= nColsNotAllNull) break; + if (ncol != 0 && tcol >= nColsNotAllNull) break; SDataCol *pDataCol = pDataCols->cols + ncol; SCompCol *pCompCol = pCompData->cols + tcol; diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index 875c597008..0ec55841a0 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -270,7 +270,7 @@ void taosReadGlobalLogCfg() { } wordfree(&full_path); - taosReadLogOption("tsLogDir", tsLogDir); + taosReadLogOption("logDir", tsLogDir); sprintf(fileName, "%s/taos.cfg", configDir); fp = fopen(fileName, "r"); @@ -288,9 +288,9 @@ void taosReadGlobalLogCfg() { option = value = NULL; olen = vlen = 0; - taosGetline(&line, &len, fp); + taosGetline(&line, &len, fp); line[len - 1] = 0; - + paGetToken(line, &option, &olen); if (olen == 0) continue; option[olen] = 0; diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 766301914a..a8587de767 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -62,6 +62,7 @@ typedef struct { pthread_mutex_t logMutex; } SLogObj; +int32_t tsLogKeepDays = 0; int32_t tsAsyncLog = 1; float tsTotalLogDirGB = 0; float tsAvailLogDirGB = 0; @@ -78,6 +79,7 @@ static int32_t taosPushLogBuffer(SLogBuff *tLogBuff, char *msg, int32_t msgLen static SLogBuff *taosLogBuffNew(int32_t bufSize); static void taosCloseLogByFd(int32_t oldFd); static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum); +extern void taosPrintGlobalCfg(); static int32_t taosStartLog() { pthread_attr_t threadAttr; @@ -136,11 +138,24 @@ static void taosUnLockFile(int32_t fd) { } } +static void taosKeepOldLog(char *oldName) { + if (tsLogKeepDays <= 0) return; + + int64_t fileSec = taosGetTimestampSec(); + char fileName[LOG_FILE_NAME_LEN + 20]; + snprintf(fileName, LOG_FILE_NAME_LEN + 20, "%s.%" PRId64, tsLogObj.logName, fileSec); + + taosRename(oldName, fileName); + taosRemoveOldLogFiles(tsLogDir, tsLogKeepDays); +} + static void *taosThreadToOpenNewFile(void *param) { - char name[LOG_FILE_NAME_LEN + 20]; + char keepName[LOG_FILE_NAME_LEN + 20]; + sprintf(keepName, "%s.%d", tsLogObj.logName, tsLogObj.flag); tsLogObj.flag ^= 1; tsLogObj.lines = 0; + char name[LOG_FILE_NAME_LEN + 20]; sprintf(name, "%s.%d", tsLogObj.logName, tsLogObj.flag); umask(0); @@ -150,6 +165,7 @@ static void *taosThreadToOpenNewFile(void *param) { uError("open new log file fail! 
fd:%d reason:%s", fd, strerror(errno)); return NULL; } + taosLockFile(fd); (void)lseek(fd, 0, SEEK_SET); @@ -157,9 +173,13 @@ static void *taosThreadToOpenNewFile(void *param) { tsLogObj.logHandle->fd = fd; tsLogObj.lines = 0; tsLogObj.openInProgress = 0; - uInfo("new log file is opened!!!"); - taosCloseLogByFd(oldFd); + + uInfo(" new log file:%d is opened", tsLogObj.flag); + uInfo("=================================="); + taosPrintGlobalCfg(); + taosKeepOldLog(keepName); + return NULL; } @@ -264,20 +284,23 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { strcat(name, ".0"); } + if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { + strcpy(name, fn); + strcat(name, ".1"); + } + + bool log0Exist = stat(name, &logstat0) >= 0; + bool log1Exist = stat(name, &logstat1) >= 0; + // if none of the log files exist, open 0, if both exists, open the old one - if (stat(name, &logstat0) < 0) { + if (!log0Exist && !log1Exist) { tsLogObj.flag = 0; + } else if (!log1Exist) { + tsLogObj.flag = 0; + } else if (!log0Exist) { + tsLogObj.flag = 1; } else { - if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { - strcpy(name, fn); - strcat(name, ".1"); - } - - if (stat(name, &logstat1) < 0) { - tsLogObj.flag = 1; - } else { - tsLogObj.flag = (logstat0.st_mtime > logstat1.st_mtime) ? 0 : 1; - } + tsLogObj.flag = (logstat0.st_mtime > logstat1.st_mtime) ? 0 : 1; } char fileName[LOG_FILE_NAME_LEN + 50] = "\0"; diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 5a778156ff..a4e88fb946 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -57,6 +57,9 @@ void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code) {} #endif int32_t vnodeInitResources() { + int code = syncInit(); + if (code != 0) return code; + vnodeInitWriteFp(); vnodeInitReadFp(); @@ -70,11 +73,12 @@ int32_t vnodeInitResources() { } void vnodeCleanupResources() { - if (tsDnodeVnodesHash != NULL) { taosHashCleanup(tsDnodeVnodesHash); tsDnodeVnodesHash = NULL; } + + syncCleanUp(); } int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) { @@ -382,7 +386,13 @@ void vnodeRelease(void *pVnodeRaw) { char newDir[TSDB_FILENAME_LEN] = {0}; sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId); sprintf(newDir, "%s/vnode%d", tsVnodeBakDir, vgId); - taosRename(rootDir, newDir); + + if (0 == tsEnableVnodeBak) { + vInfo("vgId:%d, vnode backup not enabled", pVnode->vgId); + } else { + taosRename(rootDir, newDir); + } + taosRemoveDir(rootDir); dnodeSendStatusMsgToMnode(); } @@ -671,9 +681,13 @@ static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) { len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pVnodeCfg->cfg.quorum); len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n"); + + vInfo("vgId:%d, save vnode cfg, replica:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.replications); for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) { len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", pVnodeCfg->nodes[i].nodeId); len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", pVnodeCfg->nodes[i].nodeEp); + vInfo("vgId:%d, save vnode cfg, nodeId:%d nodeEp:%s", pVnodeCfg->cfg.vgId, pVnodeCfg->nodes[i].nodeId, + pVnodeCfg->nodes[i].nodeEp); if (i < pVnodeCfg->cfg.replications - 1) { len += snprintf(content + len, maxLen - len, " },{\n"); diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 11315f5321..017eeaf426 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -93,11 +93,12 @@ static int32_t 
vnodeDumpQueryResult(SRspRet *pRet, void* pVnode, void** handle, vDebug("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle); } } else { - SRetrieveTableRsp* pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); + SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); memset(pRsp, 0, sizeof(SRetrieveTableRsp)); pRsp->completed = true; pRet->rsp = pRsp; + pRet->len = sizeof(SRetrieveTableRsp); *freeHandle = true; } @@ -270,6 +271,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { if (code != TSDB_CODE_SUCCESS) { //TODO handle malloc failure pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); + pRet->len = sizeof(SRetrieveTableRsp); memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); freeHandle = true; } else { // result is not ready, return immediately diff --git a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml index b796d52d28..15aed1cf03 100644 --- a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml +++ b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml @@ -1,85 +1,91 @@ - 4.0.0 + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + 4.0.0 - com.taosdata.jdbc - SpringJdbcTemplate - 1.0-SNAPSHOT + com.taosdata.jdbc + SpringJdbcTemplate + 1.0-SNAPSHOT - SpringJdbcTemplate - http://www.taosdata.com + SpringJdbcTemplate + http://www.taosdata.com - - UTF-8 - 1.8 - 1.8 - + + UTF-8 + 1.8 + 1.8 + - + - - org.springframework - spring-context - 4.3.2.RELEASE - + + org.springframework + spring-context + 5.2.8.RELEASE + - - org.springframework - spring-jdbc - 4.3.2.RELEASE - + + org.springframework + spring-jdbc + 5.1.9.RELEASE + - - junit - junit - 4.11 - test - + + org.springframework + spring-test + 5.1.9.RELEASE + - - com.taosdata.jdbc - taos-jdbcdriver - 2.0.2 - + + junit + junit + 4.13 + test + - + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.4 + - - - - maven-compiler-plugin - 3.8.0 - - 1.8 - 1.8 - - - - org.apache.maven.plugins - maven-assembly-plugin - 3.1.0 - - - - com.taosdata.jdbc.App - - - - jar-with-dependencies - - - - - make-assembly - package - - single - - - - - - + + + + + + maven-compiler-plugin + 3.8.0 + + 1.8 + 1.8 + + + + org.apache.maven.plugins + maven-assembly-plugin + 3.1.0 + + + + com.taosdata.jdbc.example.jdbcTemplate.App + + + + jar-with-dependencies + + + + + make-assembly + package + + single + + + + + + diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/App.java b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/App.java deleted file mode 100644 index 3230af46a8..0000000000 --- a/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/App.java +++ /dev/null @@ -1,44 +0,0 @@ -package com.taosdata.jdbc; - - -import org.springframework.context.ApplicationContext; -import org.springframework.context.support.ClassPathXmlApplicationContext; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.util.CollectionUtils; - -import java.util.List; -import java.util.Map; - -public class App { - - public static void main( String[] args ) { - - ApplicationContext ctx = new ClassPathXmlApplicationContext("applicationContext.xml"); - - JdbcTemplate jdbcTemplate = (JdbcTemplate) ctx.getBean("jdbcTemplate"); - - // create database - jdbcTemplate.execute("create database if not exists db "); - - // create table - jdbcTemplate.execute("create table if not exists db.tb (ts timestamp, temperature 
int, humidity float)"); - - String insertSql = "insert into db.tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"; - - // insert rows - int affectedRows = jdbcTemplate.update(insertSql); - - System.out.println("insert success " + affectedRows + " rows."); - - // query for list - List<Map<String, Object>> resultList = jdbcTemplate.queryForList("select * from db.tb"); - - if(!CollectionUtils.isEmpty(resultList)){ - for (Map row : resultList){ - System.out.printf("%s, %d, %s\n", row.get("ts"), row.get("temperature"), row.get("humidity")); - } - } - - } - -} diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/App.java b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/App.java new file mode 100644 index 0000000000..a03ca3924f --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/App.java @@ -0,0 +1,48 @@ +package com.taosdata.jdbc.example.jdbcTemplate; + + +import com.taosdata.jdbc.example.jdbcTemplate.dao.ExecuteAsStatement; +import com.taosdata.jdbc.example.jdbcTemplate.dao.WeatherDao; +import com.taosdata.jdbc.example.jdbcTemplate.domain.Weather; +import org.springframework.context.ApplicationContext; +import org.springframework.context.support.ClassPathXmlApplicationContext; + +import java.sql.Timestamp; +import java.util.Date; +import java.util.List; +import java.util.Random; + +public class App { + + private static Random random = new Random(System.currentTimeMillis()); + + public static void main(String[] args) { + + ApplicationContext ctx = new ClassPathXmlApplicationContext("applicationContext.xml"); + + ExecuteAsStatement executor = ctx.getBean(ExecuteAsStatement.class); + // drop database + executor.doExecute("drop database if exists test"); + // create database + executor.doExecute("create database if not exists test"); + //use database + executor.doExecute("use test"); + // create table + executor.doExecute("create table if not exists test.weather (ts timestamp, temperature int, humidity float)"); + + WeatherDao weatherDao = ctx.getBean(WeatherDao.class); + Weather weather = new Weather(new Timestamp(new Date().getTime()), random.nextFloat() * 50.0f, random.nextInt(100)); + // insert rows + int affectedRows = weatherDao.add(weather); + System.out.println("insert success " + affectedRows + " rows."); + + // query for list + int limit = 10, offset = 0; + List<Weather> weatherList = weatherDao.queryForList(limit, offset); + for (Weather w : weatherList) { + System.out.println(w); + } + + } + +} diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/dao/ExecuteAsStatement.java b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/dao/ExecuteAsStatement.java new file mode 100644 index 0000000000..f146684cc0 --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/dao/ExecuteAsStatement.java @@ -0,0 +1,6 @@ +package com.taosdata.jdbc.example.jdbcTemplate.dao; + +public interface ExecuteAsStatement{ + + void doExecute(String sql); +} diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/dao/WeatherDao.java b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/dao/WeatherDao.java new file mode 100644 index 0000000000..28962ee1e6 --- /dev/null +++ 
b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/dao/WeatherDao.java @@ -0,0 +1,17 @@ +package com.taosdata.jdbc.example.jdbcTemplate.dao; + +import com.taosdata.jdbc.example.jdbcTemplate.domain.Weather; + +import java.util.List; + +public interface WeatherDao { + + + int add(Weather weather); + + int[] batchInsert(List weatherList); + + List queryForList(int limit, int offset); + + int count(); +} diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/dao/impl/ExecuteAsStatementImpl.java b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/dao/impl/ExecuteAsStatementImpl.java new file mode 100644 index 0000000000..2700e701cc --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/dao/impl/ExecuteAsStatementImpl.java @@ -0,0 +1,19 @@ +package com.taosdata.jdbc.example.jdbcTemplate.dao.impl; + +import com.taosdata.jdbc.example.jdbcTemplate.dao.ExecuteAsStatement; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.stereotype.Repository; + + +@Repository +public class ExecuteAsStatementImpl implements ExecuteAsStatement { + + @Autowired + private JdbcTemplate jdbcTemplate; + + @Override + public void doExecute(String sql) { + jdbcTemplate.execute(sql); + } +} diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/dao/impl/WeatherDaoImpl.java b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/dao/impl/WeatherDaoImpl.java new file mode 100644 index 0000000000..1e0e0ab68c --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/dao/impl/WeatherDaoImpl.java @@ -0,0 +1,64 @@ +package com.taosdata.jdbc.example.jdbcTemplate.dao.impl; + +import com.taosdata.jdbc.example.jdbcTemplate.dao.WeatherDao; +import com.taosdata.jdbc.example.jdbcTemplate.domain.Weather; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.jdbc.core.BatchPreparedStatementSetter; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.jdbc.core.namedparam.SqlParameterSourceUtils; +import org.springframework.jdbc.core.simple.SimpleJdbcInsert; +import org.springframework.stereotype.Repository; + +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +@Repository +public class WeatherDaoImpl implements WeatherDao { + + @Autowired + private JdbcTemplate jdbcTemplate; + + @Override + public int add(Weather weather) { + return jdbcTemplate.update( + "insert into test.weather(ts, temperature, humidity) VALUES(?,?,?)", + weather.getTs(), weather.getTemperature(), weather.getHumidity() + ); + } + + @Override + public int[] batchInsert(List weatherList) { + return jdbcTemplate.batchUpdate("insert into test.weather(ts, temperature, humidity) values( ?, ?, ?)", new BatchPreparedStatementSetter() { + @Override + public void setValues(PreparedStatement ps, int i) throws SQLException { + ps.setTimestamp(1, weatherList.get(i).getTs()); + ps.setFloat(2, weatherList.get(i).getTemperature()); + ps.setInt(3, weatherList.get(i).getHumidity()); + } + + @Override + public int getBatchSize() { + return weatherList.size(); + } + }); + } + + @Override + 
public List queryForList(int limit, int offset) { + return jdbcTemplate.query("select * from test.weather limit ? offset ?", (rs, rowNum) -> { + Timestamp ts = rs.getTimestamp("ts"); + float temperature = rs.getFloat("temperature"); + int humidity = rs.getInt("humidity"); + return new Weather(ts, temperature, humidity); + }, limit, offset); + } + + @Override + public int count() { + return jdbcTemplate.queryForObject("select count(*) from test.weather", Integer.class); + } +} diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/domain/Weather.java b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/domain/Weather.java new file mode 100644 index 0000000000..023b301481 --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/example/jdbcTemplate/domain/Weather.java @@ -0,0 +1,54 @@ +package com.taosdata.jdbc.example.jdbcTemplate.domain; + +import java.sql.Timestamp; + +public class Weather { + + private Timestamp ts; + private float temperature; + private int humidity; + + public Weather() { + } + + public Weather(Timestamp ts, float temperature, int humidity) { + this.ts = ts; + this.temperature = temperature; + this.humidity = humidity; + } + + @Override + public String toString() { + return "Weather{" + + "ts=" + ts + + ", temperature=" + temperature + + ", humidity=" + humidity + + '}'; + } + + public Timestamp getTs() { + return ts; + } + + public void setTs(Timestamp ts) { + this.ts = ts; + } + + public float getTemperature() { + return temperature; + } + + public void setTemperature(float temperature) { + this.temperature = temperature; + } + + public int getHumidity() { + return humidity; + } + + public void setHumidity(int humidity) { + this.humidity = humidity; + } + + +} diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/main/resources/applicationContext.xml b/tests/examples/JDBC/SpringJdbcTemplate/src/main/resources/applicationContext.xml index 41128148ec..19ac433385 100644 --- a/tests/examples/JDBC/SpringJdbcTemplate/src/main/resources/applicationContext.xml +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/main/resources/applicationContext.xml @@ -5,20 +5,21 @@ xsi:schemaLocation=" http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd - http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd - " + http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd" default-autowire="byName"> - + - - + + + + diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/AppTest.java b/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/AppTest.java index d6a699598e..d0219f3db7 100644 --- a/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/AppTest.java +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/AppTest.java @@ -7,14 +7,12 @@ import org.junit.Test; /** * Unit test for simple App. 
*/ -public class AppTest -{ +public class AppTest { /** * Rigorous Test :-) */ @Test - public void shouldAnswerWithTrue() - { - assertTrue( true ); + public void shouldAnswerWithTrue() { + assertTrue(true); } } diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/example/jdbcTemplate/BatcherInsertTest.java b/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/example/jdbcTemplate/BatcherInsertTest.java new file mode 100644 index 0000000000..2f2446eb70 --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/example/jdbcTemplate/BatcherInsertTest.java @@ -0,0 +1,64 @@ +package com.taosdata.jdbc.example.jdbcTemplate; + + +import com.taosdata.jdbc.example.jdbcTemplate.dao.ExecuteAsStatement; +import com.taosdata.jdbc.example.jdbcTemplate.dao.WeatherDao; +import com.taosdata.jdbc.example.jdbcTemplate.domain.Weather; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +import static org.junit.Assert.assertEquals; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration({"classpath:applicationContext.xml"}) +public class BatcherInsertTest { + + + @Autowired + private WeatherDao weatherDao; + @Autowired + private ExecuteAsStatement executor; + + private static final int numOfRecordsPerTable = 1000; + private static long ts = 1496732686000l; + private static Random random = new Random(System.currentTimeMillis()); + + @Before + public void before() { + // drop database + executor.doExecute("drop database if exists test"); + // create database + executor.doExecute("create database if not exists test"); + //use database + executor.doExecute("use test"); + // create table + executor.doExecute("create table if not exists test.weather (ts timestamp, temperature int, humidity float)"); + } + + @Test + public void batchInsert() { + List weatherList = new ArrayList<>(); + for (int i = 0; i < numOfRecordsPerTable; i++) { + ts += 1000; + Weather weather = new Weather(new Timestamp(ts), random.nextFloat() * 50.0f, random.nextInt(100)); + weatherList.add(weather); + } + long start = System.currentTimeMillis(); + weatherDao.batchInsert(weatherList); + long end = System.currentTimeMillis(); + System.out.println("batch insert(" + numOfRecordsPerTable + " rows) time cost ==========> " + (end - start) + " ms"); + + int count = weatherDao.count(); + assertEquals(count, numOfRecordsPerTable); + } + +} diff --git a/tests/examples/JDBC/springbootdemo/pom.xml b/tests/examples/JDBC/springbootdemo/pom.xml index 5f31d36d6e..881ea0d6bf 100644 --- a/tests/examples/JDBC/springbootdemo/pom.xml +++ b/tests/examples/JDBC/springbootdemo/pom.xml @@ -63,7 +63,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.2 + 2.0.4 @@ -76,6 +76,24 @@ + + + src/main/resources + + **/*.properties + **/*.xml + + true + + + src/main/java + + **/*.properties + **/*.xml + + + + org.springframework.boot diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/RainStationController.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/RainStationController.java new file mode 100644 index 0000000000..844ac21bb8 --- /dev/null +++ 
b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/RainStationController.java @@ -0,0 +1,28 @@ +package com.taosdata.jdbc.springbootdemo.controller; + + +import com.taosdata.jdbc.springbootdemo.domain.Rainfall; +import com.taosdata.jdbc.springbootdemo.service.RainStationService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.*; + +@RestController +@RequestMapping("/rainstation") +public class RainStationController { + + @Autowired + private RainStationService service; + + @GetMapping("/init") + public boolean init() { + service.init(); + service.createTable(); + return true; + } + + @PostMapping("/insert") + public int insert(@RequestBody Rainfall rainfall){ + return service.insert(rainfall); + } + +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java index 9123abd97b..56a58fbb4d 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java @@ -16,43 +16,47 @@ public class WeatherController { /** * create database and table + * * @return */ @GetMapping("/init") - public boolean init(){ + public boolean init() { return weatherService.init(); } /** * Pagination Query + * * @param limit * @param offset * @return */ @GetMapping("/{limit}/{offset}") - public List queryWeather(@PathVariable Long limit, @PathVariable Long offset){ + public List queryWeather(@PathVariable Long limit, @PathVariable Long offset) { return weatherService.query(limit, offset); } /** * upload single weather info + * * @param temperature * @param humidity * @return */ @PostMapping("/{temperature}/{humidity}") - public int saveWeather(@PathVariable int temperature, @PathVariable float humidity){ + public int saveWeather(@PathVariable int temperature, @PathVariable float humidity) { return weatherService.save(temperature, humidity); } /** * upload multi weather info + * * @param weatherList * @return */ @PostMapping("/batch") - public int batchSaveWeather(@RequestBody List weatherList){ + public int batchSaveWeather(@RequestBody List weatherList) { return weatherService.save(weatherList); } diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/DatabaseMapper.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/DatabaseMapper.java new file mode 100644 index 0000000000..a9266acb30 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/DatabaseMapper.java @@ -0,0 +1,15 @@ +package com.taosdata.jdbc.springbootdemo.dao; + +import java.util.Map; + +public interface DatabaseMapper { + + int createDatabase(String dbname); + + int dropDatabase(String dbname); + + int creatDatabaseWithParameters(Map map); + + int useDatabase(String dbname); + +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/DatabaseMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/DatabaseMapper.xml new file mode 100644 index 0000000000..329f75b582 --- /dev/null +++ 
b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/DatabaseMapper.xml @@ -0,0 +1,44 @@ + + + + + + + create database if not exists ${dbname} + + + + DROP database if exists ${dbname} + + + + + CREATE database if not EXISTS ${dbname} + + KEEP ${keep} + + + DAYS ${days} + + + REPLICA ${replica} + + + cache ${cache} + + + blocks ${blocks} + + + minrows ${minrows} + + + maxrows ${maxrows} + + + + + use ${dbname} + + + \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/RainfallMapper.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/RainfallMapper.java new file mode 100644 index 0000000000..f0efbf40ba --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/RainfallMapper.java @@ -0,0 +1,9 @@ +package com.taosdata.jdbc.springbootdemo.dao; + +import java.util.Map; + +public interface RainfallMapper { + + + int save(Map map); +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/RainfallMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/RainfallMapper.xml new file mode 100644 index 0000000000..319b4f3974 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/RainfallMapper.xml @@ -0,0 +1,11 @@ + + + + + + + INSERT INTO ${table} using ${dbname}.${stable} tags(#{values.station_code}, #{values.station_name}) (ts, name, code, rainfall) values (#{values.ts}, #{values.name}, #{values.code}, #{values.rainfall}) + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/TableMapper.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/TableMapper.java new file mode 100644 index 0000000000..7601bf974c --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/TableMapper.java @@ -0,0 +1,8 @@ +package com.taosdata.jdbc.springbootdemo.dao; + +import com.taosdata.jdbc.springbootdemo.domain.TableMetadata; + +public interface TableMapper { + + boolean createSTable(TableMetadata tableMetadata); +} \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/TableMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/TableMapper.xml new file mode 100644 index 0000000000..5a272eadb4 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/TableMapper.xml @@ -0,0 +1,21 @@ + + + + + + + create table if not exists ${dbname}.${tablename} + + ${field.name} ${field.type} + + TAGS + + ${tag.name} ${tag.type} + + + + + drop ${tablename} + + + \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/resources/mapper/WeatherMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/WeatherMapper.xml similarity index 100% rename from tests/examples/JDBC/springbootdemo/src/main/resources/mapper/WeatherMapper.xml rename to tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/WeatherMapper.xml diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/FieldMetadata.java 
b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/FieldMetadata.java new file mode 100644 index 0000000000..619b5a303d --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/FieldMetadata.java @@ -0,0 +1,28 @@ +package com.taosdata.jdbc.springbootdemo.domain; + +public class FieldMetadata { + + private String name; + private String type; + + public FieldMetadata(String name, String type) { + this.name = name; + this.type = type; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Rainfall.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Rainfall.java new file mode 100644 index 0000000000..93e199d7e6 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Rainfall.java @@ -0,0 +1,64 @@ +package com.taosdata.jdbc.springbootdemo.domain; + +import com.fasterxml.jackson.annotation.JsonFormat; + +import java.sql.Timestamp; + +public class Rainfall { + + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8") + private Timestamp ts; + private String name; + private String code; + private float rainfall; + private String station_code; + private String station_name; + + public Timestamp getTs() { + return ts; + } + + public void setTs(Timestamp ts) { + this.ts = ts; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getCode() { + return code; + } + + public void setCode(String code) { + this.code = code; + } + + public float getRainfall() { + return rainfall; + } + + public void setRainfall(float rainfall) { + this.rainfall = rainfall; + } + + public String getStation_code() { + return station_code; + } + + public void setStation_code(String station_code) { + this.station_code = station_code; + } + + public String getStation_name() { + return station_name; + } + + public void setStation_name(String station_name) { + this.station_name = station_name; + } +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/TableMetadata.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/TableMetadata.java new file mode 100644 index 0000000000..74bb434f08 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/TableMetadata.java @@ -0,0 +1,43 @@ +package com.taosdata.jdbc.springbootdemo.domain; + +import java.util.List; + +public class TableMetadata { + + private String dbname; + private String tablename; + private List fields; + private List tags; + + public String getDbname() { + return dbname; + } + + public void setDbname(String dbname) { + this.dbname = dbname; + } + + public String getTablename() { + return tablename; + } + + public void setTablename(String tablename) { + this.tablename = tablename; + } + + public List getFields() { + return fields; + } + + public void setFields(List fields) { + this.fields = fields; + } + + public List getTags() { + return tags; + } + + public void setTags(List tags) { + this.tags = tags; + } +} diff --git 
a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/TagMetadata.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/TagMetadata.java new file mode 100644 index 0000000000..755ecc0075 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/TagMetadata.java @@ -0,0 +1,27 @@ +package com.taosdata.jdbc.springbootdemo.domain; + +public class TagMetadata { + private String name; + private String type; + + public TagMetadata(String name, String type) { + this.name = name; + this.type = type; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java index 9547a8a89b..cd7de447ea 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java @@ -1,9 +1,12 @@ package com.taosdata.jdbc.springbootdemo.domain; +import com.fasterxml.jackson.annotation.JsonFormat; + import java.sql.Timestamp; public class Weather { + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8") private Timestamp ts; private int temperature; diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/RainStationService.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/RainStationService.java new file mode 100644 index 0000000000..3ea63c1760 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/RainStationService.java @@ -0,0 +1,72 @@ +package com.taosdata.jdbc.springbootdemo.service; + +import com.taosdata.jdbc.springbootdemo.dao.DatabaseMapper; +import com.taosdata.jdbc.springbootdemo.dao.RainfallMapper; +import com.taosdata.jdbc.springbootdemo.dao.TableMapper; +import com.taosdata.jdbc.springbootdemo.domain.FieldMetadata; +import com.taosdata.jdbc.springbootdemo.domain.Rainfall; +import com.taosdata.jdbc.springbootdemo.domain.TableMetadata; +import com.taosdata.jdbc.springbootdemo.domain.TagMetadata; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +@Service +public class RainStationService { + + @Autowired + private DatabaseMapper databaseMapper; + @Autowired + private TableMapper tableMapper; + @Autowired + private RainfallMapper rainfallMapper; + + public boolean init() { + databaseMapper.dropDatabase("rainstation"); + + Map map = new HashMap<>(); + map.put("dbname", "rainstation"); + map.put("keep", "36500"); + map.put("days", "30"); + map.put("blocks", "4"); + databaseMapper.creatDatabaseWithParameters(map); + + databaseMapper.useDatabase("rainstation"); + return true; + } + + public boolean createTable() { + TableMetadata tableMetadata = new TableMetadata(); + tableMetadata.setDbname("rainstation"); + tableMetadata.setTablename("monitoring"); + + List fields = new ArrayList<>(); + fields.add(new FieldMetadata("ts", 
"timestamp")); + fields.add(new FieldMetadata("name", "NCHAR(10)")); + fields.add(new FieldMetadata("code", " BINARY(8)")); + fields.add(new FieldMetadata("rainfall", "float")); + tableMetadata.setFields(fields); + + List tags = new ArrayList<>(); + tags.add(new TagMetadata("station_code", "BINARY(8)")); + tags.add(new TagMetadata("station_name", "NCHAR(10)")); + tableMetadata.setTags(tags); + + tableMapper.createSTable(tableMetadata); + return true; + } + + + public int insert(Rainfall rainfall) { + Map map = new HashMap<>(); + map.put("dbname", "rainstation"); + map.put("table", "S_53646"); + map.put("stable", "monitoring"); + map.put("values", rainfall); + return rainfallMapper.save(map); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java index 396d70bf92..b950a9a4e4 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java @@ -14,10 +14,8 @@ public class WeatherService { private WeatherMapper weatherMapper; public boolean init() { - weatherMapper.createDB(); weatherMapper.createTable(); - return true; } diff --git a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties index 926559a90c..683884fcdd 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties +++ b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties @@ -1,6 +1,6 @@ # datasource config spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver -spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/log +spring.datasource.url=jdbc:TAOS://localhost:6030/log spring.datasource.username=root spring.datasource.password=taosdata diff --git a/tests/pytest/insert/tagSepcified.py b/tests/pytest/insert/tagSepcified.py new file mode 100644 index 0000000000..213cf119a0 --- /dev/null +++ b/tests/pytest/insert/tagSepcified.py @@ -0,0 +1,49 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdSql.execute('create table xcxlog (ts timestamp, user_id int, path BINARY(40),scene int) tags(appid bigint, adzone_id int,ip bigint,session_id bigint)') + tdSql.error("insert into d1000004(user_id,path,scene,ts) using xcxlog tags(1000004,145,97160) values (97160,'pagex/goods/taoke',1086,now)") + tdSql.execute("insert into d1000004_145(user_id,path,scene,ts) using xcxlog(appid,adzone_id,session_id,ip) tags(1000004,145,97160,1717171445) values (97160,'pagex/goods/taoke',1086,now)") + + tdSql.query("show tables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'd1000004_145') + + tdSql.query("select * from xcxlog") + tdSql.checkRows(1) + tdSql.checkData(0, 4, 1000004) + tdSql.checkData(0, 5, 145) + tdSql.checkData(0, 6, 1717171445) + tdSql.checkData(0, 7, 97160) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryNullValueTest.py b/tests/pytest/query/queryNullValueTest.py index 2ad1979e0b..f521f2e5e9 100644 --- a/tests/pytest/query/queryNullValueTest.py +++ b/tests/pytest/query/queryNullValueTest.py @@ -25,7 +25,7 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - self.numOfRecords = 10 + self.types = ["tinyint", "smallint", "int", "bigint", "float", "double", "bool", "binary(10)", "nchar(10)"] self.ts = 1537146000000 def checkNullValue(self, result): @@ -38,139 +38,41 @@ class TDTestCase: return False return True - def restartTaosd(self): - tdDnodes.stop(1) - tdDnodes.start(1) - tdSql.execute("use db") - def run(self): tdSql.prepare() - - print("==============step1") - - tdSql.execute( - "create table meters (ts timestamp, col1 int) tags(tgcol1 int)") - tdSql.execute("create table t0 using meters tags(NULL)") - - for i in range (self.numOfRecords): - tdSql.execute("insert into t0 values (%d, %d)" % (self.ts + i, i)); - - tdSql.query("select * from meters") - tdSql.checkRows(10) - - tdSql.execute("alter table meters add column col2 tinyint") - tdSql.execute("alter table meters drop column col1") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select col2 from meters") - tdSql.checkRows(10) - - tdSql.execute("alter table meters add column col1 int") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select col1 from meters") - tdSql.checkRows(10) - - tdSql.execute("alter table meters add column col3 smallint") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select col3 from meters") - tdSql.checkRows(10) - - tdSql.execute("alter table meters add column col4 bigint") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select col4 from meters") - tdSql.checkRows(10) - - tdSql.execute("alter table meters add column col5 float") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select col5 from meters") - 
tdSql.checkRows(10) - - tdSql.execute("alter table meters add column col6 double") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select col6 from meters") - tdSql.checkRows(10) - - tdSql.execute("alter table meters add column col7 bool") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select col7 from meters") - tdSql.checkRows(10) - - tdSql.execute("alter table meters add column col8 binary(20)") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select col8 from meters") - tdSql.checkRows(10) - - tdSql.execute("alter table meters add column col9 nchar(20)") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select col9 from meters") - tdSql.checkRows(10) - tdSql.execute("alter table meters add tag tgcol2 tinyint") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select tgcol2 from meters") - tdSql.checkRows(1) - + for i in range(len(self.types)): + print("======== checking type %s ==========" % self.types[i]) + tdSql.execute("create table t0 (ts timestamp, col %s)" % self.types[i]) + tdSql.execute("insert into t0 values (%d, NULL)" % (self.ts)) + + tdDnodes.stop(1) + tdLog.sleep(10) + tdDnodes.start(1) + tdSql.execute("use db") + tdSql.query("select * from t0") + tdSql.checkRows(1) - tdSql.execute("alter table meters add tag tgcol3 smallint") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select tgcol3 from meters") - tdSql.checkRows(1) - + if self.checkNullValue(tdSql.queryResult) is False: + tdLog.exit("no None value is detected") - tdSql.execute("alter table meters add tag tgcol4 bigint") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select tgcol4 from meters") - tdSql.checkRows(1) + tdSql.execute("create table t1 (ts timestamp, col %s)" % self.types[i]) + tdSql.execute("insert into t1 values (%d, NULL)" % (self.ts)) + tdDnodes.stop(1) + tdLog.sleep(10) + tdDnodes.start(1) + tdSql.execute("use db") - tdSql.execute("alter table meters add tag tgcol5 float") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select tgcol5 from meters") - tdSql.checkRows(1) + for j in range(150): + tdSql.execute("insert into t1 values (%d, NULL)" % (self.ts + j + 1)); + + tdSql.query("select * from t1") + tdSql.checkRows(151) - tdSql.execute("alter table meters add tag tgcol6 double") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select tgcol6 from meters") - tdSql.checkRows(1) + if self.checkNullValue(tdSql.queryResult) is False: + tdLog.exit("no None value is detected") - tdSql.execute("alter table meters add tag tgcol7 bool") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select tgcol7 from meters") - tdSql.checkRows(1) - - tdSql.execute("alter table meters add tag tgcol8 binary(20)") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select tgcol8 from meters") - tdSql.checkRows(1) - - tdSql.execute("alter table meters add tag tgcol9 nchar(20)") - tdSql.query("select * from meters") - tdSql.checkRows(10) - tdSql.query("select tgcol9 from meters") - tdSql.checkRows(1) - - self.restartTaosd() - tdSql.query("select * from meters") - tdSql.checkRows(10) - if self.checkNullValue(tdSql.queryResult) is False: - tdLog.exit("non None value is detected") - - - + print("======== None value check for type %s is OK ==========" % self.types[i]) def stop(self): tdSql.close() diff --git a/tests/script/general/compute/count.sim 
b/tests/script/general/compute/count.sim
index aef30c8b6c..c2ed5af3ca 100644
--- a/tests/script/general/compute/count.sim
+++ b/tests/script/general/compute/count.sim
@@ -52,6 +52,12 @@ if $data00 != $rowNum then
   return -1
 endi
+sql select count(1) from $tb
+print ===> select count(1) from $tb => $data00
+if $data00 != $rowNum then
+  return -1
+endi
+
 sql select count(tbcol) from $tb
 print ===> $data00
 if $data00 != $rowNum then
   return -1
 endi
@@ -102,13 +108,20 @@ if $data00 != $totalNum then
   return -1
 endi
+print =============== step8
+sql select count(1) from $mt
+print ===> $data00
+if $data00 != $totalNum then
+  return -1
+endi
+
 sql select count(tbcol) from $mt
 print ===> $data00
 if $data00 != $totalNum then
   return -1
 endi
 
-print =============== step8
+print =============== step10
 sql select count(tbcol) as c from $mt where ts < now + 4m
 print ===> $data00
 if $data00 != 50 then
@@ -171,4 +184,4 @@ if $rows != 0 then
   return -1
 endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/db/alter_option.sim b/tests/script/general/db/alter_option.sim
index 12a49c59b5..49c75966ca 100644
--- a/tests/script/general/db/alter_option.sim
+++ b/tests/script/general/db/alter_option.sim
@@ -7,11 +7,10 @@ system sh/exec.sh -n dnode1 -s start
 sleep 3000
 sql connect
 
-print ============================ dnode1 start
+print ============= create database
 sql create database db cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1
 sql show databases
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
 if $data00 != db then
   return -1
 endi
@@ -37,27 +36,240 @@ if $data09 != 4 then
   return -1
 endi
 
-print =============== step2
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-return
-sql_error alter database db cache 256
-sql_error alter database db blocks 1
-sql_error alter database db days 10
-sql_error alter database db keep 10
-sql_error alter database db minRows 350
-sql_error alter database db minRows 550
-sql_error alter database db ctime 5000
-sql_error alter database db precision "us"
-sql_error alter database db comp 3
-sql_error alter database db wal 1
+print ============== step name
+sql_error alter database db name d1
+sql_error alter database db name d2
+
+print ============== step ntables
+sql_error alter database db ntables -1
+sql_error alter database db ntables 0
+sql_error alter database db ntables 1
+sql_error alter database db ntables 10
+
+print ============== step vgroups
+sql_error alter database db vgroups -1
+sql_error alter database db vgroups 0
+sql_error alter database db vgroups 1
+sql_error alter database db vgroups 10
+
+print ============== step replica
 sql_error alter database db replica 2
+sql_error alter database db replica 3
+sql_error alter database db replica 0
+sql alter database db replica 1
+sql show databases
+print replica $data4_db
+if $data4_db != 1 then
+  return -1
+endi
+
+print ============== step quorum
+sql show databases
+print quorum $data5_db
+if $data5_db != 1 then
+  return -1
+endi
+
+sql alter database db quorum 1
+sql show databases
+print quorum $data5_db
+if $data5_db != 1 then
+  return -1
+endi
+
+sql alter database db quorum 2
+sql show databases
+print quorum $data5_db
+if $data5_db != 2 then
+  return -1
+endi
+
+sql alter database db quorum 3
+sql show databases
+print quorum $data5_db
+if $data5_db != 3 then
+  return -1
+endi
+
+sql alter database db quorum 3
+sql alter database db quorum 2
+sql alter database db quorum 1
+sql_error alter database db quorum 0
+sql_error alter database db quorum 4
+sql_error alter database db quorum 5
+sql_error alter database db quorum -1
+
+print ============== step days
+sql_error alter database db days 0
+sql_error alter database db days 1
+sql_error alter database db days 2
+sql_error alter database db days 10
+sql_error alter database db days 50
+sql_error alter database db days 100
+
+print ============== step keep
+sql show databases
+print keep $data7_db
+if $data7_db != 20,20,20 then
+  return -1
+endi
+
+sql alter database db keep 10
+sql show databases
+print keep $data7_db
+if $data7_db != 20,20,10 then
+  return -1
+endi
+
+sql alter database db keep 20
+sql show databases
+print keep $data7_db
+if $data7_db != 20,20,20 then
+  return -1
+endi
 
-print ============== step3
-sql alter database db comp 1
-sql alter database db blocks 40
 sql alter database db keep 30
+sql show databases
+print keep $data7_db
+if $data7_db != 20,20,30 then
+  return -1
+endi
+
+sql alter database db keep 40
+sql alter database db keep 30
+sql alter database db keep 20
+sql alter database db keep 10
+sql_error alter database db keep 9
+sql_error alter database db keep 1
+sql alter database db keep 0
+sql alter database db keep -1
+sql_error alter database db keep 365001
+
+print ============== step cache
+sql_error alter database db cache 60
+sql_error alter database db cache 50
+sql_error alter database db cache 20
+sql_error alter database db cache 3
+sql_error alter database db cache 129
+sql_error alter database db cache 300
+sql_error alter database db cache 0
+sql_error alter database db cache -1
+
+print ============== step blocks
+sql show databases
+print blocks $data9_db
+if $data9_db != 4 then
+  return -1
+endi
+
+sql alter database db blocks 10
+sql show databases
+print blocks $data9_db
+if $data9_db != 10 then
+  return -1
+endi
+
+sql alter database db blocks 20
+sql show databases
+print blocks $data9_db
+if $data9_db != 20 then
+  return -1
+endi
+
+sql alter database db blocks 30
+sql show databases
+print blocks $data9_db
+if $data9_db != 30 then
+  return -1
+endi
+
+sql alter database db blocks 40
+sql alter database db blocks 30
+sql alter database db blocks 20
+sql alter database db blocks 10
+sql_error alter database db blocks 2
+sql_error alter database db blocks 1
+sql alter database db blocks 0
+sql_error alter database db blocks -1
+sql_error alter database db blocks 10001
+
+print ============== step minrows
+sql_error alter database db minrows 1
+sql_error alter database db minrows 100
+sql_error alter database db minrows 1000
+
+print ============== step maxrows
+sql_error alter database db maxrows 1
+sql_error alter database db maxrows 100
+sql_error alter database db maxrows 1000
+
+print ============== step wallevel
+sql show databases
+print wallevel $data12_db
+if $data12_db != 1 then
+  return -1
+endi
+
+sql alter database db wal 1
+sql show databases
+print wal $data12_db
+if $data12_db != 1 then
+  return -1
+endi
+
+sql_error alter database db wal 2
+sql_error alter database db wal 0
+sql_error alter database db wal 3
+sql_error alter database db wal 4
+sql_error alter database db wal -1
+sql_error alter database db wal 1000
+
+print ============== step fsync
+sql_error alter database db fsync 2
+sql_error alter database db fsync 3
+sql_error alter database db fsync 4
+sql_error alter database db fsync -1
+sql_error alter database db fsync 1000
+
+print ============== step comp
+sql show databases
+print comp $data14_db
+if $data14_db != 2 then
+  return -1
+endi
+
+sql alter database db comp 1
+sql show databases
+print comp $data14_db
+if $data14_db != 1 then
+  return -1
+endi
+
+sql alter database db comp 2
+sql show databases
+print comp $data14_db
+if $data14_db != 2 then
+  return -1
+endi
+
+sql alter database db comp 0
+sql show databases
+print comp $data14_db
+if $data14_db != 0 then
+  return -1
+endi
+
+sql_error alter database db comp 3
+sql_error alter database db comp 4
+sql_error alter database db comp 5
+sql_error alter database db comp -1
+print ============== step precision
+sql_error alter database db prec 'us'
-#system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+print ============== step status
+sql_error alter database db status 'delete'
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 88a844333b..063d11bd9d 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -253,6 +253,7 @@ cd ../../../debug; make
 ./test.sh -f unique/cluster/balance2.sim
 ./test.sh -f unique/cluster/balance3.sim
 ./test.sh -f unique/cluster/cache.sim
+./test.sh -f unique/cluster/vgroup100.sim
 
 ./test.sh -f unique/column/replica3.sim
diff --git a/tests/script/unique/account/paras.sim b/tests/script/unique/account/paras.sim
index 60c08b21e9..ae511fe2b0 100644
--- a/tests/script/unique/account/paras.sim
+++ b/tests/script/unique/account/paras.sim
@@ -14,10 +14,10 @@ print $data00 $data01 $data02 $data03 $data04 $data05 $data06
 if $data00 != root then
   return -1
 endi
-if $data02 != 3/10 then
+if $data02 != 3/128 then
   return -1
 endi
-if $data03 != 0/64 then
+if $data03 != 0/128 then
   return -1
 endi
 if $data04 != 0/2147483647 then
diff --git a/tests/script/unique/arbitrator/dn3_mn1_r2_vnode_delDir.sim b/tests/script/unique/arbitrator/dn3_mn1_r2_vnode_delDir.sim
index b625619678..aaf0da8553 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_r2_vnode_delDir.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_r2_vnode_delDir.sim
@@ -53,7 +53,7 @@ system sh/exec.sh -n dnode2 -s start
 system sh/exec.sh -n dnode3 -s start
 sql create dnode $hostname2
 sql create dnode $hostname3
-sleep 3000
+sleep 5000
 
 $sleepTimer = 3000
@@ -225,6 +225,7 @@ if $data00 != $totalRows then
 endi
 
 print ============== step5: stop dnode2, and remove its vnode
+sleep 5000
 system sh/exec.sh -n dnode2 -s stop -x SIGINT
 sleep $sleepTimer
diff --git a/tests/script/unique/arbitrator/replica_changeWithArbitrator.sim b/tests/script/unique/arbitrator/replica_changeWithArbitrator.sim
index 10964b9209..3715be5fa9 100644
--- a/tests/script/unique/arbitrator/replica_changeWithArbitrator.sim
+++ b/tests/script/unique/arbitrator/replica_changeWithArbitrator.sim
@@ -193,6 +193,7 @@ if $data00 != $totalRows then
 endi
 
 print ============== step5: stop dnode1
+sleep 5000
 system sh/exec.sh -n dnode1 -s stop
 sleep 3000
diff --git a/tests/script/unique/cluster/balance3.sim b/tests/script/unique/cluster/balance3.sim
index cd669b69b6..e3b8125d8c 100644
--- a/tests/script/unique/cluster/balance3.sim
+++ b/tests/script/unique/cluster/balance3.sim
@@ -105,6 +105,15 @@ if $dnode4Vnodes != null then
   goto show1
 endi
 
+sql show mnodes
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+print dnode3 ==> $data2_3
+print dnode4 ==> $data2_4
+print dnode5 ==> $data2_5
+print dnode6 ==> $data2_6
+print dnode7 ==> $data2_7
+
 print ============================== step2
 print ========= start dnode4
 sql create dnode $hostname4
@@ -132,6 +141,15 @@ if $dnode4Vnodes != 2 then
   goto show2
 endi
 
+sql show mnodes
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+print dnode3 ==> $data2_3
+print dnode4 ==> $data2_4
+print dnode5 ==> $data2_5
+print dnode6 ==> $data2_6
+print dnode7 ==> $data2_7
+
 print ============================== step3
 print ========= drop dnode2
 sql drop dnode $hostname2
@@ -167,6 +185,15 @@ if $dnode4Vnodes != 3 then
   goto show3
 endi
 
+sql show mnodes
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+print dnode3 ==> $data2_3
+print dnode4 ==> $data2_4
+print dnode5 ==> $data2_5
+print dnode6 ==> $data2_6
+print dnode7 ==> $data2_7
+
 system sh/exec.sh -n dnode2 -s stop -x SIGINT
 
 print ============================== step4
@@ -195,6 +222,15 @@ if $dnode5Vnodes != 2 then
   goto show4
 endi
 
+sql show mnodes
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+print dnode3 ==> $data2_3
+print dnode4 ==> $data2_4
+print dnode5 ==> $data2_5
+print dnode6 ==> $data2_6
+print dnode7 ==> $data2_7
+
 print ============================== step5
 print ========= drop dnode3
 sql drop dnode $hostname3
@@ -232,6 +268,15 @@ endi
 
 system sh/exec.sh -n dnode3 -s stop -x SIGINT
 
+sql show mnodes
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+print dnode3 ==> $data2_3
+print dnode4 ==> $data2_4
+print dnode5 ==> $data2_5
+print dnode6 ==> $data2_6
+print dnode7 ==> $data2_7
+
 print ============================== step6
 sql create dnode $hostname6
 system sh/exec.sh -n dnode6 -s start
@@ -258,6 +303,15 @@ if $dnode6Vnodes != 2 then
   goto show6
 endi
 
+sql show mnodes
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+print dnode3 ==> $data2_3
+print dnode4 ==> $data2_4
+print dnode5 ==> $data2_5
+print dnode6 ==> $data2_6
+print dnode7 ==> $data2_7
+
 print ============================== step7
 print ========= drop dnode4
 sql drop dnode $hostname4
@@ -294,6 +348,14 @@ if $dnode4Vnodes != null then
 endi
 system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sql show mnodes
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+print dnode3 ==> $data2_3
+print dnode4 ==> $data2_4
+print dnode5 ==> $data2_5
+print dnode6 ==> $data2_6
+print dnode7 ==> $data2_7
 
 print ============================== step8
 sql create dnode $hostname7
@@ -321,6 +383,15 @@ if $dnode7Vnodes != 2 then
   goto show8
 endi
 
+sql show mnodes
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+print dnode3 ==> $data2_3
+print dnode4 ==> $data2_4
+print dnode5 ==> $data2_5
+print dnode6 ==> $data2_6
+print dnode7 ==> $data2_7
+
 print ============================== step9
 print ========= drop dnode1
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -335,15 +406,20 @@ sql show mnodes
 $dnode1Role = $data2_1
 $dnode4Role = $data2_4
 $dnode5Role = $data2_5
-print dnode1 ==> $dnode1Role
-print dnode4 ==> $dnode4Role
-print dnode5 ==> $dnode5Role
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+print dnode3 ==> $data2_3
+print dnode4 ==> $data2_4
+print dnode5 ==> $data2_5
+print dnode6 ==> $data2_6
+print dnode7 ==> $data2_7
 
 if $dnode1Role != offline then
   return -1
 endi
 
 print ============================== step9.1
+sleep 2000
 system sh/exec.sh -n dnode1 -s start
 
 $x = 0
@@ -353,6 +429,19 @@ show9:
   $x = $x + 1
   sleep 2000
   if $x == 20 then
     return -1
   endi
+
+sql show mnodes
+$dnode1Role = $data2_1
+$dnode4Role = $data2_4
+$dnode5Role = $data2_5
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+print dnode3 ==> $data2_3
+print dnode4 ==> $data2_4
+print dnode5 ==> $data2_5
+print dnode6 ==> $data2_6
+print dnode7 ==> $data2_7
+
 sql show dnodes -x show9
 $dnode5Vnodes = $data2_5
 print dnode5 $dnode5Vnodes
@@ -374,6 +463,15 @@ endi
 
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
 sleep 5000
 
+sql show mnodes
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+print dnode3 ==> $data2_3
+print dnode4 ==> $data2_4
+print dnode5 ==> $data2_5
+print dnode6 ==> $data2_6
+print dnode7 ==> $data2_7
+
 print ============================== step11
 print ========= add db4
diff --git a/tests/script/unique/cluster/vgroup100.sim b/tests/script/unique/cluster/vgroup100.sim
new file mode 100644
index 0000000000..cddb38cefd
--- /dev/null
+++ b/tests/script/unique/cluster/vgroup100.sim
@@ -0,0 +1,127 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
+
+system sh/cfg.sh -n dnode1 -c maxTables -v 4
+system sh/cfg.sh -n dnode2 -c maxTables -v 4
+system sh/cfg.sh -n dnode3 -c maxTables -v 4
+
+system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
+system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
+system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0
+
+print ============================== step1
+system sh/exec.sh -n dnode1 -s start
+sleep 2000
+sql connect
+
+print ============================== step2
+print ========= start dnode2
+sql create dnode $hostname2
+system sh/exec.sh -n dnode2 -s start
+sql create dnode $hostname3
+system sh/exec.sh -n dnode3 -s start
+
+sleep 5000
+sql show mnodes
+$dnode1Role = $data2_1
+$dnode2Role = $data2_2
+$dnode3Role = $data2_3
+
+print $dnode1Role
+print $dnode2Role
+print $dnode3Role
+
+print ============================== step3
+$count = 2
+while $count < 102
+  $db = d . $count
+  $tb = $db . .t
+  sql create database $db replica 3 cache 1 blocks 3
+  sql create table $tb (ts timestamp, i int)
+  sql insert into $tb values(now, 1)
+  $count = $count + 1
+  print insert into $tb values(now, 1) ==> finished
+endw
+
+print ============================== step4
+
+$count = 2
+while $count < 102
+  $db = d . $count
+  $tb = $db . .t
+  sql select * from $tb
+  if $rows != 1 then
+    print select * from $tb
+    return -1
+  endi
+  $count = $count + 1
+  print select * from $tb ==> rows: $rows
+endw
+
+print ============================== step5
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+sleep 5000
+
+print ============================== step6
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+sleep 3000
+
+print ============================== step7
+
+$x = 0
+show7:
+  $x = $x + 1
+  sleep 2000
+  if $x == 50 then
+    return -1
+  endi
+
+sql show mnodes -x show7
+$dnode1Role = $data2_1
+$dnode2Role = $data2_2
+$dnode3Role = $data2_3
+if $dnode1Role != master then
+  goto show7
+endi
+if $dnode2Role != slave then
+  goto show7
+endi
+if $dnode3Role != slave then
+  goto show7
+endi
+
+print ============================== step8
+$x = 0
+show8:
+  $x = $x + 1
+  sleep 2000
+  if $x == 20 then
+    return -1
+  endi
+
+$count = 2
+while $count < 102
+  $db = d . $count
+  $tb = $db . .t
+  sql select * from $tb
+  if $rows != 1 then
+    print select * from $tb
+    goto show8
+  endi
+  $count = $count + 1
+  print select * from $tb ==> rows: $rows
+endw
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/db/delete.sim b/tests/script/unique/db/delete.sim
index f89588a8ac..fee47caadb 100644
--- a/tests/script/unique/db/delete.sim
+++ b/tests/script/unique/db/delete.sim
@@ -21,9 +21,10 @@ system sh/exec.sh -n dnode1 -s start
 sleep 3000
 sql connect
 sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
 sql create dnode $hostname3
+system sh/exec.sh -n dnode2 -s start
 system sh/exec.sh -n dnode3 -s start
+sleep 3000
 
 print ======== step1
 sql create database db replica 3 blocks 3
diff --git a/tests/script/unique/dnode/alternativeRole.sim b/tests/script/unique/dnode/alternativeRole.sim
index ab37c1603a..b5d861c44f 100644
--- a/tests/script/unique/dnode/alternativeRole.sim
+++ b/tests/script/unique/dnode/alternativeRole.sim
@@ -18,13 +18,14 @@ system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
 
 print ========== step1
 system sh/exec.sh -n dnode1 -s start
+sleep 3000
 sql connect
+
 sql create dnode $hostname2
 system sh/exec.sh -n dnode2 -s start
-sleep 3000
 sql create dnode $hostname3
 system sh/exec.sh -n dnode3 -s start
-sleep 3000
+sleep 5000
 
 sql show dnodes
 print dnode1 $data5_1
diff --git a/tests/script/unique/import/replica3.sim b/tests/script/unique/import/replica3.sim
index b4e4a2191e..3a9f03a7ea 100644
--- a/tests/script/unique/import/replica3.sim
+++ b/tests/script/unique/import/replica3.sim
@@ -31,8 +31,8 @@ sleep 3000
 sql connect
 
 sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
 sql create dnode $hostname3
+system sh/exec.sh -n dnode2 -s start
 system sh/exec.sh -n dnode3 -s start
 
diff --git a/tests/script/unique/stable/dnode3.sim b/tests/script/unique/stable/dnode3.sim
index 5fe37faa71..436ae73595 100644
--- a/tests/script/unique/stable/dnode3.sim
+++ b/tests/script/unique/stable/dnode3.sim
@@ -13,8 +13,8 @@ system sh/exec.sh -n dnode1 -s start
 sql connect
 
 sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
 sql create dnode $hostname3
+system sh/exec.sh -n dnode2 -s start
 system sh/exec.sh -n dnode3 -s start
 
 $x = 0
diff --git a/tests/script/unique/stream/metrics_replica2_dnode3.sim b/tests/script/unique/stream/metrics_replica2_dnode3.sim
index 3f8ae46e6b..981f5e9b70 100644
--- a/tests/script/unique/stream/metrics_replica2_dnode3.sim
+++ b/tests/script/unique/stream/metrics_replica2_dnode3.sim
@@ -17,8 +17,8 @@ system sh/exec.sh -n dnode1 -s start
 sql connect
 
 sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
 sql create dnode $hostname3
+system sh/exec.sh -n dnode2 -s start
 system sh/exec.sh -n dnode3 -s start
 $x = 0
 createDnode:
diff --git a/tests/script/unique/stream/metrics_replica3_dnode4.sim b/tests/script/unique/stream/metrics_replica3_dnode4.sim
index 70cd371e2c..902e9db16b 100644
--- a/tests/script/unique/stream/metrics_replica3_dnode4.sim
+++ b/tests/script/unique/stream/metrics_replica3_dnode4.sim
@@ -21,10 +21,10 @@ system sh/exec.sh -n dnode1 -s start
 sql connect
 
 sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
 sql create dnode $hostname3
-system sh/exec.sh -n dnode3 -s start
 sql create dnode $hostname4
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
 system sh/exec.sh -n dnode4 -s start
 
 $x = 0
diff --git a/tests/script/unique/stream/table_replica2_dnode3.sim b/tests/script/unique/stream/table_replica2_dnode3.sim
index 9c65a427c7..10d9feec53 100644
--- a/tests/script/unique/stream/table_replica2_dnode3.sim
+++ b/tests/script/unique/stream/table_replica2_dnode3.sim
@@ -18,8 +18,8 @@ system sh/exec.sh -n dnode1 -s start
 sql connect
 
 sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
 sql create dnode $hostname3
+system sh/exec.sh -n dnode2 -s start
 system sh/exec.sh -n dnode3 -s start
 $x = 0
 createDnode:
diff --git a/tests/script/unique/stream/table_replica3_dnode4.sim b/tests/script/unique/stream/table_replica3_dnode4.sim
index 0affffe92c..3b9552084b 100644
--- a/tests/script/unique/stream/table_replica3_dnode4.sim
+++ b/tests/script/unique/stream/table_replica3_dnode4.sim
@@ -21,10 +21,10 @@ system sh/exec.sh -n dnode1 -s start
 sql connect
 
 sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
 sql create dnode $hostname3
-system sh/exec.sh -n dnode3 -s start
 sql create dnode $hostname4
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
 system sh/exec.sh -n dnode4 -s start
 
 $x = 0
 createDnode:
diff --git a/tests/script/unique/vnode/replica2_basic2.sim b/tests/script/unique/vnode/replica2_basic2.sim
index bf540c11fd..6976353f3e 100644
--- a/tests/script/unique/vnode/replica2_basic2.sim
+++ b/tests/script/unique/vnode/replica2_basic2.sim
@@ -25,8 +25,8 @@ system sh/exec.sh -n dnode1 -s start
 sleep 3000
 sql connect
 sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
 sql create dnode $hostname3
+system sh/exec.sh -n dnode2 -s start
 system sh/exec.sh -n dnode3 -s start
 sleep 3000
 
diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c
index 0529808b6b..adc2fd0b9d 100644
--- a/tests/tsim/src/simExe.c
+++ b/tests/tsim/src/simExe.c
@@ -667,7 +667,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
 
   TAOS_RES* pSql = NULL;
 
-  for (int attempt = 0; attempt < 3; ++attempt) {
+  for (int attempt = 0; attempt < 10; ++attempt) {
     simLogSql(rest, false);
     pSql = taos_query(script->taos, rest);
     ret = taos_errno(pSql);