diff --git a/CMakeLists.txt b/CMakeLists.txt index 946ceb95ab..588526c286 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,6 +13,8 @@ ENDIF () SET(TD_ACCOUNT FALSE) SET(TD_ADMIN FALSE) SET(TD_GRANT FALSE) +SET(TD_SYNC TRUE) +SET(TD_MQTT TRUE) SET(TD_TSDB_PLUGINS FALSE) SET(TD_COVER FALSE) diff --git a/README.md b/README.md index 3fbd166f49..22984d8cfe 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201) [![Docker Pulls](https://img.shields.io/docker/pulls/tdengine/tdengine)](https://hub.docker.com/repository/docker/tdengine/tdengine) +[![tdengine](https://snapcraft.io//tdengine/badge.svg)](https://snapcraft.io/tdengine) [![TDengine](TDenginelogo.png)](https://www.taosdata.com) diff --git a/alert/app/rule.go b/alert/app/rule.go index d1ab9776fe..44596ca26d 100644 --- a/alert/app/rule.go +++ b/alert/app/rule.go @@ -121,7 +121,21 @@ func (alert *Alert) refresh(rule *Rule, values map[string]interface{}) { alert.Values = values res := rule.Expr.Eval(func(key string) interface{} { // ToLower is required as column name in result is in lower case - return alert.Values[strings.ToLower(key)] + i := alert.Values[strings.ToLower(key)] + switch v := i.(type) { + case int8: + return int64(v) + case int16: + return int64(v) + case int: + return int64(v) + case int32: + return int64(v) + case float32: + return float64(v) + default: + return v + } }) val, ok := res.(bool) diff --git a/cmake/define.inc b/cmake/define.inc index 6dd604057c..28770be254 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -13,6 +13,14 @@ IF (TD_GRANT) ADD_DEFINITIONS(-D_GRANT) ENDIF () +IF (TD_SYNC) + ADD_DEFINITIONS(-D_SYNC) +ENDIF () + +IF (TD_MQTT) + ADD_DEFINITIONS(-D_MQTT) +ENDIF () + IF (TD_TSDB_PLUGINS) ADD_DEFINITIONS(-D_TSDB_PLUGINS) ENDIF () diff --git a/cmake/input.inc b/cmake/input.inc index f90b10a087..1ef2045f57 100755 --- a/cmake/input.inc +++ b/cmake/input.inc @@ -42,6 +42,16 @@ IF (${MEM_CHECK} MATCHES "true") MESSAGE(STATUS "build with memory check") ENDIF () +IF (${MQTT} MATCHES "false") + SET(TD_MQTT FALSE) + MESSAGE(STATUS "build without mqtt module") +ENDIF () + +IF (${SYNC} MATCHES "false") + SET(TD_SYNC FALSE) + MESSAGE(STATUS "build without sync module") +ENDIF () + IF (${RANDOM_FILE_FAIL} MATCHES "true") SET(TD_RANDOM_FILE_FAIL TRUE) MESSAGE(STATUS "build with random-file-fail enabled") diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt index 64930b29d2..8c6c73c440 100644 --- a/deps/CMakeLists.txt +++ b/deps/CMakeLists.txt @@ -10,6 +10,6 @@ ADD_SUBDIRECTORY(cJson) ADD_SUBDIRECTORY(wepoll) ADD_SUBDIRECTORY(MsvcLibX) -IF (TD_LINUX) +IF (TD_LINUX AND TD_MQTT) ADD_SUBDIRECTORY(MQTT-C) ENDIF () \ No newline at end of file diff --git a/deps/MQTT-C/CMakeLists.txt b/deps/MQTT-C/CMakeLists.txt index ea5d4238d5..15b3552521 100644 --- a/deps/MQTT-C/CMakeLists.txt +++ b/deps/MQTT-C/CMakeLists.txt @@ -1,5 +1,4 @@ -cmake_minimum_required(VERSION 3.5) -project(MQTT-C VERSION 1.1.2 LANGUAGES C) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) # MQTT-C build options option(MQTT_C_OpenSSL_SUPPORT "Build MQTT-C with OpenSSL support?" 
OFF) diff --git a/documentation/webdocs/markdowndocs/administrator-ch.md b/documentation/webdocs/markdowndocs/administrator-ch.md index 45a3501a0f..44b3ad4671 100644 --- a/documentation/webdocs/markdowndocs/administrator-ch.md +++ b/documentation/webdocs/markdowndocs/administrator-ch.md @@ -128,24 +128,84 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数 - maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。 - maxVgroupsPerDb: 每个数据库中能够使用的最大vnode个数。 - arbitrator: 系统中裁决器的end point,缺省为空 -- timezone:时区。从系统中动态获取当前的时区设置。 -- locale:系统区位信息及编码格式。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 -- charset:字符集编码。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 +- timezone、locale、charset 的配置见客户端配置。 ## 客户端配置 -TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见[Shell命令行程序](#_TDengine_Shell命令行程序)。本节主要讲解taos客户端应用在配置文件taos.cfg文件中使用到的参数。 +TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。 客户端配置参数列表及解释 - firstEp: taos启动时,主动连接的集群中第一个taosd实例的end point, 缺省值为 localhost:6030。 - secondEp: taos启动时,如果first连接不上,尝试连接集群中第二个taosd实例的end point, 缺省值为空。 -- charset:字符集编码。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 -- locale:系统区位信息及编码格式。系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 -日志的配置参数,与server的配置参数完全一样。 +- locale -启动taos时,也可以从命令行指定一个taosd实例的end point,否则就从taos.cfg读取。 + > 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 + +TDengine为存储中文、日文、韩文等非ASCII编码的宽字符,提供一种专门的字段类型nchar。写入nchar字段的数据将统一采用UCS4-LE格式进行编码并发送到服务器。需要注意的是,编码正确性是客户端来保证。因此,如果用户想要正常使用nchar字段来存储诸如中文、日文、韩文等非ASCII字符,需要正确设置客户端的编码格式。 + +客户端的输入的字符均采用操作系统当前默认的编码格式,在Linux系统上多为UTF-8,部分中文系统编码则可能是GB18030或GBK等。在docker环境中默认的编码是POSIX。在中文版Windows系统中,编码则是CP936。客户端需要确保正确设置自己所使用的字符集,即客户端运行的操作系统当前编码字符集,才能保证nchar中的数据正确转换为UCS4-LE编码格式。 + +在 Linux 中 locale 的命名规则为: <语言>_<地区>.<字符集编码> 如:zh_CN.UTF-8,zh代表中文,CN代表大陆地区,UTF-8表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux系统与 Mac OSX 系统可以通过设置locale来确定系统的字符编码,由于Windows使用的locale中不是POSIX标准的locale格式,因此在Windows下需要采用另一个配置参数charset来指定字符编码。在Linux 系统中也可以使用charset来指定字符编码。 + +- charset + + > 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 + +如果配置文件中不设置charset,在Linux系统中,taos在启动时候,自动读取系统当前的locale信息,并从locale信息中解析提取charset编码格式。如果自动读取locale信息失败,则尝试读取charset配置,如果读取charset配置也失败,则中断启动过程。 + +在Linux系统中,locale信息包含了字符编码信息,因此正确设置了Linux系统locale以后可以不用再单独设置charset。例如: +``` + locale zh_CN.UTF-8 +``` +在Windows系统中,无法从locale获取系统当前编码。如果无法从配置文件中读取字符串编码信息,taos默认设置为字符编码为CP936。其等效在配置文件中添加如下配置: +``` + charset CP936 +``` +如果需要调整字符编码,请查阅当前操作系统使用的编码,并在配置文件中正确设置。 + +在Linux系统中,如果用户同时设置了locale和字符集编码charset,并且locale和charset的不一致,后设置的值将覆盖前面设置的值。 +``` + locale zh_CN.UTF-8 + charset GBK +``` +则charset的有效值是GBK。 +``` + charset GBK + locale zh_CN.UTF-8 +``` +charset的有效值是UTF-8。 + +日志的配置参数,与server 的配置参数完全一样。 + +- timezone + + 默认值:从系统中动态获取当前的时区设置 + +客户端运行系统所在的时区。为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。 + +在Linux系统中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如: +``` + timezone UTC-8 + timezone GMT-8 + timezone Asia/Shanghai +``` +均是合法的设置东八区时区的格式。 + +时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词now的解析)产生影响。例如: +``` + SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08'; +``` +在东八区,SQL语句等效于 +``` + SELECT count(*) FROM table_name WHERE TS<1554955268000; +``` +在UTC时区,SQL语句等效于 +``` + SELECT count(*) FROM table_name WHERE TS<1554984068000; +``` 
+为了避免使用字符串时间格式带来的不确定性,也可以直接使用Unix时间戳。此外,还可以在SQL语句中使用带有时区的时间戳字符串,例如:RFC3339格式的时间戳字符串,2013-04-12T15:52:01.123+08:00或者ISO-8601格式时间戳字符串2013-04-12T15:52:01.123+0800。上述两个字符串转化为Unix时间戳不受系统所在时区的影响。 ## 用户管理 diff --git a/documentation20/webdocs/markdowndocs/Documentation-ch.md b/documentation20/webdocs/markdowndocs/Documentation-ch.md index 4d593cec90..077a043138 100644 --- a/documentation20/webdocs/markdowndocs/Documentation-ch.md +++ b/documentation20/webdocs/markdowndocs/Documentation-ch.md @@ -95,6 +95,8 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 - [数据查询](https://www.taosdata.com/cn/documentation20/taos-sql/#数据查询):支持时间段、值过滤、排序、查询结果手动分页等 - [SQL函数](https://www.taosdata.com/cn/documentation20/taos-sql/#SQL函数):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等 - [时间维度聚合](https://www.taosdata.com/cn/documentation20/taos-sql/#时间维度聚合):将表中数据按照时间段进行切割后聚合,降维处理 +- [边界线制](https://www.taosdata.com/cn/documentation20/taos-sql/#TAOS-SQL-边界限制):TAOS SQL的边界限制 +- [错误码](https://www.taosdata.com/cn/documentation20/Taos-Error-Code):TDengine 2.0 错误码以及对应的十进制码 ## TDengine的技术设计 diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md index 29f6e5ee57..ae42582687 100644 --- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md @@ -86,13 +86,29 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ```mysql ALTER DATABASE db_name COMP 2; ``` - 修改数据库文件压缩标志位,有效数字为0,1,2. 0表示不压缩,1表示一阶段压缩,2表示两阶段压缩。修改后可以使用show databases命令查看是否修改成功 + COMP参数是指修改数据库文件压缩标志位,取值范围为[0, 2]. 0表示不压缩,1表示一阶段压缩,2表示两阶段压缩。 ```mysql ALTER DATABASE db_name REPLICA 2; ``` - 修改数据库副本数,有效副本数为1到3。在集群中使用,副本数必须小于dnode的数目。修改后可以使用show databases命令查看是否修改成功 + REPLICA参数是指修改数据库副本数,取值范围[1, 3]。在集群中使用,副本数必须小于dnode的数目。 + ```mysql + ALTER DATABASE db_name KEEP 365; + ``` + KEEP参数是指修改数据文件保存的天数,缺省值为3650,取值范围[days, 365000],必须大于或等于days参数值。 + + ```mysql + ALTER DATABASE db_name QUORUM 365; + ``` + QUORUM参数是指数据写入成功所需要的确认数。取值范围[1, 3]。对于异步复制,quorum设为1,具有master角色的虚拟节点自己确认即可。对于同步复制,需要至少大于等于2。原则上,Quorum >=1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。 + + ```mysql + ALTER DATABASE db_name BLOCKS 365; + ``` + BLOCKS参数是每个VNODE (TSDB) 中有多少cache大小的内存块,因此一个VNODE的用的内存大小粗略为(cache * blocks)。取值范围[3, 1000]。 + + **Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。 - **显示系统所有数据库** ```mysql diff --git a/documentation20/webdocs/markdowndocs/Taos Error Code-ch.md b/documentation20/webdocs/markdowndocs/Taos Error Code-ch.md new file mode 100644 index 0000000000..6ba0decfd3 --- /dev/null +++ b/documentation20/webdocs/markdowndocs/Taos Error Code-ch.md @@ -0,0 +1,173 @@ +# TDengine 2.0 错误码以及对应的十进制码 + + +| Code | bit | error code | 错误描述 | 十进制错误码 | +|-----------------------| :---: | :---------: | :------------------------ | ---------------- | +|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647| +|TSDB_CODE_RPC_AUTH_REQUIRED| 0 | 0x0002 | "Authentication required"| -2147483646| +|TSDB_CODE_RPC_AUTH_FAILURE| 0| 0x0003 | "Authentication failure"| -2147483645| +|TSDB_CODE_RPC_REDIRECT |0 | 0x0004| "Redirect"| -2147483644| +|TSDB_CODE_RPC_NOT_READY| 0 | 0x0005 | "System not ready"| -2147483643| +|TSDB_CODE_RPC_ALREADY_PROCESSED| 0 | 0x0006 |"Message already processed"| -2147483642| +|TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED| 0 |0x0007| "Last session not finished"| -2147483641| +|TSDB_CODE_RPC_MISMATCHED_LINK_ID| 0| 0x0008 | "Mismatched meter id"| -2147483640| +|TSDB_CODE_RPC_TOO_SLOW| 0 | 0x0009 | "Processing of request timed out"| -2147483639| +|TSDB_CODE_RPC_MAX_SESSIONS| 0 | 0x000A | "Number of sessions 
reached limit"| -2147483638| +|TSDB_CODE_RPC_NETWORK_UNAVAIL| 0 |0x000B | "Unable to establish connection" |-2147483637| +|TSDB_CODE_RPC_APP_ERROR| 0| 0x000C | "Unexpected generic error in RPC"| -2147483636| +|TSDB_CODE_RPC_UNEXPECTED_RESPONSE| 0 |0x000D | "Unexpected response"| -2147483635| +|TSDB_CODE_RPC_INVALID_VALUE| 0 | 0x000E | "Invalid value"| -2147483634| +|TSDB_CODE_RPC_INVALID_TRAN_ID| 0 | 0x000F | "Invalid transaction id"| -2147483633| +|TSDB_CODE_RPC_INVALID_SESSION_ID| 0| 0x0010 | "Invalid session id"| -2147483632| +|TSDB_CODE_RPC_INVALID_MSG_TYPE| 0| 0x0011| "Invalid message type"| -2147483631| +|TSDB_CODE_RPC_INVALID_RESPONSE_TYPE| 0 | 0x0012| "Invalid response type"| -2147483630| +|TSDB_CODE_RPC_INVALID_TIME_STAMP| 0| 0x0013| "Invalid timestamp"| -2147483629| +|TSDB_CODE_COM_OPS_NOT_SUPPORT| 0 | 0x0100| "Operation not supported"| -2147483392| +|TSDB_CODE_COM_MEMORY_CORRUPTED |0| 0x0101 | "Memory corrupted"| -2147483391| +|TSDB_CODE_COM_OUT_OF_MEMORY| 0| 0x0102| "Out of memory"| -2147483390| +|TSDB_CODE_COM_INVALID_CFG_MSG| 0 | 0x0103| "Invalid config message"| -2147483389| +|TSDB_CODE_COM_FILE_CORRUPTED| 0| 0x0104| "Data file corrupted" |-2147483388| +|TSDB_CODE_TSC_INVALID_SQL| 0| 0x0200 | "Invalid SQL statement"| -2147483136| +|TSDB_CODE_TSC_INVALID_QHANDLE| 0 | 0x0201 | "Invalid qhandle"| -2147483135| +|TSDB_CODE_TSC_INVALID_TIME_STAMP| 0 | 0x0202 | "Invalid combination of client/service time"| -2147483134| +|TSDB_CODE_TSC_INVALID_VALUE| 0 | 0x0203| "Invalid value in client"| -2147483133| +|TSDB_CODE_TSC_INVALID_VERSION| 0 | 0x0204 | "Invalid client version" |-2147483132| +|TSDB_CODE_TSC_INVALID_IE| 0 | 0x0205 | "Invalid client ie" |-2147483131| +|TSDB_CODE_TSC_INVALID_FQDN| 0 | 0x0206| "Invalid host name"| -2147483130| +|TSDB_CODE_TSC_INVALID_USER_LENGTH| 0 | 0x0207| "Invalid user name"| -2147483129| +|TSDB_CODE_TSC_INVALID_PASS_LENGTH| 0 | 0x0208 | "Invalid password"| -2147483128| +|TSDB_CODE_TSC_INVALID_DB_LENGTH| 0 | 0x0209| "Database name too long"| -2147483127| +|TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH| 0 | 0x020A | "Table name too long"| -2147483126| +|TSDB_CODE_TSC_INVALID_CONNECTION| 0 | 0x020B| "Invalid connection"| -2147483125| +|TSDB_CODE_TSC_OUT_OF_MEMORY| 0 | 0x020C | "System out of memory" |-2147483124| +|TSDB_CODE_TSC_NO_DISKSPACE| 0 | 0x020D | "System out of disk space"| -2147483123| +|TSDB_CODE_TSC_QUERY_CACHE_ERASED| 0 | 0x020E| "Query cache erased"| -2147483122| +|TSDB_CODE_TSC_QUERY_CANCELLED| 0 | 0x020F |"Query terminated"| -2147483121| +|TSDB_CODE_TSC_SORTED_RES_TOO_MANY| 0 |0x0210 | "Result set too large to be sorted"| -2147483120| +|TSDB_CODE_TSC_APP_ERROR| 0 | 0x0211 | "Application error"| -2147483119| +|TSDB_CODE_TSC_ACTION_IN_PROGRESS| 0 |0x0212 | "Action in progress"| -2147483118| +|TSDB_CODE_TSC_DISCONNECTED| 0 | 0x0213 |"Disconnected from service" |-2147483117| +|TSDB_CODE_TSC_NO_WRITE_AUTH| 0 | 0x0214 | "No write permission" |-2147483116| +|TSDB_CODE_MND_MSG_NOT_PROCESSED| 0| 0x0300| "Message not processed"| -2147482880| +|TSDB_CODE_MND_ACTION_IN_PROGRESS| 0 | 0x0301 |"Message is progressing"| -2147482879| +|TSDB_CODE_MND_ACTION_NEED_REPROCESSED| 0 | 0x0302 |"Messag need to be reprocessed"| -2147482878| +|TSDB_CODE_MND_NO_RIGHTS| 0 | 0x0303| "Insufficient privilege for operation"| -2147482877| +|TSDB_CODE_MND_APP_ERROR| 0 | 0x0304 | "Unexpected generic error in mnode"| -2147482876| +|TSDB_CODE_MND_INVALID_CONNECTION| 0 | 0x0305 | "Invalid message connection"| -2147482875| +|TSDB_CODE_MND_INVALID_MSG_VERSION| 0 | 0x0306 | "Incompatible 
protocol version"| -2147482874| +|TSDB_CODE_MND_INVALID_MSG_LEN| 0| 0x0307 | "Invalid message length"| -2147482873| +|TSDB_CODE_MND_INVALID_MSG_TYPE| 0 | 0x0308 | "Invalid message type" |-2147482872| +|TSDB_CODE_MND_TOO_MANY_SHELL_CONNS| 0 |0x0309 | "Too many connections"| -2147482871| +|TSDB_CODE_MND_OUT_OF_MEMORY| 0 |0x030A | "Out of memory in mnode"| -2147482870| +|TSDB_CODE_MND_INVALID_SHOWOBJ| 0 | 0x030B |"Data expired"| -2147482869| +|TSDB_CODE_MND_INVALID_QUERY_ID |0 | 0x030C |"Invalid query id" |-2147482868| +|TSDB_CODE_MND_INVALID_STREAM_ID| 0 |0x030D | "Invalid stream id"| -2147482867| +|TSDB_CODE_MND_INVALID_CONN_ID| 0| 0x030E | "Invalid connection id" |-2147482866| +|TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE| 0 | 0x0320| "Object already there"| -2147482848| +|TSDB_CODE_MND_SDB_ERROR| 0 |0x0321 | "Unexpected generic error in sdb" |-2147482847| +|TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE| 0 | 0x0322| "Invalid table type" |-2147482846| +|TSDB_CODE_MND_SDB_OBJ_NOT_THERE| 0 | 0x0323 |"Object not there" |-2147482845| +|TSDB_CODE_MND_SDB_INVAID_META_ROW| 0 | 0x0324| "Invalid meta row" |-2147482844| +|TSDB_CODE_MND_SDB_INVAID_KEY_TYPE| 0 | 0x0325 |"Invalid key type" |-2147482843| +|TSDB_CODE_MND_DNODE_ALREADY_EXIST| 0 | 0x0330 | "DNode already exists"| -2147482832| +|TSDB_CODE_MND_DNODE_NOT_EXIST| 0 | 0x0331| "DNode does not exist" |-2147482831| +|TSDB_CODE_MND_VGROUP_NOT_EXIST| 0 | 0x0332 |"VGroup does not exist"| -2147482830| +|TSDB_CODE_MND_NO_REMOVE_MASTER |0 | 0x0333 | "Master DNode cannot be removed"| -2147482829| +|TSDB_CODE_MND_NO_ENOUGH_DNODES |0 | 0x0334| "Out of DNodes"| -2147482828| +|TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT |0 | 0x0335 | "Cluster cfg inconsistent"| -2147482827| +|TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION| 0 | 0x0336 | "Invalid dnode cfg option"| -2147482826| +|TSDB_CODE_MND_BALANCE_ENABLED| 0 | 0x0337 | "Balance already enabled" |-2147482825| +|TSDB_CODE_MND_VGROUP_NOT_IN_DNODE| 0 |0x0338 | "Vgroup not in dnode"| -2147482824| +|TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE| 0 | 0x0339 | "Vgroup already in dnode"| -2147482823| +|TSDB_CODE_MND_DNODE_NOT_FREE |0 | 0x033A |"Dnode not avaliable"| -2147482822| +|TSDB_CODE_MND_INVALID_CLUSTER_ID |0 |0x033B | "Cluster id not match"| -2147482821| +|TSDB_CODE_MND_NOT_READY| 0 | 0x033C |"Cluster not ready"| -2147482820| +|TSDB_CODE_MND_ACCT_ALREADY_EXIST| 0 | 0x0340 | "Account already exists" |-2147482816| +|TSDB_CODE_MND_INVALID_ACCT| 0 | 0x0341| "Invalid account"| -2147482815| +|TSDB_CODE_MND_INVALID_ACCT_OPTION| 0 | 0x0342 | "Invalid account options"| -2147482814| +|TSDB_CODE_MND_USER_ALREADY_EXIST| 0 | 0x0350 | "User already exists"| -2147482800| +|TSDB_CODE_MND_INVALID_USER |0 | 0x0351 | "Invalid user" |-2147482799| +|TSDB_CODE_MND_INVALID_USER_FORMAT| |0 |0x0352 |"Invalid user format" |-2147482798| +|TSDB_CODE_MND_INVALID_PASS_FORMAT| 0| 0x0353 | "Invalid password format"| -2147482797| +|TSDB_CODE_MND_NO_USER_FROM_CONN| 0 | 0x0354 | "Can not get user from conn"| -2147482796| +|TSDB_CODE_MND_TOO_MANY_USERS| 0 | 0x0355| "Too many users"| -2147482795| +|TSDB_CODE_MND_TABLE_ALREADY_EXIST| 0| 0x0360| "Table already exists"| -2147482784| +|TSDB_CODE_MND_INVALID_TABLE_ID| 0| 0x0361| "Table name too long"| -2147482783| +|TSDB_CODE_MND_INVALID_TABLE_NAME| 0| 0x0362 | "Table does not exist"| -2147482782| +|TSDB_CODE_MND_INVALID_TABLE_TYPE| 0| 0x0363 | "Invalid table type in tsdb"| -2147482781| +|TSDB_CODE_MND_TOO_MANY_TAGS| 0 | 0x0364| "Too many tags"| -2147482780| +|TSDB_CODE_MND_TOO_MANY_TIMESERIES| 0| 0x0366| "Too many time series"| 
-2147482778| +|TSDB_CODE_MND_NOT_SUPER_TABLE| 0 |0x0367| "Not super table"| -2147482777| +|TSDB_CODE_MND_COL_NAME_TOO_LONG| 0| 0x0368| "Tag name too long"| -2147482776| +|TSDB_CODE_MND_TAG_ALREAY_EXIST| 0| 0x0369| "Tag already exists"| -2147482775| +|TSDB_CODE_MND_TAG_NOT_EXIST| 0 |0x036A | "Tag does not exist" |-2147482774| +|TSDB_CODE_MND_FIELD_ALREAY_EXIST| 0 | 0x036B| "Field already exists"| -2147482773| +|TSDB_CODE_MND_FIELD_NOT_EXIST| 0 | 0x036C | "Field does not exist"| -2147482772| +|TSDB_CODE_MND_INVALID_STABLE_NAME |0 | 0x036D |"Super table does not exist" |-2147482771| +|TSDB_CODE_MND_DB_NOT_SELECTED| 0 | 0x0380 | "Database not specified or available"| -2147482752| +|TSDB_CODE_MND_DB_ALREADY_EXIST| 0 | 0x0381 | "Database already exists"| -2147482751| +|TSDB_CODE_MND_INVALID_DB_OPTION| 0 | 0x0382 | "Invalid database options"| -2147482750| +|TSDB_CODE_MND_INVALID_DB| |0 | 0x0383 | "Invalid database name"| -2147482749| +|TSDB_CODE_MND_MONITOR_DB_FORBIDDEN| 0 | 0x0384 | "Cannot delete monitor database"| -2147482748| +|TSDB_CODE_MND_TOO_MANY_DATABASES| 0| 0x0385 | "Too many databases for account"| -2147482747| +|TSDB_CODE_MND_DB_IN_DROPPING| 0 | 0x0386| "Database not available" |-2147482746| +|TSDB_CODE_DND_MSG_NOT_PROCESSED| 0| 0x0400 | "Message not processed"| -2147482624| +|TSDB_CODE_DND_OUT_OF_MEMORY |0 | 0x0401 | "Dnode out of memory"| -2147482623| +|TSDB_CODE_DND_NO_WRITE_ACCESS| 0 | 0x0402 | "No permission for disk files in dnode"| -2147482622| +|TSDB_CODE_DND_INVALID_MSG_LEN| 0 | 0x0403 | "Invalid message length"| -2147482621| +|TSDB_CODE_VND_ACTION_IN_PROGRESS |0 |0x0500| "Action in progress" |-2147482368| +|TSDB_CODE_VND_MSG_NOT_PROCESSED| 0 |0x0501 | "Message not processed" |-2147482367| +|TSDB_CODE_VND_ACTION_NEED_REPROCESSED |0 |0x0502| "Action need to be reprocessed"| -2147482366| +|TSDB_CODE_VND_INVALID_VGROUP_ID |0 | 0x0503| "Invalid Vgroup ID"| -2147482365| +|TSDB_CODE_VND_INIT_FAILED| 0 | 0x0504 | "Vnode initialization failed"| -2147482364| +|TSDB_CODE_VND_NO_DISKSPACE| 0 |0x0505| "System out of disk space" |-2147482363| +|TSDB_CODE_VND_NO_DISK_PERMISSIONS| 0 | 0x0506| "No write permission for disk files" |-2147482362| +|TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR| 0 | 0x0507 | "Missing data file"| -2147482361| +|TSDB_CODE_VND_OUT_OF_MEMORY |0| 0x0508 | "Out of memory"| -2147482360| +|TSDB_CODE_VND_APP_ERROR| 0| 0x0509 | "Unexpected generic error in vnode"| -2147482359| +|TSDB_CODE_VND_INVALID_STATUS |0| 0x0510 | "Database not ready"| -2147482352| +|TSDB_CODE_VND_NOT_SYNCED| 0 | 0x0511 | "Database suspended"| -2147482351| +|TSDB_CODE_VND_NO_WRITE_AUTH| 0 | 0x0512| "Write operation denied" |-2147482350| +|TSDB_CODE_TDB_INVALID_TABLE_ID |0 | 0x0600 | "Invalid table ID"| -2147482112| +|TSDB_CODE_TDB_INVALID_TABLE_TYPE| 0| 0x0601 |"Invalid table type"| -2147482111| +|TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION| 0| 0x0602| "Invalid table schema version"| -2147482110| +|TSDB_CODE_TDB_TABLE_ALREADY_EXIST| 0 | 0x0603| "Table already exists"| -2147482109| +|TSDB_CODE_TDB_INVALID_CONFIG| 0 | 0x0604| "Invalid configuration"| -2147482108| +|TSDB_CODE_TDB_INIT_FAILED| 0 | 0x0605| "Tsdb init failed"| -2147482107| +|TSDB_CODE_TDB_NO_DISKSPACE| 0 | 0x0606| "No diskspace for tsdb"| -2147482106| +|TSDB_CODE_TDB_NO_DISK_PERMISSIONS| 0 | 0x0607| "No permission for disk files"| -2147482105| +|TSDB_CODE_TDB_FILE_CORRUPTED| 0 | 0x0608| "Data file(s) corrupted"| -2147482104| +|TSDB_CODE_TDB_OUT_OF_MEMORY| 0 | 0x0609| "Out of memory"| -2147482103| +|TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE| 0 | 0x060A| "Tag too 
old"| -2147482102| +|TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE |0| 0x060B | "Timestamp data out of range"| -2147482101| +|TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP| 0| 0x060C| "Submit message is messed up"| -2147482100| +|TSDB_CODE_TDB_INVALID_ACTION| 0 | 0x060D | "Invalid operation"| -2147482099| +|TSDB_CODE_TDB_INVALID_CREATE_TB_MSG| 0 | 0x060E| "Invalid creation of table"| -2147482098| +|TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM| 0 | 0x060F| "No table data in memory skiplist" |-2147482097| +|TSDB_CODE_TDB_FILE_ALREADY_EXISTS| 0 | 0x0610| "File already exists"| -2147482096| +|TSDB_CODE_TDB_TABLE_RECONFIGURE| 0 | 0x0611| "Need to reconfigure table"| -2147482095| +|TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO| 0 | 0x0612| "Invalid information to create table"| -2147482094| +|TSDB_CODE_QRY_INVALID_QHANDLE| 0 | 0x0700| "Invalid handle"| -2147481856| +|TSDB_CODE_QRY_INVALID_MSG| 0 | 0x0701| "Invalid message"| -2147481855| +|TSDB_CODE_QRY_NO_DISKSPACE| 0 | 0x0702 | "No diskspace for query"| -2147481854| +|TSDB_CODE_QRY_OUT_OF_MEMORY| 0 | 0x0703 | "System out of memory"| -2147481853| +|TSDB_CODE_QRY_APP_ERROR| 0 | 0x0704 | "Unexpected generic error in query"| -2147481852| +|TSDB_CODE_QRY_DUP_JOIN_KEY| 0 | 0x0705| "Duplicated join key"| -2147481851| +|TSDB_CODE_QRY_EXCEED_TAGS_LIMIT| 0 | 0x0706 | "Tag conditon too many"| -2147481850| +|TSDB_CODE_QRY_NOT_READY |0| 0x0707 | "Query not ready" |-2147481849| +|TSDB_CODE_QRY_HAS_RSP| 0 | 0x0708| "Query should response"| -2147481848| +|TSDB_CODE_GRANT_EXPIRED| 0 | 0x0800| "License expired"| -2147481600| +|TSDB_CODE_GRANT_DNODE_LIMITED| 0 | 0x0801 | "DNode creation limited by licence"| -2147481599| +|TSDB_CODE_GRANT_ACCT_LIMITED |0| 0x0802 |"Account creation limited by license"| -2147481598| +|TSDB_CODE_GRANT_TIMESERIES_LIMITED| 0 | 0x0803 | "Table creation limited by license"| -2147481597| +|TSDB_CODE_GRANT_DB_LIMITED| 0 | 0x0804 | "DB creation limited by license"| -2147481596| +|TSDB_CODE_GRANT_USER_LIMITED| 0 | 0x0805 | "User creation limited by license"| -2147481595| +|TSDB_CODE_GRANT_CONN_LIMITED| 0| 0x0806 | "Conn creation limited by license" |-2147481594| +|TSDB_CODE_GRANT_STREAM_LIMITED| 0 | 0x0807 | "Stream creation limited by license"| -2147481593| +|TSDB_CODE_GRANT_SPEED_LIMITED| 0 | 0x0808 | "Write speed limited by license" |-2147481592| +|TSDB_CODE_GRANT_STORAGE_LIMITED| 0 |0x0809 | "Storage capacity limited by license"| -2147481591| +|TSDB_CODE_GRANT_QUERYTIME_LIMITED| 0 | 0x080A | "Query time limited by license" |-2147481590| +|TSDB_CODE_GRANT_CPU_LIMITED| 0 |0x080B |"CPU cores limited by license"| -2147481589| +|TSDB_CODE_SYN_INVALID_CONFIG| 0 | 0x0900| "Invalid Sync Configuration"| -2147481344| +|TSDB_CODE_SYN_NOT_ENABLED| 0 | 0x0901 | "Sync module not enabled" |-2147481343| +|TSDB_CODE_WAL_APP_ERROR| 0| 0x1000 | "Unexpected generic error in wal" |-2147479552| \ No newline at end of file diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/webdocs/markdowndocs/administrator-ch.md index ff5c0d5713..d6fc649b90 100644 --- a/documentation20/webdocs/markdowndocs/administrator-ch.md +++ b/documentation20/webdocs/markdowndocs/administrator-ch.md @@ -93,6 +93,7 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修 - role:dnode的可选角色。0-any; 既可作为mnode,也可分配vnode;1-mgmt;只能作为mnode,不能分配vnode;2-dnode;不能作为mnode,只能分配vnode - debugFlag:运行日志开关。131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志)。默认值:131或135(不同模块有不同的默认值)。 - numOfLogLines:单个日志文件允许的最大行数。默认值:10,000,000行。 +- logKeepDays:日志文件的最长保存时间。大于0时,日志文件会被重命名为taosdlog.xxx,其中xxx为日志文件最后修改的时间戳,单位为秒。默认值:0天。 - 
maxSQLLength:单条SQL语句允许最长限制。默认值:65380字节。 - telemetryReporting: 是否允许 TDengine 采集和上报基本使用信息,0表示不允许,1表示允许。 默认值:1。 diff --git a/documentation20/webdocs/markdowndocs/connector-java-ch.md b/documentation20/webdocs/markdowndocs/connector-java-ch.md index da5ea52966..f6da7ff403 100644 --- a/documentation20/webdocs/markdowndocs/connector-java-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-java-ch.md @@ -228,7 +228,8 @@ resultSet.close(); stmt.close(); conn.close(); ``` -> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。 +> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。 + ## 与连接池使用 **HikariCP** diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md index 27c1054dc8..e4bb920dd3 100644 --- a/documentation20/webdocs/markdowndocs/faq-ch.md +++ b/documentation20/webdocs/markdowndocs/faq-ch.md @@ -47,66 +47,11 @@ 检查服务器侧TCP端口连接是否工作:`nc -l {port}` 检查客户端侧TCP端口链接是否工作:`nc {hostIP} {port}` -10. 可以使用taos程序内嵌的网络连通检测功能:验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP)。 - - taos通过参数 -n 来确定运行服务端功能,还是客户端功能。-n server:表示运行检测服务端功能;-n client:表示运行检测客户端功能。 - - 1)首先在服务器上停止taosd服务; - - 2)在服务器上运行taos内嵌的网络连通检测的服务端功能:taos -n server -P 6030 -e 6042 -l 1000; - - 3)在客户端运行taos内嵌的网络连通检测的客户端功能:taos -n client -h host -P 6030 -e 6042 -l 1000; - - -n :指示运行网络连通检测的服务端功能,或客户端功能,缺省值为空,表示不启动网络连通检测; - - -h:指示服务端名称,可以是ip地址或fqdn格式。如:192.168.1.160,或 192.168.1.160:6030,或 hostname1,或hostname1:6030。缺省值是127.0.0.1。 - - -P :检测的起始端口号,缺省值是6030; - - -e:检测的结束端口号,必须大于等于起始端口号,缺省值是6042; - - -l:指定检测端口连通的报文长度,最大64000字节,缺省值是1000字节,测试时服务端和客户端必须指定相同; - - 服务端设置的起始端口和结束端口号,必须包含客户端设置的起始端口和结束端口号; - - 对于起始端口号有三种设置方式:缺省值、-h指定、-P指定,优先级是:-P指定 > -h指定 > 缺省值。 - - 客户端运行的输出样例: - - `sum@sum-virtualBox /home/sum $ taos -n client -h ubuntu-vbox6` - - `host: ubuntu-vbox6 start port: 6030 end port: 6042 packet len: 1000` - - `tcp port:6030 test ok. udp port:6030 test ok.` - - `tcp port:6031 test ok. udp port:6031 test ok.` - - `tcp port:6032 test ok. udp port:6032 test ok.` - - `tcp port:6033 test ok. udp port:6033 test ok.` - - `tcp port:6034 test ok. udp port:6034 test ok.` - - `tcp port:6035 test ok. udp port:6035 test ok.` - - `tcp port:6036 test ok. udp port:6036 test ok.` - - `tcp port:6037 test ok. udp port:6037 test ok.` - - `tcp port:6038 test ok. udp port:6038 test ok.` - - `tcp port:6039 test ok. udp port:6039 test ok.` - - `tcp port:6040 test ok. udp port:6040 test ok.` - - `tcp port:6041 test ok. udp port:6041 test ok.` - - `tcp port:6042 test ok. udp port:6042 test ok.` - - 如果某个端口不通,会输出 `port:xxxx test fail`的信息。 +10. 也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)。 -## 6. 遇到错误“Unexpected generic error in RPC”, 我怎么办? + +## 6. 遇到错误“Unexpected generic error in RPC”或者"TDengine Error: Unable to resolve FQDN", 我怎么办? 产生这个错误,是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用,请做如下检查: 1. 
请检查连接的服务器的FQDN是否正确 diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index a91dde8796..fdb8466706 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -122,11 +122,14 @@ # number of replications, for cluster only # replica 1 -# mqtt uri -# mqttBrokerAddress mqtt://username:password@hostname:1883/taos/ +# mqtt hostname +# mqttHostName test.mosquitto.org -# mqtt client name -# mqttBrokerClientId taos_mqtt +# mqtt port +# mqttPort 1883 + +# mqtt topic +# mqttTopic /test # the compressed rpc message, option: # -1 (no compression) @@ -186,6 +189,9 @@ # max number of rows per log filters # numOfLogLines 10000000 +# time of keeping log files, days +# logKeepDays 0 + # enable/disable async log # asyncLog 1 diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 0ac0764217..d55be5b8c0 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine -base: core18 # the base snap is the execution environment for this snap -version: '2.0.2.0' # just for humans, typically '1.2+git' or '1.3.2' +base: core18 +version: 'RELEASE_VERSION' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.2.0 + - usr/lib/libtaos.so.RELEASE_VERSION - usr/lib/libtaos.so.1 - usr/lib/libtaos.so diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index ede66d95bb..898b7cb032 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -10,7 +10,9 @@ ADD_SUBDIRECTORY(client) ADD_SUBDIRECTORY(query) ADD_SUBDIRECTORY(kit) ADD_SUBDIRECTORY(plugins) -ADD_SUBDIRECTORY(sync) +IF (TD_SYNC) + ADD_SUBDIRECTORY(sync) +ENDIF () ADD_SUBDIRECTORY(balance) ADD_SUBDIRECTORY(mnode) ADD_SUBDIRECTORY(vnode) diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index b25f620508..bd980b75a3 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -437,13 +437,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaData * @return */ jstring jniFromNCharToByteArray(JNIEnv *env, char *nchar, int32_t maxBytes) { - int len = (int)strlen(nchar); - if (len > maxBytes) { // no terminated symbol exists '\0' - len = maxBytes; - } - - jbyteArray bytes = (*env)->NewByteArray(env, len); - (*env)->SetByteArrayRegion(env, bytes, 0, len, (jbyte *)nchar); + jbyteArray bytes = (*env)->NewByteArray(env, maxBytes); + (*env)->SetByteArrayRegion(env, bytes, 0, maxBytes, (jbyte *)nchar); return bytes; } @@ -481,6 +476,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn } } + int32_t* length = taos_fetch_lengths(result); + char tmp[TSDB_MAX_BYTES_PER_ROW] = {0}; for (int i = 0; i < num_fields; i++) { @@ -515,15 +512,15 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn (*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv); } break; case TSDB_DATA_TYPE_BINARY: { - strncpy(tmp, row[i], (size_t)fields[i].bytes); // handle the case that terminated does not exist + memcpy(tmp, row[i], length[i]); // handle the case that terminated does not exist (*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, tmp)); - memset(tmp, 0, (size_t)fields[i].bytes); + memset(tmp, 0, length[i]); break; } case TSDB_DATA_TYPE_NCHAR: { (*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteArrayFp, i, - jniFromNCharToByteArray(env, (char *)row[i], fields[i].bytes)); + 
jniFromNCharToByteArray(env, (char *)row[i], length[i])); break; } case TSDB_DATA_TYPE_TIMESTAMP: diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 48980fefb6..c248b08ddd 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -232,8 +232,9 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } else if (pInfo->type == TSDB_SQL_DROP_TABLE) { assert(pInfo->pDCLInfo->nTokens == 1); - if (tscSetTableFullName(pTableMetaInfo, pzName, pSql) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + code = tscSetTableFullName(pTableMetaInfo, pzName, pSql); + if(code != TSDB_CODE_SUCCESS) { + return code; } } else if (pInfo->type == TSDB_SQL_DROP_DNODE) { pzName->n = strdequote(pzName->z); @@ -348,8 +349,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { case TSDB_SQL_DESCRIBE_TABLE: { SStrToken* pToken = &pInfo->pDCLInfo->a[0]; - const char* msg2 = "table name is too long"; const char* msg1 = "invalid table name"; + const char* msg2 = "table name is too long"; if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -710,7 +711,9 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu } int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) { - const char* msg = "name too long"; + const char* msg1 = "name too long"; + const char* msg2 = "invalid db name"; + const char *msg = msg1; SSqlCmd* pCmd = &pSql->cmd; int32_t code = TSDB_CODE_SUCCESS; @@ -728,16 +731,14 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableNa } else { // get current DB name first, then set it into path SStrToken t = {0}; getCurrentDBName(pSql, &t); - + if (t.n == 0) { + msg = msg2; + } code = setObjFullName(pTableMetaInfo->name, NULL, &t, pzTableName, NULL); } - if (code != TSDB_CODE_SUCCESS) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); - } - - if (code != TSDB_CODE_SUCCESS) { - free(oldName); + free(oldName); return code; } @@ -1072,7 +1073,7 @@ int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStr /* db name is not specified, the tableName dose not include db name */ if (pDB != NULL) { - if (pDB->n >= TSDB_ACCT_LEN + TSDB_DB_NAME_LEN) { + if (pDB->n >= TSDB_ACCT_LEN + TSDB_DB_NAME_LEN || pDB->n == 0) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -1597,13 +1598,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SColumnIndex index = COLUMN_INDEX_INITIALIZER; if (pItem->pNode->pParam != NULL) { - SStrToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo; - if (pToken->z == NULL || pToken->n == 0) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); - } - tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0]; - if (pParamElem->pNode->nSQLOptr == TK_ALL) { + SStrToken* pToken = &pParamElem->pNode->colInfo; + short sqlOptr = pParamElem->pNode->nSQLOptr; + if ((pToken->z == NULL || pToken->n == 0) + && (TK_INTEGER != sqlOptr)) /*select count(1) from table*/ { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + if (sqlOptr == TK_ALL) { // select table.* // check if the table name is valid or not SStrToken tmpToken = pParamElem->pNode->colInfo; @@ -1615,6 +1617,21 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; pExpr 
= tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false); + } else if (sqlOptr == TK_INTEGER) { // select count(1) from table1 + char buf[8] = {0}; + int64_t val = -1; + tVariant* pVariant = &pParamElem->pNode->val; + if (pVariant->nType == TSDB_DATA_TYPE_BIGINT) { + tVariantDump(pVariant, buf, TSDB_DATA_TYPE_BIGINT, true); + val = GET_INT64_VAL(buf); + } + if (val == 1) { + index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; + int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; + pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false); + } else { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } } else { // count the number of meters created according to the super table if (getColumnIndexByName(pCmd, pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { @@ -2739,27 +2756,31 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, } } + int32_t retVal = TSDB_CODE_SUCCESS; if (pExpr->nSQLOptr == TK_LE || pExpr->nSQLOptr == TK_LT) { - tVariantDump(&pRight->val, (char*)&pColumnFilter->upperBndd, colType, false); - } else { // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd - if (colType == TSDB_DATA_TYPE_BINARY) { - pColumnFilter->pz = (int64_t)calloc(1, pRight->val.nLen + TSDB_NCHAR_SIZE); - pColumnFilter->len = pRight->val.nLen; + retVal = tVariantDump(&pRight->val, (char*)&pColumnFilter->upperBndd, colType, false); - tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false); - } else if (colType == TSDB_DATA_TYPE_NCHAR) { - // pRight->val.nLen + 1 is larger than the actual nchar string length - pColumnFilter->pz = (int64_t)calloc(1, (pRight->val.nLen + 1) * TSDB_NCHAR_SIZE); + // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd + } else if (colType == TSDB_DATA_TYPE_BINARY) { + pColumnFilter->pz = (int64_t)calloc(1, pRight->val.nLen + TSDB_NCHAR_SIZE); + pColumnFilter->len = pRight->val.nLen; + retVal = tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false); - tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false); + } else if (colType == TSDB_DATA_TYPE_NCHAR) { + // pRight->val.nLen + 1 is larger than the actual nchar string length + pColumnFilter->pz = (int64_t)calloc(1, (pRight->val.nLen + 1) * TSDB_NCHAR_SIZE); + retVal = tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false); + size_t len = twcslen((wchar_t*)pColumnFilter->pz); + pColumnFilter->len = len * TSDB_NCHAR_SIZE; - size_t len = twcslen((wchar_t*)pColumnFilter->pz); - pColumnFilter->len = len * TSDB_NCHAR_SIZE; - } else { - tVariantDump(&pRight->val, (char*)&pColumnFilter->lowerBndd, colType, false); - } + } else { + retVal = tVariantDump(&pRight->val, (char*)&pColumnFilter->lowerBndd, colType, false); } + if (retVal != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); + } + switch (pExpr->nSQLOptr) { case TK_LE: pColumnFilter->upperRelOptr = TSDB_RELATION_LESS_EQUAL; @@ -4430,7 +4451,6 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const int32_t DEFAULT_TABLE_INDEX = 0; const char* msg1 = "invalid table name"; - const char* msg2 = "table name too long"; const char* msg3 = "manipulation of tag available for super table"; const char* msg4 = "set tag value only available for table"; const char* msg5 = "only support add one tag"; @@ -4463,7 +4483,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } if (tscSetTableFullName(pTableMetaInfo, 
&(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + return TSDB_CODE_TSC_INVALID_SQL; } int32_t ret = tscGetTableMeta(pSql, pTableMetaInfo); @@ -5724,7 +5744,6 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) { int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo) { const char* msg1 = "invalid table name"; - const char* msg2 = "table name too long"; SSqlCmd* pCmd = &pSql->cmd; SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex); @@ -5745,7 +5764,7 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p } if (tscSetTableFullName(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + return TSDB_CODE_TSC_INVALID_SQL; } if (!validateTableColumnInfo(pFieldList, pCmd) || @@ -5800,7 +5819,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { } if (tscSetTableFullName(pStableMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + return TSDB_CODE_TSC_INVALID_SQL; } // get meter meta from mnode @@ -5992,7 +6011,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0)); const char* msg0 = "invalid table name"; - const char* msg1 = "table name too long"; + //const char* msg1 = "table name too long"; const char* msg2 = "point interpolation query needs timestamp"; const char* msg5 = "fill only available for interval query"; const char* msg6 = "start(end) time of query range required or time range too large"; @@ -6065,7 +6084,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { SStrToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz}; if (tscSetTableFullName(pTableMetaInfo1, &t, pSql) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + return TSDB_CODE_TSC_INVALID_SQL; } tVariant* pTableItem1 = &pQuerySql->from->a[i + 1].pVar; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index fbc02cc40e..ae2013cd2b 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2013,7 +2013,8 @@ int tscProcessUseDbRsp(SSqlObj *pSql) { return 0; } -int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) { +int tscProcessDropDbRsp(SSqlObj *pSql) { + pSql->pTscObj->db[0] = 0; taosCacheEmpty(tscCacheHandle); return 0; } diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 6c6e5e35d8..cc4afeb3f8 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -325,8 +325,6 @@ void tdResetKVRowBuilder(SKVRowBuilder *pBuilder); SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder); static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) { - ASSERT(pBuilder->nCols == 0 || colId > pBuilder->pColIdx[pBuilder->nCols - 1].colId); - if (pBuilder->nCols >= pBuilder->tCols) { pBuilder->tCols *= 2; pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index ef0713c415..77e8b76456 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -104,8 +104,12 @@ extern int32_t tsTelegrafUseFieldNum; // mqtt extern int32_t tsEnableMqttModule; -extern char tsMqttBrokerAddress[]; -extern 
char tsMqttBrokerClientId[]; +extern char tsMqttHostName[]; +extern char tsMqttPort[]; +extern char tsMqttUser[]; +extern char tsMqttPass[]; +extern char tsMqttClientId[]; +extern char tsMqttTopic[]; // monitor extern int32_t tsEnableMonitorModule; @@ -154,6 +158,7 @@ extern char buildinfo[]; // log extern int32_t tsAsyncLog; extern int32_t tsNumOfLogLines; +extern int32_t tsLogKeepDays; extern int32_t dDebugFlag; extern int32_t vDebugFlag; extern int32_t mDebugFlag; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 795585e5c9..96e8fb26c6 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -137,8 +137,12 @@ int32_t tsTelegrafUseFieldNum = 0; // mqtt int32_t tsEnableMqttModule = 0; // not finished yet, not started it by default -char tsMqttBrokerAddress[128] = {0}; -char tsMqttBrokerClientId[128] = {0}; +char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org"; +char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883"; +char tsMqttUser[TSDB_MQTT_USER_LEN] = {0}; +char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0}; +char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber"; +char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // # // monitor int32_t tsEnableMonitorModule = 1; @@ -247,8 +251,11 @@ bool taosCfgDynamicOptions(char *msg) { for (int32_t i = 0; i < tsGlobalConfigNum; ++i) { SGlobalCfg *cfg = tsGlobalConfig + i; - if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; + //if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; if (cfg->valType != TAOS_CFG_VTYPE_INT32) continue; + + int32_t cfgLen = (int32_t)strlen(cfg->option); + if (cfgLen != olen) continue; if (strncasecmp(option, cfg->option, olen) != 0) continue; *((int32_t *)cfg->ptr) = vint; @@ -767,26 +774,36 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - cfg.option = "mqttBrokerAddress"; - cfg.ptr = tsMqttBrokerAddress; + cfg.option = "mqttHostName"; + cfg.ptr = tsMqttHostName; cfg.valType = TAOS_CFG_VTYPE_STRING; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_NOT_PRINT; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_NOT_PRINT; cfg.minValue = 0; cfg.maxValue = 0; - cfg.ptrLength = 126; + cfg.ptrLength = TSDB_MQTT_HOSTNAME_LEN; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - cfg.option = "mqttBrokerClientId"; - cfg.ptr = tsMqttBrokerClientId; + cfg.option = "mqttPort"; + cfg.ptr = tsMqttPort; cfg.valType = TAOS_CFG_VTYPE_STRING; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_NOT_PRINT; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_NOT_PRINT; cfg.minValue = 0; cfg.maxValue = 0; - cfg.ptrLength = 126; + cfg.ptrLength = TSDB_MQTT_PORT_LEN; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - + + cfg.option = "mqttTopic"; + cfg.ptr = tsMqttTopic; + cfg.valType = TAOS_CFG_VTYPE_STRING; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_NOT_PRINT; + cfg.minValue = 0; + cfg.maxValue = 0; + cfg.ptrLength = TSDB_MQTT_TOPIC_LEN; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + cfg.option = "compressMsgSize"; cfg.ptr = &tsCompressMsgSize; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ -996,12 +1013,22 @@ static void doInitGlobalConfig(void) { cfg.ptr = &tsNumOfLogLines; cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT; - cfg.minValue = 10000; + cfg.minValue = 1000; cfg.maxValue = 2000000000; cfg.ptrLength = 0; 
cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "logKeepDays"; + cfg.ptr = &tsLogKeepDays; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = 365000; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + cfg.option = "asyncLog"; cfg.ptr = &tsAsyncLog; cfg.valType = TAOS_CFG_VTYPE_INT16; @@ -1270,6 +1297,9 @@ void taosInitGlobalCfg() { } bool taosCheckGlobalCfg() { + char fqdn[TSDB_FQDN_LEN]; + uint16_t port; + if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) { taosSetAllDebugFlag(); } @@ -1278,17 +1308,23 @@ bool taosCheckGlobalCfg() { taosGetFqdn(tsLocalFqdn); } - snprintf(tsLocalEp, sizeof(tsLocalEp), "%s:%d", tsLocalFqdn, tsServerPort); + snprintf(tsLocalEp, sizeof(tsLocalEp), "%s:%u", tsLocalFqdn, tsServerPort); uInfo("localEp is: %s", tsLocalEp); if (tsFirst[0] == 0) { strcpy(tsFirst, tsLocalEp); + } else { + taosGetFqdnPortFromEp(tsFirst, fqdn, &port); + snprintf(tsFirst, sizeof(tsFirst), "%s:%u", fqdn, port); } if (tsSecond[0] == 0) { strcpy(tsSecond, tsLocalEp); + } else { + taosGetFqdnPortFromEp(tsSecond, fqdn, &port); + snprintf(tsSecond, sizeof(tsSecond), "%s:%u", fqdn, port); } - + taosGetSystemInfo(); tsSetLocale(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/QueryDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/QueryDataTest.java new file mode 100644 index 0000000000..611e21e887 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/QueryDataTest.java @@ -0,0 +1,81 @@ +package com.taosdata.jdbc; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; +import java.util.Random; + +import static org.junit.Assert.assertEquals; +import java.util.Properties; +import java.util.concurrent.Executors; +import java.util.concurrent.*; + +import static org.junit.Assert.assertTrue; + +public class QueryDataTest extends BaseTest { + + static Connection connection = null; + static Statement statement = null; + static String dbName = "test"; + static String stbName = "meters"; + static String host = "localhost"; + static int numOfTables = 30; + final static int numOfRecordsPerTable = 1000; + static long ts = 1496732686000l; + final static String tablePrefix = "t"; + + @Before + public void createDatabase() throws SQLException { + try { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + } catch (ClassNotFoundException e) { + return; + } + + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); + + statement = connection.createStatement(); + statement.executeUpdate("drop database if exists " + dbName); + statement.executeUpdate("create database if not exists " + dbName); + statement.executeUpdate("use " + dbName); + + String createTableSql = "create table " + stbName + "(ts timestamp, name binary(6))"; + statement.executeUpdate(createTableSql); + } + + @Test + public void testQueryBinaryData() throws SQLException{ + + String insertSql = "insert into " + stbName + " values(now, 'taosda')"; + 
System.out.println(insertSql); + + statement.executeUpdate(insertSql); + + String querySql = "select * from " + stbName; + ResultSet rs = statement.executeQuery(querySql); + + while(rs.next()) { + String name = rs.getString(2) + "001"; + System.out.println("name = " + name); + assertEquals(name, "taosda001"); + } + rs.close(); + } + + + @After + public void close() throws Exception { + statement.close(); + connection.close(); + Thread.sleep(10); + } + +} \ No newline at end of file diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt index 24a109dd29..dea90811ad 100644 --- a/src/dnode/CMakeLists.txt +++ b/src/dnode/CMakeLists.txt @@ -11,10 +11,12 @@ AUX_SOURCE_DIRECTORY(src SRC) IF (TD_LINUX) ADD_EXECUTABLE(taosd ${SRC}) + TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lz4) + IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosd mnode taos_static monitor http mqtt tsdb twal vnode cJson lz4 balance sync) + TARGET_LINK_LIBRARIES(taosd taos_static) ELSE () - TARGET_LINK_LIBRARIES(taosd mnode taos monitor http mqtt tsdb twal vnode cJson lz4 balance sync) + TARGET_LINK_LIBRARIES(taosd taos) ENDIF () IF (TD_ACCOUNT) @@ -25,6 +27,14 @@ IF (TD_LINUX) TARGET_LINK_LIBRARIES(taosd grant) ENDIF () + IF (TD_MQTT) + TARGET_LINK_LIBRARIES(taosd mqtt) + ENDIF () + + IF (TD_SYNC) + TARGET_LINK_LIBRARIES(taosd balance sync) + ENDIF () + SET(PREPARE_ENV_CMD "prepare_env_cmd") SET(PREPARE_ENV_TARGET "prepare_env_target") ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD} diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c index 1f41bc23eb..c968246a68 100644 --- a/src/dnode/src/dnodeMgmt.c +++ b/src/dnode/src/dnodeMgmt.c @@ -611,7 +611,7 @@ static bool dnodeReadMnodeInfos() { } for (int i = 0; i < size; ++i) { - cJSON* nodeInfo = cJSON_GetArrayItem(nodeInfos, i); + cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i); if (nodeInfo == NULL) continue; cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId"); @@ -627,7 +627,7 @@ static bool dnodeReadMnodeInfos() { goto PARSE_OVER; } strncpy(tsDMnodeInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_EP_LEN); - } + } ret = true; diff --git a/src/dnode/src/dnodeModule.c b/src/dnode/src/dnodeModule.c index f8acb871c5..e1d298089c 100644 --- a/src/dnode/src/dnodeModule.c +++ b/src/dnode/src/dnodeModule.c @@ -62,6 +62,7 @@ static void dnodeAllocModules() { dnodeSetModuleStatus(TSDB_MOD_HTTP); } +#ifdef _MQTT tsModule[TSDB_MOD_MQTT].enable = (tsEnableMqttModule == 1); tsModule[TSDB_MOD_MQTT].name = "mqtt"; tsModule[TSDB_MOD_MQTT].initFp = mqttInitSystem; @@ -71,6 +72,7 @@ static void dnodeAllocModules() { if (tsEnableMqttModule) { dnodeSetModuleStatus(TSDB_MOD_MQTT); } +#endif tsModule[TSDB_MOD_MONITOR].enable = (tsEnableMonitorModule == 1); tsModule[TSDB_MOD_MONITOR].name = "monitor"; diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c index 5daf616706..4a1d337824 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -154,15 +154,15 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char rpcMsg.contLen = sizeof(SDMAuthMsg); rpcMsg.msgType = TSDB_MSG_TYPE_DM_AUTH; - dDebug("user:%s, send auth msg to mnode", user); + dDebug("user:%s, send auth msg to mnodes", user); SRpcMsg rpcRsp = {0}; dnodeSendMsgToDnodeRecv(&rpcMsg, &rpcRsp); if (rpcRsp.code != 0) { - dError("user:%s, auth msg received from mnode, error:%s", user, tstrerror(rpcRsp.code)); + dError("user:%s, auth msg received from mnodes, error:%s", user, tstrerror(rpcRsp.code)); } else { SDMAuthRsp *pRsp = 
rpcRsp.pCont; - dDebug("user:%s, auth msg received from mnode", user); + dDebug("user:%s, auth msg received from mnodes", user); memcpy(secret, pRsp->secret, TSDB_KEY_LEN); memcpy(ckey, pRsp->ckey, TSDB_KEY_LEN); *spi = pRsp->spi; diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index cd25ddcc55..1a40f3b56d 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -272,6 +272,13 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); #define TSDB_SHOW_SQL_LEN 64 #define TSDB_SLOW_QUERY_SQL_LEN 512 +#define TSDB_MQTT_HOSTNAME_LEN 64 +#define TSDB_MQTT_PORT_LEN 8 +#define TSDB_MQTT_USER_LEN 24 +#define TSDB_MQTT_PASS_LEN 24 +#define TSDB_MQTT_TOPIC_LEN 64 +#define TSDB_MQTT_CLIENT_ID_LEN 32 + #define TSDB_METER_STATE_OFFLINE 0 #define TSDB_METER_STATE_ONLLINE 1 diff --git a/src/inc/tmqtt.h b/src/inc/tmqtt.h index 401aac16c6..256e61fbae 100644 --- a/src/inc/tmqtt.h +++ b/src/inc/tmqtt.h @@ -19,11 +19,11 @@ #ifdef __cplusplus extern "C" { #endif -#include + int32_t mqttInitSystem(); int32_t mqttStartSystem(); -void mqttStopSystem(); -void mqttCleanUpSystem(); +void mqttStopSystem(); +void mqttCleanUpSystem(); #ifdef __cplusplus } diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index c8df17d6ae..277dc45f8e 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -786,7 +786,7 @@ void read_history() { } void write_history() { - char f_history[128]; + char f_history[TSDB_FILENAME_LEN]; get_history_path(f_history); FILE *f = fopen(f_history, "w"); diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 69bab44985..6f5ea33d79 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -409,7 +409,7 @@ void set_terminal_mode() { } } -void get_history_path(char *history) { sprintf(history, "%s/%s", getenv("HOME"), HISTORY_FILE); } +void get_history_path(char *history) { snprintf(history, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), HISTORY_FILE); } void clearScreen(int ecmd_pos, int cursor_pos) { struct winsize w; diff --git a/src/mnode/src/mnodeAcct.c b/src/mnode/src/mnodeAcct.c index b0a12979cd..e161940a2b 100644 --- a/src/mnode/src/mnodeAcct.c +++ b/src/mnode/src/mnodeAcct.c @@ -211,8 +211,8 @@ static int32_t mnodeCreateRootAcct() { strcpy(pAcct->user, TSDB_DEFAULT_USER); taosEncryptPass((uint8_t *)TSDB_DEFAULT_PASS, strlen(TSDB_DEFAULT_PASS), pAcct->pass); pAcct->cfg = (SAcctCfg){ - .maxUsers = 10, - .maxDbs = 64, + .maxUsers = 128, + .maxDbs = 128, .maxTimeSeries = INT32_MAX, .maxConnections = 1024, .maxStreams = 1000, diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index cdea9eda60..54c049d242 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -242,6 +242,7 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) { return TSDB_CODE_MND_INVALID_DB_OPTION; } +#if 0 if (pCfg->daysToKeep2 < TSDB_MIN_KEEP || pCfg->daysToKeep2 > pCfg->daysToKeep) { mError("invalid db option daysToKeep2:%d valid range: [%d, %d]", pCfg->daysToKeep, TSDB_MIN_KEEP, pCfg->daysToKeep); return TSDB_CODE_MND_INVALID_DB_OPTION; @@ -251,6 +252,7 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) { mError("invalid db option daysToKeep1:%d valid range: [%d, %d]", pCfg->daysToKeep1, TSDB_MIN_KEEP, pCfg->daysToKeep2); return TSDB_CODE_MND_INVALID_DB_OPTION; } +#endif if (pCfg->maxRowsPerFileBlock < TSDB_MIN_MAX_ROW_FBLOCK || pCfg->maxRowsPerFileBlock > TSDB_MAX_MAX_ROW_FBLOCK) { mError("invalid db option maxRowsPerFileBlock:%d valid range: [%d, %d]", pCfg->maxRowsPerFileBlock, @@ 
-310,6 +312,13 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) { return TSDB_CODE_MND_INVALID_DB_OPTION; } +#ifndef _SYNC + if (pCfg->replications != 1) { + mError("invalid db option replications:%d can only be 1 in this version", pCfg->replications); + return TSDB_CODE_MND_INVALID_DB_OPTION; + } +#endif + return TSDB_CODE_SUCCESS; } diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index d5b9d18e37..ac8730b0cc 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -72,12 +72,16 @@ static int32_t mnodeDnodeActionInsert(SSdbOper *pOper) { pDnode->lastAccess = tsAccessSquence; } + mInfo("dnode:%d, fqdn:%s ep:%s port:%d, do insert action", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort); return TSDB_CODE_SUCCESS; } static int32_t mnodeDnodeActionDelete(SSdbOper *pOper) { SDnodeObj *pDnode = pOper->pObj; +#ifndef _SYNC + mnodeDropAllDnodeVgroups(pDnode); +#endif mnodeDropMnodeLocal(pDnode->dnodeId); balanceAsyncNotify(); @@ -585,7 +589,11 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) { mInfo("dnode:%d, start to drop it", pDnode->dnodeId); +#ifndef _SYNC + int32_t code = mnodeDropDnode(pDnode, pMsg); +#else int32_t code = balanceDropDnode(pDnode); +#endif mnodeDecDnodeRef(pDnode); return code; } @@ -1043,3 +1051,59 @@ static char* mnodeGetDnodeAlternativeRoleStr(int32_t alternativeRole) { } } +#ifndef _SYNC + +int32_t balanceInit() { return TSDB_CODE_SUCCESS; } +void balanceCleanUp() {} +void balanceAsyncNotify() {} +void balanceSyncNotify() {} +void balanceReset() {} +int32_t balanceAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId) { return TSDB_CODE_SYN_NOT_ENABLED; } + +char* syncRole[] = { + "offline", + "unsynced", + "syncing", + "slave", + "master" +}; + +int32_t balanceAllocVnodes(SVgObj *pVgroup) { + void * pIter = NULL; + SDnodeObj *pDnode = NULL; + SDnodeObj *pSelDnode = NULL; + float vnodeUsage = 1000.0; + + while (1) { + pIter = mnodeGetNextDnode(pIter, &pDnode); + if (pDnode == NULL) break; + + if (pDnode->numOfCores > 0 && pDnode->openVnodes < TSDB_MAX_VNODES) { + float openVnodes = pDnode->openVnodes; + if (pDnode->isMgmt) openVnodes += tsMnodeEqualVnodeNum; + + float usage = openVnodes / pDnode->numOfCores; + if (usage <= vnodeUsage) { + pSelDnode = pDnode; + vnodeUsage = usage; + } + } + mnodeDecDnodeRef(pDnode); + } + + sdbFreeIter(pIter); + + if (pSelDnode == NULL) { + mError("failed to alloc vnode to vgroup"); + return TSDB_CODE_MND_NO_ENOUGH_DNODES; + } + + pVgroup->vnodeGid[0].dnodeId = pSelDnode->dnodeId; + pVgroup->vnodeGid[0].pDnode = pSelDnode; + + mDebug("dnode:%d, alloc one vnode to vgroup, openVnodes:%d", pSelDnode->dnodeId, pSelDnode->openVnodes); + return TSDB_CODE_SUCCESS; +} + +#endif + diff --git a/src/mnode/src/mnodeMnode.c b/src/mnode/src/mnodeMnode.c index c762403525..bff84d8041 100644 --- a/src/mnode/src/mnodeMnode.c +++ b/src/mnode/src/mnodeMnode.c @@ -68,6 +68,7 @@ static int32_t mnodeMnodeActionInsert(SSdbOper *pOper) { pDnode->isMgmt = true; mnodeDecDnodeRef(pDnode); + mInfo("mnode:%d, fqdn:%s ep:%s port:%d, do insert action", pMnode->mnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort); return TSDB_CODE_SUCCESS; } diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 06f992c26a..26f38dde03 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -98,8 +98,10 @@ SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) { .connId = connId, .stime = taosGetTimestampMs() }; + 
tstrncpy(connObj.user, user, sizeof(connObj.user)); - + connObj.lastAccess = connObj.stime; + SConnObj *pConn = taosCachePut(tsMnodeConnCache, &connId, sizeof(int32_t), &connObj, sizeof(connObj), CONN_KEEP_TIME * 1000); mDebug("connId:%d, is created, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port); @@ -244,6 +246,7 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + if (pConnObj->lastAccess < pConnObj->stime) pConnObj->lastAccess = pConnObj->stime; *(int64_t *)pWrite = pConnObj->lastAccess; cols++; diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index e3ed7daf8c..8928a6622d 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -612,8 +612,8 @@ static int sdbWrite(void *param, void *data, int type) { } else if (action == SDB_ACTION_DELETE) { void *pRow = sdbGetRowMeta(pTable, pHead->cont); if (pRow == NULL) { - sdbError("table:%s, failed to get object:%s from wal while dispose delete action", pTable->tableName, - pHead->cont); + sdbDebug("table:%s, object:%s not exist in hash, ignore delete action", pTable->tableName, + sdbGetKeyStr(pTable, pHead->cont)); return TSDB_CODE_SUCCESS; } SSdbOper oper = {.table = pTable, .pObj = pRow}; @@ -621,8 +621,8 @@ static int sdbWrite(void *param, void *data, int type) { } else if (action == SDB_ACTION_UPDATE) { void *pRow = sdbGetRowMeta(pTable, pHead->cont); if (pRow == NULL) { - sdbError("table:%s, failed to get object:%s from wal while dispose update action", pTable->tableName, - pHead->cont); + sdbDebug("table:%s, object:%s not exist in hash, ignore update action", pTable->tableName, + sdbGetKeyStr(pTable, pHead->cont)); return TSDB_CODE_SUCCESS; } SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable}; diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 03b1399ea7..4400927e9b 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1711,14 +1711,20 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { mnodeDestroyChildTable(pTable); return TSDB_CODE_MND_INVALID_TABLE_NAME; } - + pTable->suid = pMsg->pSTable->uid; - pTable->uid = (((uint64_t)pTable->vgId) << 40) + ((((uint64_t)pTable->sid) & ((1ul << 24) - 1ul)) << 16) + - (sdbGetVersion() & ((1ul << 16) - 1ul)); + pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->sid) & ((1ul << 24) - 1ul)) << 24) + + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); pTable->superTable = pMsg->pSTable; } else { - pTable->uid = (((uint64_t)pTable->vgId) << 40) + ((((uint64_t)pTable->sid) & ((1ul << 24) - 1ul)) << 16) + - (sdbGetVersion() & ((1ul << 16) - 1ul)); + if (pTable->info.type == TSDB_SUPER_TABLE) { + int64_t us = taosGetTimestampUs(); + pTable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); + } else { + pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->sid) & ((1ul << 24) - 1ul)) << 24) + + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); + } + pTable->sversion = 0; pTable->numOfColumns = htons(pCreate->numOfColumns); pTable->sqlLen = htons(pCreate->sqlLen); diff --git a/src/mnode/src/mnodeUser.c b/src/mnode/src/mnodeUser.c index f4cb1a9ef3..c03ff688d2 100644 --- a/src/mnode/src/mnodeUser.c +++ b/src/mnode/src/mnodeUser.c @@ -581,7 +581,7 @@ void mnodeDropAllUsers(SAcctObj *pAcct) { int32_t 
mnodeRetriveAuth(char *user, char *spi, char *encrypt, char *secret, char *ckey) { if (!sdbIsMaster()) { *secret = 0; - mDebug("user:%s, failed to auth user, reason:%s", user, tstrerror(TSDB_CODE_APP_NOT_READY)); + mDebug("user:%s, failed to auth user, mnode is not master", user); return TSDB_CODE_APP_NOT_READY; } diff --git a/src/os/inc/osDir.h b/src/os/inc/osDir.h index e7dc04fd15..17683743e3 100644 --- a/src/os/inc/osDir.h +++ b/src/os/inc/osDir.h @@ -24,6 +24,7 @@ extern "C" { void taosRemoveDir(char *rootDir); int taosMkDir(const char *pathname, mode_t mode); void taosRename(char* oldName, char *newName); +void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays); #ifdef __cplusplus } diff --git a/src/os/inc/osWindows.h b/src/os/inc/osWindows.h index 0dbc3aac9c..d4f3d6d2af 100644 --- a/src/os/inc/osWindows.h +++ b/src/os/inc/osWindows.h @@ -39,6 +39,7 @@ #include #include #include +#include #include "msvcProcess.h" #include "msvcDirect.h" #include "msvcFcntl.h" @@ -58,8 +59,6 @@ extern "C" { int32_t BUILDIN_CTZL(uint64_t val); int32_t BUILDIN_CTZ(uint32_t val); -#define TAOS_OS_FUNC_DIR - #define TAOS_OS_FUNC_FILE #define TAOS_OS_FUNC_FILE_ISREG #define TAOS_OS_FUNC_FILE_ISDIR @@ -179,9 +178,9 @@ int gettimeofday(struct timeval *ptv, void *pTimeZone); #endif #ifdef _MSC_VER -#if _MSC_VER >= 1900 +//#if _MSC_VER >= 1900 #define TAOS_OS_FUNC_SOCKET_INET -#endif +//#endif #endif #define SHUT_RDWR SD_BOTH diff --git a/src/os/src/detail/osDir.c b/src/os/src/detail/osDir.c index 7a537cdfea..93651c78ef 100644 --- a/src/os/src/detail/osDir.c +++ b/src/os/src/detail/osDir.c @@ -18,8 +18,6 @@ #include "tglobal.h" #include "tulog.h" -#ifndef TAOS_OS_FUNC_DIR - void taosRemoveDir(char *rootDir) { DIR *dir = opendir(rootDir); if (dir == NULL) return; @@ -51,18 +49,54 @@ int taosMkDir(const char *path, mode_t mode) { } void taosRename(char* oldName, char *newName) { - if (0 == tsEnableVnodeBak) { - uInfo("vnode backup not enabled"); - return; - } - // if newName in not empty, rename return fail. 
// the newName must be empty or does not exist if (rename(oldName, newName)) { - uError("%s is modify to %s fail, reason:%s", oldName, newName, strerror(errno)); + uError("failed to rename file %s to %s, reason:%s", oldName, newName, strerror(errno)); } else { - uInfo("%s is modify to %s success!", oldName, newName); + uInfo("successfully to rename file %s to %s", oldName, newName); } } -#endif +void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays) { + DIR *dir = opendir(rootDir); + if (dir == NULL) return; + + int64_t sec = taosGetTimestampSec(); + struct dirent *de = NULL; + + while ((de = readdir(dir)) != NULL) { + if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue; + + char filename[1024]; + snprintf(filename, 1023, "%s/%s", rootDir, de->d_name); + if (de->d_type & DT_DIR) { + continue; + } else { + // struct stat fState; + // if (stat(fname, &fState) < 0) { + // continue; + // } + int32_t len = (int32_t)strlen(filename); + int64_t fileSec = 0; + for (int i = len - 1; i >= 0; i--) { + if (filename[i] == '.') { + fileSec = atoll(filename + i + 1); + break; + } + } + + if (fileSec <= 100) continue; + int32_t days = (int32_t)(ABS(sec - fileSec) / 86400 + 1); + if (days > keepDays) { + (void)remove(filename); + uInfo("file:%s is removed, days:%d keepDays:%d", filename, days, keepDays); + } else { + uTrace("file:%s won't be removed, days:%d keepDays:%d", filename, days, keepDays); + } + } + } + + closedir(dir); + rmdir(rootDir); +} diff --git a/src/os/src/windows/wDir.c b/src/os/src/windows/wDir.c deleted file mode 100644 index c486cd0d40..0000000000 --- a/src/os/src/windows/wDir.c +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#define _DEFAULT_SOURCE -#include "os.h" -#include "tulog.h" - -void taosRemoveDir(char *rootDir) { - uError("%s not implemented yet", __FUNCTION__); -} - -int taosMkDir(const char *path, mode_t mode) { - uError("%s not implemented yet", __FUNCTION__); - return 0; -} - -void taosMvDir(char* destDir, char *srcDir) { - uError("%s not implemented yet", __FUNCTION__); -} diff --git a/src/os/src/windows/wSocket.c b/src/os/src/windows/wSocket.c index 8fd198ba80..da9242d6a3 100644 --- a/src/os/src/windows/wSocket.c +++ b/src/os/src/windows/wSocket.c @@ -61,6 +61,10 @@ int taosSetSockOpt(SOCKET socketfd, int level, int optname, void *optval, int op return 0; } + if (level == SOL_TCP && optname == TCP_KEEPCNT) { + return 0; + } + return setsockopt(socketfd, level, optname, optval, optlen); } diff --git a/src/os/src/windows/wString.c b/src/os/src/windows/wString.c index 0d9a28e288..1fb235a005 100644 --- a/src/os/src/windows/wString.c +++ b/src/os/src/windows/wString.c @@ -58,11 +58,20 @@ char *strsep(char **stringp, const char *delim) { char *getpass(const char *prefix) { static char passwd[TSDB_KEY_LEN] = {0}; - + memset(passwd, 0, TSDB_KEY_LEN); printf("%s", prefix); - scanf("%s", passwd); - char n = getchar(); + int32_t index = 0; + char ch; + while (index < TSDB_KEY_LEN) { + ch = getch(); + if (ch == '\n' || ch == '\r') { + break; + } else { + passwd[index++] = ch; + } + } + return passwd; } @@ -131,11 +140,11 @@ int tasoUcs4Compare(void *f1_ucs4, void *f2_ucs4, int bytes) { } -/* Copy memory to memory until the specified number of bytes -has been copied, return pointer to following byte. -Overlap is NOT handled correctly. */ -void *mempcpy(void *dest, const void *src, size_t len) { - return (char*)memcpy(dest, src, len) + len; +/* Copy memory to memory until the specified number of bytes +has been copied, return pointer to following byte. +Overlap is NOT handled correctly. */ +void *mempcpy(void *dest, const void *src, size_t len) { + return (char*)memcpy(dest, src, len) + len; } /* Copy SRC to DEST, returning the address of the terminating '\0' in DEST. 
*/ diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index 2bc6bf54bf..742235894a 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -3,4 +3,6 @@ PROJECT(TDengine) ADD_SUBDIRECTORY(monitor) ADD_SUBDIRECTORY(http) -ADD_SUBDIRECTORY(mqtt) +IF (TD_MQTT) + ADD_SUBDIRECTORY(mqtt) +ENDIF () \ No newline at end of file diff --git a/src/plugins/http/CMakeLists.txt b/src/plugins/http/CMakeLists.txt index 2c3cbf636f..5d8b52986a 100644 --- a/src/plugins/http/CMakeLists.txt +++ b/src/plugins/http/CMakeLists.txt @@ -11,11 +11,12 @@ AUX_SOURCE_DIRECTORY(src SRC) IF (TD_LINUX) ADD_LIBRARY(http ${SRC}) + TARGET_LINK_LIBRARIES(http z) IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(http taos_static z) + TARGET_LINK_LIBRARIES(http taos_static) ELSE () - TARGET_LINK_LIBRARIES(http taos z) + TARGET_LINK_LIBRARIES(http taos) ENDIF () IF (TD_ADMIN) diff --git a/src/plugins/monitor/CMakeLists.txt b/src/plugins/monitor/CMakeLists.txt index 26a7775e9c..edea2187ea 100644 --- a/src/plugins/monitor/CMakeLists.txt +++ b/src/plugins/monitor/CMakeLists.txt @@ -2,11 +2,11 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) AUX_SOURCE_DIRECTORY(./src SRC) - + IF (TD_LINUX) - INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) - INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) ADD_LIBRARY(monitor ${SRC}) IF (TD_SOMODE_STATIC) diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c index d76bb4bd82..048f839b72 100644 --- a/src/plugins/monitor/src/monitorMain.c +++ b/src/plugins/monitor/src/monitorMain.c @@ -25,6 +25,7 @@ #include "tsclient.h" #include "dnode.h" #include "monitor.h" +#include "taoserror.h" #define monitorFatal(...) { if (monitorDebugFlag & DEBUG_FATAL) { taosPrintLog("MON FATAL ", 255, __VA_ARGS__); }} #define monitorError(...) { if (monitorDebugFlag & DEBUG_ERROR) { taosPrintLog("MON ERROR ", 255, __VA_ARGS__); }} @@ -33,129 +34,159 @@ #define monitorDebug(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLog("MON ", monitorDebugFlag, __VA_ARGS__); }} #define monitorTrace(...) 
{ if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLog("MON ", monitorDebugFlag, __VA_ARGS__); }} -#define SQL_LENGTH 1024 +#define SQL_LENGTH 1030 #define LOG_LEN_STR 100 #define IP_LEN_STR TSDB_EP_LEN #define CHECK_INTERVAL 1000 typedef enum { - MONITOR_CMD_CREATE_DB, - MONITOR_CMD_CREATE_TB_LOG, - MONITOR_CMD_CREATE_MT_DN, - MONITOR_CMD_CREATE_MT_ACCT, - MONITOR_CMD_CREATE_TB_DN, - MONITOR_CMD_CREATE_TB_ACCT_ROOT, - MONITOR_CMD_CREATE_TB_SLOWQUERY, - MONITOR_CMD_MAX + MON_CMD_CREATE_DB, + MON_CMD_CREATE_TB_LOG, + MON_CMD_CREATE_MT_DN, + MON_CMD_CREATE_MT_ACCT, + MON_CMD_CREATE_TB_DN, + MON_CMD_CREATE_TB_ACCT_ROOT, + MON_CMD_CREATE_TB_SLOWQUERY, + MON_CMD_MAX } EMonitorCommand; typedef enum { - MONITOR_STATE_UN_INIT, - MONITOR_STATE_INITIALIZING, - MONITOR_STATE_INITIALIZED, - MONITOR_STATE_STOPPED + MON_STATE_NOT_INIT, + MON_STATE_INITED } EMonitorState; typedef struct { - void * conn; - void * timer; - char ep[TSDB_EP_LEN]; - int8_t cmdIndex; - int8_t state; - char sql[SQL_LENGTH + 1]; - void * initTimer; - void * diskTimer; + pthread_t thread; + void * conn; + char ep[TSDB_EP_LEN]; + int8_t cmdIndex; + int8_t state; + int8_t start; // enable/disable by mnode + int8_t quiting; // taosd is quiting + char sql[SQL_LENGTH + 1]; } SMonitorConn; -static SMonitorConn tsMonitorConn; -static void monitorInitConn(void *para, void *unused); -static void monitorInitConnCb(void *param, TAOS_RES *result, int32_t code); -static void monitorInitDatabase(); -static void monitorInitDatabaseCb(void *param, TAOS_RES *result, int32_t code); -static void monitorStartTimer(); -static void monitorSaveSystemInfo(); +static SMonitorConn tsMonitor = {0}; +static void monitorSaveSystemInfo(); +static void *monitorThreadFunc(void *param); +static void monitorBuildMonitorSql(char *sql, int32_t cmd); extern int32_t (*monitorStartSystemFp)(); -extern void (*monitorStopSystemFp)(); -extern void (*monitorExecuteSQLFp)(char *sql); - -static void monitorCheckDiskUsage(void *para, void *unused) { - taosGetDisk(); - taosTmrReset(monitorCheckDiskUsage, CHECK_INTERVAL, NULL, tscTmr, &tsMonitorConn.diskTimer); -} +extern void (*monitorStopSystemFp)(); +extern void (*monitorExecuteSQLFp)(char *sql); int32_t monitorInitSystem() { - taos_init(); - taosTmrReset(monitorCheckDiskUsage, CHECK_INTERVAL, NULL, tscTmr, &tsMonitorConn.diskTimer); + if (tsMonitor.ep[0] == 0) { + strcpy(tsMonitor.ep, tsLocalEp); + } + + int len = strlen(tsMonitor.ep); + for (int i = 0; i < len; ++i) { + if (tsMonitor.ep[i] == ':' || tsMonitor.ep[i] == '-' || tsMonitor.ep[i] == '.') { + tsMonitor.ep[i] = '_'; + } + } + + pthread_attr_t thAttr; + pthread_attr_init(&thAttr); + pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&tsMonitor.thread, &thAttr, monitorThreadFunc, NULL)) { + monitorError("failed to create thread to for monitor module, reason:%s", strerror(errno)); + return -1; + } + + pthread_attr_destroy(&thAttr); + monitorDebug("monitor thread is launched"); + monitorStartSystemFp = monitorStartSystem; monitorStopSystemFp = monitorStopSystem; return 0; } int32_t monitorStartSystem() { - monitorInfo("start monitor module"); - monitorInitSystem(); - taosTmrReset(monitorInitConn, 10, NULL, tscTmr, &tsMonitorConn.initTimer); + taos_init(); + tsMonitor.start = 1; + monitorExecuteSQLFp = monitorExecuteSQL; + monitorInfo("monitor module start"); return 0; } -static void monitorStartSystemRetry() { - if (tsMonitorConn.initTimer != NULL) { - taosTmrReset(monitorInitConn, 3000, NULL, tscTmr, &tsMonitorConn.initTimer); - } -} 
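`monitorInitSystem` above rewrites ':', '-', and '.' in the local endpoint to underscores and launches an explicitly joinable monitor thread that `monitorCleanUpSystem` later joins. A small self-contained sketch of those two steps; the endpoint value and the empty `monitorLoop` body are placeholders, not the module's real state:
```
#include <pthread.h>
#include <stdio.h>
#include <string.h>

// Sketch of the endpoint sanitisation done in monitorInitSystem above:
// ':' '-' and '.' are rewritten to '_' before the EP is reused by the module.
static void sanitizeEp(char *ep) {
  for (size_t i = 0, len = strlen(ep); i < len; ++i) {
    if (ep[i] == ':' || ep[i] == '-' || ep[i] == '.') ep[i] = '_';
  }
}

static void *monitorLoop(void *arg) {
  // placeholder for the real 1-second polling loop
  return NULL;
}

int main() {
  char ep[64] = "td-server.local:6030";
  sanitizeEp(ep);
  printf("%s\n", ep);  // prints: td_server_local_6030

  // The module creates an explicitly joinable thread so cleanup can pthread_join it.
  pthread_t      tid;
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
  if (pthread_create(&tid, &attr, monitorLoop, NULL) != 0) {
    perror("pthread_create");
    return 1;
  }
  pthread_attr_destroy(&attr);
  pthread_join(tid, NULL);
  return 0;
}
```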
+static void *monitorThreadFunc(void *param) { + monitorDebug("starting to initialize monitor module ..."); -static void monitorInitConn(void *para, void *unused) { - if (dnodeGetDnodeId() <= 0) { - monitorStartSystemRetry(); - return; - } - - monitorInfo("starting to initialize monitor service .."); - tsMonitorConn.state = MONITOR_STATE_INITIALIZING; + while (1) { + static int32_t accessTimes = 0; + accessTimes++; + taosMsleep(1000); - if (tsMonitorConn.ep[0] == 0) - strcpy(tsMonitorConn.ep, tsLocalEp); + if (tsMonitor.quiting) { + tsMonitor.state = MON_STATE_NOT_INIT; + monitorInfo("monitor thread will quit, for taosd is quiting"); + break; + } else { + taosGetDisk(); + } - int len = strlen(tsMonitorConn.ep); - for (int i = 0; i < len; ++i) { - if (tsMonitorConn.ep[i] == ':' || tsMonitorConn.ep[i] == '-') { - tsMonitorConn.ep[i] = '_'; + if (tsMonitor.start == 0) { + continue; + } + + if (dnodeGetDnodeId() <= 0) { + monitorDebug("dnode not initialized, waiting for 3000 ms to start monitor module"); + continue; + } + + if (tsMonitor.conn == NULL) { + tsMonitor.state = MON_STATE_NOT_INIT; + tsMonitor.conn = taos_connect(NULL, "monitor", tsInternalPass, "", 0); + if (tsMonitor.conn == NULL) { + monitorError("failed to connect to database, reason:%s", tstrerror(terrno)); + continue; + } else { + monitorDebug("connect to database success"); + } + } + + if (tsMonitor.state == MON_STATE_NOT_INIT) { + for (; tsMonitor.cmdIndex < MON_CMD_MAX; ++tsMonitor.cmdIndex) { + monitorBuildMonitorSql(tsMonitor.sql, tsMonitor.cmdIndex); + void *res = taos_query(tsMonitor.conn, tsMonitor.sql); + int code = taos_errno(res); + taos_free_result(res); + + if (code != 0) { + monitorError("failed to exec sql:%s, reason:%s", tsMonitor.sql, tstrerror(code)); + break; + } else { + monitorDebug("successfully to exec sql:%s", tsMonitor.sql); + } + } + + if (tsMonitor.start) { + tsMonitor.state = MON_STATE_INITED; + } + } + + if (tsMonitor.state == MON_STATE_INITED) { + if (accessTimes % tsMonitorInterval == 0) { + monitorSaveSystemInfo(); + } } } - if (tsMonitorConn.conn == NULL) { - taos_connect_a(NULL, "monitor", tsInternalPass, "", 0, monitorInitConnCb, &tsMonitorConn, &(tsMonitorConn.conn)); - } else { - monitorInitDatabase(); - } + monitorInfo("monitor thread is stopped"); + return NULL; } -static void monitorInitConnCb(void *param, TAOS_RES *result, int32_t code) { - // free it firstly in any cases. 
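The rewritten monitor thread above drops the async callbacks and uses the blocking client calls `taos_query` / `taos_errno` / `taos_free_result`. A minimal sketch of that execute-and-check pattern, assuming a locally running taosd with the default `root`/`taosdata` account and using `taos_errstr` instead of `tstrerror` so the sketch stays self-contained:
```
#include <stdio.h>
#include <taos.h>

// Run one statement synchronously and report failures, mirroring the loop above.
static int execSql(TAOS *conn, const char *sql) {
  TAOS_RES *res = taos_query(conn, sql);
  int code = taos_errno(res);
  if (code != 0) {
    fprintf(stderr, "failed to exec sql:%s, reason:%s\n", sql, taos_errstr(res));
  }
  taos_free_result(res);
  return code;
}

int main() {
  taos_init();
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) return 1;

  // Same shape as the MON_CMD_CREATE_DB statement built further down
  // (the database name here is illustrative).
  execSql(conn, "create database if not exists log precision 'us'");

  taos_close(conn);
  return 0;
}
```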
- taos_free_result(result); - - if (code != TSDB_CODE_SUCCESS) { - monitorError("monitor:%p, connect to database failed, reason:%s", tsMonitorConn.conn, tstrerror(code)); - taos_close(tsMonitorConn.conn); - tsMonitorConn.conn = NULL; - tsMonitorConn.state = MONITOR_STATE_UN_INIT; - monitorStartSystemRetry(); - return; - } - - monitorDebug("monitor:%p, connect to database success, reason:%s", tsMonitorConn.conn, tstrerror(code)); - monitorInitDatabase(); -} - -static void dnodeBuildMonitorSql(char *sql, int32_t cmd) { +static void monitorBuildMonitorSql(char *sql, int32_t cmd) { memset(sql, 0, SQL_LENGTH); - if (cmd == MONITOR_CMD_CREATE_DB) { + if (cmd == MON_CMD_CREATE_DB) { snprintf(sql, SQL_LENGTH, "create database if not exists %s replica 1 days 10 keep 30 cache %d " - "blocks %d maxtables 16 precision 'us'", + "blocks %d precision 'us'", tsMonitorDbName, TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MIN_TOTAL_BLOCKS); - } else if (cmd == MONITOR_CMD_CREATE_MT_DN) { + } else if (cmd == MON_CMD_CREATE_MT_DN) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.dn(ts timestamp" ", cpu_taosd float, cpu_system float, cpu_cores int" @@ -166,10 +197,10 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) { ", req_http int, req_select int, req_insert int" ") tags (dnodeid int, fqdn binary(%d))", tsMonitorDbName, TSDB_FQDN_LEN); - } else if (cmd == MONITOR_CMD_CREATE_TB_DN) { + } else if (cmd == MON_CMD_CREATE_TB_DN) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.dn%d using %s.dn tags(%d, '%s')", tsMonitorDbName, dnodeGetDnodeId(), tsMonitorDbName, dnodeGetDnodeId(), tsLocalEp); - } else if (cmd == MONITOR_CMD_CREATE_MT_ACCT) { + } else if (cmd == MON_CMD_CREATE_MT_ACCT) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.acct(ts timestamp " ", currentPointsPerSecond bigint, maxPointsPerSecond bigint" @@ -185,15 +216,15 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) { ", accessState smallint" ") tags (acctId binary(%d))", tsMonitorDbName, TSDB_USER_LEN); - } else if (cmd == MONITOR_CMD_CREATE_TB_ACCT_ROOT) { + } else if (cmd == MON_CMD_CREATE_TB_ACCT_ROOT) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.acct_%s using %s.acct tags('%s')", tsMonitorDbName, TSDB_DEFAULT_USER, tsMonitorDbName, TSDB_DEFAULT_USER); - } else if (cmd == MONITOR_CMD_CREATE_TB_SLOWQUERY) { + } else if (cmd == MON_CMD_CREATE_TB_SLOWQUERY) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.slowquery(ts timestamp, username " "binary(%d), created_time timestamp, time bigint, sql binary(%d))", tsMonitorDbName, TSDB_TABLE_FNAME_LEN - 1, TSDB_SLOW_QUERY_SQL_LEN); - } else if (cmd == MONITOR_CMD_CREATE_TB_LOG) { + } else if (cmd == MON_CMD_CREATE_TB_LOG) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.log(ts timestamp, level tinyint, " "content binary(%d), ipaddr binary(%d))", @@ -203,75 +234,22 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) { sql[SQL_LENGTH] = 0; } -static void monitorInitDatabase() { - if (tsMonitorConn.cmdIndex < MONITOR_CMD_MAX) { - dnodeBuildMonitorSql(tsMonitorConn.sql, tsMonitorConn.cmdIndex); - taos_query_a(tsMonitorConn.conn, tsMonitorConn.sql, monitorInitDatabaseCb, NULL); - } else { - tsMonitorConn.state = MONITOR_STATE_INITIALIZED; - monitorExecuteSQLFp = monitorExecuteSQL; - monitorInfo("monitor service init success"); - - monitorStartTimer(); - } -} - -static void monitorInitDatabaseCb(void *param, TAOS_RES *result, int32_t code) { - if (code == TSDB_CODE_MND_TABLE_ALREADY_EXIST || code == TSDB_CODE_MND_DB_ALREADY_EXIST || 
code >= 0) { - monitorDebug("monitor:%p, sql success, reason:%s, %s", tsMonitorConn.conn, tstrerror(code), tsMonitorConn.sql); - if (tsMonitorConn.cmdIndex == MONITOR_CMD_CREATE_TB_LOG) { - monitorInfo("dnode:%s is started", tsLocalEp); - } - tsMonitorConn.cmdIndex++; - monitorInitDatabase(); - } else { - monitorError("monitor:%p, sql failed, reason:%s, %s", tsMonitorConn.conn, tstrerror(code), tsMonitorConn.sql); - tsMonitorConn.state = MONITOR_STATE_UN_INIT; - monitorStartSystemRetry(); - } - - taos_free_result(result); -} - void monitorStopSystem() { - if (tsMonitorConn.state == MONITOR_STATE_STOPPED) return; - tsMonitorConn.state = MONITOR_STATE_STOPPED; + tsMonitor.start = 0; + tsMonitor.state = MON_STATE_NOT_INIT; monitorExecuteSQLFp = NULL; - - monitorInfo("monitor module is stopped"); - - if (tsMonitorConn.initTimer != NULL) { - taosTmrStopA(&(tsMonitorConn.initTimer)); - } - if (tsMonitorConn.timer != NULL) { - taosTmrStopA(&(tsMonitorConn.timer)); - } - if (tsMonitorConn.conn != NULL) { - taos_close(tsMonitorConn.conn); - tsMonitorConn.conn = NULL; - } + monitorInfo("monitor module stopped"); } void monitorCleanUpSystem() { + tsMonitor.quiting = 1; monitorStopSystem(); - monitorInfo("monitor module cleanup"); -} - -static void monitorStartTimer() { - taosTmrReset(monitorSaveSystemInfo, tsMonitorInterval * 1000, NULL, tscTmr, &tsMonitorConn.timer); -} - -static void dnodeMontiorLogCallback(void *param, TAOS_RES *result, int32_t code) { - int32_t c = taos_errno(result); - - if (c != TSDB_CODE_SUCCESS) { - monitorError("monitor:%p, save %s failed, reason:%s", tsMonitorConn.conn, (char *)param, tstrerror(c)); - } else { - int32_t rows = taos_affected_rows(result); - monitorDebug("monitor:%p, save %s succ, rows:%d", tsMonitorConn.conn, (char *)param, rows); + pthread_join(tsMonitor.thread, NULL); + if (tsMonitor.conn != NULL) { + taos_close(tsMonitor.conn); + tsMonitor.conn = NULL; } - - taos_free_result(result); + monitorInfo("monitor module is cleaned up"); } // unit is MB @@ -279,13 +257,13 @@ static int32_t monitorBuildMemorySql(char *sql) { float sysMemoryUsedMB = 0; bool suc = taosGetSysMemory(&sysMemoryUsedMB); if (!suc) { - monitorError("monitor:%p, get sys memory info failed.", tsMonitorConn.conn); + monitorDebug("failed to get sys memory info"); } float procMemoryUsedMB = 0; suc = taosGetProcMemory(&procMemoryUsedMB); if (!suc) { - monitorError("monitor:%p, get proc memory info failed.", tsMonitorConn.conn); + monitorDebug("failed to get proc memory info"); } return sprintf(sql, ", %f, %f, %d", procMemoryUsedMB, sysMemoryUsedMB, tsTotalMemoryMB); @@ -296,11 +274,11 @@ static int32_t monitorBuildCpuSql(char *sql) { float sysCpuUsage = 0, procCpuUsage = 0; bool suc = taosGetCpuUsage(&sysCpuUsage, &procCpuUsage); if (!suc) { - monitorError("monitor:%p, get cpu usage failed.", tsMonitorConn.conn); + monitorDebug("failed to get cpu usage"); } if (sysCpuUsage <= procCpuUsage) { - sysCpuUsage = procCpuUsage + (float)0.1; + sysCpuUsage = procCpuUsage + 0.1f; } return sprintf(sql, ", %f, %f, %d", procCpuUsage, sysCpuUsage, tsNumOfCores); @@ -316,14 +294,14 @@ static int32_t monitorBuildBandSql(char *sql) { float bandSpeedKb = 0; bool suc = taosGetBandSpeed(&bandSpeedKb); if (!suc) { - monitorError("monitor:%p, get bandwidth speed failed.", tsMonitorConn.conn); + monitorDebug("failed to get bandwidth speed"); } return sprintf(sql, ", %f", bandSpeedKb); } static int32_t monitorBuildReqSql(char *sql) { - SDnodeStatisInfo info = dnodeGetStatisInfo(); + SDnodeStatisInfo info = 
dnodeGetStatisInfo(); return sprintf(sql, ", %d, %d, %d)", info.httpReqNum, info.queryReqNum, info.submitReqNum); } @@ -331,20 +309,15 @@ static int32_t monitorBuildIoSql(char *sql) { float readKB = 0, writeKB = 0; bool suc = taosGetProcIO(&readKB, &writeKB); if (!suc) { - monitorError("monitor:%p, get io info failed.", tsMonitorConn.conn); + monitorDebug("failed to get io info"); } return sprintf(sql, ", %f, %f", readKB, writeKB); } static void monitorSaveSystemInfo() { - if (tsMonitorConn.state != MONITOR_STATE_INITIALIZED) { - monitorStartTimer(); - return; - } - int64_t ts = taosGetTimestampUs(); - char * sql = tsMonitorConn.sql; + char * sql = tsMonitor.sql; int32_t pos = snprintf(sql, SQL_LENGTH, "insert into %s.dn%d values(%" PRId64, tsMonitorDbName, dnodeGetDnodeId(), ts); pos += monitorBuildCpuSql(sql + pos); @@ -354,16 +327,31 @@ static void monitorSaveSystemInfo() { pos += monitorBuildIoSql(sql + pos); pos += monitorBuildReqSql(sql + pos); - monitorDebug("monitor:%p, save system info, sql:%s", tsMonitorConn.conn, sql); - taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "sys"); + void *res = taos_query(tsMonitor.conn, tsMonitor.sql); + int code = taos_errno(res); + taos_free_result(res); - if (tsMonitorConn.timer != NULL && tsMonitorConn.state != MONITOR_STATE_STOPPED) { - monitorStartTimer(); + if (code != 0) { + monitorError("failed to save system info, reason:%s, sql:%s", tstrerror(code), tsMonitor.sql); + } else { + monitorDebug("successfully to save system info, sql:%s", tsMonitor.sql); } } +static void montiorExecSqlCb(void *param, TAOS_RES *result, int32_t code) { + int32_t c = taos_errno(result); + if (c != TSDB_CODE_SUCCESS) { + monitorError("save %s failed, reason:%s", (char *)param, tstrerror(c)); + } else { + int32_t rows = taos_affected_rows(result); + monitorDebug("save %s succ, rows:%d", (char *)param, rows); + } + + taos_free_result(result); +} + void monitorSaveAcctLog(SAcctMonitorObj *pMon) { - if (tsMonitorConn.state != MONITOR_STATE_INITIALIZED) return; + if (tsMonitor.state != MON_STATE_INITED) return; char sql[1024] = {0}; sprintf(sql, @@ -392,19 +380,16 @@ void monitorSaveAcctLog(SAcctMonitorObj *pMon) { pMon->totalConns, pMon->maxConns, pMon->accessState); - monitorDebug("monitor:%p, save account info, sql %s", tsMonitorConn.conn, sql); - taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "account"); + monitorDebug("save account info, sql:%s", sql); + taos_query_a(tsMonitor.conn, sql, montiorExecSqlCb, "account info"); } void monitorSaveLog(int32_t level, const char *const format, ...) { - if (tsMonitorConn.state != MONITOR_STATE_INITIALIZED) return; + if (tsMonitor.state != MON_STATE_INITED) return; va_list argpointer; char sql[SQL_LENGTH] = {0}; int32_t max_length = SQL_LENGTH - 30; - - if (tsMonitorConn.state != MONITOR_STATE_INITIALIZED) return; - int32_t len = snprintf(sql, (size_t)max_length, "insert into %s.log values(%" PRId64 ", %d,'", tsMonitorDbName, taosGetTimestampUs(), level); @@ -416,12 +401,13 @@ void monitorSaveLog(int32_t level, const char *const format, ...) 
{ len += sprintf(sql + len, "', '%s')", tsLocalEp); sql[len++] = 0; - monitorDebug("monitor:%p, save log, sql: %s", tsMonitorConn.conn, sql); - taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "log"); + monitorDebug("save log, sql: %s", sql); + taos_query_a(tsMonitor.conn, sql, montiorExecSqlCb, "log"); } void monitorExecuteSQL(char *sql) { - if (tsMonitorConn.state != MONITOR_STATE_INITIALIZED) return; - monitorDebug("monitor:%p, execute sql: %s", tsMonitorConn.conn, sql); - taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "sql"); + if (tsMonitor.state != MON_STATE_INITED) return; + + monitorDebug("execute sql:%s", sql); + taos_query_a(tsMonitor.conn, sql, montiorExecSqlCb, "sql"); } diff --git a/src/plugins/mqtt/CMakeLists.txt b/src/plugins/mqtt/CMakeLists.txt index 2467af588c..cf5729d608 100644 --- a/src/plugins/mqtt/CMakeLists.txt +++ b/src/plugins/mqtt/CMakeLists.txt @@ -2,21 +2,19 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MQTT-C/include) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MQTT-C/examples/templates) AUX_SOURCE_DIRECTORY(src SRC) IF (TD_LINUX) - INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) - INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) - INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MQTT-C/include) - INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MQTT-C/examples/templates) ADD_LIBRARY(mqtt ${SRC}) + TARGET_LINK_LIBRARIES(mqtt cJson mqttc) IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(mqtt taos_static cJson mqttc) + TARGET_LINK_LIBRARIES(mqtt taos_static) ELSE () - TARGET_LINK_LIBRARIES(mqtt taos cJson mqttc) - ENDIF () - IF (TD_ADMIN) - TARGET_LINK_LIBRARIES(mqtt admin cJson) + TARGET_LINK_LIBRARIES(mqtt taos) ENDIF () ENDIF () diff --git a/src/plugins/mqtt/inc/mqttInit.h b/src/plugins/mqtt/inc/mqttInit.h index 5dbd62789b..81a9a39a2c 100644 --- a/src/plugins/mqtt/inc/mqttInit.h +++ b/src/plugins/mqtt/inc/mqttInit.h @@ -23,11 +23,12 @@ extern "C" { * @file * A simple subscriber program that performs automatic reconnections. */ -#include -#include -#include #include "mqtt.h" -#include "taos.h" + +#define QOS 1 +#define TIMEOUT 10000L +#define MQTT_SEND_BUF_SIZE 102400 +#define MQTT_RECV_BUF_SIZE 102400 /** * @brief A structure that I will use to keep track of some data needed @@ -36,18 +37,12 @@ extern "C" { * An instance of this struct will be created in my \c main(). Then, whenever * \ref mqttReconnectClient is called, this instance will be passed. */ -struct reconnect_state_t { - char* hostname; - char* port; - char* topic; - char* client_id; - char* user_name; - char* password; +typedef struct SMqttReconnectState { uint8_t* sendbuf; size_t sendbufsz; uint8_t* recvbuf; size_t recvbufsz; -}; +} SMqttReconnectState; /** * @brief My reconnect callback. It will reestablish the connection whenever @@ -58,7 +53,7 @@ void mqttReconnectClient(struct mqtt_client* client, void** reconnect_state_vptr /** * @brief The function will be called whenever a PUBLISH message is received. */ -void mqtt_PublishCallback(void** unused, struct mqtt_response_publish* published); +void mqttPublishCallback(void** unused, struct mqtt_response_publish* published); /** * @brief The client's refresher. 
This function triggers back-end routines to @@ -73,12 +68,7 @@ void* mqttClientRefresher(void* client); /** * @brief Safelty closes the \p sockfd and cancels the \p client_daemon before \c exit. */ - -void mqttCleanup(int status, int sockfd, pthread_t* client_daemon); -void mqttInitConnCb(void* param, TAOS_RES* result, int32_t code); -void mqttQueryInsertCallback(void* param, TAOS_RES* result, int32_t code); -#define QOS 1 -#define TIMEOUT 10000L +void mqttCleanupRes(int status, int sockfd, pthread_t* client_daemon); #ifdef __cplusplus } diff --git a/src/plugins/mqtt/inc/mqttPayload.h b/src/plugins/mqtt/inc/mqttPayload.h index b7e7abbd96..12a714afac 100644 --- a/src/plugins/mqtt/inc/mqttPayload.h +++ b/src/plugins/mqtt/inc/mqttPayload.h @@ -15,11 +15,13 @@ #ifndef TDENGINE_MQTT_PLYLOAD_H #define TDENGINE_MQTT_PLYLOAD_H + #ifdef __cplusplus extern "C" { #endif -char split(char str[], char delims[], char** p_p_cmd_part, int max); -char* converJsonToSql(char* json, char* _dbname, char* _tablename); + +char* mqttConverJsonToSql(char* json, int maxSize); + #ifdef __cplusplus } #endif diff --git a/src/plugins/mqtt/inc/mqttSystem.h b/src/plugins/mqtt/inc/mqttSystem.h deleted file mode 100644 index a79fac33b5..0000000000 --- a/src/plugins/mqtt/inc/mqttSystem.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#ifndef TDENGINE_MQTT_SYSTEM_H -#define TDENGINE_MQTT_SYSTEM_H -#ifdef __cplusplus -extern "C" { -#endif -#include -int32_t mqttInitSystem(); -int32_t mqttStartSystem(); -void mqttStopSystem(); -void mqttCleanUpSystem(); -#ifdef __cplusplus -} -#endif - -#endif diff --git a/src/plugins/mqtt/src/mqttPayload.c b/src/plugins/mqtt/src/mqttPayload.c index 96c8e71edd..1af8b02fad 100644 --- a/src/plugins/mqtt/src/mqttPayload.c +++ b/src/plugins/mqtt/src/mqttPayload.c @@ -14,52 +14,146 @@ */ #define _DEFAULT_SOURCE -#include "mqttPayload.h" -#include "cJSON.h" -#include "string.h" -#include "taos.h" -#include "mqttLog.h" #include "os.h" -char split(char str[], char delims[], char** p_p_cmd_part, int max) { - char* token = strtok(str, delims); - char part_index = 0; - char** tmp_part = p_p_cmd_part; - while (token) { - *tmp_part++ = token; - token = strtok(NULL, delims); - part_index++; - if (part_index >= max) break; - } - return part_index; -} +#include "cJSON.h" +#include "mqttLog.h" +#include "mqttPayload.h" -char* converJsonToSql(char* json, char* _dbname, char* _tablename) { - cJSON* jPlayload = cJSON_Parse(json); - char _names[102400] = {0}; - char _values[102400] = {0}; - int i = 0; - int count = cJSON_GetArraySize(jPlayload); - for (; i < count; i++) - { - cJSON* item = cJSON_GetArrayItem(jPlayload, i); - if (cJSON_Object == item->type) { - mqttInfo("The item '%s' is not supported", item->string); - } else { - strcat(_names, item->string); - if (i < count - 1) { - strcat(_names, ","); - } - char* __value_json = cJSON_Print(item); - strcat(_values, __value_json); - free(__value_json); - if (i < count - 1) { - strcat(_values, ","); - } +// subscribe message like this + +/* +/test { + "timestamp": 1599121290, + "gateway": { + "name": "AcuLink 810 Gateway", + "model": "AcuLink810-868", + "serial": "S8P20200207" + }, + "device": { + "name": "Acuvim L V3 .221", + "model": "Acuvim-L-V3", + "serial": "221", + "online": true, + "readings": [ + { + "param": "Freq_Hz", + "value": "59.977539", + "unit": "Hz" + }, + { + "param": "Va_V", + "value": "122.002907", + "unit": "V" + }, + { + "param": "DI4", + "value": "5.000000", + "unit": "" + } + ] } +} +*/ + +// send msg cmd +// mosquitto_pub -h test.mosquitto.org -t "/test" -m '{"timestamp": 1599121290,"gateway": {"name": "AcuLink 810 Gateway","model": "AcuLink810-868","serial": "S8P20200207"},"device": {"name": "Acuvim L V3 .221","model": "Acuvim-L-V3","serial": "221","online": true,"readings": [{"param": "Freq_Hz","value": "59.977539","unit": "Hz"},{"param": "Va_V","value": "122.002907","unit": "V"},{"param": "DI4","value": "5.000000","unit": ""}]}}' + +/* + * This is an example, this function needs to be implemented in order to parse the json file into a sql statement + * Note that you need to create a super table and database before writing data + * In this case: + * create database mqttdb; + * create table mqttdb.devices(ts timestamp, value bigint) tags(name binary(32), model binary(32), serial binary(16), param binary(16), unit binary(16)); + */ + +char* mqttConverJsonToSql(char* json, int maxSize) { + // const int32_t maxSize = 10240; + maxSize *= 5; + char* sql = malloc(maxSize); + + cJSON* root = cJSON_Parse(json); + if (root == NULL) { + mqttError("failed to parse msg, invalid json format"); + goto MQTT_PARSE_OVER; } - cJSON_free(jPlayload); - int sqllen = strlen(_names) + strlen(_values) + strlen(_dbname) + strlen(_tablename) + 1024; - char* _sql = calloc(1, sqllen); - sprintf(_sql, "INSERT INTO %s.%s (%s) VALUES(%s);", _dbname, 
_tablename, _names, _values); - return _sql; + + cJSON* timestamp = cJSON_GetObjectItem(root, "timestamp"); + if (!timestamp || timestamp->type != cJSON_Number) { + mqttError("failed to parse msg, timestamp not found"); + goto MQTT_PARSE_OVER; + } + + cJSON* device = cJSON_GetObjectItem(root, "device"); + if (!device) { + mqttError("failed to parse msg, device not found"); + goto MQTT_PARSE_OVER; + } + + cJSON* name = cJSON_GetObjectItem(device, "name"); + if (!name || name->type != cJSON_String) { + mqttError("failed to parse msg, name not found"); + goto MQTT_PARSE_OVER; + } + + cJSON* model = cJSON_GetObjectItem(device, "model"); + if (!model || model->type != cJSON_String) { + mqttError("failed to parse msg, model not found"); + goto MQTT_PARSE_OVER; + } + + cJSON* serial = cJSON_GetObjectItem(device, "serial"); + if (!serial || serial->type != cJSON_String) { + mqttError("failed to parse msg, serial not found"); + goto MQTT_PARSE_OVER; + } + + cJSON* readings = cJSON_GetObjectItem(device, "readings"); + if (!readings || readings->type != cJSON_Array) { + mqttError("failed to parse msg, readings not found"); + goto MQTT_PARSE_OVER; + } + + int count = cJSON_GetArraySize(readings); + if (count <= 0) { + mqttError("failed to parse msg, readings size smaller than 0"); + goto MQTT_PARSE_OVER; + } + + int len = snprintf(sql, maxSize, "insert into"); + + for (int i = 0; i < count; ++i) { + cJSON* reading = cJSON_GetArrayItem(readings, i); + if (reading == NULL) continue; + + cJSON* param = cJSON_GetObjectItem(reading, "param"); + if (!param || param->type != cJSON_String) { + mqttError("failed to parse msg, param not found"); + goto MQTT_PARSE_OVER; + } + + cJSON* value = cJSON_GetObjectItem(reading, "value"); + if (!value || value->type != cJSON_String) { + mqttError("failed to parse msg, value not found"); + goto MQTT_PARSE_OVER; + } + + cJSON* unit = cJSON_GetObjectItem(reading, "unit"); + if (!unit || unit->type != cJSON_String) { + mqttError("failed to parse msg, unit not found"); + goto MQTT_PARSE_OVER; + } + + len += snprintf(sql + len, maxSize - len, + " mqttdb.serial_%s_%s using mqttdb.devices tags('%s', '%s', '%s', '%s', '%s') values(%" PRId64 ", %s)", + serial->valuestring, param->valuestring, name->valuestring, model->valuestring, serial->valuestring, + param->valuestring, unit->valuestring, timestamp->valueint * 1000, value->valuestring); + } + + cJSON_free(root); + return sql; + +MQTT_PARSE_OVER: + cJSON_free(root); + free(sql); + return NULL; } \ No newline at end of file diff --git a/src/plugins/mqtt/src/mqttSystem.c b/src/plugins/mqtt/src/mqttSystem.c index 69810e2785..eacc3b1f74 100644 --- a/src/plugins/mqtt/src/mqttSystem.c +++ b/src/plugins/mqtt/src/mqttSystem.c @@ -14,244 +14,131 @@ */ #define _DEFAULT_SOURCE - -#include "cJSON.h" +#include "os.h" #include "mqtt.h" #include "mqttInit.h" #include "mqttLog.h" #include "mqttPayload.h" -#include "os.h" +#include "tmqtt.h" #include "posix_sockets.h" -#include "string.h" #include "taos.h" #include "tglobal.h" -#include "tmqtt.h" -#include "tsclient.h" -#include "tsocket.h" -#include "ttimer.h" -#include "mqttSystem.h" -struct mqtt_client mqttClient = {0}; -pthread_t clientDaemonThread = {0}; -void* mqttConnect=NULL; -struct reconnect_state_t recntStatus = {0}; -char* topicPath=NULL; -int mttIsRuning = 1; +#include "taoserror.h" -int32_t mqttInitSystem() { - int rc = 0; -#if 0 - uint8_t sendbuf[2048]; - uint8_t recvbuf[1024]; - recntStatus.sendbuf = sendbuf; - recntStatus.sendbufsz = sizeof(sendbuf); - recntStatus.recvbuf = 
recvbuf; - recntStatus.recvbufsz = sizeof(recvbuf); - char* url = tsMqttBrokerAddress; - recntStatus.user_name = strstr(url, "@") != NULL ? strbetween(url, "//", ":") : NULL; - - char * passStr = strstr(url, recntStatus.user_name); - if (passStr != NULL) { - recntStatus.password = strstr(url, "@") != NULL ? strbetween(passStr, ":", "@") : NULL; - } +struct SMqttReconnectState tsMqttStatus = {0}; +struct mqtt_client tsMqttClient = {0}; +static pthread_t tsMqttClientDaemonThread = {0}; +static void* tsMqttConnect = NULL; +static bool tsMqttIsRuning = false; - if (strlen(url) == 0) { - mqttDebug("mqtt module not init, url is null"); - return rc; - } - - if (strstr(url, "@") != NULL) { - recntStatus.hostname = strbetween(url, "@", ":"); - } else if (strstr(strstr(url, "://") + 3, ":") != NULL) { - recntStatus.hostname = strbetween(url, "//", ":"); - - } else { - recntStatus.hostname = strbetween(url, "//", "/"); - } - - char* _begin_hostname = strstr(url, recntStatus.hostname); - if (_begin_hostname != NULL && strstr(_begin_hostname, ":") != NULL) { - recntStatus.port = strbetween(_begin_hostname, ":", "/"); - } else { - recntStatus.port = strbetween("'1883'", "'", "'"); - } - - char* portStr = recntStatus.hostname; - if (_begin_hostname != NULL) { - char* colonStr = strstr(_begin_hostname, ":"); - if (colonStr != NULL) { - portStr = recntStatus.port; - } - } - - char* topicStr = strstr(url, portStr); - if (topicStr != NULL) { - topicPath = strbetween(topicStr, "/", "/"); - char* _topic = "+/+/+/"; - int _tpsize = strlen(topicPath) + strlen(_topic) + 1; - recntStatus.topic = calloc(1, _tpsize); - sprintf(recntStatus.topic, "/%s/%s", topicPath, _topic); - recntStatus.client_id = strlen(tsMqttBrokerClientId) < 3 ? tsMqttBrokerClientId : "taos_mqtt"; - mqttConnect = NULL; - } else { - topicPath = NULL; - } - -#endif - return rc; -} +int32_t mqttInitSystem() { return 0; } int32_t mqttStartSystem() { - int rc = 0; -#if 0 - if (recntStatus.user_name != NULL && recntStatus.password != NULL) { - mqttInfo("connecting to mqtt://%s:%s@%s:%s/%s/", recntStatus.user_name, recntStatus.password, - recntStatus.hostname, recntStatus.port, topicPath); - } else if (recntStatus.user_name != NULL && recntStatus.password == NULL) { - mqttInfo("connecting to mqtt://%s@%s:%s/%s/", recntStatus.user_name, recntStatus.hostname, recntStatus.port, - topicPath); + tsMqttStatus.sendbufsz = MQTT_SEND_BUF_SIZE; + tsMqttStatus.recvbufsz = MQTT_RECV_BUF_SIZE; + tsMqttStatus.sendbuf = malloc(MQTT_SEND_BUF_SIZE); + tsMqttStatus.recvbuf = malloc(MQTT_RECV_BUF_SIZE); + tsMqttIsRuning = true; + + mqtt_init_reconnect(&tsMqttClient, mqttReconnectClient, &tsMqttStatus, mqttPublishCallback); + if (pthread_create(&tsMqttClientDaemonThread, NULL, mqttClientRefresher, &tsMqttClient)) { + mqttError("mqtt failed to start daemon."); + mqttCleanupRes(EXIT_FAILURE, -1, NULL); + return -1; } - mqtt_init_reconnect(&mqttClient, mqttReconnectClient, &recntStatus, mqtt_PublishCallback); - if (pthread_create(&clientDaemonThread, NULL, mqttClientRefresher, &mqttClient)) { - mqttError("Failed to start client daemon."); - mqttCleanup(EXIT_FAILURE, -1, NULL); - rc = -1; - } else { - mqttInfo("listening for '%s' messages.", recntStatus.topic); - } -#endif - return rc; + mqttInfo("mqtt listening for topic:%s messages", tsMqttTopic); + return 0; } void mqttStopSystem() { -#if 0 - mqttClient.error = MQTT_ERROR_SOCKET_ERROR; - mttIsRuning = 0; - usleep(300000U); - mqttCleanup(EXIT_SUCCESS, mqttClient.socketfd, &clientDaemonThread); - mqttInfo("mqtt is 
stoped"); -#endif + if (tsMqttIsRuning) { + tsMqttIsRuning = false; + tsMqttClient.error = MQTT_ERROR_SOCKET_ERROR; + + taosMsleep(300); + mqttCleanupRes(EXIT_SUCCESS, tsMqttClient.socketfd, &tsMqttClientDaemonThread); + + mqttInfo("mqtt is stopped"); + } } void mqttCleanUpSystem() { -#if 0 - mqttInfo("starting to cleanup mqtt"); - free(recntStatus.user_name); - free(recntStatus.password); - free(recntStatus.hostname); - free(recntStatus.port); - free(recntStatus.topic); - free(topicPath); + mqttStopSystem(); mqttInfo("mqtt is cleaned up"); -#endif } -void mqtt_PublishCallback(void** unused, struct mqtt_response_publish* published) { - /* note that published->topic_name is NOT null-terminated (here we'll change it to a c-string) */ - char* topic_name = (char*)malloc(published->topic_name_size + 1); - memcpy(topic_name, published->topic_name, published->topic_name_size); - topic_name[published->topic_name_size] = '\0'; - mqttInfo("received publish('%s'): %s", topic_name, (const char*)published->application_message); - char _token[128] = {0}; - char _dbname[128] = {0}; - char _tablename[128] = {0}; - if (mqttConnect == NULL) { - mqttInfo("connect database"); - taos_connect_a(NULL, "_root", tsInternalPass, "", 0, mqttInitConnCb, &mqttClient, &mqttConnect); - } - if (topic_name[1]=='/' && strncmp((char*)&topic_name[1], topicPath, strlen(topicPath)) == 0) { - char* p_p_cmd_part[5] = {0}; - char copystr[1024] = {0}; - strncpy(copystr, topic_name, MIN(1024, published->topic_name_size)); - char part_index = split(copystr, "/", p_p_cmd_part, 10); - if (part_index < 4) { - mqttError("The topic %s is't format '/path/token/dbname/table name/'. for expmle: '/taos/token/db/t'", topic_name); - } else { - strncpy(_token, p_p_cmd_part[1], 127); - strncpy(_dbname, p_p_cmd_part[2], 127); - strncpy(_tablename, p_p_cmd_part[3], 127); - mqttInfo("part count=%d,access token:%s,database name:%s, table name:%s", part_index, _token, _dbname, - _tablename); +void mqttPublishCallback(void** unused, struct mqtt_response_publish* published) { + const char* content = published->application_message; + mqttDebug("receive mqtt message, size:%d", (int)published->application_message_size); - if (mqttConnect != NULL) { - char* _sql = converJsonToSql((char*)published->application_message, _dbname, _tablename); - mqttInfo("query:%s", _sql); - taos_query_a(mqttConnect, _sql, mqttQueryInsertCallback, &mqttClient); - mqttInfo("free sql:%s", _sql); - free(_sql); - } + if (tsMqttConnect == NULL) { + tsMqttConnect = taos_connect(NULL, "_root", tsInternalPass, "", 0); + if (tsMqttConnect == NULL) { + mqttError("failed to connect to tdengine, reason:%s", tstrerror(terrno)); + return; + } else { + mqttInfo("successfully connected to the tdengine"); } } - free(topic_name); + + mqttTrace("receive mqtt message, content:%s", content); + + char* sql = mqttConverJsonToSql((char*)content, (int)published->application_message_size); + if (sql != NULL) { + void* res = taos_query(tsMqttConnect, sql); + int code = taos_errno(res); + if (code != 0) { + mqttError("failed to exec sql, reason:%s sql:%s", tstrerror(code), sql); + } else { + mqttTrace("successfully to exec sql:%s", sql); + } + taos_free_result(res); + } else { + mqttError("failed to parse mqtt message"); + } } void* mqttClientRefresher(void* client) { - while (mttIsRuning) { + while (tsMqttIsRuning) { mqtt_sync((struct mqtt_client*)client); taosMsleep(100); } - mqttDebug("quit refresher"); + + mqttDebug("mqtt quit refresher"); return NULL; } -void mqttCleanup(int status, int sockfd, 
pthread_t* client_daemon) { -#if 0 +void mqttCleanupRes(int status, int sockfd, pthread_t* client_daemon) { mqttInfo("clean up mqtt module"); - if (sockfd != -1) close(sockfd); - if (client_daemon != NULL) pthread_cancel(*client_daemon); -#endif -} - -void mqttInitConnCb(void* param, TAOS_RES* result, int32_t code) { - if (code < 0) { - mqttError("mqtt:%d, connect to database failed, reason:%s", code, tstrerror(code)); - taos_close(mqttConnect); - mqttConnect = NULL; - return; + if (sockfd != -1) { + close(sockfd); } - mqttDebug("mqtt:%d, connect to database success, reason:%s", code, tstrerror(code)); -} -void mqttQueryInsertCallback(void* param, TAOS_RES* result, int32_t code) { - if (code < 0) { - mqttError("mqtt:%d, save data failed, code:%s", code, tstrerror(code)); - } else if (code == 0) { - mqttError("mqtt:%d, save data failed, affect rows:%d", code, code); - } else { - mqttInfo("mqtt:%d, save data success, code:%s", code, tstrerror(code)); + if (client_daemon != NULL) { + pthread_cancel(*client_daemon); } } -void mqttReconnectClient(struct mqtt_client* client, void** reconnect_state_vptr) { - mqttInfo("reconnect client"); - struct reconnect_state_t* reconnect_state = *((struct reconnect_state_t**)reconnect_state_vptr); +void mqttReconnectClient(struct mqtt_client* client, void** unused) { + mqttInfo("mqtt tries to connect to the mqtt server"); - /* Close the clients socket if this isn't the initial reconnect call */ if (client->error != MQTT_ERROR_INITIAL_RECONNECT) { close(client->socketfd); } - /* Perform error handling here. */ if (client->error != MQTT_ERROR_INITIAL_RECONNECT) { - mqttError("mqttReconnectClient: called while client was in error state \"%s\"", mqtt_error_str(client->error)); + mqttError("mqtt client was in error state %s", mqtt_error_str(client->error)); } - /* Open a new socket. */ - int sockfd = open_nb_socket(reconnect_state->hostname, reconnect_state->port); - if (sockfd == -1) { - mqttError("failed to open socket: "); - mqttCleanup(EXIT_FAILURE, sockfd, NULL); + int sockfd = open_nb_socket(tsMqttHostName, tsMqttPort); + if (sockfd < 0) { + mqttError("mqtt client failed to open socket %s:%s", tsMqttHostName, tsMqttPort); + //mqttCleanupRes(EXIT_FAILURE, sockfd, NULL); + return; } - /* Reinitialize the client. */ - mqtt_reinit(client, sockfd, reconnect_state->sendbuf, reconnect_state->sendbufsz, reconnect_state->recvbuf, - reconnect_state->recvbufsz); - - /* Ensure we have a clean session */ - uint8_t connect_flags = MQTT_CONNECT_CLEAN_SESSION; - /* Send connection request to the broker. */ - mqtt_connect(client, reconnect_state->client_id, NULL, NULL, 0, reconnect_state->user_name, reconnect_state->password,connect_flags, 400); - - /* Subscribe to the topic. 
*/ - mqtt_subscribe(client, reconnect_state->topic, 0); + mqtt_reinit(client, sockfd, tsMqttStatus.sendbuf, tsMqttStatus.sendbufsz, tsMqttStatus.recvbuf, tsMqttStatus.recvbufsz); + mqtt_connect(client, tsMqttClientId, NULL, NULL, 0, tsMqttUser, tsMqttPass, MQTT_CONNECT_CLEAN_SESSION, 400); + mqtt_subscribe(client, tsMqttTopic, 0); } \ No newline at end of file diff --git a/src/query/CMakeLists.txt b/src/query/CMakeLists.txt index e2bee4285f..c1024f0080 100644 --- a/src/query/CMakeLists.txt +++ b/src/query/CMakeLists.txt @@ -8,10 +8,9 @@ INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(query ${SRC}) SET_SOURCE_FILES_PROPERTIES(src/sql.c PROPERTIES COMPILE_FLAGS -w) +TARGET_LINK_LIBRARIES(query tsdb tutil) IF (TD_LINUX) - TARGET_LINK_LIBRARIES(query tsdb tutil m rt) + TARGET_LINK_LIBRARIES(query m rt) ADD_SUBDIRECTORY(tests) -ELSEIF (TD_WINDOWS) - TARGET_LINK_LIBRARIES(query tsdb tutil) ENDIF () diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 3aecd127ef..f59bf62ec5 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -709,21 +709,21 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) { } if (terrno != 0) { - taosFreeId(pRpc->idPool, sid); // sid shall be released + taosFreeId(pRpc->idPool, sid); // sid shall be released pConn = NULL; } } - } + } if (pConn) { if (pRecv->connType == RPC_CONN_UDPS && pRpc->numOfThreads > 1) { // UDP server, assign to new connection - pRpc->index = (pRpc->index+1) % pRpc->numOfThreads; + pRpc->index = (pRpc->index + 1) % pRpc->numOfThreads; pConn->localPort = (pRpc->localPort + pRpc->index); } - + taosHashPut(pRpc->hash, hashstr, size, (char *)&pConn, POINTER_BYTES); - tDebug("%s %p server connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid); + tDebug("%s %p server connection is allocated, uid:0x%x sid:%d key:%s", pRpc->label, pConn, pConn->linkUid, sid, hashstr); } return pConn; diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index b92ef4cf26..da40160bc1 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -797,9 +797,11 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) { } taosTmrStopA(&pPeer->timer); - if (tsSyncNum >= tsMaxSyncNum) { + + // Ensure the sync of mnode not interrupted + if (pNode->vgId != 1 && tsSyncNum >= tsMaxSyncNum) { sInfo("%s, %d syncs are in process, try later", pPeer->id, tsSyncNum); - taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId*10)%200, pPeer, syncTmrCtrl, &pPeer->timer); + taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId * 10) % 200, pPeer, syncTmrCtrl, &pPeer->timer); return; } diff --git a/src/tsdb/CMakeLists.txt b/src/tsdb/CMakeLists.txt index cef1d0bba7..d86b104f23 100644 --- a/src/tsdb/CMakeLists.txt +++ b/src/tsdb/CMakeLists.txt @@ -3,13 +3,10 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) +ADD_LIBRARY(tsdb ${SRC}) +TARGET_LINK_LIBRARIES(tsdb common tutil) IF (TD_LINUX) - ADD_LIBRARY(tsdb ${SRC}) - TARGET_LINK_LIBRARIES(tsdb common tutil) # Someone has no gtest directory, so comment it # ADD_SUBDIRECTORY(tests) -ELSEIF (TD_WINDOWS) - ADD_LIBRARY(tsdb ${SRC}) - TARGET_LINK_LIBRARIES(tsdb common tutil) ENDIF () diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index 51c3a625e8..626ad77da2 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -26,11 +26,13 @@ const char *tsdbFileSuffix[] = {".head", ".data", ".last", ".stat", ".h", ".d", ".l", ".s"}; -static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type); 
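The `tsdbGetCurrMinKey` / `tsdbGetCurrMinFid` helpers declared just below (and defined at the end of tsdbFile.c) compute the oldest timestamp still inside the `keep` window and the corresponding minimum file id; file groups below that id are dropped when the file handle is opened or when room is needed for a new group. A rough sketch of that arithmetic, assuming millisecond precision and that `TSDB_KEY_FILEID` reduces to dividing the key by the per-file time span (the real macro may round differently):
```
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef int64_t TSKEY;

static const int64_t MS_PER_DAY = 86400000LL;

// Oldest key still inside the retention window: now minus keep days.
static TSKEY currMinKey(int32_t keepDays) {
  TSKEY nowMs = (TSKEY)time(NULL) * 1000;  // millisecond precision assumed
  return nowMs - (TSKEY)keepDays * MS_PER_DAY;
}

// File id of that key; groups with a smaller id are out of retention.
static int currMinFid(int32_t keepDays, int32_t daysPerFile) {
  return (int)(currMinKey(keepDays) / (daysPerFile * MS_PER_DAY));
}

int main() {
  int mfid = currMinFid(3650, 10);  // keep 3650 days, 10 days per file
  printf("file groups with fid < %d are out of retention\n", mfid);
  return 0;
}
```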
-static void tsdbDestroyFile(SFile *pFile); -static int compFGroup(const void *arg1, const void *arg2); -static int keyFGroupCompFunc(const void *key, const void *fgroup); -static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo); +static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type); +static void tsdbDestroyFile(SFile *pFile); +static int compFGroup(const void *arg1, const void *arg2); +static int keyFGroupCompFunc(const void *key, const void *fgroup); +static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo); +static TSKEY tsdbGetCurrMinKey(int8_t precision, int32_t keep); +static int tsdbGetCurrMinFid(int8_t precision, int32_t keep, int32_t days); // ---------------- INTERNAL FUNCTIONS ---------------- STsdbFileH *tsdbNewFileH(STsdbCfg *pCfg) { @@ -79,9 +81,11 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { int vid = 0; regex_t regex1, regex2; int code = 0; + char fname[TSDB_FILENAME_LEN] = "\0"; SFileGroup fileGroup = {0}; STsdbFileH *pFileH = pRepo->tsdbFileH; + STsdbCfg * pCfg = &(pRepo->config); tDataDir = tsdbGetDataDirName(pRepo->rootDir); if (tDataDir == NULL) { @@ -108,6 +112,8 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { goto _err; } + int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile); + struct dirent *dp = NULL; while ((dp = readdir(dir)) != NULL) { if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) continue; @@ -120,6 +126,14 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { continue; } + if (fid < mfid) { + for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) { + tsdbGetDataFileName(pRepo->rootDir, pCfg->tsdbId, fid, type, fname); + (void)remove(fname); + } + continue; + } + if (tsdbSearchFGroup(pFileH, fid, TD_EQ) != NULL) continue; memset((void *)(&fileGroup), 0, sizeof(SFileGroup)); fileGroup.fileId = fid; @@ -128,12 +142,30 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { } else if (code == REG_NOMATCH) { code = regexec(®ex2, dp->d_name, 0, NULL, 0); if (code == 0) { - tsdbDebug("vgId:%d invalid file %s exists, remove it", REPO_ID(pRepo), dp->d_name); - char *fname = malloc(strlen(tDataDir) + strlen(dp->d_name) + 2); - if (fname == NULL) goto _err; - sprintf(fname, "%s/%s", tDataDir, dp->d_name); - (void)remove(fname); - free(fname); + size_t tsize = strlen(tDataDir) + strlen(dp->d_name) + 2; + char * fname1 = malloc(tsize); + if (fname1 == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _err; + } + sprintf(fname1, "%s/%s", tDataDir, dp->d_name); + + tsize = tsize + 64; + char *fname2 = malloc(tsize); + if (fname2 == NULL) { + free(fname1); + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _err; + } + sprintf(fname2, "%s/%s_back_%" PRId64, tDataDir, dp->d_name, taosGetTimestamp(TSDB_TIME_PRECISION_MILLI)); + + (void)rename(fname1, fname2); + + tsdbDebug("vgId:%d file %s exists, backup it as %s", REPO_ID(pRepo), fname1, fname2); + + free(fname1); + free(fname2); + continue; } else if (code == REG_NOMATCH) { tsdbError("vgId:%d invalid file %s exists, ignore it", REPO_ID(pRepo), dp->d_name); continue; @@ -146,6 +178,7 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { pFileH->pFGroup[pFileH->nFGroups++] = fileGroup; qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup); + tsdbDebug("vgId:%d file group %d is restored, nFGroups %d", REPO_ID(pRepo), fileGroup.fileId, pFileH->nFGroups); } regfree(®ex1); @@ -179,8 +212,18 @@ void tsdbCloseFileH(STsdbRepo *pRepo) { SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) { STsdbFileH *pFileH = pRepo->tsdbFileH; + STsdbCfg * 
pCfg = &(pRepo->config); - if (pFileH->nFGroups >= pFileH->maxFGroups) return NULL; + if (pFileH->nFGroups >= pFileH->maxFGroups) { + int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile); + if (pFileH->pFGroup[0].fileId < mfid) { + pthread_rwlock_wrlock(&pFileH->fhlock); + tsdbRemoveFileGroup(pRepo, &(pFileH->pFGroup[0])); + pthread_rwlock_unlock(&pFileH->fhlock); + } + } + + ASSERT(pFileH->nFGroups < pFileH->maxFGroups); SFileGroup fGroup; SFileGroup *pFGroup = &fGroup; @@ -342,8 +385,7 @@ void tsdbFitRetention(STsdbRepo *pRepo) { STsdbFileH *pFileH = pRepo->tsdbFileH; SFileGroup *pGroup = pFileH->pFGroup; - int mfid = (int)(TSDB_KEY_FILEID(taosGetTimestamp(pCfg->precision), pCfg->daysPerFile, pCfg->precision) - - TSDB_MAX_FILE(pCfg->keep, pCfg->daysPerFile)); + int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile); pthread_rwlock_wrlock(&(pFileH->fhlock)); @@ -547,3 +589,11 @@ static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo) { } } } + +static TSKEY tsdbGetCurrMinKey(int8_t precision, int32_t keep) { + return (TSKEY)(taosGetTimestamp(precision) - keep * tsMsPerDay[precision]); +} + +static int tsdbGetCurrMinFid(int8_t precision, int32_t keep, int32_t days) { + return (int)(TSDB_KEY_FILEID(tsdbGetCurrMinKey(precision, keep), days, precision)); +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index db24eae148..bb0f00ef53 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -767,7 +767,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa int32_t lsize = tsize; int32_t keyLen = 0; for (int ncol = 0; ncol < pDataCols->numOfCols; ncol++) { - if (tcol >= nColsNotAllNull) break; + if (ncol != 0 && tcol >= nColsNotAllNull) break; SDataCol *pDataCol = pDataCols->cols + ncol; SCompCol *pCompCol = pCompData->cols + tcol; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 897bbe3180..96096db5cd 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -1186,7 +1186,7 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl // the time window should always be ascending order: skey <= ekey cur->win = (STimeWindow) {.skey = tsArray[start], .ekey = tsArray[end]}; - cur->mixBlock = (start > 0 && end < pBlockInfo->rows - 1); + cur->mixBlock = (numOfRows != pBlockInfo->rows); cur->lastKey = tsArray[endPos] + step; cur->blockCompleted = true; @@ -1731,12 +1731,13 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle) { assert(pQueryHandle->window.skey == pQueryHandle->window.ekey); // starts from the buffer in case of descending timestamp order check data blocks - // todo consider the query time window, current last_row does not apply the query time window size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); int32_t i = 0; while(i < numOfTables) { STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i); + + // the first qualified table for interpolation query if (pQueryHandle->window.skey <= pCheckInfo->pTableObj->lastKey && pCheckInfo->pTableObj->lastKey != TSKEY_INITIAL_VAL) { break; diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index b2778b69d9..e63a085cc8 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -3,9 +3,10 @@ PROJECT(TDengine) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(tutil ${SRC}) +TARGET_LINK_LIBRARIES(tutil pthread osdetail lz4) IF (TD_LINUX) - TARGET_LINK_LIBRARIES(tutil 
pthread osdetail m rt lz4) + TARGET_LINK_LIBRARIES(tutil m rt) ADD_SUBDIRECTORY(tests) FIND_PATH(ICONV_INCLUDE_EXIST iconv.h /usr/include/ /usr/local/include/) @@ -24,7 +25,7 @@ IF (TD_LINUX) ENDIF () ELSEIF (TD_WINDOWS) - TARGET_LINK_LIBRARIES(tutil iconv regex pthread osdetail winmm IPHLPAPI ws2_32 lz4 wepoll) + TARGET_LINK_LIBRARIES(tutil iconv regex winmm IPHLPAPI ws2_32 wepoll) ELSEIF(TD_DARWIN) - TARGET_LINK_LIBRARIES(tutil iconv pthread osdetail lz4) + TARGET_LINK_LIBRARIES(tutil iconv) ENDIF() diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index 875c597008..0ec55841a0 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -270,7 +270,7 @@ void taosReadGlobalLogCfg() { } wordfree(&full_path); - taosReadLogOption("tsLogDir", tsLogDir); + taosReadLogOption("logDir", tsLogDir); sprintf(fileName, "%s/taos.cfg", configDir); fp = fopen(fileName, "r"); @@ -288,9 +288,9 @@ void taosReadGlobalLogCfg() { option = value = NULL; olen = vlen = 0; - taosGetline(&line, &len, fp); + taosGetline(&line, &len, fp); line[len - 1] = 0; - + paGetToken(line, &option, &olen); if (olen == 0) continue; option[olen] = 0; diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 766301914a..a8587de767 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -62,6 +62,7 @@ typedef struct { pthread_mutex_t logMutex; } SLogObj; +int32_t tsLogKeepDays = 0; int32_t tsAsyncLog = 1; float tsTotalLogDirGB = 0; float tsAvailLogDirGB = 0; @@ -78,6 +79,7 @@ static int32_t taosPushLogBuffer(SLogBuff *tLogBuff, char *msg, int32_t msgLen static SLogBuff *taosLogBuffNew(int32_t bufSize); static void taosCloseLogByFd(int32_t oldFd); static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum); +extern void taosPrintGlobalCfg(); static int32_t taosStartLog() { pthread_attr_t threadAttr; @@ -136,11 +138,24 @@ static void taosUnLockFile(int32_t fd) { } } +static void taosKeepOldLog(char *oldName) { + if (tsLogKeepDays <= 0) return; + + int64_t fileSec = taosGetTimestampSec(); + char fileName[LOG_FILE_NAME_LEN + 20]; + snprintf(fileName, LOG_FILE_NAME_LEN + 20, "%s.%" PRId64, tsLogObj.logName, fileSec); + + taosRename(oldName, fileName); + taosRemoveOldLogFiles(tsLogDir, tsLogKeepDays); +} + static void *taosThreadToOpenNewFile(void *param) { - char name[LOG_FILE_NAME_LEN + 20]; + char keepName[LOG_FILE_NAME_LEN + 20]; + sprintf(keepName, "%s.%d", tsLogObj.logName, tsLogObj.flag); tsLogObj.flag ^= 1; tsLogObj.lines = 0; + char name[LOG_FILE_NAME_LEN + 20]; sprintf(name, "%s.%d", tsLogObj.logName, tsLogObj.flag); umask(0); @@ -150,6 +165,7 @@ static void *taosThreadToOpenNewFile(void *param) { uError("open new log file fail! 
fd:%d reason:%s", fd, strerror(errno)); return NULL; } + taosLockFile(fd); (void)lseek(fd, 0, SEEK_SET); @@ -157,9 +173,13 @@ static void *taosThreadToOpenNewFile(void *param) { tsLogObj.logHandle->fd = fd; tsLogObj.lines = 0; tsLogObj.openInProgress = 0; - uInfo("new log file is opened!!!"); - taosCloseLogByFd(oldFd); + + uInfo(" new log file:%d is opened", tsLogObj.flag); + uInfo("=================================="); + taosPrintGlobalCfg(); + taosKeepOldLog(keepName); + return NULL; } @@ -264,20 +284,23 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { strcat(name, ".0"); } + if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { + strcpy(name, fn); + strcat(name, ".1"); + } + + bool log0Exist = stat(name, &logstat0) >= 0; + bool log1Exist = stat(name, &logstat1) >= 0; + // if none of the log files exist, open 0, if both exists, open the old one - if (stat(name, &logstat0) < 0) { + if (!log0Exist && !log1Exist) { tsLogObj.flag = 0; + } else if (!log1Exist) { + tsLogObj.flag = 0; + } else if (!log0Exist) { + tsLogObj.flag = 1; } else { - if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { - strcpy(name, fn); - strcat(name, ".1"); - } - - if (stat(name, &logstat1) < 0) { - tsLogObj.flag = 1; - } else { - tsLogObj.flag = (logstat0.st_mtime > logstat1.st_mtime) ? 0 : 1; - } + tsLogObj.flag = (logstat0.st_mtime > logstat1.st_mtime) ? 0 : 1; } char fileName[LOG_FILE_NAME_LEN + 50] = "\0"; diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c index 5a1430baed..3793f3d3a9 100644 --- a/src/util/src/tnettest.c +++ b/src/util/src/tnettest.c @@ -73,7 +73,7 @@ static void *bindUdpPort(void *sarg) { continue; } if (iDataNum > 0) { - printf("recv Client: %s pkg from UDP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum); + printf("recv Client: %s pkg from UDP port: %d, pkg len: %d\n", taosInetNtoa(clientAddr.sin_addr), port, iDataNum); //printf("Read msg from udp:%s ... 
%s\n", buffer, buffer+iDataNum-16); sendto(serverSocket, buffer, iDataNum, 0, (struct sockaddr *)&clientAddr, (int)sin_size); @@ -138,7 +138,7 @@ static void *bindTcpPort(void *sarg) { if (errno == EINTR) { continue; } else { - printf("recv Client: %s pkg from TCP port: %d fail:%s.\n", inet_ntoa(clientAddr.sin_addr), port, strerror(errno)); + printf("recv Client: %s pkg from TCP port: %d fail:%s.\n", taosInetNtoa(clientAddr.sin_addr), port, strerror(errno)); taosCloseSocket(serverSocket); return NULL; } @@ -149,7 +149,7 @@ static void *bindTcpPort(void *sarg) { } } - printf("recv Client: %s pkg from TCP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum); + printf("recv Client: %s pkg from TCP port: %d, pkg len: %d\n", taosInetNtoa(clientAddr.sin_addr), port, iDataNum); if (iDataNum > 0) { send(client, buffer, iDataNum, 0); } @@ -197,7 +197,7 @@ static int checkTcpPort(info_s *info) { struct in_addr ipStr; memcpy(&ipStr, &info->hostIp, 4); - sprintf(sendbuf, "client send tcp pkg to %s:%d, content: 1122334455", inet_ntoa(ipStr), info->port); + sprintf(sendbuf, "client send tcp pkg to %s:%d, content: 1122334455", taosInetNtoa(ipStr), info->port); sprintf(sendbuf + info->pktLen - 16, "1122334455667788"); send(clientSocket, sendbuf, info->pktLen, 0); @@ -267,7 +267,7 @@ static int checkUdpPort(info_s *info) { struct in_addr ipStr; memcpy(&ipStr, &info->hostIp, 4); - sprintf(sendbuf, "client send udp pkg to %s:%d, content: 1122334455", inet_ntoa(ipStr), info->port); + sprintf(sendbuf, "client send udp pkg to %s:%d, content: 1122334455", taosInetNtoa(ipStr), info->port); sprintf(sendbuf + info->pktLen - 16, "1122334455667788"); socklen_t sin_size = sizeof(*(struct sockaddr *)&serverAddr); diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index c294e86839..4a2d9859b9 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -47,6 +47,15 @@ static void vnodeNotifyRole(void *ahandle, int8_t role); static void vnodeCtrlFlow(void *handle, int32_t mseconds); static int vnodeNotifyFileSynced(void *ahandle, uint64_t fversion); +#ifndef _SYNC +tsync_h syncStart(const SSyncInfo *info) { return NULL; } +int32_t syncForwardToPeer(tsync_h shandle, void *pHead, void *mhandle, int qtype) { return 0; } +void syncStop(tsync_h shandle) {} +int32_t syncReconfig(tsync_h shandle, const SSyncCfg * cfg) { return 0; } +int syncGetNodesRole(tsync_h shandle, SNodesRole * cfg) { return 0; } +void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code) {} +#endif + int32_t vnodeInitResources() { vnodeInitWriteFp(); vnodeInitReadFp(); @@ -289,12 +298,16 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { syncInfo.notifyFileSynced = vnodeNotifyFileSynced; pVnode->sync = syncStart(&syncInfo); +#ifndef _SYNC + pVnode->role = TAOS_SYNC_ROLE_MASTER; +#else if (pVnode->sync == NULL) { vError("vgId:%d, failed to open sync module, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica, tstrerror(terrno)); vnodeCleanUp(pVnode); return terrno; } +#endif pVnode->qMgmt = qOpenQueryMgmt(pVnode->vgId); if (pVnode->qMgmt == NULL) { @@ -369,7 +382,13 @@ void vnodeRelease(void *pVnodeRaw) { char newDir[TSDB_FILENAME_LEN] = {0}; sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId); sprintf(newDir, "%s/vnode%d", tsVnodeBakDir, vgId); - taosRename(rootDir, newDir); + + if (0 == tsEnableVnodeBak) { + vInfo("vgId:%d, vnode backup not enabled", pVnode->vgId); + } else { + taosRename(rootDir, newDir); + } + taosRemoveDir(rootDir); dnodeSendStatusMsgToMnode(); } @@ -658,9 +677,13 
@@ static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) { len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pVnodeCfg->cfg.quorum); len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n"); + + vInfo("vgId:%d, save vnode cfg, replica:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.replications); for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) { len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", pVnodeCfg->nodes[i].nodeId); len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", pVnodeCfg->nodes[i].nodeEp); + vInfo("vgId:%d, save vnode cfg, nodeId:%d nodeEp:%s", pVnodeCfg->cfg.vgId, pVnodeCfg->nodes[i].nodeId, + pVnodeCfg->nodes[i].nodeEp); if (i < pVnodeCfg->cfg.replications - 1) { len += snprintf(content + len, maxLen - len, " },{\n"); diff --git a/src/wal/CMakeLists.txt b/src/wal/CMakeLists.txt index e7d5ae1510..359e09287a 100644 --- a/src/wal/CMakeLists.txt +++ b/src/wal/CMakeLists.txt @@ -7,6 +7,5 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/src SRC) IF (TD_LINUX) ADD_LIBRARY(twal ${SRC}) TARGET_LINK_LIBRARIES(twal tutil common) - ADD_SUBDIRECTORY(test) ENDIF () diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml index 50313a0a0c..f0234f2bd7 100644 --- a/tests/examples/JDBC/JDBCDemo/pom.xml +++ b/tests/examples/JDBC/JDBCDemo/pom.xml @@ -63,7 +63,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.2 + 2.0.4 diff --git a/tests/examples/JDBC/springbootdemo/pom.xml b/tests/examples/JDBC/springbootdemo/pom.xml index 5f31d36d6e..881ea0d6bf 100644 --- a/tests/examples/JDBC/springbootdemo/pom.xml +++ b/tests/examples/JDBC/springbootdemo/pom.xml @@ -63,7 +63,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.2 + 2.0.4 @@ -76,6 +76,24 @@ + + + src/main/resources + + **/*.properties + **/*.xml + + true + + + src/main/java + + **/*.properties + **/*.xml + + + + org.springframework.boot diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/RainStationController.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/RainStationController.java new file mode 100644 index 0000000000..844ac21bb8 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/RainStationController.java @@ -0,0 +1,28 @@ +package com.taosdata.jdbc.springbootdemo.controller; + + +import com.taosdata.jdbc.springbootdemo.domain.Rainfall; +import com.taosdata.jdbc.springbootdemo.service.RainStationService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.*; + +@RestController +@RequestMapping("/rainstation") +public class RainStationController { + + @Autowired + private RainStationService service; + + @GetMapping("/init") + public boolean init() { + service.init(); + service.createTable(); + return true; + } + + @PostMapping("/insert") + public int insert(@RequestBody Rainfall rainfall){ + return service.insert(rainfall); + } + +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java index 9123abd97b..56a58fbb4d 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java +++ 
b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java @@ -16,43 +16,47 @@ public class WeatherController { /** * create database and table + * * @return */ @GetMapping("/init") - public boolean init(){ + public boolean init() { return weatherService.init(); } /** * Pagination Query + * * @param limit * @param offset * @return */ @GetMapping("/{limit}/{offset}") - public List queryWeather(@PathVariable Long limit, @PathVariable Long offset){ + public List queryWeather(@PathVariable Long limit, @PathVariable Long offset) { return weatherService.query(limit, offset); } /** * upload single weather info + * * @param temperature * @param humidity * @return */ @PostMapping("/{temperature}/{humidity}") - public int saveWeather(@PathVariable int temperature, @PathVariable float humidity){ + public int saveWeather(@PathVariable int temperature, @PathVariable float humidity) { return weatherService.save(temperature, humidity); } /** * upload multi weather info + * * @param weatherList * @return */ @PostMapping("/batch") - public int batchSaveWeather(@RequestBody List weatherList){ + public int batchSaveWeather(@RequestBody List weatherList) { return weatherService.save(weatherList); } diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/DatabaseMapper.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/DatabaseMapper.java new file mode 100644 index 0000000000..a9266acb30 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/DatabaseMapper.java @@ -0,0 +1,15 @@ +package com.taosdata.jdbc.springbootdemo.dao; + +import java.util.Map; + +public interface DatabaseMapper { + + int createDatabase(String dbname); + + int dropDatabase(String dbname); + + int creatDatabaseWithParameters(Map map); + + int useDatabase(String dbname); + +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/DatabaseMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/DatabaseMapper.xml new file mode 100644 index 0000000000..329f75b582 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/DatabaseMapper.xml @@ -0,0 +1,44 @@ + + + + + + + create database if not exists ${dbname} + + + + DROP database if exists ${dbname} + + + + + CREATE database if not EXISTS ${dbname} + + KEEP ${keep} + + + DAYS ${days} + + + REPLICA ${replica} + + + cache ${cache} + + + blocks ${blocks} + + + minrows ${minrows} + + + maxrows ${maxrows} + + + + + use ${dbname} + + + \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/RainfallMapper.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/RainfallMapper.java new file mode 100644 index 0000000000..f0efbf40ba --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/RainfallMapper.java @@ -0,0 +1,9 @@ +package com.taosdata.jdbc.springbootdemo.dao; + +import java.util.Map; + +public interface RainfallMapper { + + + int save(Map map); +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/RainfallMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/RainfallMapper.xml new file mode 100644 index 
0000000000..319b4f3974 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/RainfallMapper.xml @@ -0,0 +1,11 @@ + + + + + + + INSERT INTO ${table} using ${dbname}.${stable} tags(#{values.station_code}, #{values.station_name}) (ts, name, code, rainfall) values (#{values.ts}, #{values.name}, #{values.code}, #{values.rainfall}) + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/TableMapper.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/TableMapper.java new file mode 100644 index 0000000000..7601bf974c --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/TableMapper.java @@ -0,0 +1,8 @@ +package com.taosdata.jdbc.springbootdemo.dao; + +import com.taosdata.jdbc.springbootdemo.domain.TableMetadata; + +public interface TableMapper { + + boolean createSTable(TableMetadata tableMetadata); +} \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/TableMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/TableMapper.xml new file mode 100644 index 0000000000..5a272eadb4 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/TableMapper.xml @@ -0,0 +1,21 @@ + + + + + + + create table if not exists ${dbname}.${tablename} + + ${field.name} ${field.type} + + TAGS + + ${tag.name} ${tag.type} + + + + + drop ${tablename} + + + \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/resources/mapper/WeatherMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/WeatherMapper.xml similarity index 100% rename from tests/examples/JDBC/springbootdemo/src/main/resources/mapper/WeatherMapper.xml rename to tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/WeatherMapper.xml diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/FieldMetadata.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/FieldMetadata.java new file mode 100644 index 0000000000..619b5a303d --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/FieldMetadata.java @@ -0,0 +1,28 @@ +package com.taosdata.jdbc.springbootdemo.domain; + +public class FieldMetadata { + + private String name; + private String type; + + public FieldMetadata(String name, String type) { + this.name = name; + this.type = type; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Rainfall.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Rainfall.java new file mode 100644 index 0000000000..93e199d7e6 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Rainfall.java @@ -0,0 +1,64 @@ +package com.taosdata.jdbc.springbootdemo.domain; + +import com.fasterxml.jackson.annotation.JsonFormat; + +import java.sql.Timestamp; + +public class Rainfall { + + 
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8") + private Timestamp ts; + private String name; + private String code; + private float rainfall; + private String station_code; + private String station_name; + + public Timestamp getTs() { + return ts; + } + + public void setTs(Timestamp ts) { + this.ts = ts; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getCode() { + return code; + } + + public void setCode(String code) { + this.code = code; + } + + public float getRainfall() { + return rainfall; + } + + public void setRainfall(float rainfall) { + this.rainfall = rainfall; + } + + public String getStation_code() { + return station_code; + } + + public void setStation_code(String station_code) { + this.station_code = station_code; + } + + public String getStation_name() { + return station_name; + } + + public void setStation_name(String station_name) { + this.station_name = station_name; + } +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/TableMetadata.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/TableMetadata.java new file mode 100644 index 0000000000..74bb434f08 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/TableMetadata.java @@ -0,0 +1,43 @@ +package com.taosdata.jdbc.springbootdemo.domain; + +import java.util.List; + +public class TableMetadata { + + private String dbname; + private String tablename; + private List fields; + private List tags; + + public String getDbname() { + return dbname; + } + + public void setDbname(String dbname) { + this.dbname = dbname; + } + + public String getTablename() { + return tablename; + } + + public void setTablename(String tablename) { + this.tablename = tablename; + } + + public List getFields() { + return fields; + } + + public void setFields(List fields) { + this.fields = fields; + } + + public List getTags() { + return tags; + } + + public void setTags(List tags) { + this.tags = tags; + } +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/TagMetadata.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/TagMetadata.java new file mode 100644 index 0000000000..755ecc0075 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/TagMetadata.java @@ -0,0 +1,27 @@ +package com.taosdata.jdbc.springbootdemo.domain; + +public class TagMetadata { + private String name; + private String type; + + public TagMetadata(String name, String type) { + this.name = name; + this.type = type; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java index 9547a8a89b..cd7de447ea 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java @@ -1,9 +1,12 @@ package com.taosdata.jdbc.springbootdemo.domain; 
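Both the Rainfall class above and the Weather class changed just below annotate their java.sql.Timestamp field with Jackson's @JsonFormat, so REST responses carry a formatted string instead of the default epoch-millisecond form. A minimal sketch of the effect, assuming jackson-databind is on the classpath (as it is in a Spring Boot web project); the class names and the sample epoch value are illustrative only:

import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.sql.Timestamp;

public class JsonFormatSketch {
    static class Sample {
        // Same annotation as Rainfall.ts / Weather.ts in this patch
        @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8")
        public Timestamp ts = new Timestamp(1537146000000L);
    }

    public static void main(String[] args) throws Exception {
        // Prints {"ts":"2018-09-17 09:00:00.000"} rather than {"ts":1537146000000}
        System.out.println(new ObjectMapper().writeValueAsString(new Sample()));
    }
}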
+import com.fasterxml.jackson.annotation.JsonFormat; + import java.sql.Timestamp; public class Weather { + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8") private Timestamp ts; private int temperature; diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/RainStationService.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/RainStationService.java new file mode 100644 index 0000000000..3ea63c1760 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/RainStationService.java @@ -0,0 +1,72 @@ +package com.taosdata.jdbc.springbootdemo.service; + +import com.taosdata.jdbc.springbootdemo.dao.DatabaseMapper; +import com.taosdata.jdbc.springbootdemo.dao.RainfallMapper; +import com.taosdata.jdbc.springbootdemo.dao.TableMapper; +import com.taosdata.jdbc.springbootdemo.domain.FieldMetadata; +import com.taosdata.jdbc.springbootdemo.domain.Rainfall; +import com.taosdata.jdbc.springbootdemo.domain.TableMetadata; +import com.taosdata.jdbc.springbootdemo.domain.TagMetadata; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +@Service +public class RainStationService { + + @Autowired + private DatabaseMapper databaseMapper; + @Autowired + private TableMapper tableMapper; + @Autowired + private RainfallMapper rainfallMapper; + + public boolean init() { + databaseMapper.dropDatabase("rainstation"); + + Map map = new HashMap<>(); + map.put("dbname", "rainstation"); + map.put("keep", "36500"); + map.put("days", "30"); + map.put("blocks", "4"); + databaseMapper.creatDatabaseWithParameters(map); + + databaseMapper.useDatabase("rainstation"); + return true; + } + + public boolean createTable() { + TableMetadata tableMetadata = new TableMetadata(); + tableMetadata.setDbname("rainstation"); + tableMetadata.setTablename("monitoring"); + + List fields = new ArrayList<>(); + fields.add(new FieldMetadata("ts", "timestamp")); + fields.add(new FieldMetadata("name", "NCHAR(10)")); + fields.add(new FieldMetadata("code", " BINARY(8)")); + fields.add(new FieldMetadata("rainfall", "float")); + tableMetadata.setFields(fields); + + List tags = new ArrayList<>(); + tags.add(new TagMetadata("station_code", "BINARY(8)")); + tags.add(new TagMetadata("station_name", "NCHAR(10)")); + tableMetadata.setTags(tags); + + tableMapper.createSTable(tableMetadata); + return true; + } + + + public int insert(Rainfall rainfall) { + Map map = new HashMap<>(); + map.put("dbname", "rainstation"); + map.put("table", "S_53646"); + map.put("stable", "monitoring"); + map.put("values", rainfall); + return rainfallMapper.save(map); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java index 396d70bf92..b950a9a4e4 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java @@ -14,10 +14,8 @@ public class WeatherService { private WeatherMapper weatherMapper; public boolean init() { - weatherMapper.createDB(); 
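The RainStationService shown above drives three MyBatis mappers: DatabaseMapper builds the database from a parameter map, TableMapper creates the monitoring super table from TableMetadata, and RainfallMapper.save() runs the auto-create INSERT defined in RainfallMapper.xml. A rough sketch of the flow for one reading; the class name and sample values are made up, and the rendered SQL in the comment is approximate:

import com.taosdata.jdbc.springbootdemo.domain.Rainfall;
import java.sql.Timestamp;

public class RainfallInsertSketch {
    public static void main(String[] args) {
        Rainfall r = new Rainfall();                     // domain class from this patch
        r.setTs(new Timestamp(System.currentTimeMillis()));
        r.setName("station1");
        r.setCode("S_53646");
        r.setRainfall(3.5f);
        r.setStation_code("53646");
        r.setStation_name("station1");

        // RainStationService.insert(r) packs this bean into a map
        // (dbname=rainstation, table=S_53646, stable=monitoring, values=r),
        // so RainfallMapper.xml renders roughly:
        //   INSERT INTO S_53646 USING rainstation.monitoring TAGS(?, ?)
        //          (ts, name, code, rainfall) VALUES (?, ?, ?, ?)
        // with the tag and column values bound from the Rainfall bean; the child
        // table S_53646 is created automatically on the first insert.
    }
}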
weatherMapper.createTable(); - return true; } diff --git a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties index 926559a90c..683884fcdd 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties +++ b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties @@ -1,6 +1,6 @@ # datasource config spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver -spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/log +spring.datasource.url=jdbc:TAOS://localhost:6030/log spring.datasource.username=root spring.datasource.password=taosdata diff --git a/tests/pytest/crash_gen.sh b/tests/pytest/crash_gen.sh index f6be6aae49..df1a9f595b 100755 --- a/tests/pytest/crash_gen.sh +++ b/tests/pytest/crash_gen.sh @@ -49,4 +49,4 @@ export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3 export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR # Now we are all let, and let's see if we can find a crash. Note we pass all params -python3 ./crash_gen.py $@ +python3.8 ./crash_gen.py $@ diff --git a/tests/pytest/insert/tagSepcified.py b/tests/pytest/insert/tagSepcified.py new file mode 100644 index 0000000000..213cf119a0 --- /dev/null +++ b/tests/pytest/insert/tagSepcified.py @@ -0,0 +1,49 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdSql.execute('create table xcxlog (ts timestamp, user_id int, path BINARY(40),scene int) tags(appid bigint, adzone_id int,ip bigint,session_id bigint)') + tdSql.error("insert into d1000004(user_id,path,scene,ts) using xcxlog tags(1000004,145,97160) values (97160,'pagex/goods/taoke',1086,now)") + tdSql.execute("insert into d1000004_145(user_id,path,scene,ts) using xcxlog(appid,adzone_id,session_id,ip) tags(1000004,145,97160,1717171445) values (97160,'pagex/goods/taoke',1086,now)") + + tdSql.query("show tables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'd1000004_145') + + tdSql.query("select * from xcxlog") + tdSql.checkRows(1) + tdSql.checkData(0, 4, 1000004) + tdSql.checkData(0, 5, 145) + tdSql.checkData(0, 6, 1717171445) + tdSql.checkData(0, 7, 97160) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryGroupbySort.py b/tests/pytest/query/queryGroupbySort.py new file mode 100644 index 0000000000..28ef897f42 --- /dev/null +++ b/tests/pytest/query/queryGroupbySort.py @@ -0,0 +1,50 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
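The tagSepcified.py case added above covers INSERT statements that name both the data columns and the tag columns explicitly, so tag values bind by name and the child table is created on the fly. A minimal JDBC sketch of the same statements, assuming the taos-jdbcdriver from this repo, a server on localhost and an existing database named test; the class name is illustrative only:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class TagColumnInsertSketch {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://localhost:6030/test", "root", "taosdata");
             Statement stmt = conn.createStatement()) {
            // Same schema and insert as the Python test case
            stmt.executeUpdate("create table if not exists xcxlog (ts timestamp, user_id int, path binary(40), scene int)"
                    + " tags(appid bigint, adzone_id int, ip bigint, session_id bigint)");
            // Tag columns are listed by name in a custom order, so the values
            // bind to appid, adzone_id, session_id and ip respectively.
            stmt.executeUpdate("insert into d1000004_145(user_id, path, scene, ts)"
                    + " using xcxlog(appid, adzone_id, session_id, ip)"
                    + " tags(1000004, 145, 97160, 1717171445)"
                    + " values (97160, 'pagex/goods/taoke', 1086, now)");
        }
    }
}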
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + tdSql.execute( + "create table stb(ts timestamp,i int) tags (p_id nchar(20));") + tdSql.execute( + "insert into tb using stb tags('11231') values (%d, %d) (%d, %d) (%d, %d) (%d, %d)" + % (self.ts, 12, self.ts + 1, 15, self.ts + 2, 15, self.ts + 3, 12)) + + tdSql.query(''' select last(ts) p_time,i from stb where p_id='11231' and ts>=%d and ts <=%d + group by i order by time desc limit 100 ''' % (self.ts, self.ts + 4)) + tdSql.checkRows(2) + tdSql.checkData(0, 0, "2018-09-17 09:00:00.003000") + tdSql.checkData(1, 0, "2018-09-17 09:00:00.002000") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 1e1d02959f..627d712474 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -122,8 +122,8 @@ class TDSql: return self.cursor.istype(col, dataType) def checkData(self, row, col, data): - self.checkRowCol(row, col) - if self.queryResult[row][col] != data: + self.checkRowCol(row, col) + if str(self.queryResult[row][col]) != str(data): if isinstance(data, float) and abs(self.queryResult[row][col] - data) <= 0.000001: tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" % (self.sql, row, col, self.queryResult[row][col], data)) diff --git a/tests/script/general/compute/count.sim b/tests/script/general/compute/count.sim index aef30c8b6c..c2ed5af3ca 100644 --- a/tests/script/general/compute/count.sim +++ b/tests/script/general/compute/count.sim @@ -52,6 +52,12 @@ if $data00 != $rowNum then return -1 endi +sql select count(1) from $tb +print ===> select count(1) from $tb => $data00 +if $data00 != $rowNum then + return -1 +endi + sql select count(tbcol) from $tb print ===> $data00 if $data00 != $rowNum then @@ -102,13 +108,20 @@ if $data00 != $totalNum then return -1 endi +print =============== step8 +sql select count(1) from $mt +print ===> $data00 +if $data00 != $totalNum then + return -1 +endi + sql select count(tbcol) from $mt print ===> $data00 if $data00 != $totalNum then return -1 endi -print =============== step8 +print =============== step10 sql select count(tbcol) as c from $mt where ts < now + 4m print ===> $data00 if $data00 != 50 then @@ -171,4 +184,4 @@ if $rows != 0 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/connection/mqtt.sim b/tests/script/general/connection/mqtt.sim new file mode 100644 index 0000000000..4b291f91ea --- /dev/null +++ b/tests/script/general/connection/mqtt.sim @@ -0,0 +1,19 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 2 +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 +system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode1 -c 
maxTablesPerVnode -v 100000 +system sh/cfg.sh -n dnode1 -c http -v 1 +system sh/cfg.sh -n dnode1 -c mqtt -v 1 + +system sh/exec.sh -n dnode1 -s start + +sleep 3000 +sql connect +sql create database mqttdb; +sql create table mqttdb.devices(ts timestamp, value double) tags(name binary(32), model binary(32), serial binary(16), param binary(16), unit binary(16)); + +sleep 1000 +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/db/alter_option.sim b/tests/script/general/db/alter_option.sim index 12a49c59b5..49c75966ca 100644 --- a/tests/script/general/db/alter_option.sim +++ b/tests/script/general/db/alter_option.sim @@ -7,11 +7,10 @@ system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect -print ============================ dnode1 start +print ============= create database sql create database db cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1 sql show databases -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 if $data00 != db then return -1 endi @@ -37,27 +36,240 @@ if $data09 != 4 then return -1 endi -print =============== step2 -system sh/exec.sh -n dnode1 -s stop -x SIGINT -return -sql_error alter database db cache 256 -sql_error alter database db blocks 1 -sql_error alter database db days 10 -sql_error alter database db keep 10 -sql_error alter database db minRows 350 -sql_error alter database db minRows 550 -sql_error alter database db ctime 5000 -sql_error alter database db precision "us" -sql_error alter database db comp 3 -sql_error alter database db wal 1 +print ============== step name +sql_error alter database db name d1 +sql_error alter database db name d2 + +print ============== step ntables +sql_error alter database db ntables -1 +sql_error alter database db ntables 0 +sql_error alter database db ntables 1 +sql_error alter database db ntables 10 + +print ============== step vgroups +sql_error alter database db vgroups -1 +sql_error alter database db vgroups 0 +sql_error alter database db vgroups 1 +sql_error alter database db vgroups 10 + +print ============== step replica sql_error alter database db replica 2 +sql_error alter database db replica 3 +sql_error alter database db replica 0 +sql alter database db replica 1 +sql show databases +print replica $data4_db +if $data4_db != 1 then + return -1 +endi + +print ============== step quorum +sql show databases +print quorum $data5_db +if $data5_db != 1 then + return -1 +endi + +sql alter database db quorum 1 +sql show databases +print quorum $data5_db +if $data5_db != 1 then + return -1 +endi + +sql alter database db quorum 2 +sql show databases +print quorum $data5_db +if $data5_db != 2 then + return -1 +endi + +sql alter database db quorum 3 +sql show databases +print quorum $data5_db +if $data5_db != 3 then + return -1 +endi + +sql alter database db quorum 3 +sql alter database db quorum 2 +sql alter database db quorum 1 +sql_error alter database db quorum 0 +sql_error alter database db quorum 4 +sql_error alter database db quorum 5 +sql_error alter database db quorum -1 + +print ============== step days +sql_error alter database db days 0 +sql_error alter database db days 1 +sql_error alter database db days 2 +sql_error alter database db days 10 +sql_error alter database db days 50 +sql_error alter database db days 100 + +print ============== step keep +sql show databases +print keep $data7_db +if $data7_db != 20,20,20 then + return -1 +endi + +sql alter database db keep 10 +sql show databases +print 
keep $data7_db +if $data7_db != 20,20,10 then + return -1 +endi + +sql alter database db keep 20 +sql show databases +print keep $data7_db +if $data7_db != 20,20,20 then + return -1 +endi -print ============== step3 -sql alter database db comp 1 -sql alter database db blocks 40 sql alter database db keep 30 +sql show databases +print keep $data7_db +if $data7_db != 20,20,30 then + return -1 +endi + +sql alter database db keep 40 +sql alter database db keep 30 +sql alter database db keep 20 +sql alter database db keep 10 +sql_error alter database db keep 9 +sql_error alter database db keep 1 +sql alter database db keep 0 +sql alter database db keep -1 +sql_error alter database db keep 365001 + +print ============== step cache +sql_error alter database db cache 60 +sql_error alter database db cache 50 +sql_error alter database db cache 20 +sql_error alter database db cache 3 +sql_error alter database db cache 129 +sql_error alter database db cache 300 +sql_error alter database db cache 0 +sql_error alter database db cache -1 + +print ============== step blocks +sql show databases +print blocks $data9_db +if $data9_db != 4 then + return -1 +endi + +sql alter database db blocks 10 +sql show databases +print blocks $data9_db +if $data9_db != 10 then + return -1 +endi + +sql alter database db blocks 20 +sql show databases +print blocks $data9_db +if $data9_db != 20 then + return -1 +endi + +sql alter database db blocks 30 +sql show databases +print blocks $data9_db +if $data9_db != 30 then + return -1 +endi + +sql alter database db blocks 40 +sql alter database db blocks 30 +sql alter database db blocks 20 +sql alter database db blocks 10 +sql_error alter database db blocks 2 +sql_error alter database db blocks 1 +sql alter database db blocks 0 +sql_error alter database db blocks -1 +sql_error alter database db blocks 10001 + +print ============== step minrows +sql_error alter database db minrows 1 +sql_error alter database db minrows 100 +sql_error alter database db minrows 1000 + +print ============== step maxrows +sql_error alter database db maxrows 1 +sql_error alter database db maxrows 100 +sql_error alter database db maxrows 1000 + +print ============== step wallevel +sql show databases +print wallevel $data12_db +if $data12_db != 1 then + return -1 +endi + +sql alter database db wal 1 +sql show databases +print wal $data12_db +if $data12_db != 1 then + return -1 +endi + +sql_error alter database db wal 2 +sql_error alter database db wal 0 +sql_error alter database db wal 3 +sql_error alter database db wal 4 +sql_error alter database db wal -1 +sql_error alter database db wal 1000 + +print ============== step fsync +sql_error alter database db fsync 2 +sql_error alter database db fsync 3 +sql_error alter database db fsync 4 +sql_error alter database db fsync -1 +sql_error alter database db fsync 1000 + +print ============== step comp +sql show databases +print comp $data14_db +if $data14_db != 2 then + return -1 +endi + +sql alter database db comp 1 +sql show databases +print comp $data14_db +if $data14_db != 1 then + return -1 +endi + +sql alter database db comp 2 +sql show databases +print comp $data14_db +if $data14_db != 2 then + return -1 +endi + +sql alter database db comp 0 +sql show databases +print comp $data14_db +if $data14_db != 0 then + return -1 +endi + +sql_error alter database db comp 3 +sql_error alter database db comp 4 +sql_error alter database db comp 5 +sql_error alter database db comp -1 +print ============== step precision +sql_error alter database db prec 'us' 
-#system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +print ============== step status +sql_error alter database db status 'delete' + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/db/delete_reusevnode.sim b/tests/script/general/db/delete_reusevnode.sim index 79783d6dda..5cfe7729ed 100644 --- a/tests/script/general/db/delete_reusevnode.sim +++ b/tests/script/general/db/delete_reusevnode.sim @@ -50,7 +50,7 @@ $tbPrefix = t $i = 0 while $i < 10 $db = db . $i - sql create database $db maxTables 4 + sql create database $db sql use $db sql create table st (ts timestamp, i int) tags(j int); diff --git a/tests/script/general/db/delete_reusevnode2.sim b/tests/script/general/db/delete_reusevnode2.sim index e54266e312..9fa1969425 100644 --- a/tests/script/general/db/delete_reusevnode2.sim +++ b/tests/script/general/db/delete_reusevnode2.sim @@ -8,7 +8,7 @@ sql connect print ======== step1 -sql create database db maxTables 4; +sql create database db; sql use db $tbPrefix = t diff --git a/tests/script/general/db/dropdnodes.sim b/tests/script/general/db/dropdnodes.sim index be160910c5..884a88490e 100644 --- a/tests/script/general/db/dropdnodes.sim +++ b/tests/script/general/db/dropdnodes.sim @@ -15,7 +15,7 @@ sql connect sql create dnode $hostname2 sleep 2000 -sql create database db maxTables 4 +sql create database db sql use db print ========== step1 diff --git a/tests/script/general/insert/insert_drop.sim b/tests/script/general/insert/insert_drop.sim index cf29656067..80c16ff8e4 100644 --- a/tests/script/general/insert/insert_drop.sim +++ b/tests/script/general/insert/insert_drop.sim @@ -19,7 +19,7 @@ $stb = stb sql drop database $db -x step1 step1: -sql create database $db maxtables 10 ctime 30 +sql create database $db ctime 30 print ====== create tables sql use $db sql create table $stb (ts timestamp, c1 int) tags(t1 int) diff --git a/tests/script/general/parser/commit.sim b/tests/script/general/parser/commit.sim index 31f457cfae..4d85806b69 100644 --- a/tests/script/general/parser/commit.sim +++ b/tests/script/general/parser/commit.sim @@ -2,6 +2,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxTablesperVnode -v 100 system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect @@ -23,7 +24,7 @@ $stb = $stbPrefix . $i sql drop database $db -x step1 step1: -sql create database $db maxrows 255 maxtables 100 ctime 3600 +sql create database $db maxrows 255 ctime 3600 print ====== create tables sql use $db sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int) diff --git a/tests/script/general/parser/first_last.sim b/tests/script/general/parser/first_last.sim index fa2d7675d2..46431b0848 100644 --- a/tests/script/general/parser/first_last.sim +++ b/tests/script/general/parser/first_last.sim @@ -2,6 +2,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxTablespervnode -v 4 system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect @@ -22,7 +23,7 @@ $stb = $stbPrefix . 
$i sql drop database $db -x step1 step1: -sql create database $db maxrows 400 cache 1 maxTables 4 +sql create database $db maxrows 400 cache 1 sql use $db sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int) diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim index 70edf3535b..255e00ca41 100644 --- a/tests/script/general/parser/groupby.sim +++ b/tests/script/general/parser/groupby.sim @@ -2,6 +2,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start sleep 1000 sql connect @@ -28,7 +29,7 @@ $tstart = 100000 sql drop database if exits $db -x step1 step1: -sql create database if not exists $db maxTables 4 keep 36500 +sql create database if not exists $db keep 36500 sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) diff --git a/tests/script/general/parser/join.sim b/tests/script/general/parser/join.sim index f17e28c1da..ef3245ccaf 100644 --- a/tests/script/general/parser/join.sim +++ b/tests/script/general/parser/join.sim @@ -4,6 +4,8 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c debugFlag -v 135 system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 + system sh/exec.sh -n dnode1 -s start sleep 1000 sql connect @@ -24,7 +26,7 @@ $tstart = 100000 sql drop database if exits $db -x step1 step1: -sql create database if not exists $db maxTables 4 keep 36500 +sql create database if not exists $db keep 36500 sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) diff --git a/tests/script/general/parser/join_multivnode.sim b/tests/script/general/parser/join_multivnode.sim index 4cf1d36672..51f1ef11c7 100644 --- a/tests/script/general/parser/join_multivnode.sim +++ b/tests/script/general/parser/join_multivnode.sim @@ -2,6 +2,8 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 + system sh/exec.sh -n dnode1 -s start sql connect sleep 1000 @@ -22,7 +24,7 @@ $tstart = 100000 sql drop database if exits $db -x step1 step1: -sql create database if not exists $db maxTables 4 keep 36500 +sql create database if not exists $db keep 36500 sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) diff --git a/tests/script/general/parser/lastrow.sim b/tests/script/general/parser/lastrow.sim index 48f6e65a4f..6321823fe2 100644 --- a/tests/script/general/parser/lastrow.sim +++ b/tests/script/general/parser/lastrow.sim @@ -2,6 +2,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect @@ -21,7 +22,7 @@ $stb = $stbPrefix . 
$i sql drop database $db -x step1 step1: -sql create database $db maxTables 4 +sql create database $db sql use $db sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int) diff --git a/tests/script/general/parser/mixed_blocks.sim b/tests/script/general/parser/mixed_blocks.sim index 569decaa14..41082bb144 100644 --- a/tests/script/general/parser/mixed_blocks.sim +++ b/tests/script/general/parser/mixed_blocks.sim @@ -2,6 +2,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start sleep 3000 @@ -22,7 +23,7 @@ sql drop database if exists $db $paramRows = 200 $rowNum = $paramRows * 4 $rowNum = $rowNum / 5 -sql create database $db maxrows $paramRows maxTables 4 +sql create database $db maxrows $paramRows print ====== create tables sql use $db sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 bool, c6 binary(10), c7 nchar(10)) tags(t1 int) diff --git a/tests/script/general/parser/projection_limit_offset.sim b/tests/script/general/parser/projection_limit_offset.sim index 2b89946ef8..fbff99d58f 100644 --- a/tests/script/general/parser/projection_limit_offset.sim +++ b/tests/script/general/parser/projection_limit_offset.sim @@ -2,6 +2,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect @@ -22,7 +23,7 @@ $tstart = 100000 sql drop database if exits $db -x step1 step1: -sql create database if not exists $db maxTables 4 keep 36500 +sql create database if not exists $db keep 36500 sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) diff --git a/tests/script/general/parser/selectResNum.sim b/tests/script/general/parser/selectResNum.sim index 319e034c0c..42cedc034b 100644 --- a/tests/script/general/parser/selectResNum.sim +++ b/tests/script/general/parser/selectResNum.sim @@ -2,6 +2,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 200 system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect @@ -23,7 +24,7 @@ $stb = $stbPrefix . 
$i sql drop database $db -x step1 step1: -sql create database $db cache 16 maxtables 200 +sql create database $db cache 16 print ====== create tables sql use $db sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int) diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim index 9f944a586b..68d145a5f2 100644 --- a/tests/script/general/parser/select_with_tags.sim +++ b/tests/script/general/parser/select_with_tags.sim @@ -2,6 +2,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start sleep 1000 sql connect @@ -23,7 +24,7 @@ $tstart = 100000 sql drop database if exists $db -x step1 step1: -sql create database if not exists $db maxTables 4 keep 36500 +sql create database if not exists $db keep 36500 sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) diff --git a/tests/script/general/parser/single_row_in_tb.sim b/tests/script/general/parser/single_row_in_tb.sim index 2313a98b41..4305ae1b5d 100644 --- a/tests/script/general/parser/single_row_in_tb.sim +++ b/tests/script/general/parser/single_row_in_tb.sim @@ -2,6 +2,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect @@ -15,7 +16,7 @@ $db = $dbPrefix $stb = $stbPrefix sql drop database if exists $db -sql create database $db maxrows 200 maxTables 4 +sql create database $db maxrows 200 print ====== create tables sql use $db sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 bool, c6 binary(10), c7 nchar(10)) tags(t1 int) diff --git a/tests/script/general/parser/slimit.sim b/tests/script/general/parser/slimit.sim index 2ce3d0c2ac..edbf0c8cc0 100644 --- a/tests/script/general/parser/slimit.sim +++ b/tests/script/general/parser/slimit.sim @@ -21,7 +21,7 @@ $db = $dbPrefix . $i $stb = $stbPrefix . $i sql drop database if exists $db -sql create database $db maxrows 200 cache 16 maxTables 4 +sql create database $db maxrows 200 cache 16 print ====== create tables sql use $db sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 binary(15), t2 int, t3 bigint, t4 nchar(10), t5 double, t6 bool) diff --git a/tests/script/general/parser/timestamp.sim b/tests/script/general/parser/timestamp.sim index 28bbc9df0e..67da0f0869 100644 --- a/tests/script/general/parser/timestamp.sim +++ b/tests/script/general/parser/timestamp.sim @@ -2,6 +2,8 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 + system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect @@ -20,7 +22,7 @@ $db = $dbPrefix . $i $stb = $stbPrefix . 
$i sql drop database if exists $db -sql create database $db maxrows 200 maxTables 4 +sql create database $db maxrows 200 print ====== create tables sql use $db sql create table $stb (ts timestamp, c1 timestamp, c2 int) tags(t1 binary(20)) diff --git a/tests/script/general/parser/topbot.sim b/tests/script/general/parser/topbot.sim index 8e529b4eb4..2faee55460 100644 --- a/tests/script/general/parser/topbot.sim +++ b/tests/script/general/parser/topbot.sim @@ -2,6 +2,8 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 200 + system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect @@ -23,7 +25,7 @@ $stb = $stbPrefix . $i sql drop database $db -x step1 step1: -sql create database $db cache 16 maxtables 200 +sql create database $db cache 16 print ====== create tables sql use $db sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int) diff --git a/tests/script/general/parser/union.sim b/tests/script/general/parser/union.sim index fbd1c211b9..9e178537a2 100644 --- a/tests/script/general/parser/union.sim +++ b/tests/script/general/parser/union.sim @@ -4,6 +4,8 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c debugFlag -v 135 system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 + system sh/exec.sh -n dnode1 -s start sleep 1000 sql connect @@ -27,7 +29,7 @@ $mt1 = $mtPrefix . $j sql drop database if exits $db -x step1 step1: -sql create database if not exists $db maxtables 4 +sql create database if not exists $db sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim index 710156a4ff..dd3b11c2dc 100644 --- a/tests/script/general/parser/where.sim +++ b/tests/script/general/parser/where.sim @@ -21,7 +21,7 @@ $mt = $mtPrefix . $i sql drop database if exits $db -x step1 step1: -sql create database if not exists $db maxTables 4 +sql create database if not exists $db sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) diff --git a/tests/script/general/stable/dnode3.sim b/tests/script/general/stable/dnode3.sim index 76652229d2..2859f644bb 100644 --- a/tests/script/general/stable/dnode3.sim +++ b/tests/script/general/stable/dnode3.sim @@ -54,7 +54,7 @@ $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . $i -sql create database $db maxTables 4 +sql create database $db sql use $db sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int) diff --git a/tests/script/general/table/limit.sim b/tests/script/general/table/limit.sim index 6287807954..18597f2e1c 100644 --- a/tests/script/general/table/limit.sim +++ b/tests/script/general/table/limit.sim @@ -17,7 +17,7 @@ $db = $dbPrefix . $i $tb = $tbPrefix . 
$i print =================== step 0 -sql create database $db maxtables 129 +sql create database $db sql use $db sql show vgroups if $rows != 0 then diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 40598332d9..063d11bd9d 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -253,6 +253,7 @@ cd ../../../debug; make ./test.sh -f unique/cluster/balance2.sim ./test.sh -f unique/cluster/balance3.sim ./test.sh -f unique/cluster/cache.sim +./test.sh -f unique/cluster/vgroup100.sim ./test.sh -f unique/column/replica3.sim @@ -293,6 +294,7 @@ cd ../../../debug; make ./test.sh -f unique/stable/replica3_dnode6.sim ./test.sh -f unique/stable/replica3_vnode3.sim +./test.sh -f unique/mnode/mgmt20.sim ./test.sh -f unique/mnode/mgmt21.sim ./test.sh -f unique/mnode/mgmt22.sim ./test.sh -f unique/mnode/mgmt23.sim diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index 498f536726..0d444a5a6e 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -119,7 +119,7 @@ echo "tsdbDebugFlag 135" >> $TAOS_CFG echo "cDebugFlag 135" >> $TAOS_CFG echo "jnidebugFlag 135" >> $TAOS_CFG echo "odbcdebugFlag 135" >> $TAOS_CFG -echo "httpDebugFlag 143" >> $TAOS_CFG +echo "httpDebugFlag 135" >> $TAOS_CFG echo "monitorDebugFlag 135" >> $TAOS_CFG echo "mqttDebugFlag 135" >> $TAOS_CFG echo "qdebugFlag 135" >> $TAOS_CFG diff --git a/tests/script/unique/account/paras.sim b/tests/script/unique/account/paras.sim index 60c08b21e9..ae511fe2b0 100644 --- a/tests/script/unique/account/paras.sim +++ b/tests/script/unique/account/paras.sim @@ -14,10 +14,10 @@ print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != root then return -1 endi -if $data02 != 3/10 then +if $data02 != 3/128 then return -1 endi -if $data03 != 0/64 then +if $data03 != 0/128 then return -1 endi if $data04 != 0/2147483647 then diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim index b18edf0670..4f80c2389e 100644 --- a/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim +++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim @@ -61,7 +61,7 @@ $totalTableNum = 10 $sleepTimer = 3000 $db = db -sql create database $db replica 2 maxTables $totalTableNum +sql create database $db replica 2 sql use $db # create table , insert data diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim index 4f737b9c40..fb0650b78a 100644 --- a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim +++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim @@ -61,7 +61,7 @@ $totalTableNum = 4 $sleepTimer = 3000 $db = db -sql create database $db cache 1 replica 2 maxTables $totalTableNum +sql create database $db cache 1 replica 2 sql use $db # create table , insert data diff --git a/tests/script/unique/arbitrator/sync_replica3_createTable.sim b/tests/script/unique/arbitrator/sync_replica3_createTable.sim index 3bf274a3ea..d593577bee 100644 --- a/tests/script/unique/arbitrator/sync_replica3_createTable.sim +++ b/tests/script/unique/arbitrator/sync_replica3_createTable.sim @@ -61,8 +61,8 @@ $totalTableNum = 20 $sleepTimer = 3000 $db = db -print create database $db replica 3 maxTables $totalTableNum -sql create database $db replica 3 maxTables $totalTableNum +print create database $db replica 3 +sql create database $db replica 3 sql use $db # create table , insert data diff 
--git a/tests/script/unique/arbitrator/sync_replica3_dnodeChang_DropAddAlterTableDropDb.sim b/tests/script/unique/arbitrator/sync_replica3_dnodeChang_DropAddAlterTableDropDb.sim index c8fe96008b..1ef499534f 100644 --- a/tests/script/unique/arbitrator/sync_replica3_dnodeChang_DropAddAlterTableDropDb.sim +++ b/tests/script/unique/arbitrator/sync_replica3_dnodeChang_DropAddAlterTableDropDb.sim @@ -62,8 +62,8 @@ $sleepTimer = 3000 $maxTables = $totalTableNum * 2 $db = db -print create database $db replica 3 maxTables $maxTables -sql create database $db replica 3 maxTables $maxTables +print create database $db replica 3 +sql create database $db replica 3 sql use $db # create table , insert data diff --git a/tests/script/unique/big/maxvnodes.sim b/tests/script/unique/big/maxvnodes.sim index eb6e0b3b53..662d391e47 100644 --- a/tests/script/unique/big/maxvnodes.sim +++ b/tests/script/unique/big/maxvnodes.sim @@ -18,7 +18,7 @@ print ========== prepare data system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect -sql create database db blocks 3 cache 1 maxTables $maxTables +sql create database db blocks 3 cache 1 sql use db print ========== step1 diff --git a/tests/script/unique/big/restartSpeed.sim b/tests/script/unique/big/restartSpeed.sim index 9d490fcb8b..ea4edefda8 100644 --- a/tests/script/unique/big/restartSpeed.sim +++ b/tests/script/unique/big/restartSpeed.sim @@ -16,7 +16,7 @@ print ========== prepare data system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect -sql create database db blocks 3 cache 1 maxTables $maxTables +sql create database db blocks 3 cache 1 sql use db print ========== step1 diff --git a/tests/script/unique/cluster/balance1.sim b/tests/script/unique/cluster/balance1.sim index 44f44e3976..728ead25fe 100644 --- a/tests/script/unique/cluster/balance1.sim +++ b/tests/script/unique/cluster/balance1.sim @@ -40,7 +40,7 @@ print ========= start dnode1 system sh/exec.sh -n dnode1 -s start sql connect -sql create database c_b1_d1 maxTables 4 +sql create database c_b1_d1 sql use c_b1_d1 sql create table c_b1_t1 (t timestamp, i int) @@ -50,7 +50,7 @@ sql insert into c_b1_t1 values(1520000022013, 13) sql insert into c_b1_t1 values(1520000023012, 12) sql insert into c_b1_t1 values(1520000024011, 11) -sql create database c_b1_d2 maxTables 4 +sql create database c_b1_d2 sql use c_b1_d2 sql create table c_b1_t2 (t timestamp, i int) sql insert into c_b1_t2 values(1520000020025, 25) @@ -107,7 +107,7 @@ print dnode2 ==> $dnode2Role print ============================== step3 print ========= add db3 -sql create database c_b1_d3 maxTables 4 +sql create database c_b1_d3 sql use c_b1_d3 sql create table c_b1_t3 (t timestamp, i int) sql insert into c_b1_t3 values(1520000020035, 35) @@ -280,7 +280,7 @@ if $dnode4Role != slave then endi print ============================== step10 -sql create database c_b1_d4 maxTables 4 +sql create database c_b1_d4 sql use c_b1_d4 sql create table c_b1_t4 (t timestamp, i int) sql insert into c_b1_t4 values(1520000020045, 45) @@ -318,7 +318,7 @@ sql use c_b1_d2 sql insert into c_b1_t2 values(1520000025026, 26) print ============================== step12 -sql create database c_b1_d5 maxTables 4 +sql create database c_b1_d5 sql use c_b1_d5 sql_error create table c_b1_t5 (t timestamp, i int) -x error3 @@ -343,7 +343,7 @@ sql insert into c_b1_t5 values(1520000022053, 53) sql insert into c_b1_t5 values(1520000023052, 52) sql insert into c_b1_t5 values(1520000024051, 51) -sql create database c_b1_d6 maxTables 4 +sql create database c_b1_d6 sql use c_b1_d6 sql 
create table c_b1_t6 (t timestamp, i int) sql insert into c_b1_t6 values(1520000020065, 65) @@ -375,7 +375,7 @@ sql create dnode $hostname6 system sh/exec.sh -n dnode6 -s start sleep 15000 -sql create database c_b1_d7 maxTables 4 +sql create database c_b1_d7 sql use c_b1_d7 sql create table c_b1_t7 (t timestamp, i int) sql insert into c_b1_t7 values(1520000020075, 75) @@ -384,7 +384,7 @@ sql insert into c_b1_t7 values(1520000022073, 73) sql insert into c_b1_t7 values(1520000023072, 72) sql insert into c_b1_t7 values(1520000024071, 71) -sql create database c_b1_d8 maxTables 4 +sql create database c_b1_d8 sql use c_b1_d8 sql create table c_b1_t8 (t timestamp, i int) sql insert into c_b1_t8 values(1520000020085, 85) diff --git a/tests/script/unique/cluster/balance2.sim b/tests/script/unique/cluster/balance2.sim index 0c880d36ff..6d2c4bdad7 100644 --- a/tests/script/unique/cluster/balance2.sim +++ b/tests/script/unique/cluster/balance2.sim @@ -47,7 +47,7 @@ system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode3 -s start sleep 4001 -sql create database c_b2_d1 replica 2 maxTables 4 +sql create database c_b2_d1 replica 2 sql use c_b2_d1 sql create table c_b2_t1 (t timestamp, i int) sql insert into c_b2_t1 values(1520000020015, 15) @@ -56,7 +56,7 @@ sql insert into c_b2_t1 values(1520000022013, 13) sql insert into c_b2_t1 values(1520000023012, 12) sql insert into c_b2_t1 values(1520000024011, 11) -sql create database c_b2_d2 replica 2 maxTables 4 +sql create database c_b2_d2 replica 2 sql use c_b2_d2 sql create table c_b2_t2 (t timestamp, i int) sql insert into c_b2_t2 values(1520000020025, 25) @@ -65,7 +65,7 @@ sql insert into c_b2_t2 values(1520000022023, 23) sql insert into c_b2_t2 values(1520000023022, 22) sql insert into c_b2_t2 values(1520000024021, 21) -sql create database c_b2_d3 replica 2 maxTables 4 +sql create database c_b2_d3 replica 2 sql use c_b2_d3 sql create table c_b2_t3 (t timestamp, i int) sql insert into c_b2_t3 values(1520000020035, 35) diff --git a/tests/script/unique/cluster/cluster_main.sim b/tests/script/unique/cluster/cluster_main.sim index 7d4a35c7f2..f0a9b1a214 100644 --- a/tests/script/unique/cluster/cluster_main.sim +++ b/tests/script/unique/cluster/cluster_main.sim @@ -57,7 +57,7 @@ sleep 3000 print ============== step2: create db1 with replica 3 $db = db1 print create database $db replica 3 -#sql create database $db replica 3 maxTables $totalTableNum +#sql create database $db replica 3 sql create database $db replica 3 sql use $db diff --git a/tests/script/unique/cluster/cluster_main0.sim b/tests/script/unique/cluster/cluster_main0.sim index ef76f8823f..9f775c0cef 100644 --- a/tests/script/unique/cluster/cluster_main0.sim +++ b/tests/script/unique/cluster/cluster_main0.sim @@ -57,7 +57,7 @@ sleep 3000 print ============== step2: create db1 with replica 3 $db = db1 print create database $db replica 3 -#sql create database $db replica 3 maxTables $totalTableNum +#sql create database $db replica 3 sql create database $db replica 3 sql use $db diff --git a/tests/script/unique/cluster/cluster_main1.sim b/tests/script/unique/cluster/cluster_main1.sim index 4537b09091..7796f1ea00 100644 --- a/tests/script/unique/cluster/cluster_main1.sim +++ b/tests/script/unique/cluster/cluster_main1.sim @@ -58,7 +58,7 @@ print ============== step2: create db1 with replica 3 $replica = 3 $db = db1 print create database $db replica $replica -#sql create database $db replica 3 maxTables $totalTableNum +#sql create database $db replica 3 sql create database $db replica $replica 
sql use $db diff --git a/tests/script/unique/cluster/cluster_main2.sim b/tests/script/unique/cluster/cluster_main2.sim index 84ea3871ef..4866154681 100644 --- a/tests/script/unique/cluster/cluster_main2.sim +++ b/tests/script/unique/cluster/cluster_main2.sim @@ -58,7 +58,7 @@ print ============== step2: create db1 with replica 3 $replica = 3 $db = db1 print create database $db replica $replica -#sql create database $db replica 3 maxTables $totalTableNum +#sql create database $db replica 3 sql create database $db replica $replica sql use $db diff --git a/tests/script/unique/cluster/vgroup100.sim b/tests/script/unique/cluster/vgroup100.sim new file mode 100644 index 0000000000..cddb38cefd --- /dev/null +++ b/tests/script/unique/cluster/vgroup100.sim @@ -0,0 +1,127 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 + +system sh/cfg.sh -n dnode1 -c maxTables -v 4 +system sh/cfg.sh -n dnode2 -c maxTables -v 4 +system sh/cfg.sh -n dnode3 -c maxTables -v 4 + +system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0 +system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0 +system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0 + +print ============================== step1 +system sh/exec.sh -n dnode1 -s start +sleep 2000 +sql connect + +print ============================== step2 +print ========= start dnode2 +sql create dnode $hostname2 +system sh/exec.sh -n dnode2 -s start +sql create dnode $hostname3 +system sh/exec.sh -n dnode3 -s start + +sleep 5000 +sql show mnodes +$dnode1Role = $data2_1 +$dnode2Role = $data2_2 +$dnode3Role = $data2_3 + +print $dnode1Role +print $dnode2Role +print $dnode3Role + +print ============================== step3 +$count = 2 +while $count < 102 + $db = d . $count + $tb = $db . .t + sql create database $db replica 3 cache 1 blocks 3 + sql create table $tb (ts timestamp, i int) + sql insert into $tb values(now, 1) + $count = $count + 1 + print insert into $tb values(now, 1) ==> finished +endw + +print ============================== step4 + +$count = 2 +while $count < 102 + $db = d . $count + $tb = $db . .t + sql select * from $tb + if $rows != 1 then + print select * from $tb + return -1 + endi + $count = $count + 1 + print select * from $tb ==> rows: $rows +endw + +print ============================== step5 +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +sleep 5000 + +print ============================== step6 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +sleep 3000 + +print ============================== step7 + +$x = 0 +show7: + $x = $x + 1 + sleep 2000 + if $x == 50 then + return -1 + endi + +sql show mnodes -x show7 +$dnode1Role = $data2_1 +$dnode2Role = $data2_2 +$dnode3Role = $data2_3 +if $dnode1Role != master then + goto show7 +endi +if $dnode2Role != slave then + goto show7 +endi +if $dnode3Role != slave then + goto show7 +endi + +print ============================== step8 +$x = 0 +show8: + $x = $x + 1 + sleep 2000 + if $x == 20 then + return -1 + endi + +$count = 2 +while $count < 102 + $db = d . $count + $tb = $db . 
.t + sql select * from $tb + if $rows != 1 then + print select * from $tb + goto show8 + endi + $count = $count + 1 + print select * from $tb ==> rows: $rows +endw + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/dnode/alternativeRole.sim b/tests/script/unique/dnode/alternativeRole.sim index fb9d344d20..ab37c1603a 100644 --- a/tests/script/unique/dnode/alternativeRole.sim +++ b/tests/script/unique/dnode/alternativeRole.sim @@ -56,7 +56,7 @@ if $data2_3 != slave then endi print ========== step2 -sql create database d1 maxTables 4 +sql create database d1 sql create table d1.t1 (ts timestamp, i int) sql create table d1.t2 (ts timestamp, i int) sql create table d1.t3 (ts timestamp, i int) diff --git a/tests/script/unique/dnode/balance1.sim b/tests/script/unique/dnode/balance1.sim index 5b1615dc28..b246197742 100644 --- a/tests/script/unique/dnode/balance1.sim +++ b/tests/script/unique/dnode/balance1.sim @@ -30,7 +30,7 @@ system sh/exec.sh -n dnode1 -s start sql connect sleep 3000 -sql create database d1 maxTables 4 +sql create database d1 sql create table d1.t1 (t timestamp, i int) sql insert into d1.t1 values(now+1s, 15) sql insert into d1.t1 values(now+2s, 14) @@ -68,7 +68,7 @@ if $data2_2 != 1 then endi print ========== step3 -sql create database d2 maxTables 4 +sql create database d2 sql create table d2.t2 (t timestamp, i int) sql insert into d2.t2 values(now+1s, 25) sql insert into d2.t2 values(now+2s, 24) @@ -139,7 +139,7 @@ if $data2_3 != 2 then endi print ========== step6 -sql create database d3 maxTables 4 +sql create database d3 sql create table d3.t3 (t timestamp, i int) sql insert into d3.t3 values(now+1s, 35) sql insert into d3.t3 values(now+2s, 34) @@ -193,7 +193,7 @@ if $data2_4 != 1 then endi print ========== step8 -sql create database d4 maxTables 4 +sql create database d4 sql create table d4.t4 (t timestamp, i int) sql insert into d4.t4 values(now+1s, 45) sql insert into d4.t4 values(now+2s, 44) diff --git a/tests/script/unique/dnode/balance2.sim b/tests/script/unique/dnode/balance2.sim index e23562d8b4..1a80e890a0 100644 --- a/tests/script/unique/dnode/balance2.sim +++ b/tests/script/unique/dnode/balance2.sim @@ -28,7 +28,7 @@ system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode3 -s start sleep 3000 -sql create database d1 replica 2 maxTables 4 +sql create database d1 replica 2 sql create table d1.t1 (t timestamp, i int) sql insert into d1.t1 values(now+1s, 15) sql insert into d1.t1 values(now+2s, 14) @@ -36,7 +36,7 @@ sql insert into d1.t1 values(now+3s, 13) sql insert into d1.t1 values(now+4s, 12) sql insert into d1.t1 values(now+5s, 11) -sql create database d2 replica 2 maxTables 4 +sql create database d2 replica 2 sql create table d2.t2 (t timestamp, i int) sql insert into d2.t2 values(now+1s, 25) sql insert into d2.t2 values(now+2s, 24) @@ -117,7 +117,7 @@ if $data2_4 != 2 then endi print ========== step4 -sql create database d3 replica 2 maxTables 4 +sql create database d3 replica 2 sql create table d3.t3 (t timestamp, i int) sql insert into d3.t3 values(now+1s, 35) sql insert into d3.t3 values(now+2s, 34) diff --git a/tests/script/unique/dnode/balance3.sim b/tests/script/unique/dnode/balance3.sim index b2adb24dfa..1f81fde968 100644 --- a/tests/script/unique/dnode/balance3.sim +++ b/tests/script/unique/dnode/balance3.sim @@ -33,7 +33,7 @@ system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode4 -s 
start sleep 3000 -sql create database d1 replica 3 maxTables 4 +sql create database d1 replica 3 sql create table d1.t1 (t timestamp, i int) sql insert into d1.t1 values(now+1s, 15) sql insert into d1.t1 values(now+2s, 14) @@ -41,7 +41,7 @@ sql insert into d1.t1 values(now+3s, 13) sql insert into d1.t1 values(now+4s, 12) sql insert into d1.t1 values(now+5s, 11) -sql create database d2 replica 3 maxTables 4 +sql create database d2 replica 3 sql create table d2.t2 (t timestamp, i int) sql insert into d2.t2 values(now+1s, 25) sql insert into d2.t2 values(now+2s, 24) @@ -136,7 +136,7 @@ if $data2_5 != 2 then endi print ========== step4 -sql create database d3 replica 3 maxTables 4 +sql create database d3 replica 3 sql create table d3.t3 (t timestamp, i int) sql insert into d3.t3 values(now+1s, 35) sql insert into d3.t3 values(now+2s, 34) diff --git a/tests/script/unique/dnode/balancex.sim b/tests/script/unique/dnode/balancex.sim index 0d5da5bbf6..6f3b7dfb74 100644 --- a/tests/script/unique/dnode/balancex.sim +++ b/tests/script/unique/dnode/balancex.sim @@ -20,7 +20,7 @@ system sh/exec.sh -n dnode1 -s start sql connect sleep 3000 -sql create database d1 maxTables 4 +sql create database d1 sql create table d1.t1 (t timestamp, i int) sql insert into d1.t1 values(now+1s, 15) sql insert into d1.t1 values(now+2s, 14) @@ -28,7 +28,7 @@ sql insert into d1.t1 values(now+3s, 13) sql insert into d1.t1 values(now+4s, 12) sql insert into d1.t1 values(now+5s, 11) -sql create database d2 maxTables 4 +sql create database d2 sql create table d2.t2 (t timestamp, i int) sql insert into d2.t2 values(now+1s, 25) sql insert into d2.t2 values(now+2s, 24) @@ -65,7 +65,7 @@ if $data2_2 != 2 then endi print ========== step3 -sql create database d3 replica 2 maxTables 4 +sql create database d3 replica 2 sql create table d3.t3 (t timestamp, i int) sql insert into d3.t3 values(now+1s, 35) sql insert into d3.t3 values(now+2s, 34) diff --git a/tests/script/unique/dnode/offline2.sim b/tests/script/unique/dnode/offline2.sim index 6db6ca4013..2d1467ad59 100644 --- a/tests/script/unique/dnode/offline2.sim +++ b/tests/script/unique/dnode/offline2.sim @@ -27,7 +27,7 @@ sql create dnode $hostname2 system sh/exec.sh -n dnode2 -s start sleep 3000 -sql create database d1 replica 2 maxTables 4 +sql create database d1 replica 2 sql create table d1.t1(ts timestamp, i int) sql insert into d1.t1 values(1588262400001, 1) diff --git a/tests/script/unique/dnode/remove1.sim b/tests/script/unique/dnode/remove1.sim index 246808c56c..7786b9f9d1 100644 --- a/tests/script/unique/dnode/remove1.sim +++ b/tests/script/unique/dnode/remove1.sim @@ -20,7 +20,7 @@ system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect -sql create database d1 maxTables 4 +sql create database d1 sql create table d1.t1 (t timestamp, i int) sql insert into d1.t1 values(now+1s, 15) sql insert into d1.t1 values(now+2s, 14) @@ -28,7 +28,7 @@ sql insert into d1.t1 values(now+3s, 13) sql insert into d1.t1 values(now+4s, 12) sql insert into d1.t1 values(now+5s, 11) -sql create database d2 maxTables 4 +sql create database d2 sql create table d2.t2 (t timestamp, i int) sql insert into d2.t2 values(now+1s, 25) sql insert into d2.t2 values(now+2s, 24) @@ -47,7 +47,7 @@ sql create dnode $hostname2 system sh/exec.sh -n dnode2 -s start sleep 9000 -sql create database d3 replica 2 maxTables 4 +sql create database d3 replica 2 sql create table d3.t3 (t timestamp, i int) sql insert into d3.t3 values(now+1s, 35) sql insert into d3.t3 values(now+2s, 34) diff --git 
a/tests/script/unique/dnode/remove2.sim b/tests/script/unique/dnode/remove2.sim index cf9954c767..cd0331235a 100644 --- a/tests/script/unique/dnode/remove2.sim +++ b/tests/script/unique/dnode/remove2.sim @@ -20,7 +20,7 @@ system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect -sql create database d1 maxTables 4 +sql create database d1 sql create table d1.t1 (t timestamp, i int) sql insert into d1.t1 values(1588262400001, 15) sql insert into d1.t1 values(1588262400002, 14) @@ -28,7 +28,7 @@ sql insert into d1.t1 values(1588262400003, 13) sql insert into d1.t1 values(1588262400004, 12) sql insert into d1.t1 values(1588262400005, 11) -sql create database d2 maxTables 4 +sql create database d2 sql create table d2.t2 (t timestamp, i int) sql insert into d2.t2 values(1588262400001, 25) sql insert into d2.t2 values(1588262400002, 24) @@ -47,7 +47,7 @@ sql create dnode $hostname2 system sh/exec.sh -n dnode2 -s start sleep 9000 -sql create database d3 replica 2 maxTables 4 +sql create database d3 replica 2 sql create table d3.t3 (t timestamp, i int) sql insert into d3.t3 values(1588262400001, 35) sql insert into d3.t3 values(1588262400002, 34) diff --git a/tests/script/unique/dnode/vnode_clean.sim b/tests/script/unique/dnode/vnode_clean.sim index e1ee1da2aa..6df4bf78e8 100644 --- a/tests/script/unique/dnode/vnode_clean.sim +++ b/tests/script/unique/dnode/vnode_clean.sim @@ -19,7 +19,7 @@ print ========== step1 system sh/exec.sh -n dnode1 -s start sql connect -sql create database d1 maxTables 4 +sql create database d1 sql create table d1.t1 (t timestamp, i int) sql insert into d1.t1 values(now+1s, 15) sql insert into d1.t1 values(now+2s, 14) @@ -55,7 +55,7 @@ if $data2_2 != 1 then endi print ========== step3 -sql create database d2 maxTables 4 +sql create database d2 sql create table d2.t2 (t timestamp, i int) sql insert into d2.t2 values(now+1s, 25) @@ -123,7 +123,7 @@ if $data2_3 != 2 then endi print ========== step6 -sql create database d3 maxTables 4 +sql create database d3 sql create table d3.t3 (t timestamp, i int) sql insert into d3.t3 values(now+1s, 35) sql insert into d3.t3 values(now+2s, 34) diff --git a/tests/script/unique/mnode/mgmt20.sim b/tests/script/unique/mnode/mgmt20.sim new file mode 100644 index 0000000000..e51d429925 --- /dev/null +++ b/tests/script/unique/mnode/mgmt20.sim @@ -0,0 +1,71 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 + +system sh/cfg.sh -n dnode1 -c monitor -v 1 +system sh/cfg.sh -n dnode2 -c monitor -v 1 + +print ============== step1 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +sleep 3000 +sql connect + +print ============== step2 +sql create dnode $hostname2 + +$x = 0 +show2: + $x = $x + 1 + sleep 2000 + if $x == 10 then + return -1 + endi + +sql show mnodes +print dnode1 ==> $data2_1 +print dnode2 ==> $data2_2 +if $data2_1 != master then + goto show2 +endi +if $data2_2 != slave then + goto show2 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT + +print ============== step3 +system sh/exec.sh -n dnode2 -s start +sleep 10000 + +system sh/exec.sh -n dnode1 -s start +sql connect + +print =============== step4 +sql select * from log.dn1 +$d1_first = $rows +sql select * from log.dn2 +$d2_first = $rows + +sleep 3000 +sql select * from log.dn1 +$d1_second = $rows +sql select * from log.dn2 +$d2_second = $rows + +print dnode1 
$d1_first $d1_second +print dnode2 $d2_first $d2_second +if $d1_first >= $d1_second then + return -1 +endi + +if $d2_first >= $d2_second then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/stable/balance_replica1.sim b/tests/script/unique/stable/balance_replica1.sim index 362f8ccf7f..3ea158eb39 100644 --- a/tests/script/unique/stable/balance_replica1.sim +++ b/tests/script/unique/stable/balance_replica1.sim @@ -29,7 +29,7 @@ $db = $dbPrefix $mt = $mtPrefix $st = $stPrefix . $i -sql create database $db maxTables 4 +sql create database $db sql use $db sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) diff --git a/tests/script/unique/stable/dnode2.sim b/tests/script/unique/stable/dnode2.sim index 441323ff3c..5c227f8cec 100644 --- a/tests/script/unique/stable/dnode2.sim +++ b/tests/script/unique/stable/dnode2.sim @@ -38,7 +38,7 @@ $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . $i -sql create database $db maxTables 4 +sql create database $db sql use $db sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int) diff --git a/tests/script/unique/stable/dnode2_stop.sim b/tests/script/unique/stable/dnode2_stop.sim index d25c68cbba..19c6de33b3 100644 --- a/tests/script/unique/stable/dnode2_stop.sim +++ b/tests/script/unique/stable/dnode2_stop.sim @@ -43,7 +43,7 @@ $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . $i -sql create database $db maxTables 4 +sql create database $db sql use $db sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int) diff --git a/tests/script/unique/stable/dnode3.sim b/tests/script/unique/stable/dnode3.sim index 712ec04b8c..5fe37faa71 100644 --- a/tests/script/unique/stable/dnode3.sim +++ b/tests/script/unique/stable/dnode3.sim @@ -47,7 +47,7 @@ $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . $i -sql create database $db maxTables 4 +sql create database $db sql use $db sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int) diff --git a/tests/script/unique/stable/replica2_dnode4.sim b/tests/script/unique/stable/replica2_dnode4.sim index c7a9767fcc..4f8211d5d4 100644 --- a/tests/script/unique/stable/replica2_dnode4.sim +++ b/tests/script/unique/stable/replica2_dnode4.sim @@ -54,7 +54,7 @@ $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . $i -sql create database $db replica 2 maxTables 4 +sql create database $db replica 2 sql use $db sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int) diff --git a/tests/script/unique/stable/replica2_vnode3.sim b/tests/script/unique/stable/replica2_vnode3.sim index 84af380106..47d45c3d3d 100644 --- a/tests/script/unique/stable/replica2_vnode3.sim +++ b/tests/script/unique/stable/replica2_vnode3.sim @@ -37,7 +37,7 @@ $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . $i -sql create database $db replica 2 maxTables 4 +sql create database $db replica 2 sql use $db sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int) diff --git a/tests/script/unique/stable/replica3_dnode6.sim b/tests/script/unique/stable/replica3_dnode6.sim index 83ef334301..eeffb86cdb 100644 --- a/tests/script/unique/stable/replica3_dnode6.sim +++ b/tests/script/unique/stable/replica3_dnode6.sim @@ -78,7 +78,7 @@ $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . 
$i -sql create database $db replica 3 maxTables 4 +sql create database $db replica 3 sql use $db sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int) diff --git a/tests/script/unique/stable/replica3_vnode3.sim b/tests/script/unique/stable/replica3_vnode3.sim index 3cd7b92477..bc700b7dda 100644 --- a/tests/script/unique/stable/replica3_vnode3.sim +++ b/tests/script/unique/stable/replica3_vnode3.sim @@ -54,7 +54,7 @@ $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . $i -sql create database $db replica 3 maxTables 4 +sql create database $db replica 3 sql use $db sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int) diff --git a/tests/script/unique/stream/metrics_balance.sim b/tests/script/unique/stream/metrics_balance.sim index c043044499..36086fe4b8 100644 --- a/tests/script/unique/stream/metrics_balance.sim +++ b/tests/script/unique/stream/metrics_balance.sim @@ -32,7 +32,7 @@ sql connect print ============== step1 $db = $dbPrefix -sql create database $db maxTables 4 +sql create database $db sql use $db $i = 0 diff --git a/tests/script/unique/stream/table_balance.sim b/tests/script/unique/stream/table_balance.sim index 6c8958aa48..facb7df459 100644 --- a/tests/script/unique/stream/table_balance.sim +++ b/tests/script/unique/stream/table_balance.sim @@ -34,7 +34,7 @@ $db = $dbPrefix $mt = $mtPrefix $st = $stPrefix . $i -sql create database $db maxTables 4 +sql create database $db sql use $db sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) diff --git a/tests/script/unique/stream/table_move.sim b/tests/script/unique/stream/table_move.sim index d3ea375e1f..d2437e4920 100644 --- a/tests/script/unique/stream/table_move.sim +++ b/tests/script/unique/stream/table_move.sim @@ -65,7 +65,7 @@ $db = $dbPrefix . $i $mt = $mtPrefix . $i $st = $stPrefix . 
$i -sql create database $db maxTables 4 +sql create database $db sql use $db sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) diff --git a/tests/script/unique/vnode/replica3_vgroup.sim b/tests/script/unique/vnode/replica3_vgroup.sim index 09ca261a06..11295ba0a4 100644 --- a/tests/script/unique/vnode/replica3_vgroup.sim +++ b/tests/script/unique/vnode/replica3_vgroup.sim @@ -26,7 +26,7 @@ sleep 3000 print =================== step 1 -sql create database $db replica 3 maxtables 100 +sql create database $db replica 3 sql use $db sql create table st (ts timestamp, speed int) tags (t1 int) sleep 3001 diff --git a/tests/tsim/src/simParse.c b/tests/tsim/src/simParse.c index f201f149a1..8dcf83806f 100644 --- a/tests/tsim/src/simParse.c +++ b/tests/tsim/src/simParse.c @@ -840,14 +840,14 @@ void simInitsimCmdList() { cmdno = SIM_CMD_EXP; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "exp"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = NULL; simCmdList[cmdno].executeCmd = simExecuteExpCmd; cmdno = SIM_CMD_IF; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "if"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseIfCmd; simCmdList[cmdno].executeCmd = NULL; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -855,7 +855,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_ELIF; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "elif"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseElifCmd; simCmdList[cmdno].executeCmd = NULL; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -863,7 +863,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_ELSE; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "else"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseElseCmd; simCmdList[cmdno].executeCmd = NULL; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -871,7 +871,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_ENDI; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "endi"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseEndiCmd; simCmdList[cmdno].executeCmd = NULL; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -879,7 +879,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_WHILE; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "while"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseWhileCmd; simCmdList[cmdno].executeCmd = NULL; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -887,7 +887,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_ENDW; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "endw"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseEndwCmd; simCmdList[cmdno].executeCmd = NULL; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -895,7 +895,7 @@ void simInitsimCmdList() { cmdno = 
SIM_CMD_SWITCH; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "switch"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseSwitchCmd; simCmdList[cmdno].executeCmd = NULL; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -903,7 +903,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_CASE; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "case"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseCaseCmd; simCmdList[cmdno].executeCmd = NULL; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -911,7 +911,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_DEFAULT; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "default"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseDefaultCmd; simCmdList[cmdno].executeCmd = NULL; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -919,7 +919,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_BREAK; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "break"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseBreakCmd; simCmdList[cmdno].executeCmd = NULL; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -927,7 +927,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_CONTINUE; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "continue"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseContinueCmd; simCmdList[cmdno].executeCmd = NULL; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -935,7 +935,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_ENDS; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "ends"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseEndsCmd; simCmdList[cmdno].executeCmd = NULL; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -943,7 +943,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_SLEEP; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "sleep"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseSleepCmd; simCmdList[cmdno].executeCmd = simExecuteSleepCmd; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -951,7 +951,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_GOTO; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "goto"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseGotoCmd; simCmdList[cmdno].executeCmd = simExecuteGotoCmd; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -959,7 +959,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_RUN; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "run"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseRunCmd; simCmdList[cmdno].executeCmd = simExecuteRunCmd; 
simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -967,7 +967,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_RUN_BACK; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "run_back"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseRunBackCmd; simCmdList[cmdno].executeCmd = simExecuteRunBackCmd; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -975,7 +975,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_SYSTEM; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "system"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseSystemCmd; simCmdList[cmdno].executeCmd = simExecuteSystemCmd; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -983,7 +983,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_SYSTEM_CONTENT; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "system_content"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseSystemContentCmd; simCmdList[cmdno].executeCmd = simExecuteSystemContentCmd; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -991,7 +991,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_PRINT; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "print"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParsePrintCmd; simCmdList[cmdno].executeCmd = simExecutePrintCmd; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -999,7 +999,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_SQL; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "sql"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseSqlCmd; simCmdList[cmdno].executeCmd = simExecuteSqlCmd; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -1007,7 +1007,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_SQL_ERROR; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "sql_error"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseSqlErrorCmd; simCmdList[cmdno].executeCmd = simExecuteSqlErrorCmd; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -1015,7 +1015,7 @@ void simInitsimCmdList() { cmdno = SIM_CMD_SQL_SLOW; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "sql_slow"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseSqlSlowCmd; simCmdList[cmdno].executeCmd = simExecuteSqlSlowCmd; simAddCmdIntoHash(&(simCmdList[cmdno])); @@ -1024,14 +1024,14 @@ void simInitsimCmdList() { cmdno = SIM_CMD_TEST; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "test"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = NULL; simCmdList[cmdno].executeCmd = simExecuteTestCmd; cmdno = SIM_CMD_RETURN; simCmdList[cmdno].cmdno = cmdno; strcpy(simCmdList[cmdno].name, "return"); - simCmdList[cmdno].nlen = (int)strlen(simCmdList[cmdno].name); + 
simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); simCmdList[cmdno].parseCmd = simParseReturnCmd; simCmdList[cmdno].executeCmd = simExecuteReturnCmd; simAddCmdIntoHash(&(simCmdList[cmdno]));
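The simParse.c hunks above replace every `(int)` cast with `(int16_t)` when storing the length of a command name, which suggests the `nlen` field of the command table entries is 16-bit. A minimal standalone sketch of the same narrowing follows; the 16-bit field width is inferred only from the cast in the patch, and the sample name is simply the longest literal that appears in the table.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  /* strlen() returns size_t; the command table stores the length in a
   * 16-bit field, so the value is narrowed explicitly.  All registered
   * command names ("sql", "system_content", ...) are far shorter than
   * INT16_MAX, so the cast cannot truncate here. */
  const char *name = "system_content";
  int16_t nlen = (int16_t)strlen(name);
  printf("%s -> nlen = %d\n", name, (int)nlen);
  return 0;
}
```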