Merge branch 'develop' into mtodevelop

This commit is contained in:
Shengliang Guan 2020-09-10 13:59:33 +08:00 committed by GitHub
commit b8e47d3017
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
178 changed files with 4069 additions and 2359 deletions

View File

@ -13,6 +13,9 @@ ENDIF ()
SET(TD_ACCOUNT FALSE) SET(TD_ACCOUNT FALSE)
SET(TD_ADMIN FALSE) SET(TD_ADMIN FALSE)
SET(TD_GRANT FALSE) SET(TD_GRANT FALSE)
SET(TD_SYNC TRUE)
SET(TD_MQTT TRUE)
SET(TD_TSDB_PLUGINS FALSE)
SET(TD_COVER FALSE) SET(TD_COVER FALSE)
SET(TD_MEM_CHECK FALSE) SET(TD_MEM_CHECK FALSE)

View File

@ -3,6 +3,7 @@
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop) [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
[![Docker Pulls](https://img.shields.io/docker/pulls/tdengine/tdengine)](https://hub.docker.com/repository/docker/tdengine/tdengine) [![Docker Pulls](https://img.shields.io/docker/pulls/tdengine/tdengine)](https://hub.docker.com/repository/docker/tdengine/tdengine)
[![tdengine](https://snapcraft.io//tdengine/badge.svg)](https://snapcraft.io/tdengine)
[![TDengine](TDenginelogo.png)](https://www.taosdata.com) [![TDengine](TDenginelogo.png)](https://www.taosdata.com)

View File

@ -121,7 +121,21 @@ func (alert *Alert) refresh(rule *Rule, values map[string]interface{}) {
alert.Values = values alert.Values = values
res := rule.Expr.Eval(func(key string) interface{} { res := rule.Expr.Eval(func(key string) interface{} {
// ToLower is required as column name in result is in lower case // ToLower is required as column name in result is in lower case
return alert.Values[strings.ToLower(key)] i := alert.Values[strings.ToLower(key)]
switch v := i.(type) {
case int8:
return int64(v)
case int16:
return int64(v)
case int:
return int64(v)
case int32:
return int64(v)
case float32:
return float64(v)
default:
return v
}
}) })
val, ok := res.(bool) val, ok := res.(bool)

View File

@ -13,6 +13,18 @@ IF (TD_GRANT)
ADD_DEFINITIONS(-D_GRANT) ADD_DEFINITIONS(-D_GRANT)
ENDIF () ENDIF ()
IF (TD_SYNC)
ADD_DEFINITIONS(-D_SYNC)
ENDIF ()
IF (TD_MQTT)
ADD_DEFINITIONS(-D_MQTT)
ENDIF ()
IF (TD_TSDB_PLUGINS)
ADD_DEFINITIONS(-D_TSDB_PLUGINS)
ENDIF ()
IF (TD_GODLL) IF (TD_GODLL)
ADD_DEFINITIONS(-D_TD_GO_DLL_) ADD_DEFINITIONS(-D_TD_GO_DLL_)
ENDIF () ENDIF ()

View File

@ -42,6 +42,16 @@ IF (${MEM_CHECK} MATCHES "true")
MESSAGE(STATUS "build with memory check") MESSAGE(STATUS "build with memory check")
ENDIF () ENDIF ()
IF (${MQTT} MATCHES "false")
SET(TD_MQTT FALSE)
MESSAGE(STATUS "build without mqtt module")
ENDIF ()
IF (${SYNC} MATCHES "false")
SET(TD_SYNC FALSE)
MESSAGE(STATUS "build without sync module")
ENDIF ()
IF (${RANDOM_FILE_FAIL} MATCHES "true") IF (${RANDOM_FILE_FAIL} MATCHES "true")
SET(TD_RANDOM_FILE_FAIL TRUE) SET(TD_RANDOM_FILE_FAIL TRUE)
MESSAGE(STATUS "build with random-file-fail enabled") MESSAGE(STATUS "build with random-file-fail enabled")

View File

@ -114,6 +114,9 @@ ELSEIF (${OSTYPE} MATCHES "Ningsi80")
MESSAGE(STATUS "input osType: Ningsi80") MESSAGE(STATUS "input osType: Ningsi80")
ELSEIF (${OSTYPE} MATCHES "Linux") ELSEIF (${OSTYPE} MATCHES "Linux")
MESSAGE(STATUS "input osType: Linux") MESSAGE(STATUS "input osType: Linux")
ELSEIF (${OSTYPE} MATCHES "Alpine")
MESSAGE(STATUS "input osType: Alpine")
SET(TD_APLHINE TRUE)
ELSE () ELSE ()
MESSAGE(STATUS "input osType unknown: " ${OSTYPE}) MESSAGE(STATUS "input osType unknown: " ${OSTYPE})
ENDIF () ENDIF ()

deps/CMakeLists.txt vendored
View File

@ -10,6 +10,6 @@ ADD_SUBDIRECTORY(cJson)
ADD_SUBDIRECTORY(wepoll) ADD_SUBDIRECTORY(wepoll)
ADD_SUBDIRECTORY(MsvcLibX) ADD_SUBDIRECTORY(MsvcLibX)
IF (TD_LINUX) IF (TD_LINUX AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C) ADD_SUBDIRECTORY(MQTT-C)
ENDIF () ENDIF ()

View File

@ -1,5 +1,4 @@
cmake_minimum_required(VERSION 3.5) CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
project(MQTT-C VERSION 1.1.2 LANGUAGES C)
# MQTT-C build options # MQTT-C build options
option(MQTT_C_OpenSSL_SUPPORT "Build MQTT-C with OpenSSL support?" OFF) option(MQTT_C_OpenSSL_SUPPORT "Build MQTT-C with OpenSSL support?" OFF)

View File

@ -95,6 +95,8 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data
- [Data Query](https://www.taosdata.com/cn/documentation20/taos-sql/#数据查询): supports time ranges, value filtering, sorting, manual pagination of query results, and more
- [SQL Functions](https://www.taosdata.com/cn/documentation20/taos-sql/#SQL函数): supports aggregate, selection, and computation functions such as avg, min, diff, etc.
- [Time-Dimension Aggregation](https://www.taosdata.com/cn/documentation20/taos-sql/#时间维度聚合): splits table data into time windows before aggregating, for dimensionality reduction
- [Boundary Limits](https://www.taosdata.com/cn/documentation20/taos-sql/#TAOS-SQL-边界限制): boundary limits of TAOS SQL
- [Error Codes](https://www.taosdata.com/cn/documentation20/Taos-Error-Code): TDengine 2.0 error codes and their decimal equivalents
## The Technical Design of TDengine

View File

@ -82,13 +82,39 @@ TDengine's default timestamp precision is milliseconds, but it can be changed through the configuration parameter enableMic
```
Drops the database. All tables it contains will be deleted as well; use with caution.
- **Modify database parameters**
```mysql
ALTER DATABASE db_name COMP 2;
```
The COMP parameter changes the database file compression flag. The valid range is [0, 2]: 0 means no compression, 1 means one-stage compression, and 2 means two-stage compression.
```mysql
ALTER DATABASE db_name REPLICA 2;
```
The REPLICA parameter changes the number of replicas of the database. The valid range is [1, 3]. When used in a cluster, the number of replicas must be smaller than the number of dnodes.
```mysql
ALTER DATABASE db_name KEEP 365;
```
The KEEP parameter changes the number of days data files are kept. The default is 3650; the valid range is [days, 365000], and the value must be greater than or equal to the days parameter.
```mysql
ALTER DATABASE db_name QUORUM 2;
```
The QUORUM parameter is the number of confirmations required for a write to be considered successful. The valid range is [1, 3]. For asynchronous replication, quorum is set to 1, and the virtual node with the master role confirms the write by itself. For synchronous replication, it must be at least 2. In principle, Quorum >= 1 and Quorum <= replica (the number of replicas); this parameter must be provided when starting a sync module instance.
```mysql
ALTER DATABASE db_name BLOCKS 365;
```
The BLOCKS parameter is the number of cache-sized memory blocks in each VNODE (TSDB), so the memory used by a VNODE is roughly cache * blocks. The valid range is [3, 1000].
**Tips**: after changing any of the parameters above, you can run show databases to confirm whether the change took effect, as in the short example below.
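For instance, a minimal round trip could look like the sketch below (`db_name` is just a placeholder for an existing database): shorten the retention period from the 3650-day default, then check the output of the second statement to confirm the new value took effect.
```mysql
ALTER DATABASE db_name KEEP 365;
SHOW DATABASES;
```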
- **Show all databases in the system**
```mysql
SHOW DATABASES;
```
## Table Management
- **Create a table**

View File

@ -0,0 +1,173 @@
# TDengine 2.0 Error Codes and Their Decimal Equivalents
| Code | bit | error code | Error Description | Decimal Error Code |
|-----------------------| :---: | :---------: | :------------------------ | ---------------- |
|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647|
|TSDB_CODE_RPC_AUTH_REQUIRED| 0 | 0x0002 | "Authentication required"| -2147483646|
|TSDB_CODE_RPC_AUTH_FAILURE| 0| 0x0003 | "Authentication failure"| -2147483645|
|TSDB_CODE_RPC_REDIRECT |0 | 0x0004| "Redirect"| -2147483644|
|TSDB_CODE_RPC_NOT_READY| 0 | 0x0005 | "System not ready"| -2147483643|
|TSDB_CODE_RPC_ALREADY_PROCESSED| 0 | 0x0006 |"Message already processed"| -2147483642|
|TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED| 0 |0x0007| "Last session not finished"| -2147483641|
|TSDB_CODE_RPC_MISMATCHED_LINK_ID| 0| 0x0008 | "Mismatched meter id"| -2147483640|
|TSDB_CODE_RPC_TOO_SLOW| 0 | 0x0009 | "Processing of request timed out"| -2147483639|
|TSDB_CODE_RPC_MAX_SESSIONS| 0 | 0x000A | "Number of sessions reached limit"| -2147483638|
|TSDB_CODE_RPC_NETWORK_UNAVAIL| 0 |0x000B | "Unable to establish connection" |-2147483637|
|TSDB_CODE_RPC_APP_ERROR| 0| 0x000C | "Unexpected generic error in RPC"| -2147483636|
|TSDB_CODE_RPC_UNEXPECTED_RESPONSE| 0 |0x000D | "Unexpected response"| -2147483635|
|TSDB_CODE_RPC_INVALID_VALUE| 0 | 0x000E | "Invalid value"| -2147483634|
|TSDB_CODE_RPC_INVALID_TRAN_ID| 0 | 0x000F | "Invalid transaction id"| -2147483633|
|TSDB_CODE_RPC_INVALID_SESSION_ID| 0| 0x0010 | "Invalid session id"| -2147483632|
|TSDB_CODE_RPC_INVALID_MSG_TYPE| 0| 0x0011| "Invalid message type"| -2147483631|
|TSDB_CODE_RPC_INVALID_RESPONSE_TYPE| 0 | 0x0012| "Invalid response type"| -2147483630|
|TSDB_CODE_RPC_INVALID_TIME_STAMP| 0| 0x0013| "Invalid timestamp"| -2147483629|
|TSDB_CODE_COM_OPS_NOT_SUPPORT| 0 | 0x0100| "Operation not supported"| -2147483392|
|TSDB_CODE_COM_MEMORY_CORRUPTED |0| 0x0101 | "Memory corrupted"| -2147483391|
|TSDB_CODE_COM_OUT_OF_MEMORY| 0| 0x0102| "Out of memory"| -2147483390|
|TSDB_CODE_COM_INVALID_CFG_MSG| 0 | 0x0103| "Invalid config message"| -2147483389|
|TSDB_CODE_COM_FILE_CORRUPTED| 0| 0x0104| "Data file corrupted" |-2147483388|
|TSDB_CODE_TSC_INVALID_SQL| 0| 0x0200 | "Invalid SQL statement"| -2147483136|
|TSDB_CODE_TSC_INVALID_QHANDLE| 0 | 0x0201 | "Invalid qhandle"| -2147483135|
|TSDB_CODE_TSC_INVALID_TIME_STAMP| 0 | 0x0202 | "Invalid combination of client/service time"| -2147483134|
|TSDB_CODE_TSC_INVALID_VALUE| 0 | 0x0203| "Invalid value in client"| -2147483133|
|TSDB_CODE_TSC_INVALID_VERSION| 0 | 0x0204 | "Invalid client version" |-2147483132|
|TSDB_CODE_TSC_INVALID_IE| 0 | 0x0205 | "Invalid client ie" |-2147483131|
|TSDB_CODE_TSC_INVALID_FQDN| 0 | 0x0206| "Invalid host name"| -2147483130|
|TSDB_CODE_TSC_INVALID_USER_LENGTH| 0 | 0x0207| "Invalid user name"| -2147483129|
|TSDB_CODE_TSC_INVALID_PASS_LENGTH| 0 | 0x0208 | "Invalid password"| -2147483128|
|TSDB_CODE_TSC_INVALID_DB_LENGTH| 0 | 0x0209| "Database name too long"| -2147483127|
|TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH| 0 | 0x020A | "Table name too long"| -2147483126|
|TSDB_CODE_TSC_INVALID_CONNECTION| 0 | 0x020B| "Invalid connection"| -2147483125|
|TSDB_CODE_TSC_OUT_OF_MEMORY| 0 | 0x020C | "System out of memory" |-2147483124|
|TSDB_CODE_TSC_NO_DISKSPACE| 0 | 0x020D | "System out of disk space"| -2147483123|
|TSDB_CODE_TSC_QUERY_CACHE_ERASED| 0 | 0x020E| "Query cache erased"| -2147483122|
|TSDB_CODE_TSC_QUERY_CANCELLED| 0 | 0x020F |"Query terminated"| -2147483121|
|TSDB_CODE_TSC_SORTED_RES_TOO_MANY| 0 |0x0210 | "Result set too large to be sorted"| -2147483120|
|TSDB_CODE_TSC_APP_ERROR| 0 | 0x0211 | "Application error"| -2147483119|
|TSDB_CODE_TSC_ACTION_IN_PROGRESS| 0 |0x0212 | "Action in progress"| -2147483118|
|TSDB_CODE_TSC_DISCONNECTED| 0 | 0x0213 |"Disconnected from service" |-2147483117|
|TSDB_CODE_TSC_NO_WRITE_AUTH| 0 | 0x0214 | "No write permission" |-2147483116|
|TSDB_CODE_MND_MSG_NOT_PROCESSED| 0| 0x0300| "Message not processed"| -2147482880|
|TSDB_CODE_MND_ACTION_IN_PROGRESS| 0 | 0x0301 |"Message is progressing"| -2147482879|
|TSDB_CODE_MND_ACTION_NEED_REPROCESSED| 0 | 0x0302 |"Messag need to be reprocessed"| -2147482878|
|TSDB_CODE_MND_NO_RIGHTS| 0 | 0x0303| "Insufficient privilege for operation"| -2147482877|
|TSDB_CODE_MND_APP_ERROR| 0 | 0x0304 | "Unexpected generic error in mnode"| -2147482876|
|TSDB_CODE_MND_INVALID_CONNECTION| 0 | 0x0305 | "Invalid message connection"| -2147482875|
|TSDB_CODE_MND_INVALID_MSG_VERSION| 0 | 0x0306 | "Incompatible protocol version"| -2147482874|
|TSDB_CODE_MND_INVALID_MSG_LEN| 0| 0x0307 | "Invalid message length"| -2147482873|
|TSDB_CODE_MND_INVALID_MSG_TYPE| 0 | 0x0308 | "Invalid message type" |-2147482872|
|TSDB_CODE_MND_TOO_MANY_SHELL_CONNS| 0 |0x0309 | "Too many connections"| -2147482871|
|TSDB_CODE_MND_OUT_OF_MEMORY| 0 |0x030A | "Out of memory in mnode"| -2147482870|
|TSDB_CODE_MND_INVALID_SHOWOBJ| 0 | 0x030B |"Data expired"| -2147482869|
|TSDB_CODE_MND_INVALID_QUERY_ID |0 | 0x030C |"Invalid query id" |-2147482868|
|TSDB_CODE_MND_INVALID_STREAM_ID| 0 |0x030D | "Invalid stream id"| -2147482867|
|TSDB_CODE_MND_INVALID_CONN_ID| 0| 0x030E | "Invalid connection id" |-2147482866|
|TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE| 0 | 0x0320| "Object already there"| -2147482848|
|TSDB_CODE_MND_SDB_ERROR| 0 |0x0321 | "Unexpected generic error in sdb" |-2147482847|
|TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE| 0 | 0x0322| "Invalid table type" |-2147482846|
|TSDB_CODE_MND_SDB_OBJ_NOT_THERE| 0 | 0x0323 |"Object not there" |-2147482845|
|TSDB_CODE_MND_SDB_INVAID_META_ROW| 0 | 0x0324| "Invalid meta row" |-2147482844|
|TSDB_CODE_MND_SDB_INVAID_KEY_TYPE| 0 | 0x0325 |"Invalid key type" |-2147482843|
|TSDB_CODE_MND_DNODE_ALREADY_EXIST| 0 | 0x0330 | "DNode already exists"| -2147482832|
|TSDB_CODE_MND_DNODE_NOT_EXIST| 0 | 0x0331| "DNode does not exist" |-2147482831|
|TSDB_CODE_MND_VGROUP_NOT_EXIST| 0 | 0x0332 |"VGroup does not exist"| -2147482830|
|TSDB_CODE_MND_NO_REMOVE_MASTER |0 | 0x0333 | "Master DNode cannot be removed"| -2147482829|
|TSDB_CODE_MND_NO_ENOUGH_DNODES |0 | 0x0334| "Out of DNodes"| -2147482828|
|TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT |0 | 0x0335 | "Cluster cfg inconsistent"| -2147482827|
|TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION| 0 | 0x0336 | "Invalid dnode cfg option"| -2147482826|
|TSDB_CODE_MND_BALANCE_ENABLED| 0 | 0x0337 | "Balance already enabled" |-2147482825|
|TSDB_CODE_MND_VGROUP_NOT_IN_DNODE| 0 |0x0338 | "Vgroup not in dnode"| -2147482824|
|TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE| 0 | 0x0339 | "Vgroup already in dnode"| -2147482823|
|TSDB_CODE_MND_DNODE_NOT_FREE |0 | 0x033A |"Dnode not avaliable"| -2147482822|
|TSDB_CODE_MND_INVALID_CLUSTER_ID |0 |0x033B | "Cluster id not match"| -2147482821|
|TSDB_CODE_MND_NOT_READY| 0 | 0x033C |"Cluster not ready"| -2147482820|
|TSDB_CODE_MND_ACCT_ALREADY_EXIST| 0 | 0x0340 | "Account already exists" |-2147482816|
|TSDB_CODE_MND_INVALID_ACCT| 0 | 0x0341| "Invalid account"| -2147482815|
|TSDB_CODE_MND_INVALID_ACCT_OPTION| 0 | 0x0342 | "Invalid account options"| -2147482814|
|TSDB_CODE_MND_USER_ALREADY_EXIST| 0 | 0x0350 | "User already exists"| -2147482800|
|TSDB_CODE_MND_INVALID_USER |0 | 0x0351 | "Invalid user" |-2147482799|
|TSDB_CODE_MND_INVALID_USER_FORMAT| 0 |0x0352 |"Invalid user format" |-2147482798|
|TSDB_CODE_MND_INVALID_PASS_FORMAT| 0| 0x0353 | "Invalid password format"| -2147482797|
|TSDB_CODE_MND_NO_USER_FROM_CONN| 0 | 0x0354 | "Can not get user from conn"| -2147482796|
|TSDB_CODE_MND_TOO_MANY_USERS| 0 | 0x0355| "Too many users"| -2147482795|
|TSDB_CODE_MND_TABLE_ALREADY_EXIST| 0| 0x0360| "Table already exists"| -2147482784|
|TSDB_CODE_MND_INVALID_TABLE_ID| 0| 0x0361| "Table name too long"| -2147482783|
|TSDB_CODE_MND_INVALID_TABLE_NAME| 0| 0x0362 | "Table does not exist"| -2147482782|
|TSDB_CODE_MND_INVALID_TABLE_TYPE| 0| 0x0363 | "Invalid table type in tsdb"| -2147482781|
|TSDB_CODE_MND_TOO_MANY_TAGS| 0 | 0x0364| "Too many tags"| -2147482780|
|TSDB_CODE_MND_TOO_MANY_TIMESERIES| 0| 0x0366| "Too many time series"| -2147482778|
|TSDB_CODE_MND_NOT_SUPER_TABLE| 0 |0x0367| "Not super table"| -2147482777|
|TSDB_CODE_MND_COL_NAME_TOO_LONG| 0| 0x0368| "Tag name too long"| -2147482776|
|TSDB_CODE_MND_TAG_ALREAY_EXIST| 0| 0x0369| "Tag already exists"| -2147482775|
|TSDB_CODE_MND_TAG_NOT_EXIST| 0 |0x036A | "Tag does not exist" |-2147482774|
|TSDB_CODE_MND_FIELD_ALREAY_EXIST| 0 | 0x036B| "Field already exists"| -2147482773|
|TSDB_CODE_MND_FIELD_NOT_EXIST| 0 | 0x036C | "Field does not exist"| -2147482772|
|TSDB_CODE_MND_INVALID_STABLE_NAME |0 | 0x036D |"Super table does not exist" |-2147482771|
|TSDB_CODE_MND_DB_NOT_SELECTED| 0 | 0x0380 | "Database not specified or available"| -2147482752|
|TSDB_CODE_MND_DB_ALREADY_EXIST| 0 | 0x0381 | "Database already exists"| -2147482751|
|TSDB_CODE_MND_INVALID_DB_OPTION| 0 | 0x0382 | "Invalid database options"| -2147482750|
|TSDB_CODE_MND_INVALID_DB| 0 | 0x0383 | "Invalid database name"| -2147482749|
|TSDB_CODE_MND_MONITOR_DB_FORBIDDEN| 0 | 0x0384 | "Cannot delete monitor database"| -2147482748|
|TSDB_CODE_MND_TOO_MANY_DATABASES| 0| 0x0385 | "Too many databases for account"| -2147482747|
|TSDB_CODE_MND_DB_IN_DROPPING| 0 | 0x0386| "Database not available" |-2147482746|
|TSDB_CODE_DND_MSG_NOT_PROCESSED| 0| 0x0400 | "Message not processed"| -2147482624|
|TSDB_CODE_DND_OUT_OF_MEMORY |0 | 0x0401 | "Dnode out of memory"| -2147482623|
|TSDB_CODE_DND_NO_WRITE_ACCESS| 0 | 0x0402 | "No permission for disk files in dnode"| -2147482622|
|TSDB_CODE_DND_INVALID_MSG_LEN| 0 | 0x0403 | "Invalid message length"| -2147482621|
|TSDB_CODE_VND_ACTION_IN_PROGRESS |0 |0x0500| "Action in progress" |-2147482368|
|TSDB_CODE_VND_MSG_NOT_PROCESSED| 0 |0x0501 | "Message not processed" |-2147482367|
|TSDB_CODE_VND_ACTION_NEED_REPROCESSED |0 |0x0502| "Action need to be reprocessed"| -2147482366|
|TSDB_CODE_VND_INVALID_VGROUP_ID |0 | 0x0503| "Invalid Vgroup ID"| -2147482365|
|TSDB_CODE_VND_INIT_FAILED| 0 | 0x0504 | "Vnode initialization failed"| -2147482364|
|TSDB_CODE_VND_NO_DISKSPACE| 0 |0x0505| "System out of disk space" |-2147482363|
|TSDB_CODE_VND_NO_DISK_PERMISSIONS| 0 | 0x0506| "No write permission for disk files" |-2147482362|
|TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR| 0 | 0x0507 | "Missing data file"| -2147482361|
|TSDB_CODE_VND_OUT_OF_MEMORY |0| 0x0508 | "Out of memory"| -2147482360|
|TSDB_CODE_VND_APP_ERROR| 0| 0x0509 | "Unexpected generic error in vnode"| -2147482359|
|TSDB_CODE_VND_INVALID_STATUS |0| 0x0510 | "Database not ready"| -2147482352|
|TSDB_CODE_VND_NOT_SYNCED| 0 | 0x0511 | "Database suspended"| -2147482351|
|TSDB_CODE_VND_NO_WRITE_AUTH| 0 | 0x0512| "Write operation denied" |-2147482350|
|TSDB_CODE_TDB_INVALID_TABLE_ID |0 | 0x0600 | "Invalid table ID"| -2147482112|
|TSDB_CODE_TDB_INVALID_TABLE_TYPE| 0| 0x0601 |"Invalid table type"| -2147482111|
|TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION| 0| 0x0602| "Invalid table schema version"| -2147482110|
|TSDB_CODE_TDB_TABLE_ALREADY_EXIST| 0 | 0x0603| "Table already exists"| -2147482109|
|TSDB_CODE_TDB_INVALID_CONFIG| 0 | 0x0604| "Invalid configuration"| -2147482108|
|TSDB_CODE_TDB_INIT_FAILED| 0 | 0x0605| "Tsdb init failed"| -2147482107|
|TSDB_CODE_TDB_NO_DISKSPACE| 0 | 0x0606| "No diskspace for tsdb"| -2147482106|
|TSDB_CODE_TDB_NO_DISK_PERMISSIONS| 0 | 0x0607| "No permission for disk files"| -2147482105|
|TSDB_CODE_TDB_FILE_CORRUPTED| 0 | 0x0608| "Data file(s) corrupted"| -2147482104|
|TSDB_CODE_TDB_OUT_OF_MEMORY| 0 | 0x0609| "Out of memory"| -2147482103|
|TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE| 0 | 0x060A| "Tag too old"| -2147482102|
|TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE |0| 0x060B | "Timestamp data out of range"| -2147482101|
|TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP| 0| 0x060C| "Submit message is messed up"| -2147482100|
|TSDB_CODE_TDB_INVALID_ACTION| 0 | 0x060D | "Invalid operation"| -2147482099|
|TSDB_CODE_TDB_INVALID_CREATE_TB_MSG| 0 | 0x060E| "Invalid creation of table"| -2147482098|
|TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM| 0 | 0x060F| "No table data in memory skiplist" |-2147482097|
|TSDB_CODE_TDB_FILE_ALREADY_EXISTS| 0 | 0x0610| "File already exists"| -2147482096|
|TSDB_CODE_TDB_TABLE_RECONFIGURE| 0 | 0x0611| "Need to reconfigure table"| -2147482095|
|TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO| 0 | 0x0612| "Invalid information to create table"| -2147482094|
|TSDB_CODE_QRY_INVALID_QHANDLE| 0 | 0x0700| "Invalid handle"| -2147481856|
|TSDB_CODE_QRY_INVALID_MSG| 0 | 0x0701| "Invalid message"| -2147481855|
|TSDB_CODE_QRY_NO_DISKSPACE| 0 | 0x0702 | "No diskspace for query"| -2147481854|
|TSDB_CODE_QRY_OUT_OF_MEMORY| 0 | 0x0703 | "System out of memory"| -2147481853|
|TSDB_CODE_QRY_APP_ERROR| 0 | 0x0704 | "Unexpected generic error in query"| -2147481852|
|TSDB_CODE_QRY_DUP_JOIN_KEY| 0 | 0x0705| "Duplicated join key"| -2147481851|
|TSDB_CODE_QRY_EXCEED_TAGS_LIMIT| 0 | 0x0706 | "Tag conditon too many"| -2147481850|
|TSDB_CODE_QRY_NOT_READY |0| 0x0707 | "Query not ready" |-2147481849|
|TSDB_CODE_QRY_HAS_RSP| 0 | 0x0708| "Query should response"| -2147481848|
|TSDB_CODE_GRANT_EXPIRED| 0 | 0x0800| "License expired"| -2147481600|
|TSDB_CODE_GRANT_DNODE_LIMITED| 0 | 0x0801 | "DNode creation limited by licence"| -2147481599|
|TSDB_CODE_GRANT_ACCT_LIMITED |0| 0x0802 |"Account creation limited by license"| -2147481598|
|TSDB_CODE_GRANT_TIMESERIES_LIMITED| 0 | 0x0803 | "Table creation limited by license"| -2147481597|
|TSDB_CODE_GRANT_DB_LIMITED| 0 | 0x0804 | "DB creation limited by license"| -2147481596|
|TSDB_CODE_GRANT_USER_LIMITED| 0 | 0x0805 | "User creation limited by license"| -2147481595|
|TSDB_CODE_GRANT_CONN_LIMITED| 0| 0x0806 | "Conn creation limited by license" |-2147481594|
|TSDB_CODE_GRANT_STREAM_LIMITED| 0 | 0x0807 | "Stream creation limited by license"| -2147481593|
|TSDB_CODE_GRANT_SPEED_LIMITED| 0 | 0x0808 | "Write speed limited by license" |-2147481592|
|TSDB_CODE_GRANT_STORAGE_LIMITED| 0 |0x0809 | "Storage capacity limited by license"| -2147481591|
|TSDB_CODE_GRANT_QUERYTIME_LIMITED| 0 | 0x080A | "Query time limited by license" |-2147481590|
|TSDB_CODE_GRANT_CPU_LIMITED| 0 |0x080B |"CPU cores limited by license"| -2147481589|
|TSDB_CODE_SYN_INVALID_CONFIG| 0 | 0x0900| "Invalid Sync Configuration"| -2147481344|
|TSDB_CODE_SYN_NOT_ENABLED| 0 | 0x0901 | "Sync module not enabled" |-2147481343|
|TSDB_CODE_WAL_APP_ERROR| 0| 0x1000 | "Unexpected generic error in wal" |-2147479552|
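Note that the decimal codes in the last column appear to be simply the hex error codes with the high bit set, read as signed 32-bit integers (i.e. 0x80000000 | code). For example, TSDB_CODE_RPC_ACTION_IN_PROGRESS (0x0001) becomes 0x80000001 = -2147483647, and TSDB_CODE_TSC_INVALID_SQL (0x0200) becomes 0x80000200 = -2147483136, matching the values above.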

View File

@ -47,6 +47,8 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
Because TDengine scales horizontally very well, once the total volume is known, you can combine it with the resources of a single physical or virtual machine to easily decide how many physical machines or VMs to purchase.
**To calculate CPU, memory, and storage requirements right away, see the <a href='https://www.taosdata.com/config/config.html'>resource estimation method</a>.**
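As a hypothetical worked example of the raw-size formula above: 10,000 tables, each writing one 100-byte row per second, produce 10,000 × 100 × 86,400 ≈ 86.4 GB of raw data per day; the on-disk footprint is typically smaller once compression is applied.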
## Fault Tolerance and Disaster Recovery
### Fault Tolerance
@ -91,6 +93,7 @@ The TDengine system background service is provided by taosd and can be configured in the taos.cfg file
- role: the optional role of a dnode. 0-any: can serve as an mnode and can also be assigned vnodes; 1-mgmt: can only serve as an mnode and cannot be assigned vnodes; 2-dnode: cannot serve as an mnode and can only be assigned vnodes
- debugFlag: runtime log switch. 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs). Default: 131 or 135 (different modules have different defaults)
- numOfLogLines: the maximum number of lines allowed in a single log file. Default: 10,000,000 lines.
- logKeepDays: the maximum retention time of log files. When greater than 0, a log file is renamed to taosdlog.xxx, where xxx is the timestamp of the file's last modification, in seconds. Default: 0 days.
- maxSQLLength: the maximum length allowed for a single SQL statement. Default: 65380 bytes.
- telemetryReporting: whether TDengine is allowed to collect and report basic usage information; 0 means not allowed, 1 means allowed. Default: 1.

View File

@ -228,7 +228,8 @@ resultSet.close();
stmt.close(); stmt.close();
conn.close(); conn.close();
``` ```
> `Note: always close the connection`; otherwise connection leaks will occur.
## 与连接池使用 ## 与连接池使用
**HikariCP** **HikariCP**

View File

@ -47,66 +47,11 @@
Check whether the TCP port on the server side is working: `nc -l {port}`
Check whether the TCP port connection from the client side is working: `nc {hostIP} {port}`
10. You can also use the network connectivity test built into the taos program to verify whether the specified port connections between the server and the client are working (both TCP and UDP): [Guide to Using TDengine's Built-in Network Testing Tool](https://www.taosdata.com/blog/2020/09/08/1816.html).
taos uses the -n parameter to decide whether to run the server-side or the client-side test. -n server: run the server-side test; -n client: run the client-side test.
1) First, stop the taosd service on the server;
2) On the server, run taos's built-in connectivity test in server mode: taos -n server -P 6030 -e 6042 -l 1000;
3) On the client, run taos's built-in connectivity test in client mode: taos -n client -h host -P 6030 -e 6042 -l 1000;
-n: whether to run the connectivity test in server mode or client mode; the default is empty, meaning the test is not started;
-h: the server name, either an IP address or an FQDN, e.g. 192.168.1.160, 192.168.1.160:6030, hostname1, or hostname1:6030. The default is 127.0.0.1.
-P: the starting port number to test; the default is 6030;
-e: the ending port number to test, which must be greater than or equal to the starting port; the default is 6042;
-l: the packet length used to test port connectivity, at most 64000 bytes; the default is 1000 bytes, and the server and the client must use the same value;
The start and end ports configured on the server must cover the start and end ports configured on the client;
The starting port can be set in three ways: the default value, via -h, or via -P; the priority is -P > -h > default.
Sample output when running on the client:
`sum@sum-virtualBox /home/sum $ taos -n client -h ubuntu-vbox6`
`host: ubuntu-vbox6 start port: 6030 end port: 6042 packet len: 1000`
`tcp port:6030 test ok. udp port:6030 test ok.`
`tcp port:6031 test ok. udp port:6031 test ok.`
`tcp port:6032 test ok. udp port:6032 test ok.`
`tcp port:6033 test ok. udp port:6033 test ok.`
`tcp port:6034 test ok. udp port:6034 test ok.`
`tcp port:6035 test ok. udp port:6035 test ok.`
`tcp port:6036 test ok. udp port:6036 test ok.`
`tcp port:6037 test ok. udp port:6037 test ok.`
`tcp port:6038 test ok. udp port:6038 test ok.`
`tcp port:6039 test ok. udp port:6039 test ok.`
`tcp port:6040 test ok. udp port:6040 test ok.`
`tcp port:6041 test ok. udp port:6041 test ok.`
`tcp port:6042 test ok. udp port:6042 test ok.`
If a port is not reachable, a message like `portxxxx test fail` is printed.
## 6. What should I do when I encounter the error "Unexpected generic error in RPC" or "TDengine Error: Unable to resolve FQDN"?
This error occurs because the client or a data node cannot resolve the FQDN (Fully Qualified Domain Name). For the TAOS Shell or client applications, please check the following:
1. Check that the FQDN of the server you are connecting to is correct
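A quick, hypothetical way to rule out name-resolution problems is to make sure the server's FQDN resolves on the client machine, for example by checking `ping <server FQDN>` or by adding a line such as `192.168.1.100 tdengine-server` (placeholder address and name) to the client's /etc/hosts before retrying the connection.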

View File

@ -122,11 +122,14 @@
# number of replications, for cluster only # number of replications, for cluster only
# replica 1 # replica 1
# mqtt uri # mqtt hostname
# mqttBrokerAddress mqtt://username:password@hostname:1883/taos/ # mqttHostName test.mosquitto.org
# mqtt client name # mqtt port
# mqttBrokerClientId taos_mqtt # mqttPort 1883
# mqtt topic
# mqttTopic /test
# the compressed rpc message, option: # the compressed rpc message, option:
# -1 (no compression) # -1 (no compression)
@ -186,6 +189,9 @@
# max number of rows per log filters # max number of rows per log filters
# numOfLogLines 10000000 # numOfLogLines 10000000
# time of keeping log files, days
# logKeepDays 0
# enable/disable async log # enable/disable async log
# asyncLog 1 # asyncLog 1

View File

@ -1,6 +1,6 @@
name: tdengine name: tdengine
base: core18 # the base snap is the execution environment for this snap base: core18
version: '2.0.2.0' # just for humans, typically '1.2+git' or '1.3.2' version: 'RELEASE_VERSION'
icon: snap/gui/t-dengine.svg icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT. summary: an open-source big data platform designed and optimized for IoT.
description: | description: |
@ -18,41 +18,18 @@ apps:
- network - network
- network-bind - network-bind
- system-observe - system-observe
- systemfiles
taos: taos:
command: taoswrapper.sh command: taoswrapper.sh
plugs: plugs:
- network - network
- system-observe - system-observe
- systemfiles
- historyfile
taosdemo: taosdemo:
command: usr/bin/taosdemo command: usr/bin/taosdemo
plugs: plugs:
- network - network
plugs:
historyfile:
interface: personal-files
read:
- $HOME/.taos_history
write:
- $HOME/.taos_history
systemfiles:
interface: system-files
read:
- /etc/taos
- /var/lib/taos
- /var/log/taos
- /tmp
write:
- /var/log/taos
- /var/lib/taos
- /tmp
parts: parts:
script: script:
plugin: dump plugin: dump
@ -95,7 +72,7 @@ parts:
- usr/bin/taosd - usr/bin/taosd
- usr/bin/taos - usr/bin/taos
- usr/bin/taosdemo - usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.2.0 - usr/lib/libtaos.so.RELEASE_VERSION
- usr/lib/libtaos.so.1 - usr/lib/libtaos.so.1
- usr/lib/libtaos.so - usr/lib/libtaos.so
@ -115,8 +92,3 @@ layout:
bind: $SNAP_DATA/var/log/taos bind: $SNAP_DATA/var/log/taos
/etc/taos: /etc/taos:
bind: $SNAP_DATA/etc/taos bind: $SNAP_DATA/etc/taos
hooks:
install:
plugs: [systemfiles, historyfile]

View File

@ -10,7 +10,9 @@ ADD_SUBDIRECTORY(client)
ADD_SUBDIRECTORY(query) ADD_SUBDIRECTORY(query)
ADD_SUBDIRECTORY(kit) ADD_SUBDIRECTORY(kit)
ADD_SUBDIRECTORY(plugins) ADD_SUBDIRECTORY(plugins)
ADD_SUBDIRECTORY(sync) IF (TD_SYNC)
ADD_SUBDIRECTORY(sync)
ENDIF ()
ADD_SUBDIRECTORY(balance) ADD_SUBDIRECTORY(balance)
ADD_SUBDIRECTORY(mnode) ADD_SUBDIRECTORY(mnode)
ADD_SUBDIRECTORY(vnode) ADD_SUBDIRECTORY(vnode)

View File

@ -232,8 +232,9 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} else if (pInfo->type == TSDB_SQL_DROP_TABLE) { } else if (pInfo->type == TSDB_SQL_DROP_TABLE) {
assert(pInfo->pDCLInfo->nTokens == 1); assert(pInfo->pDCLInfo->nTokens == 1);
if (tscSetTableFullName(pTableMetaInfo, pzName, pSql) != TSDB_CODE_SUCCESS) { code = tscSetTableFullName(pTableMetaInfo, pzName, pSql);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); if(code != TSDB_CODE_SUCCESS) {
return code;
} }
} else if (pInfo->type == TSDB_SQL_DROP_DNODE) { } else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
pzName->n = strdequote(pzName->z); pzName->n = strdequote(pzName->z);
@ -348,8 +349,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_DESCRIBE_TABLE: { case TSDB_SQL_DESCRIBE_TABLE: {
SStrToken* pToken = &pInfo->pDCLInfo->a[0]; SStrToken* pToken = &pInfo->pDCLInfo->a[0];
const char* msg2 = "table name is too long";
const char* msg1 = "invalid table name"; const char* msg1 = "invalid table name";
const char* msg2 = "table name is too long";
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
@ -710,7 +711,9 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
} }
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) { int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) {
const char* msg = "name too long"; const char* msg1 = "name too long";
const char* msg2 = "invalid db name";
const char *msg = msg1;
SSqlCmd* pCmd = &pSql->cmd; SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
@ -728,16 +731,14 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableNa
} else { // get current DB name first, then set it into path } else { // get current DB name first, then set it into path
SStrToken t = {0}; SStrToken t = {0};
getCurrentDBName(pSql, &t); getCurrentDBName(pSql, &t);
if (t.n == 0) {
msg = msg2;
}
code = setObjFullName(pTableMetaInfo->name, NULL, &t, pzTableName, NULL); code = setObjFullName(pTableMetaInfo->name, NULL, &t, pzTableName, NULL);
} }
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
} free(oldName);
if (code != TSDB_CODE_SUCCESS) {
free(oldName);
return code; return code;
} }
@ -1072,7 +1073,7 @@ int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStr
/* db name is not specified, the tableName dose not include db name */ /* db name is not specified, the tableName dose not include db name */
if (pDB != NULL) { if (pDB != NULL) {
if (pDB->n >= TSDB_ACCT_LEN + TSDB_DB_NAME_LEN) { if (pDB->n >= TSDB_ACCT_LEN + TSDB_DB_NAME_LEN || pDB->n == 0) {
return TSDB_CODE_TSC_INVALID_SQL; return TSDB_CODE_TSC_INVALID_SQL;
} }
@ -1597,13 +1598,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
SColumnIndex index = COLUMN_INDEX_INITIALIZER; SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (pItem->pNode->pParam != NULL) { if (pItem->pNode->pParam != NULL) {
SStrToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo;
if (pToken->z == NULL || pToken->n == 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0]; tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0];
if (pParamElem->pNode->nSQLOptr == TK_ALL) { SStrToken* pToken = &pParamElem->pNode->colInfo;
short sqlOptr = pParamElem->pNode->nSQLOptr;
if ((pToken->z == NULL || pToken->n == 0)
&& (TK_INTEGER != sqlOptr)) /*select count(1) from table*/ {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (sqlOptr == TK_ALL) {
// select table.* // select table.*
// check if the table name is valid or not // check if the table name is valid or not
SStrToken tmpToken = pParamElem->pNode->colInfo; SStrToken tmpToken = pParamElem->pNode->colInfo;
@ -1615,6 +1617,21 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false); pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false);
} else if (sqlOptr == TK_INTEGER) { // select count(1) from table1
char buf[8] = {0};
int64_t val = -1;
tVariant* pVariant = &pParamElem->pNode->val;
if (pVariant->nType == TSDB_DATA_TYPE_BIGINT) {
tVariantDump(pVariant, buf, TSDB_DATA_TYPE_BIGINT, true);
val = GET_INT64_VAL(buf);
}
if (val == 1) {
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false);
} else {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
} else { } else {
// count the number of meters created according to the super table // count the number of meters created according to the super table
if (getColumnIndexByName(pCmd, pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { if (getColumnIndexByName(pCmd, pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
@ -2739,27 +2756,31 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
} }
} }
int32_t retVal = TSDB_CODE_SUCCESS;
if (pExpr->nSQLOptr == TK_LE || pExpr->nSQLOptr == TK_LT) { if (pExpr->nSQLOptr == TK_LE || pExpr->nSQLOptr == TK_LT) {
tVariantDump(&pRight->val, (char*)&pColumnFilter->upperBndd, colType, false); retVal = tVariantDump(&pRight->val, (char*)&pColumnFilter->upperBndd, colType, false);
} else { // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd
if (colType == TSDB_DATA_TYPE_BINARY) {
pColumnFilter->pz = (int64_t)calloc(1, pRight->val.nLen + TSDB_NCHAR_SIZE);
pColumnFilter->len = pRight->val.nLen;
tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false); // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd
} else if (colType == TSDB_DATA_TYPE_NCHAR) { } else if (colType == TSDB_DATA_TYPE_BINARY) {
// pRight->val.nLen + 1 is larger than the actual nchar string length pColumnFilter->pz = (int64_t)calloc(1, pRight->val.nLen + TSDB_NCHAR_SIZE);
pColumnFilter->pz = (int64_t)calloc(1, (pRight->val.nLen + 1) * TSDB_NCHAR_SIZE); pColumnFilter->len = pRight->val.nLen;
retVal = tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false);
tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false); } else if (colType == TSDB_DATA_TYPE_NCHAR) {
// pRight->val.nLen + 1 is larger than the actual nchar string length
pColumnFilter->pz = (int64_t)calloc(1, (pRight->val.nLen + 1) * TSDB_NCHAR_SIZE);
retVal = tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false);
size_t len = twcslen((wchar_t*)pColumnFilter->pz);
pColumnFilter->len = len * TSDB_NCHAR_SIZE;
size_t len = twcslen((wchar_t*)pColumnFilter->pz); } else {
pColumnFilter->len = len * TSDB_NCHAR_SIZE; retVal = tVariantDump(&pRight->val, (char*)&pColumnFilter->lowerBndd, colType, false);
} else {
tVariantDump(&pRight->val, (char*)&pColumnFilter->lowerBndd, colType, false);
}
} }
if (retVal != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
switch (pExpr->nSQLOptr) { switch (pExpr->nSQLOptr) {
case TK_LE: case TK_LE:
pColumnFilter->upperRelOptr = TSDB_RELATION_LESS_EQUAL; pColumnFilter->upperRelOptr = TSDB_RELATION_LESS_EQUAL;
@ -4430,7 +4451,6 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const int32_t DEFAULT_TABLE_INDEX = 0; const int32_t DEFAULT_TABLE_INDEX = 0;
const char* msg1 = "invalid table name"; const char* msg1 = "invalid table name";
const char* msg2 = "table name too long";
const char* msg3 = "manipulation of tag available for super table"; const char* msg3 = "manipulation of tag available for super table";
const char* msg4 = "set tag value only available for table"; const char* msg4 = "set tag value only available for table";
const char* msg5 = "only support add one tag"; const char* msg5 = "only support add one tag";
@ -4463,7 +4483,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} }
if (tscSetTableFullName(pTableMetaInfo, &(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) { if (tscSetTableFullName(pTableMetaInfo, &(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return TSDB_CODE_TSC_INVALID_SQL;
} }
int32_t ret = tscGetTableMeta(pSql, pTableMetaInfo); int32_t ret = tscGetTableMeta(pSql, pTableMetaInfo);
@ -5088,7 +5108,7 @@ static int32_t setTimePrecision(SSqlCmd* pCmd, SCMCreateDbMsg* pMsg, SCreateDBIn
} }
static void setCreateDBOption(SCMCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) { static void setCreateDBOption(SCMCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
pMsg->maxTables = htonl(pCreateDb->maxTablesPerVnode); pMsg->maxTables = htonl(-1); // max tables can not be set anymore
pMsg->cacheBlockSize = htonl(pCreateDb->cacheBlockSize); pMsg->cacheBlockSize = htonl(pCreateDb->cacheBlockSize);
pMsg->totalBlocks = htonl(pCreateDb->numOfBlocks); pMsg->totalBlocks = htonl(pCreateDb->numOfBlocks);
pMsg->daysPerFile = htonl(pCreateDb->daysPerFile); pMsg->daysPerFile = htonl(pCreateDb->daysPerFile);
@ -5724,7 +5744,6 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) {
int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo) { int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo) {
const char* msg1 = "invalid table name"; const char* msg1 = "invalid table name";
const char* msg2 = "table name too long";
SSqlCmd* pCmd = &pSql->cmd; SSqlCmd* pCmd = &pSql->cmd;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex); SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);
@ -5745,7 +5764,7 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p
} }
if (tscSetTableFullName(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) { if (tscSetTableFullName(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return TSDB_CODE_TSC_INVALID_SQL;
} }
if (!validateTableColumnInfo(pFieldList, pCmd) || if (!validateTableColumnInfo(pFieldList, pCmd) ||
@ -5800,7 +5819,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
} }
if (tscSetTableFullName(pStableMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) { if (tscSetTableFullName(pStableMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return TSDB_CODE_TSC_INVALID_SQL;
} }
// get meter meta from mnode // get meter meta from mnode
@ -5992,7 +6011,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0)); assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0));
const char* msg0 = "invalid table name"; const char* msg0 = "invalid table name";
const char* msg1 = "table name too long"; //const char* msg1 = "table name too long";
const char* msg2 = "point interpolation query needs timestamp"; const char* msg2 = "point interpolation query needs timestamp";
const char* msg5 = "fill only available for interval query"; const char* msg5 = "fill only available for interval query";
const char* msg6 = "start(end) time of query range required or time range too large"; const char* msg6 = "start(end) time of query range required or time range too large";
@ -6065,7 +6084,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
SStrToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz}; SStrToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz};
if (tscSetTableFullName(pTableMetaInfo1, &t, pSql) != TSDB_CODE_SUCCESS) { if (tscSetTableFullName(pTableMetaInfo1, &t, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return TSDB_CODE_TSC_INVALID_SQL;
} }
tVariant* pTableItem1 = &pQuerySql->from->a[i + 1].pVar; tVariant* pTableItem1 = &pQuerySql->from->a[i + 1].pVar;

View File

@ -406,7 +406,7 @@ int doProcessSql(SSqlObj *pSql) {
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
pRes->code = code; pRes->code = code;
tscQueueAsyncRes(pSql); tscQueueAsyncRes(pSql);
return pRes->code; return code;
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -2013,7 +2013,8 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
return 0; return 0;
} }
int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) { int tscProcessDropDbRsp(SSqlObj *pSql) {
pSql->pTscObj->db[0] = 0;
taosCacheEmpty(tscCacheHandle); taosCacheEmpty(tscCacheHandle);
return 0; return 0;
} }

View File

@ -718,6 +718,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
// TODO: all subqueries should be freed correctly before close this connection.
void tscCloseTscObj(STscObj* pObj) { void tscCloseTscObj(STscObj* pObj) {
assert(pObj != NULL); assert(pObj != NULL);
@ -727,6 +728,7 @@ void tscCloseTscObj(STscObj* pObj) {
if (pObj->pDnodeConn != NULL) { if (pObj->pDnodeConn != NULL) {
rpcClose(pObj->pDnodeConn); rpcClose(pObj->pDnodeConn);
pObj->pDnodeConn = NULL;
} }
tscDebug("%p DB connection is closed, dnodeConn:%p", pObj, pObj->pDnodeConn); tscDebug("%p DB connection is closed, dnodeConn:%p", pObj, pObj->pDnodeConn);

View File

@ -325,8 +325,6 @@ void tdResetKVRowBuilder(SKVRowBuilder *pBuilder);
SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder); SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) { static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) {
ASSERT(pBuilder->nCols == 0 || colId > pBuilder->pColIdx[pBuilder->nCols - 1].colId);
if (pBuilder->nCols >= pBuilder->tCols) { if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2; pBuilder->tCols *= 2;
pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);

View File

@ -104,8 +104,12 @@ extern int32_t tsTelegrafUseFieldNum;
// mqtt // mqtt
extern int32_t tsEnableMqttModule; extern int32_t tsEnableMqttModule;
extern char tsMqttBrokerAddress[]; extern char tsMqttHostName[];
extern char tsMqttBrokerClientId[]; extern char tsMqttPort[];
extern char tsMqttUser[];
extern char tsMqttPass[];
extern char tsMqttClientId[];
extern char tsMqttTopic[];
// monitor // monitor
extern int32_t tsEnableMonitorModule; extern int32_t tsEnableMonitorModule;
@ -154,6 +158,7 @@ extern char buildinfo[];
// log // log
extern int32_t tsAsyncLog; extern int32_t tsAsyncLog;
extern int32_t tsNumOfLogLines; extern int32_t tsNumOfLogLines;
extern int32_t tsLogKeepDays;
extern int32_t dDebugFlag; extern int32_t dDebugFlag;
extern int32_t vDebugFlag; extern int32_t vDebugFlag;
extern int32_t mDebugFlag; extern int32_t mDebugFlag;

View File

@ -137,8 +137,12 @@ int32_t tsTelegrafUseFieldNum = 0;
// mqtt // mqtt
int32_t tsEnableMqttModule = 0; // not finished yet, not started it by default int32_t tsEnableMqttModule = 0; // not finished yet, not started it by default
char tsMqttBrokerAddress[128] = {0}; char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org";
char tsMqttBrokerClientId[128] = {0}; char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883";
char tsMqttUser[TSDB_MQTT_USER_LEN] = {0};
char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0};
char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber";
char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // #
// monitor // monitor
int32_t tsEnableMonitorModule = 1; int32_t tsEnableMonitorModule = 1;
@ -247,8 +251,11 @@ bool taosCfgDynamicOptions(char *msg) {
for (int32_t i = 0; i < tsGlobalConfigNum; ++i) { for (int32_t i = 0; i < tsGlobalConfigNum; ++i) {
SGlobalCfg *cfg = tsGlobalConfig + i; SGlobalCfg *cfg = tsGlobalConfig + i;
if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; //if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
if (cfg->valType != TAOS_CFG_VTYPE_INT32) continue; if (cfg->valType != TAOS_CFG_VTYPE_INT32) continue;
int32_t cfgLen = (int32_t)strlen(cfg->option);
if (cfgLen != olen) continue;
if (strncasecmp(option, cfg->option, olen) != 0) continue; if (strncasecmp(option, cfg->option, olen) != 0) continue;
*((int32_t *)cfg->ptr) = vint; *((int32_t *)cfg->ptr) = vint;
@ -767,26 +774,36 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "mqttBrokerAddress"; cfg.option = "mqttHostName";
cfg.ptr = tsMqttBrokerAddress; cfg.ptr = tsMqttHostName;
cfg.valType = TAOS_CFG_VTYPE_STRING; cfg.valType = TAOS_CFG_VTYPE_STRING;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_NOT_PRINT; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_NOT_PRINT;
cfg.minValue = 0; cfg.minValue = 0;
cfg.maxValue = 0; cfg.maxValue = 0;
cfg.ptrLength = 126; cfg.ptrLength = TSDB_MQTT_HOSTNAME_LEN;
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "mqttBrokerClientId"; cfg.option = "mqttPort";
cfg.ptr = tsMqttBrokerClientId; cfg.ptr = tsMqttPort;
cfg.valType = TAOS_CFG_VTYPE_STRING; cfg.valType = TAOS_CFG_VTYPE_STRING;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_NOT_PRINT; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_NOT_PRINT;
cfg.minValue = 0; cfg.minValue = 0;
cfg.maxValue = 0; cfg.maxValue = 0;
cfg.ptrLength = 126; cfg.ptrLength = TSDB_MQTT_PORT_LEN;
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "mqttTopic";
cfg.ptr = tsMqttTopic;
cfg.valType = TAOS_CFG_VTYPE_STRING;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_NOT_PRINT;
cfg.minValue = 0;
cfg.maxValue = 0;
cfg.ptrLength = TSDB_MQTT_TOPIC_LEN;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "compressMsgSize"; cfg.option = "compressMsgSize";
cfg.ptr = &tsCompressMsgSize; cfg.ptr = &tsCompressMsgSize;
cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.valType = TAOS_CFG_VTYPE_INT32;
@ -996,12 +1013,22 @@ static void doInitGlobalConfig(void) {
cfg.ptr = &tsNumOfLogLines; cfg.ptr = &tsNumOfLogLines;
cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 10000; cfg.minValue = 1000;
cfg.maxValue = 2000000000; cfg.maxValue = 2000000000;
cfg.ptrLength = 0; cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "logKeepDays";
cfg.ptr = &tsLogKeepDays;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 0;
cfg.maxValue = 365000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "asyncLog"; cfg.option = "asyncLog";
cfg.ptr = &tsAsyncLog; cfg.ptr = &tsAsyncLog;
cfg.valType = TAOS_CFG_VTYPE_INT16; cfg.valType = TAOS_CFG_VTYPE_INT16;
@ -1270,6 +1297,9 @@ void taosInitGlobalCfg() {
} }
bool taosCheckGlobalCfg() { bool taosCheckGlobalCfg() {
char fqdn[TSDB_FQDN_LEN];
uint16_t port;
if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) { if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) {
taosSetAllDebugFlag(); taosSetAllDebugFlag();
} }
@ -1278,17 +1308,23 @@ bool taosCheckGlobalCfg() {
taosGetFqdn(tsLocalFqdn); taosGetFqdn(tsLocalFqdn);
} }
snprintf(tsLocalEp, sizeof(tsLocalEp), "%s:%d", tsLocalFqdn, tsServerPort); snprintf(tsLocalEp, sizeof(tsLocalEp), "%s:%u", tsLocalFqdn, tsServerPort);
uInfo("localEp is: %s", tsLocalEp); uInfo("localEp is: %s", tsLocalEp);
if (tsFirst[0] == 0) { if (tsFirst[0] == 0) {
strcpy(tsFirst, tsLocalEp); strcpy(tsFirst, tsLocalEp);
} else {
taosGetFqdnPortFromEp(tsFirst, fqdn, &port);
snprintf(tsFirst, sizeof(tsFirst), "%s:%u", fqdn, port);
} }
if (tsSecond[0] == 0) { if (tsSecond[0] == 0) {
strcpy(tsSecond, tsLocalEp); strcpy(tsSecond, tsLocalEp);
} else {
taosGetFqdnPortFromEp(tsSecond, fqdn, &port);
snprintf(tsSecond, sizeof(tsSecond), "%s:%u", fqdn, port);
} }
taosGetSystemInfo(); taosGetSystemInfo();
tsSetLocale(); tsSetLocale();

View File

@ -0,0 +1,81 @@
package com.taosdata.jdbc;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class QueryDataTest extends BaseTest {
static Connection connection = null;
static Statement statement = null;
static String dbName = "test";
static String stbName = "meters";
static String host = "localhost";
static int numOfTables = 30;
final static int numOfRecordsPerTable = 1000;
static long ts = 1496732686000L;
final static String tablePrefix = "t";
@Before
public void createDatabase() throws SQLException {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("use " + dbName);
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(6))";
statement.executeUpdate(createTableSql);
}
@Test
public void testQueryBinaryData() throws SQLException{
String insertSql = "insert into " + stbName + " values(now, 'taosda')";
System.out.println(insertSql);
statement.executeUpdate(insertSql);
String querySql = "select * from " + stbName;
ResultSet rs = statement.executeQuery(querySql);
while(rs.next()) {
String name = rs.getString(2) + "001";
System.out.println("name = " + name);
assertEquals(name, "taosda001");
}
rs.close();
}
@After
public void close() throws Exception {
statement.close();
connection.close();
Thread.sleep(10);
}
}

View File

@ -11,10 +11,12 @@ AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX) IF (TD_LINUX)
ADD_EXECUTABLE(taosd ${SRC}) ADD_EXECUTABLE(taosd ${SRC})
TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lz4)
IF (TD_SOMODE_STATIC) IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosd mnode taos_static monitor http mqtt tsdb twal vnode cJson lz4 balance sync) TARGET_LINK_LIBRARIES(taosd taos_static)
ELSE () ELSE ()
TARGET_LINK_LIBRARIES(taosd mnode taos monitor http mqtt tsdb twal vnode cJson lz4 balance sync) TARGET_LINK_LIBRARIES(taosd taos)
ENDIF () ENDIF ()
IF (TD_ACCOUNT) IF (TD_ACCOUNT)
@ -25,6 +27,14 @@ IF (TD_LINUX)
TARGET_LINK_LIBRARIES(taosd grant) TARGET_LINK_LIBRARIES(taosd grant)
ENDIF () ENDIF ()
IF (TD_MQTT)
TARGET_LINK_LIBRARIES(taosd mqtt)
ENDIF ()
IF (TD_SYNC)
TARGET_LINK_LIBRARIES(taosd balance sync)
ENDIF ()
SET(PREPARE_ENV_CMD "prepare_env_cmd") SET(PREPARE_ENV_CMD "prepare_env_cmd")
SET(PREPARE_ENV_TARGET "prepare_env_target") SET(PREPARE_ENV_TARGET "prepare_env_target")
ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD} ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD}

View File

@ -611,7 +611,7 @@ static bool dnodeReadMnodeInfos() {
} }
for (int i = 0; i < size; ++i) { for (int i = 0; i < size; ++i) {
cJSON* nodeInfo = cJSON_GetArrayItem(nodeInfos, i); cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i);
if (nodeInfo == NULL) continue; if (nodeInfo == NULL) continue;
cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId"); cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
@ -627,7 +627,7 @@ static bool dnodeReadMnodeInfos() {
goto PARSE_OVER; goto PARSE_OVER;
} }
strncpy(tsDMnodeInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_EP_LEN); strncpy(tsDMnodeInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_EP_LEN);
} }
ret = true; ret = true;

View File

@ -62,6 +62,7 @@ static void dnodeAllocModules() {
dnodeSetModuleStatus(TSDB_MOD_HTTP); dnodeSetModuleStatus(TSDB_MOD_HTTP);
} }
#ifdef _MQTT
tsModule[TSDB_MOD_MQTT].enable = (tsEnableMqttModule == 1); tsModule[TSDB_MOD_MQTT].enable = (tsEnableMqttModule == 1);
tsModule[TSDB_MOD_MQTT].name = "mqtt"; tsModule[TSDB_MOD_MQTT].name = "mqtt";
tsModule[TSDB_MOD_MQTT].initFp = mqttInitSystem; tsModule[TSDB_MOD_MQTT].initFp = mqttInitSystem;
@ -71,6 +72,7 @@ static void dnodeAllocModules() {
if (tsEnableMqttModule) { if (tsEnableMqttModule) {
dnodeSetModuleStatus(TSDB_MOD_MQTT); dnodeSetModuleStatus(TSDB_MOD_MQTT);
} }
#endif
tsModule[TSDB_MOD_MONITOR].enable = (tsEnableMonitorModule == 1); tsModule[TSDB_MOD_MONITOR].enable = (tsEnableMonitorModule == 1);
tsModule[TSDB_MOD_MONITOR].name = "monitor"; tsModule[TSDB_MOD_MONITOR].name = "monitor";

View File

@ -154,15 +154,15 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char
rpcMsg.contLen = sizeof(SDMAuthMsg); rpcMsg.contLen = sizeof(SDMAuthMsg);
rpcMsg.msgType = TSDB_MSG_TYPE_DM_AUTH; rpcMsg.msgType = TSDB_MSG_TYPE_DM_AUTH;
dDebug("user:%s, send auth msg to mnode", user); dDebug("user:%s, send auth msg to mnodes", user);
SRpcMsg rpcRsp = {0}; SRpcMsg rpcRsp = {0};
dnodeSendMsgToDnodeRecv(&rpcMsg, &rpcRsp); dnodeSendMsgToDnodeRecv(&rpcMsg, &rpcRsp);
if (rpcRsp.code != 0) { if (rpcRsp.code != 0) {
dError("user:%s, auth msg received from mnode, error:%s", user, tstrerror(rpcRsp.code)); dError("user:%s, auth msg received from mnodes, error:%s", user, tstrerror(rpcRsp.code));
} else { } else {
SDMAuthRsp *pRsp = rpcRsp.pCont; SDMAuthRsp *pRsp = rpcRsp.pCont;
dDebug("user:%s, auth msg received from mnode", user); dDebug("user:%s, auth msg received from mnodes", user);
memcpy(secret, pRsp->secret, TSDB_KEY_LEN); memcpy(secret, pRsp->secret, TSDB_KEY_LEN);
memcpy(ckey, pRsp->ckey, TSDB_KEY_LEN); memcpy(ckey, pRsp->ckey, TSDB_KEY_LEN);
*spi = pRsp->spi; *spi = pRsp->spi;

View File

@ -272,6 +272,13 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_SHOW_SQL_LEN 64 #define TSDB_SHOW_SQL_LEN 64
#define TSDB_SLOW_QUERY_SQL_LEN 512 #define TSDB_SLOW_QUERY_SQL_LEN 512
#define TSDB_MQTT_HOSTNAME_LEN 64
#define TSDB_MQTT_PORT_LEN 8
#define TSDB_MQTT_USER_LEN 24
#define TSDB_MQTT_PASS_LEN 24
#define TSDB_MQTT_TOPIC_LEN 64
#define TSDB_MQTT_CLIENT_ID_LEN 32
#define TSDB_METER_STATE_OFFLINE 0 #define TSDB_METER_STATE_OFFLINE 0
#define TSDB_METER_STATE_ONLLINE 1 #define TSDB_METER_STATE_ONLLINE 1

View File

@ -19,11 +19,11 @@
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
#include <stdint.h>
int32_t mqttInitSystem(); int32_t mqttInitSystem();
int32_t mqttStartSystem(); int32_t mqttStartSystem();
void mqttStopSystem(); void mqttStopSystem();
void mqttCleanUpSystem(); void mqttCleanUpSystem();
#ifdef __cplusplus #ifdef __cplusplus
} }

View File

@ -38,6 +38,10 @@ extern "C" {
#define TSDB_STATUS_COMMIT_START 1 #define TSDB_STATUS_COMMIT_START 1
#define TSDB_STATUS_COMMIT_OVER 2 #define TSDB_STATUS_COMMIT_OVER 2
// TSDB STATE DEFINITION
#define TSDB_STATE_OK 0x0
#define TSDB_STATE_BAD_FILE 0x1
// --------- TSDB APPLICATION HANDLE DEFINITION // --------- TSDB APPLICATION HANDLE DEFINITION
typedef struct { typedef struct {
void *appH; void *appH;
@ -80,6 +84,7 @@ int32_t tsdbDropRepo(char *rootDir);
TSDB_REPO_T *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH); TSDB_REPO_T *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH);
void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit); void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit);
int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg); int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg);
int tsdbGetState(TSDB_REPO_T *repo);
// --------- TSDB TABLE DEFINITION // --------- TSDB TABLE DEFINITION
typedef struct { typedef struct {

View File

@ -101,127 +101,126 @@
#define TK_CONNS 83 #define TK_CONNS 83
#define TK_STATE 84 #define TK_STATE 84
#define TK_KEEP 85 #define TK_KEEP 85
#define TK_MAXTABLES 86 #define TK_CACHE 86
#define TK_CACHE 87 #define TK_REPLICA 87
#define TK_REPLICA 88 #define TK_QUORUM 88
#define TK_QUORUM 89 #define TK_DAYS 89
#define TK_DAYS 90 #define TK_MINROWS 90
#define TK_MINROWS 91 #define TK_MAXROWS 91
#define TK_MAXROWS 92 #define TK_BLOCKS 92
#define TK_BLOCKS 93 #define TK_CTIME 93
#define TK_CTIME 94 #define TK_WAL 94
#define TK_WAL 95 #define TK_FSYNC 95
#define TK_FSYNC 96 #define TK_COMP 96
#define TK_COMP 97 #define TK_PRECISION 97
#define TK_PRECISION 98 #define TK_LP 98
#define TK_LP 99 #define TK_RP 99
#define TK_RP 100 #define TK_TAGS 100
#define TK_TAGS 101 #define TK_USING 101
#define TK_USING 102 #define TK_AS 102
#define TK_AS 103 #define TK_COMMA 103
#define TK_COMMA 104 #define TK_NULL 104
#define TK_NULL 105 #define TK_SELECT 105
#define TK_SELECT 106 #define TK_UNION 106
#define TK_UNION 107 #define TK_ALL 107
#define TK_ALL 108 #define TK_FROM 108
#define TK_FROM 109 #define TK_VARIABLE 109
#define TK_VARIABLE 110 #define TK_INTERVAL 110
#define TK_INTERVAL 111 #define TK_FILL 111
#define TK_FILL 112 #define TK_SLIDING 112
#define TK_SLIDING 113 #define TK_ORDER 113
#define TK_ORDER 114 #define TK_BY 114
#define TK_BY 115 #define TK_ASC 115
#define TK_ASC 116 #define TK_DESC 116
#define TK_DESC 117 #define TK_GROUP 117
#define TK_GROUP 118 #define TK_HAVING 118
#define TK_HAVING 119 #define TK_LIMIT 119
#define TK_LIMIT 120 #define TK_OFFSET 120
#define TK_OFFSET 121 #define TK_SLIMIT 121
#define TK_SLIMIT 122 #define TK_SOFFSET 122
#define TK_SOFFSET 123 #define TK_WHERE 123
#define TK_WHERE 124 #define TK_NOW 124
#define TK_NOW 125 #define TK_RESET 125
#define TK_RESET 126 #define TK_QUERY 126
#define TK_QUERY 127 #define TK_ADD 127
#define TK_ADD 128 #define TK_COLUMN 128
#define TK_COLUMN 129 #define TK_TAG 129
#define TK_TAG 130 #define TK_CHANGE 130
#define TK_CHANGE 131 #define TK_SET 131
#define TK_SET 132 #define TK_KILL 132
#define TK_KILL 133 #define TK_CONNECTION 133
#define TK_CONNECTION 134 #define TK_STREAM 134
#define TK_STREAM 135 #define TK_COLON 135
#define TK_COLON 136 #define TK_ABORT 136
#define TK_ABORT 137 #define TK_AFTER 137
#define TK_AFTER 138 #define TK_ATTACH 138
#define TK_ATTACH 139 #define TK_BEFORE 139
#define TK_BEFORE 140 #define TK_BEGIN 140
#define TK_BEGIN 141 #define TK_CASCADE 141
#define TK_CASCADE 142 #define TK_CLUSTER 142
#define TK_CLUSTER 143 #define TK_CONFLICT 143
#define TK_CONFLICT 144 #define TK_COPY 144
#define TK_COPY 145 #define TK_DEFERRED 145
#define TK_DEFERRED 146 #define TK_DELIMITERS 146
#define TK_DELIMITERS 147 #define TK_DETACH 147
#define TK_DETACH 148 #define TK_EACH 148
#define TK_EACH 149 #define TK_END 149
#define TK_END 150 #define TK_EXPLAIN 150
#define TK_EXPLAIN 151 #define TK_FAIL 151
#define TK_FAIL 152 #define TK_FOR 152
#define TK_FOR 153 #define TK_IGNORE 153
#define TK_IGNORE 154 #define TK_IMMEDIATE 154
#define TK_IMMEDIATE 155 #define TK_INITIALLY 155
#define TK_INITIALLY 156 #define TK_INSTEAD 156
#define TK_INSTEAD 157 #define TK_MATCH 157
#define TK_MATCH 158 #define TK_KEY 158
#define TK_KEY 159 #define TK_OF 159
#define TK_OF 160 #define TK_RAISE 160
#define TK_RAISE 161 #define TK_REPLACE 161
#define TK_REPLACE 162 #define TK_RESTRICT 162
#define TK_RESTRICT 163 #define TK_ROW 163
#define TK_ROW 164 #define TK_STATEMENT 164
#define TK_STATEMENT 165 #define TK_TRIGGER 165
#define TK_TRIGGER 166 #define TK_VIEW 166
#define TK_VIEW 167 #define TK_COUNT 167
#define TK_COUNT 168 #define TK_SUM 168
#define TK_SUM 169 #define TK_AVG 169
#define TK_AVG 170 #define TK_MIN 170
#define TK_MIN 171 #define TK_MAX 171
#define TK_MAX 172 #define TK_FIRST 172
#define TK_FIRST 173 #define TK_LAST 173
#define TK_LAST 174 #define TK_TOP 174
#define TK_TOP 175 #define TK_BOTTOM 175
#define TK_BOTTOM 176 #define TK_STDDEV 176
#define TK_STDDEV 177 #define TK_PERCENTILE 177
#define TK_PERCENTILE 178 #define TK_APERCENTILE 178
#define TK_APERCENTILE 179 #define TK_LEASTSQUARES 179
#define TK_LEASTSQUARES 180 #define TK_HISTOGRAM 180
#define TK_HISTOGRAM 181 #define TK_DIFF 181
#define TK_DIFF 182 #define TK_SPREAD 182
#define TK_SPREAD 183 #define TK_TWA 183
#define TK_TWA 184 #define TK_INTERP 184
#define TK_INTERP 185 #define TK_LAST_ROW 185
#define TK_LAST_ROW 186 #define TK_RATE 186
#define TK_RATE 187 #define TK_IRATE 187
#define TK_IRATE 188 #define TK_SUM_RATE 188
#define TK_SUM_RATE 189 #define TK_SUM_IRATE 189
#define TK_SUM_IRATE 190 #define TK_AVG_RATE 190
#define TK_AVG_RATE 191 #define TK_AVG_IRATE 191
#define TK_AVG_IRATE 192 #define TK_TBID 192
#define TK_TBID 193 #define TK_SEMI 193
#define TK_SEMI 194 #define TK_NONE 194
#define TK_NONE 195 #define TK_PREV 195
#define TK_PREV 196 #define TK_LINEAR 196
#define TK_LINEAR 197 #define TK_IMPORT 197
#define TK_IMPORT 198 #define TK_METRIC 198
#define TK_METRIC 199 #define TK_TBNAME 199
#define TK_TBNAME 200 #define TK_JOIN 200
#define TK_JOIN 201 #define TK_METRICS 201
#define TK_METRICS 202 #define TK_STABLE 202
#define TK_STABLE 203 #define TK_INSERT 203
#define TK_INSERT 204 #define TK_INTO 204
#define TK_INTO 205 #define TK_VALUES 205
#define TK_VALUES 206
#define TK_SPACE 300 #define TK_SPACE 300

View File

@ -409,7 +409,7 @@ void set_terminal_mode() {
} }
} }
void get_history_path(char *history) { sprintf(history, "%s/%s", getpwuid(getuid())->pw_dir, HISTORY_FILE); } void get_history_path(char *history) { sprintf(history, "%s/%s", getenv("HOME"), HISTORY_FILE); }
void clearScreen(int ecmd_pos, int cursor_pos) { void clearScreen(int ecmd_pos, int cursor_pos) {
struct winsize w; struct winsize w;

View File

@ -211,8 +211,8 @@ static int32_t mnodeCreateRootAcct() {
strcpy(pAcct->user, TSDB_DEFAULT_USER); strcpy(pAcct->user, TSDB_DEFAULT_USER);
taosEncryptPass((uint8_t *)TSDB_DEFAULT_PASS, strlen(TSDB_DEFAULT_PASS), pAcct->pass); taosEncryptPass((uint8_t *)TSDB_DEFAULT_PASS, strlen(TSDB_DEFAULT_PASS), pAcct->pass);
pAcct->cfg = (SAcctCfg){ pAcct->cfg = (SAcctCfg){
.maxUsers = 10, .maxUsers = 128,
.maxDbs = 64, .maxDbs = 128,
.maxTimeSeries = INT32_MAX, .maxTimeSeries = INT32_MAX,
.maxConnections = 1024, .maxConnections = 1024,
.maxStreams = 1000, .maxStreams = 1000,

View File

@ -242,6 +242,7 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
return TSDB_CODE_MND_INVALID_DB_OPTION; return TSDB_CODE_MND_INVALID_DB_OPTION;
} }
#if 0
if (pCfg->daysToKeep2 < TSDB_MIN_KEEP || pCfg->daysToKeep2 > pCfg->daysToKeep) { if (pCfg->daysToKeep2 < TSDB_MIN_KEEP || pCfg->daysToKeep2 > pCfg->daysToKeep) {
mError("invalid db option daysToKeep2:%d valid range: [%d, %d]", pCfg->daysToKeep, TSDB_MIN_KEEP, pCfg->daysToKeep); mError("invalid db option daysToKeep2:%d valid range: [%d, %d]", pCfg->daysToKeep, TSDB_MIN_KEEP, pCfg->daysToKeep);
return TSDB_CODE_MND_INVALID_DB_OPTION; return TSDB_CODE_MND_INVALID_DB_OPTION;
@ -251,6 +252,7 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
mError("invalid db option daysToKeep1:%d valid range: [%d, %d]", pCfg->daysToKeep1, TSDB_MIN_KEEP, pCfg->daysToKeep2); mError("invalid db option daysToKeep1:%d valid range: [%d, %d]", pCfg->daysToKeep1, TSDB_MIN_KEEP, pCfg->daysToKeep2);
return TSDB_CODE_MND_INVALID_DB_OPTION; return TSDB_CODE_MND_INVALID_DB_OPTION;
} }
#endif
if (pCfg->maxRowsPerFileBlock < TSDB_MIN_MAX_ROW_FBLOCK || pCfg->maxRowsPerFileBlock > TSDB_MAX_MAX_ROW_FBLOCK) { if (pCfg->maxRowsPerFileBlock < TSDB_MIN_MAX_ROW_FBLOCK || pCfg->maxRowsPerFileBlock > TSDB_MAX_MAX_ROW_FBLOCK) {
mError("invalid db option maxRowsPerFileBlock:%d valid range: [%d, %d]", pCfg->maxRowsPerFileBlock, mError("invalid db option maxRowsPerFileBlock:%d valid range: [%d, %d]", pCfg->maxRowsPerFileBlock,
@ -310,6 +312,13 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
return TSDB_CODE_MND_INVALID_DB_OPTION; return TSDB_CODE_MND_INVALID_DB_OPTION;
} }
#ifndef _SYNC
if (pCfg->replications != 1) {
mError("invalid db option replications:%d can only be 1 in this version", pCfg->replications);
return TSDB_CODE_MND_INVALID_DB_OPTION;
}
#endif
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
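A sketch of what the new guard means for builds where the sync module is disabled (so _SYNC is undefined); the call below is illustrative only, since mnodeCheckDbCfg is a static helper inside this file:

SDbCfg cfg = {0};
cfg.replications = 2;                   // more than one replica requested
int32_t code = mnodeCheckDbCfg(&cfg);   // rejected with TSDB_CODE_MND_INVALID_DB_OPTION in a
                                        // non-sync build; subject to the other range checks otherwise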

View File

@ -72,12 +72,16 @@ static int32_t mnodeDnodeActionInsert(SSdbOper *pOper) {
pDnode->lastAccess = tsAccessSquence; pDnode->lastAccess = tsAccessSquence;
} }
mInfo("dnode:%d, fqdn:%s ep:%s port:%d, do insert action", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t mnodeDnodeActionDelete(SSdbOper *pOper) { static int32_t mnodeDnodeActionDelete(SSdbOper *pOper) {
SDnodeObj *pDnode = pOper->pObj; SDnodeObj *pDnode = pOper->pObj;
#ifndef _SYNC
mnodeDropAllDnodeVgroups(pDnode);
#endif
mnodeDropMnodeLocal(pDnode->dnodeId); mnodeDropMnodeLocal(pDnode->dnodeId);
balanceAsyncNotify(); balanceAsyncNotify();
@ -585,7 +589,11 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) {
mInfo("dnode:%d, start to drop it", pDnode->dnodeId); mInfo("dnode:%d, start to drop it", pDnode->dnodeId);
#ifndef _SYNC
int32_t code = mnodeDropDnode(pDnode, pMsg);
#else
int32_t code = balanceDropDnode(pDnode); int32_t code = balanceDropDnode(pDnode);
#endif
mnodeDecDnodeRef(pDnode); mnodeDecDnodeRef(pDnode);
return code; return code;
} }
@ -1043,3 +1051,59 @@ static char* mnodeGetDnodeAlternativeRoleStr(int32_t alternativeRole) {
} }
} }
#ifndef _SYNC
int32_t balanceInit() { return TSDB_CODE_SUCCESS; }
void balanceCleanUp() {}
void balanceAsyncNotify() {}
void balanceSyncNotify() {}
void balanceReset() {}
int32_t balanceAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId) { return TSDB_CODE_SYN_NOT_ENABLED; }
char* syncRole[] = {
"offline",
"unsynced",
"syncing",
"slave",
"master"
};
int32_t balanceAllocVnodes(SVgObj *pVgroup) {
void * pIter = NULL;
SDnodeObj *pDnode = NULL;
SDnodeObj *pSelDnode = NULL;
float vnodeUsage = 1000.0;
while (1) {
pIter = mnodeGetNextDnode(pIter, &pDnode);
if (pDnode == NULL) break;
if (pDnode->numOfCores > 0 && pDnode->openVnodes < TSDB_MAX_VNODES) {
float openVnodes = pDnode->openVnodes;
if (pDnode->isMgmt) openVnodes += tsMnodeEqualVnodeNum;
float usage = openVnodes / pDnode->numOfCores;
if (usage <= vnodeUsage) {
pSelDnode = pDnode;
vnodeUsage = usage;
}
}
mnodeDecDnodeRef(pDnode);
}
sdbFreeIter(pIter);
if (pSelDnode == NULL) {
mError("failed to alloc vnode to vgroup");
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
}
pVgroup->vnodeGid[0].dnodeId = pSelDnode->dnodeId;
pVgroup->vnodeGid[0].pDnode = pSelDnode;
mDebug("dnode:%d, alloc one vnode to vgroup, openVnodes:%d", pSelDnode->dnodeId, pSelDnode->openVnodes);
return TSDB_CODE_SUCCESS;
}
#endif
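A quick worked example of the selection metric in the fallback balanceAllocVnodes() above, where usage = (openVnodes + mnode penalty) / numOfCores and the lowest value wins (numbers are illustrative; tsMnodeEqualVnodeNum is assumed to be 4 here):

/*   dnode 1: 16 open vnodes, 8 cores, not mgmt  -> usage = 16 / 8      = 2.00
 *   dnode 2:  2 open vnodes, 4 cores, is mgmt   -> usage = (2 + 4) / 4 = 1.50
 * dnode 2 has the lower usage, so vnodeGid[0] of the new vgroup is placed on it. */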

View File

@ -68,6 +68,7 @@ static int32_t mnodeMnodeActionInsert(SSdbOper *pOper) {
pDnode->isMgmt = true; pDnode->isMgmt = true;
mnodeDecDnodeRef(pDnode); mnodeDecDnodeRef(pDnode);
mInfo("mnode:%d, fqdn:%s ep:%s port:%d, do insert action", pMnode->mnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }

View File

@ -98,8 +98,10 @@ SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) {
.connId = connId, .connId = connId,
.stime = taosGetTimestampMs() .stime = taosGetTimestampMs()
}; };
tstrncpy(connObj.user, user, sizeof(connObj.user)); tstrncpy(connObj.user, user, sizeof(connObj.user));
connObj.lastAccess = connObj.stime;
SConnObj *pConn = taosCachePut(tsMnodeConnCache, &connId, sizeof(int32_t), &connObj, sizeof(connObj), CONN_KEEP_TIME * 1000); SConnObj *pConn = taosCachePut(tsMnodeConnCache, &connId, sizeof(int32_t), &connObj, sizeof(connObj), CONN_KEEP_TIME * 1000);
mDebug("connId:%d, is created, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port); mDebug("connId:%d, is created, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port);
@ -244,6 +246,7 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi
cols++; cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
if (pConnObj->lastAccess < pConnObj->stime) pConnObj->lastAccess = pConnObj->stime;
*(int64_t *)pWrite = pConnObj->lastAccess; *(int64_t *)pWrite = pConnObj->lastAccess;
cols++; cols++;

View File

@ -612,8 +612,8 @@ static int sdbWrite(void *param, void *data, int type) {
} else if (action == SDB_ACTION_DELETE) { } else if (action == SDB_ACTION_DELETE) {
void *pRow = sdbGetRowMeta(pTable, pHead->cont); void *pRow = sdbGetRowMeta(pTable, pHead->cont);
if (pRow == NULL) { if (pRow == NULL) {
sdbError("table:%s, failed to get object:%s from wal while dispose delete action", pTable->tableName, sdbDebug("table:%s, object:%s not exist in hash, ignore delete action", pTable->tableName,
pHead->cont); sdbGetKeyStr(pTable, pHead->cont));
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
SSdbOper oper = {.table = pTable, .pObj = pRow}; SSdbOper oper = {.table = pTable, .pObj = pRow};
@ -621,8 +621,8 @@ static int sdbWrite(void *param, void *data, int type) {
} else if (action == SDB_ACTION_UPDATE) { } else if (action == SDB_ACTION_UPDATE) {
void *pRow = sdbGetRowMeta(pTable, pHead->cont); void *pRow = sdbGetRowMeta(pTable, pHead->cont);
if (pRow == NULL) { if (pRow == NULL) {
sdbError("table:%s, failed to get object:%s from wal while dispose update action", pTable->tableName, sdbDebug("table:%s, object:%s not exist in hash, ignore update action", pTable->tableName,
pHead->cont); sdbGetKeyStr(pTable, pHead->cont));
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable}; SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable};

View File

@ -1711,14 +1711,20 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
mnodeDestroyChildTable(pTable); mnodeDestroyChildTable(pTable);
return TSDB_CODE_MND_INVALID_TABLE_NAME; return TSDB_CODE_MND_INVALID_TABLE_NAME;
} }
pTable->suid = pMsg->pSTable->uid; pTable->suid = pMsg->pSTable->uid;
pTable->uid = (((uint64_t)pTable->vgId) << 40) + ((((uint64_t)pTable->sid) & ((1ul << 24) - 1ul)) << 16) + pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->sid) & ((1ul << 24) - 1ul)) << 24) +
(sdbGetVersion() & ((1ul << 16) - 1ul)); ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
pTable->superTable = pMsg->pSTable; pTable->superTable = pMsg->pSTable;
} else { } else {
pTable->uid = (((uint64_t)pTable->vgId) << 40) + ((((uint64_t)pTable->sid) & ((1ul << 24) - 1ul)) << 16) + if (pTable->info.type == TSDB_SUPER_TABLE) {
(sdbGetVersion() & ((1ul << 16) - 1ul)); int64_t us = taosGetTimestampUs();
pTable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
} else {
pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->sid) & ((1ul << 24) - 1ul)) << 24) +
((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
}
pTable->sversion = 0; pTable->sversion = 0;
pTable->numOfColumns = htons(pCreate->numOfColumns); pTable->numOfColumns = htons(pCreate->numOfColumns);
pTable->sqlLen = htons(pCreate->sqlLen); pTable->sqlLen = htons(pCreate->sqlLen);
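For reference, the new 64-bit uid layout used in this hunk, with field widths inferred from the shifts and masks above (the helper below is an illustrative sketch, not code from the commit):

/* child table uid : [63..48] vgId | [47..24] sid | [23..8] sdb version | [7..0] random
 * super table uid : [63..24] low 40 bits of the current us timestamp | [23..8] sdb version | [7..0] random */
static uint64_t buildChildTableUid(uint64_t vgId, uint64_t sid) {
  return (vgId << 48)
       | ((sid & ((1ul << 24) - 1ul)) << 24)
       | ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8)
       | (taosRand() & ((1ul << 8) - 1ul));
}

Compared with the old scheme (vgId shifted by 40, sdb version in the low 16 bits), the added random byte makes uid reuse after a drop-and-recreate far less likely.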

View File

@ -581,7 +581,7 @@ void mnodeDropAllUsers(SAcctObj *pAcct) {
int32_t mnodeRetriveAuth(char *user, char *spi, char *encrypt, char *secret, char *ckey) { int32_t mnodeRetriveAuth(char *user, char *spi, char *encrypt, char *secret, char *ckey) {
if (!sdbIsMaster()) { if (!sdbIsMaster()) {
*secret = 0; *secret = 0;
mDebug("user:%s, failed to auth user, reason:%s", user, tstrerror(TSDB_CODE_APP_NOT_READY)); mDebug("user:%s, failed to auth user, mnode is not master", user);
return TSDB_CODE_APP_NOT_READY; return TSDB_CODE_APP_NOT_READY;
} }

View File

@ -36,7 +36,7 @@ extern "C" {
#include "osLinux32.h" #include "osLinux32.h"
#endif #endif
#ifdef _TD_ALPINE #ifdef _ALPINE
#include "osAlpine.h" #include "osAlpine.h"
#endif #endif

View File

@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#ifndef TDENGINE_OS_LINUX64_H #ifndef TDENGINE_OS_ALPINE_H
#define TDENGINE_OS_LINUX64_H #define TDENGINE_OS_ALPINE_H
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {

View File

@ -24,6 +24,7 @@ extern "C" {
void taosRemoveDir(char *rootDir); void taosRemoveDir(char *rootDir);
int taosMkDir(const char *pathname, mode_t mode); int taosMkDir(const char *pathname, mode_t mode);
void taosRename(char* oldName, char *newName); void taosRename(char* oldName, char *newName);
void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays);
#ifdef __cplusplus #ifdef __cplusplus
} }

View File

@ -75,7 +75,9 @@ extern "C" {
#include <fcntl.h> #include <fcntl.h>
#include <sys/utsname.h> #include <sys/utsname.h>
#include <sys/resource.h> #include <sys/resource.h>
#ifndef _ALPINE
#include <error.h> #include <error.h>
#endif
#include <linux/sysctl.h> #include <linux/sysctl.h>
#ifdef __cplusplus #ifdef __cplusplus

View File

@ -39,6 +39,7 @@
#include <Winsock2.h> #include <Winsock2.h>
#include <time.h> #include <time.h>
#include <inttypes.h> #include <inttypes.h>
#include <conio.h>
#include "msvcProcess.h" #include "msvcProcess.h"
#include "msvcDirect.h" #include "msvcDirect.h"
#include "msvcFcntl.h" #include "msvcFcntl.h"
@ -58,8 +59,6 @@ extern "C" {
int32_t BUILDIN_CTZL(uint64_t val); int32_t BUILDIN_CTZL(uint64_t val);
int32_t BUILDIN_CTZ(uint32_t val); int32_t BUILDIN_CTZ(uint32_t val);
#define TAOS_OS_FUNC_DIR
#define TAOS_OS_FUNC_FILE #define TAOS_OS_FUNC_FILE
#define TAOS_OS_FUNC_FILE_ISREG #define TAOS_OS_FUNC_FILE_ISREG
#define TAOS_OS_FUNC_FILE_ISDIR #define TAOS_OS_FUNC_FILE_ISDIR

View File

@ -18,8 +18,6 @@
#include "tglobal.h" #include "tglobal.h"
#include "tulog.h" #include "tulog.h"
#ifndef TAOS_OS_FUNC_DIR
void taosRemoveDir(char *rootDir) { void taosRemoveDir(char *rootDir) {
DIR *dir = opendir(rootDir); DIR *dir = opendir(rootDir);
if (dir == NULL) return; if (dir == NULL) return;
@ -51,18 +49,54 @@ int taosMkDir(const char *path, mode_t mode) {
} }
void taosRename(char* oldName, char *newName) { void taosRename(char* oldName, char *newName) {
if (0 == tsEnableVnodeBak) {
uInfo("vnode backup not enabled");
return;
}
// if newName is not empty, rename returns failure. // if newName is not empty, rename returns failure.
// the newName must be empty or must not exist // the newName must be empty or must not exist
if (rename(oldName, newName)) { if (rename(oldName, newName)) {
uError("%s is modify to %s fail, reason:%s", oldName, newName, strerror(errno)); uError("failed to rename file %s to %s, reason:%s", oldName, newName, strerror(errno));
} else { } else {
uInfo("%s is modify to %s success!", oldName, newName); uInfo("successfully to rename file %s to %s", oldName, newName);
} }
} }
#endif void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays) {
DIR *dir = opendir(rootDir);
if (dir == NULL) return;
int64_t sec = taosGetTimestampSec();
struct dirent *de = NULL;
while ((de = readdir(dir)) != NULL) {
if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue;
char filename[1024];
snprintf(filename, 1023, "%s/%s", rootDir, de->d_name);
if (de->d_type & DT_DIR) {
continue;
} else {
// struct stat fState;
// if (stat(fname, &fState) < 0) {
// continue;
// }
int32_t len = (int32_t)strlen(filename);
int64_t fileSec = 0;
for (int i = len - 1; i >= 0; i--) {
if (filename[i] == '.') {
fileSec = atoll(filename + i + 1);
break;
}
}
if (fileSec <= 100) continue;
int32_t days = (int32_t)(ABS(sec - fileSec) / 86400 + 1);
if (days > keepDays) {
(void)remove(filename);
uInfo("file:%s is removed, days:%d keepDays:%d", filename, days, keepDays);
} else {
uTrace("file:%s won't be removed, days:%d keepDays:%d", filename, days, keepDays);
}
}
}
closedir(dir);
rmdir(rootDir);
}
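A worked example of the retention rule implemented in taosRemoveOldLogFiles() above, assuming the usual pattern where a rotated log file name ends in its creation time in epoch seconds (the path below is hypothetical):

/* file : /var/log/taos/taosdlog.1599121290  -> atoll() after the last '.' gives fileSec = 1599121290
 * now  : sec = 1601000000
 * days = |1601000000 - 1599121290| / 86400 + 1 = 1878710 / 86400 + 1 = 21 + 1 = 22
 * keepDays = 30 -> the file is kept (traced); keepDays = 10 -> remove() is called and the removal logged. */

Files whose parsed suffix is 100 or less are skipped, which leaves live, unsuffixed log files untouched.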

View File

@ -579,7 +579,11 @@ void taosSetCoreDump() {
struct rlimit rlim; struct rlimit rlim;
struct rlimit rlim_new; struct rlimit rlim_new;
if (getrlimit(RLIMIT_CORE, &rlim) == 0) { if (getrlimit(RLIMIT_CORE, &rlim) == 0) {
#ifndef _ALPINE
uInfo("the old unlimited para: rlim_cur=%" PRIu64 ", rlim_max=%" PRIu64, rlim.rlim_cur, rlim.rlim_max); uInfo("the old unlimited para: rlim_cur=%" PRIu64 ", rlim_max=%" PRIu64, rlim.rlim_cur, rlim.rlim_max);
#else
uInfo("the old unlimited para: rlim_cur=%llu, rlim_max=%llu", rlim.rlim_cur, rlim.rlim_max);
#endif
rlim_new.rlim_cur = RLIM_INFINITY; rlim_new.rlim_cur = RLIM_INFINITY;
rlim_new.rlim_max = RLIM_INFINITY; rlim_new.rlim_max = RLIM_INFINITY;
if (setrlimit(RLIMIT_CORE, &rlim_new) != 0) { if (setrlimit(RLIMIT_CORE, &rlim_new) != 0) {
@ -591,7 +595,11 @@ void taosSetCoreDump() {
} }
if (getrlimit(RLIMIT_CORE, &rlim) == 0) { if (getrlimit(RLIMIT_CORE, &rlim) == 0) {
#ifndef _ALPINE
uInfo("the new unlimited para: rlim_cur=%" PRIu64 ", rlim_max=%" PRIu64, rlim.rlim_cur, rlim.rlim_max); uInfo("the new unlimited para: rlim_cur=%" PRIu64 ", rlim_max=%" PRIu64, rlim.rlim_cur, rlim.rlim_max);
#else
uInfo("the new unlimited para: rlim_cur=%llu, rlim_max=%llu", rlim.rlim_cur, rlim.rlim_max);
#endif
} }
#ifndef _TD_ARM_ #ifndef _TD_ARM_
@ -659,4 +667,4 @@ bool taosGetSystemUid(char *uid) {
return false; return false;
} }
#endif #endif

View File

@ -58,11 +58,20 @@ char *strsep(char **stringp, const char *delim) {
char *getpass(const char *prefix) { char *getpass(const char *prefix) {
static char passwd[TSDB_KEY_LEN] = {0}; static char passwd[TSDB_KEY_LEN] = {0};
memset(passwd, 0, TSDB_KEY_LEN);
printf("%s", prefix); printf("%s", prefix);
scanf("%s", passwd);
char n = getchar(); int32_t index = 0;
char ch;
while (index < TSDB_KEY_LEN) {
ch = getch();
if (ch == '\n' || ch == '\r') {
break;
} else {
passwd[index++] = ch;
}
}
return passwd; return passwd;
} }
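A minimal usage sketch for the reworked Windows getpass() above, which now collects keystrokes one at a time via getch() so the password is not echoed and is capped at TSDB_KEY_LEN bytes (the connection call is only an illustration of a typical caller):

char *pass = getpass("Enter password: ");              // prompt printed, input not echoed
TAOS *conn = taos_connect("localhost", "root", pass, NULL, 0);
if (conn == NULL) {
  // handle the failed login
}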
@ -131,11 +140,11 @@ int tasoUcs4Compare(void *f1_ucs4, void *f2_ucs4, int bytes) {
} }
/* Copy memory to memory until the specified number of bytes /* Copy memory to memory until the specified number of bytes
has been copied, return pointer to following byte. has been copied, return pointer to following byte.
Overlap is NOT handled correctly. */ Overlap is NOT handled correctly. */
void *mempcpy(void *dest, const void *src, size_t len) { void *mempcpy(void *dest, const void *src, size_t len) {
return (char*)memcpy(dest, src, len) + len; return (char*)memcpy(dest, src, len) + len;
} }
/* Copy SRC to DEST, returning the address of the terminating '\0' in DEST. */ /* Copy SRC to DEST, returning the address of the terminating '\0' in DEST. */

View File

@ -3,4 +3,6 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(monitor) ADD_SUBDIRECTORY(monitor)
ADD_SUBDIRECTORY(http) ADD_SUBDIRECTORY(http)
ADD_SUBDIRECTORY(mqtt) IF (TD_MQTT)
ADD_SUBDIRECTORY(mqtt)
ENDIF ()

View File

@ -11,11 +11,12 @@ AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX) IF (TD_LINUX)
ADD_LIBRARY(http ${SRC}) ADD_LIBRARY(http ${SRC})
TARGET_LINK_LIBRARIES(http z)
IF (TD_SOMODE_STATIC) IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(http taos_static z) TARGET_LINK_LIBRARIES(http taos_static)
ELSE () ELSE ()
TARGET_LINK_LIBRARIES(http taos z) TARGET_LINK_LIBRARIES(http taos)
ENDIF () ENDIF ()
IF (TD_ADMIN) IF (TD_ADMIN)

View File

@ -2,11 +2,11 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
AUX_SOURCE_DIRECTORY(./src SRC) AUX_SOURCE_DIRECTORY(./src SRC)
IF (TD_LINUX) IF (TD_LINUX)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
ADD_LIBRARY(monitor ${SRC}) ADD_LIBRARY(monitor ${SRC})
IF (TD_SOMODE_STATIC) IF (TD_SOMODE_STATIC)

View File

@ -25,6 +25,7 @@
#include "tsclient.h" #include "tsclient.h"
#include "dnode.h" #include "dnode.h"
#include "monitor.h" #include "monitor.h"
#include "taoserror.h"
#define monitorFatal(...) { if (monitorDebugFlag & DEBUG_FATAL) { taosPrintLog("MON FATAL ", 255, __VA_ARGS__); }} #define monitorFatal(...) { if (monitorDebugFlag & DEBUG_FATAL) { taosPrintLog("MON FATAL ", 255, __VA_ARGS__); }}
#define monitorError(...) { if (monitorDebugFlag & DEBUG_ERROR) { taosPrintLog("MON ERROR ", 255, __VA_ARGS__); }} #define monitorError(...) { if (monitorDebugFlag & DEBUG_ERROR) { taosPrintLog("MON ERROR ", 255, __VA_ARGS__); }}
@ -33,129 +34,159 @@
#define monitorDebug(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLog("MON ", monitorDebugFlag, __VA_ARGS__); }} #define monitorDebug(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLog("MON ", monitorDebugFlag, __VA_ARGS__); }}
#define monitorTrace(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLog("MON ", monitorDebugFlag, __VA_ARGS__); }} #define monitorTrace(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLog("MON ", monitorDebugFlag, __VA_ARGS__); }}
#define SQL_LENGTH 1024 #define SQL_LENGTH 1030
#define LOG_LEN_STR 100 #define LOG_LEN_STR 100
#define IP_LEN_STR TSDB_EP_LEN #define IP_LEN_STR TSDB_EP_LEN
#define CHECK_INTERVAL 1000 #define CHECK_INTERVAL 1000
typedef enum { typedef enum {
MONITOR_CMD_CREATE_DB, MON_CMD_CREATE_DB,
MONITOR_CMD_CREATE_TB_LOG, MON_CMD_CREATE_TB_LOG,
MONITOR_CMD_CREATE_MT_DN, MON_CMD_CREATE_MT_DN,
MONITOR_CMD_CREATE_MT_ACCT, MON_CMD_CREATE_MT_ACCT,
MONITOR_CMD_CREATE_TB_DN, MON_CMD_CREATE_TB_DN,
MONITOR_CMD_CREATE_TB_ACCT_ROOT, MON_CMD_CREATE_TB_ACCT_ROOT,
MONITOR_CMD_CREATE_TB_SLOWQUERY, MON_CMD_CREATE_TB_SLOWQUERY,
MONITOR_CMD_MAX MON_CMD_MAX
} EMonitorCommand; } EMonitorCommand;
typedef enum { typedef enum {
MONITOR_STATE_UN_INIT, MON_STATE_NOT_INIT,
MONITOR_STATE_INITIALIZING, MON_STATE_INITED
MONITOR_STATE_INITIALIZED,
MONITOR_STATE_STOPPED
} EMonitorState; } EMonitorState;
typedef struct { typedef struct {
void * conn; pthread_t thread;
void * timer; void * conn;
char ep[TSDB_EP_LEN]; char ep[TSDB_EP_LEN];
int8_t cmdIndex; int8_t cmdIndex;
int8_t state; int8_t state;
char sql[SQL_LENGTH + 1]; int8_t start; // enable/disable by mnode
void * initTimer; int8_t quiting; // taosd is quiting
void * diskTimer; char sql[SQL_LENGTH + 1];
} SMonitorConn; } SMonitorConn;
static SMonitorConn tsMonitorConn; static SMonitorConn tsMonitor = {0};
static void monitorInitConn(void *para, void *unused); static void monitorSaveSystemInfo();
static void monitorInitConnCb(void *param, TAOS_RES *result, int32_t code); static void *monitorThreadFunc(void *param);
static void monitorInitDatabase(); static void monitorBuildMonitorSql(char *sql, int32_t cmd);
static void monitorInitDatabaseCb(void *param, TAOS_RES *result, int32_t code);
static void monitorStartTimer();
static void monitorSaveSystemInfo();
extern int32_t (*monitorStartSystemFp)(); extern int32_t (*monitorStartSystemFp)();
extern void (*monitorStopSystemFp)(); extern void (*monitorStopSystemFp)();
extern void (*monitorExecuteSQLFp)(char *sql); extern void (*monitorExecuteSQLFp)(char *sql);
static void monitorCheckDiskUsage(void *para, void *unused) {
taosGetDisk();
taosTmrReset(monitorCheckDiskUsage, CHECK_INTERVAL, NULL, tscTmr, &tsMonitorConn.diskTimer);
}
int32_t monitorInitSystem() { int32_t monitorInitSystem() {
taos_init(); if (tsMonitor.ep[0] == 0) {
taosTmrReset(monitorCheckDiskUsage, CHECK_INTERVAL, NULL, tscTmr, &tsMonitorConn.diskTimer); strcpy(tsMonitor.ep, tsLocalEp);
}
int len = strlen(tsMonitor.ep);
for (int i = 0; i < len; ++i) {
if (tsMonitor.ep[i] == ':' || tsMonitor.ep[i] == '-' || tsMonitor.ep[i] == '.') {
tsMonitor.ep[i] = '_';
}
}
pthread_attr_t thAttr;
pthread_attr_init(&thAttr);
pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);
if (pthread_create(&tsMonitor.thread, &thAttr, monitorThreadFunc, NULL)) {
monitorError("failed to create thread to for monitor module, reason:%s", strerror(errno));
return -1;
}
pthread_attr_destroy(&thAttr);
monitorDebug("monitor thread is launched");
monitorStartSystemFp = monitorStartSystem; monitorStartSystemFp = monitorStartSystem;
monitorStopSystemFp = monitorStopSystem; monitorStopSystemFp = monitorStopSystem;
return 0; return 0;
} }
int32_t monitorStartSystem() { int32_t monitorStartSystem() {
monitorInfo("start monitor module"); taos_init();
monitorInitSystem(); tsMonitor.start = 1;
taosTmrReset(monitorInitConn, 10, NULL, tscTmr, &tsMonitorConn.initTimer); monitorExecuteSQLFp = monitorExecuteSQL;
monitorInfo("monitor module start");
return 0; return 0;
} }
static void monitorStartSystemRetry() { static void *monitorThreadFunc(void *param) {
if (tsMonitorConn.initTimer != NULL) { monitorDebug("starting to initialize monitor module ...");
taosTmrReset(monitorInitConn, 3000, NULL, tscTmr, &tsMonitorConn.initTimer);
}
}
static void monitorInitConn(void *para, void *unused) { while (1) {
if (dnodeGetDnodeId() <= 0) { static int32_t accessTimes = 0;
monitorStartSystemRetry(); accessTimes++;
return; taosMsleep(1000);
}
monitorInfo("starting to initialize monitor service ..");
tsMonitorConn.state = MONITOR_STATE_INITIALIZING;
if (tsMonitorConn.ep[0] == 0) if (tsMonitor.quiting) {
strcpy(tsMonitorConn.ep, tsLocalEp); tsMonitor.state = MON_STATE_NOT_INIT;
monitorInfo("monitor thread will quit, for taosd is quiting");
break;
} else {
taosGetDisk();
}
int len = strlen(tsMonitorConn.ep); if (tsMonitor.start == 0) {
for (int i = 0; i < len; ++i) { continue;
if (tsMonitorConn.ep[i] == ':' || tsMonitorConn.ep[i] == '-') { }
tsMonitorConn.ep[i] = '_';
if (dnodeGetDnodeId() <= 0) {
monitorDebug("dnode not initialized, waiting for 3000 ms to start monitor module");
continue;
}
if (tsMonitor.conn == NULL) {
tsMonitor.state = MON_STATE_NOT_INIT;
tsMonitor.conn = taos_connect(NULL, "monitor", tsInternalPass, "", 0);
if (tsMonitor.conn == NULL) {
monitorError("failed to connect to database, reason:%s", tstrerror(terrno));
continue;
} else {
monitorDebug("connect to database success");
}
}
if (tsMonitor.state == MON_STATE_NOT_INIT) {
for (; tsMonitor.cmdIndex < MON_CMD_MAX; ++tsMonitor.cmdIndex) {
monitorBuildMonitorSql(tsMonitor.sql, tsMonitor.cmdIndex);
void *res = taos_query(tsMonitor.conn, tsMonitor.sql);
int code = taos_errno(res);
taos_free_result(res);
if (code != 0) {
monitorError("failed to exec sql:%s, reason:%s", tsMonitor.sql, tstrerror(code));
break;
} else {
monitorDebug("successfully to exec sql:%s", tsMonitor.sql);
}
}
if (tsMonitor.start) {
tsMonitor.state = MON_STATE_INITED;
}
}
if (tsMonitor.state == MON_STATE_INITED) {
if (accessTimes % tsMonitorInterval == 0) {
monitorSaveSystemInfo();
}
} }
} }
if (tsMonitorConn.conn == NULL) { monitorInfo("monitor thread is stopped");
taos_connect_a(NULL, "monitor", tsInternalPass, "", 0, monitorInitConnCb, &tsMonitorConn, &(tsMonitorConn.conn)); return NULL;
} else {
monitorInitDatabase();
}
} }
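The timer-driven callbacks are replaced by this single polling thread, so the cadence can be read straight off the loop; a worked example, assuming tsMonitorInterval is 30 seconds (the value is configurable, 30 is only an assumption here):

/* accessTimes increases by 1 on every taosMsleep(1000) tick, therefore:
 *   - taosGetDisk() refreshes disk stats once per second,
 *   - accessTimes % tsMonitorInterval == 0 holds on every 30th tick,
 *     so monitorSaveSystemInfo() writes one row into the monitor db roughly every 30 s,
 *   - setting tsMonitor.quiting from monitorCleanUpSystem() makes the loop exit within one tick. */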
static void monitorInitConnCb(void *param, TAOS_RES *result, int32_t code) { static void monitorBuildMonitorSql(char *sql, int32_t cmd) {
// free it firstly in any cases.
taos_free_result(result);
if (code != TSDB_CODE_SUCCESS) {
monitorError("monitor:%p, connect to database failed, reason:%s", tsMonitorConn.conn, tstrerror(code));
taos_close(tsMonitorConn.conn);
tsMonitorConn.conn = NULL;
tsMonitorConn.state = MONITOR_STATE_UN_INIT;
monitorStartSystemRetry();
return;
}
monitorDebug("monitor:%p, connect to database success, reason:%s", tsMonitorConn.conn, tstrerror(code));
monitorInitDatabase();
}
static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
memset(sql, 0, SQL_LENGTH); memset(sql, 0, SQL_LENGTH);
if (cmd == MONITOR_CMD_CREATE_DB) { if (cmd == MON_CMD_CREATE_DB) {
snprintf(sql, SQL_LENGTH, snprintf(sql, SQL_LENGTH,
"create database if not exists %s replica 1 days 10 keep 30 cache %d " "create database if not exists %s replica 1 days 10 keep 30 cache %d "
"blocks %d maxtables 16 precision 'us'", "blocks %d precision 'us'",
tsMonitorDbName, TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MIN_TOTAL_BLOCKS); tsMonitorDbName, TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MIN_TOTAL_BLOCKS);
} else if (cmd == MONITOR_CMD_CREATE_MT_DN) { } else if (cmd == MON_CMD_CREATE_MT_DN) {
snprintf(sql, SQL_LENGTH, snprintf(sql, SQL_LENGTH,
"create table if not exists %s.dn(ts timestamp" "create table if not exists %s.dn(ts timestamp"
", cpu_taosd float, cpu_system float, cpu_cores int" ", cpu_taosd float, cpu_system float, cpu_cores int"
@ -166,10 +197,10 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
", req_http int, req_select int, req_insert int" ", req_http int, req_select int, req_insert int"
") tags (dnodeid int, fqdn binary(%d))", ") tags (dnodeid int, fqdn binary(%d))",
tsMonitorDbName, TSDB_FQDN_LEN); tsMonitorDbName, TSDB_FQDN_LEN);
} else if (cmd == MONITOR_CMD_CREATE_TB_DN) { } else if (cmd == MON_CMD_CREATE_TB_DN) {
snprintf(sql, SQL_LENGTH, "create table if not exists %s.dn%d using %s.dn tags(%d, '%s')", tsMonitorDbName, snprintf(sql, SQL_LENGTH, "create table if not exists %s.dn%d using %s.dn tags(%d, '%s')", tsMonitorDbName,
dnodeGetDnodeId(), tsMonitorDbName, dnodeGetDnodeId(), tsLocalEp); dnodeGetDnodeId(), tsMonitorDbName, dnodeGetDnodeId(), tsLocalEp);
} else if (cmd == MONITOR_CMD_CREATE_MT_ACCT) { } else if (cmd == MON_CMD_CREATE_MT_ACCT) {
snprintf(sql, SQL_LENGTH, snprintf(sql, SQL_LENGTH,
"create table if not exists %s.acct(ts timestamp " "create table if not exists %s.acct(ts timestamp "
", currentPointsPerSecond bigint, maxPointsPerSecond bigint" ", currentPointsPerSecond bigint, maxPointsPerSecond bigint"
@ -185,15 +216,15 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
", accessState smallint" ", accessState smallint"
") tags (acctId binary(%d))", ") tags (acctId binary(%d))",
tsMonitorDbName, TSDB_USER_LEN); tsMonitorDbName, TSDB_USER_LEN);
} else if (cmd == MONITOR_CMD_CREATE_TB_ACCT_ROOT) { } else if (cmd == MON_CMD_CREATE_TB_ACCT_ROOT) {
snprintf(sql, SQL_LENGTH, "create table if not exists %s.acct_%s using %s.acct tags('%s')", tsMonitorDbName, TSDB_DEFAULT_USER, snprintf(sql, SQL_LENGTH, "create table if not exists %s.acct_%s using %s.acct tags('%s')", tsMonitorDbName, TSDB_DEFAULT_USER,
tsMonitorDbName, TSDB_DEFAULT_USER); tsMonitorDbName, TSDB_DEFAULT_USER);
} else if (cmd == MONITOR_CMD_CREATE_TB_SLOWQUERY) { } else if (cmd == MON_CMD_CREATE_TB_SLOWQUERY) {
snprintf(sql, SQL_LENGTH, snprintf(sql, SQL_LENGTH,
"create table if not exists %s.slowquery(ts timestamp, username " "create table if not exists %s.slowquery(ts timestamp, username "
"binary(%d), created_time timestamp, time bigint, sql binary(%d))", "binary(%d), created_time timestamp, time bigint, sql binary(%d))",
tsMonitorDbName, TSDB_TABLE_FNAME_LEN - 1, TSDB_SLOW_QUERY_SQL_LEN); tsMonitorDbName, TSDB_TABLE_FNAME_LEN - 1, TSDB_SLOW_QUERY_SQL_LEN);
} else if (cmd == MONITOR_CMD_CREATE_TB_LOG) { } else if (cmd == MON_CMD_CREATE_TB_LOG) {
snprintf(sql, SQL_LENGTH, snprintf(sql, SQL_LENGTH,
"create table if not exists %s.log(ts timestamp, level tinyint, " "create table if not exists %s.log(ts timestamp, level tinyint, "
"content binary(%d), ipaddr binary(%d))", "content binary(%d), ipaddr binary(%d))",
@ -203,75 +234,22 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
sql[SQL_LENGTH] = 0; sql[SQL_LENGTH] = 0;
} }
static void monitorInitDatabase() {
if (tsMonitorConn.cmdIndex < MONITOR_CMD_MAX) {
dnodeBuildMonitorSql(tsMonitorConn.sql, tsMonitorConn.cmdIndex);
taos_query_a(tsMonitorConn.conn, tsMonitorConn.sql, monitorInitDatabaseCb, NULL);
} else {
tsMonitorConn.state = MONITOR_STATE_INITIALIZED;
monitorExecuteSQLFp = monitorExecuteSQL;
monitorInfo("monitor service init success");
monitorStartTimer();
}
}
static void monitorInitDatabaseCb(void *param, TAOS_RES *result, int32_t code) {
if (code == TSDB_CODE_MND_TABLE_ALREADY_EXIST || code == TSDB_CODE_MND_DB_ALREADY_EXIST || code >= 0) {
monitorDebug("monitor:%p, sql success, reason:%s, %s", tsMonitorConn.conn, tstrerror(code), tsMonitorConn.sql);
if (tsMonitorConn.cmdIndex == MONITOR_CMD_CREATE_TB_LOG) {
monitorInfo("dnode:%s is started", tsLocalEp);
}
tsMonitorConn.cmdIndex++;
monitorInitDatabase();
} else {
monitorError("monitor:%p, sql failed, reason:%s, %s", tsMonitorConn.conn, tstrerror(code), tsMonitorConn.sql);
tsMonitorConn.state = MONITOR_STATE_UN_INIT;
monitorStartSystemRetry();
}
taos_free_result(result);
}
void monitorStopSystem() { void monitorStopSystem() {
if (tsMonitorConn.state == MONITOR_STATE_STOPPED) return; tsMonitor.start = 0;
tsMonitorConn.state = MONITOR_STATE_STOPPED; tsMonitor.state = MON_STATE_NOT_INIT;
monitorExecuteSQLFp = NULL; monitorExecuteSQLFp = NULL;
monitorInfo("monitor module stopped");
monitorInfo("monitor module is stopped");
if (tsMonitorConn.initTimer != NULL) {
taosTmrStopA(&(tsMonitorConn.initTimer));
}
if (tsMonitorConn.timer != NULL) {
taosTmrStopA(&(tsMonitorConn.timer));
}
if (tsMonitorConn.conn != NULL) {
taos_close(tsMonitorConn.conn);
tsMonitorConn.conn = NULL;
}
} }
void monitorCleanUpSystem() { void monitorCleanUpSystem() {
tsMonitor.quiting = 1;
monitorStopSystem(); monitorStopSystem();
monitorInfo("monitor module cleanup"); pthread_join(tsMonitor.thread, NULL);
} if (tsMonitor.conn != NULL) {
taos_close(tsMonitor.conn);
static void monitorStartTimer() { tsMonitor.conn = NULL;
taosTmrReset(monitorSaveSystemInfo, tsMonitorInterval * 1000, NULL, tscTmr, &tsMonitorConn.timer);
}
static void dnodeMontiorLogCallback(void *param, TAOS_RES *result, int32_t code) {
int32_t c = taos_errno(result);
if (c != TSDB_CODE_SUCCESS) {
monitorError("monitor:%p, save %s failed, reason:%s", tsMonitorConn.conn, (char *)param, tstrerror(c));
} else {
int32_t rows = taos_affected_rows(result);
monitorDebug("monitor:%p, save %s succ, rows:%d", tsMonitorConn.conn, (char *)param, rows);
} }
monitorInfo("monitor module is cleaned up");
taos_free_result(result);
} }
// unit is MB // unit is MB
@ -279,13 +257,13 @@ static int32_t monitorBuildMemorySql(char *sql) {
float sysMemoryUsedMB = 0; float sysMemoryUsedMB = 0;
bool suc = taosGetSysMemory(&sysMemoryUsedMB); bool suc = taosGetSysMemory(&sysMemoryUsedMB);
if (!suc) { if (!suc) {
monitorError("monitor:%p, get sys memory info failed.", tsMonitorConn.conn); monitorDebug("failed to get sys memory info");
} }
float procMemoryUsedMB = 0; float procMemoryUsedMB = 0;
suc = taosGetProcMemory(&procMemoryUsedMB); suc = taosGetProcMemory(&procMemoryUsedMB);
if (!suc) { if (!suc) {
monitorError("monitor:%p, get proc memory info failed.", tsMonitorConn.conn); monitorDebug("failed to get proc memory info");
} }
return sprintf(sql, ", %f, %f, %d", procMemoryUsedMB, sysMemoryUsedMB, tsTotalMemoryMB); return sprintf(sql, ", %f, %f, %d", procMemoryUsedMB, sysMemoryUsedMB, tsTotalMemoryMB);
@ -296,11 +274,11 @@ static int32_t monitorBuildCpuSql(char *sql) {
float sysCpuUsage = 0, procCpuUsage = 0; float sysCpuUsage = 0, procCpuUsage = 0;
bool suc = taosGetCpuUsage(&sysCpuUsage, &procCpuUsage); bool suc = taosGetCpuUsage(&sysCpuUsage, &procCpuUsage);
if (!suc) { if (!suc) {
monitorError("monitor:%p, get cpu usage failed.", tsMonitorConn.conn); monitorDebug("failed to get cpu usage");
} }
if (sysCpuUsage <= procCpuUsage) { if (sysCpuUsage <= procCpuUsage) {
sysCpuUsage = procCpuUsage + (float)0.1; sysCpuUsage = procCpuUsage + 0.1f;
} }
return sprintf(sql, ", %f, %f, %d", procCpuUsage, sysCpuUsage, tsNumOfCores); return sprintf(sql, ", %f, %f, %d", procCpuUsage, sysCpuUsage, tsNumOfCores);
@ -316,14 +294,14 @@ static int32_t monitorBuildBandSql(char *sql) {
float bandSpeedKb = 0; float bandSpeedKb = 0;
bool suc = taosGetBandSpeed(&bandSpeedKb); bool suc = taosGetBandSpeed(&bandSpeedKb);
if (!suc) { if (!suc) {
monitorError("monitor:%p, get bandwidth speed failed.", tsMonitorConn.conn); monitorDebug("failed to get bandwidth speed");
} }
return sprintf(sql, ", %f", bandSpeedKb); return sprintf(sql, ", %f", bandSpeedKb);
} }
static int32_t monitorBuildReqSql(char *sql) { static int32_t monitorBuildReqSql(char *sql) {
SDnodeStatisInfo info = dnodeGetStatisInfo(); SDnodeStatisInfo info = dnodeGetStatisInfo();
return sprintf(sql, ", %d, %d, %d)", info.httpReqNum, info.queryReqNum, info.submitReqNum); return sprintf(sql, ", %d, %d, %d)", info.httpReqNum, info.queryReqNum, info.submitReqNum);
} }
@ -331,20 +309,15 @@ static int32_t monitorBuildIoSql(char *sql) {
float readKB = 0, writeKB = 0; float readKB = 0, writeKB = 0;
bool suc = taosGetProcIO(&readKB, &writeKB); bool suc = taosGetProcIO(&readKB, &writeKB);
if (!suc) { if (!suc) {
monitorError("monitor:%p, get io info failed.", tsMonitorConn.conn); monitorDebug("failed to get io info");
} }
return sprintf(sql, ", %f, %f", readKB, writeKB); return sprintf(sql, ", %f, %f", readKB, writeKB);
} }
static void monitorSaveSystemInfo() { static void monitorSaveSystemInfo() {
if (tsMonitorConn.state != MONITOR_STATE_INITIALIZED) {
monitorStartTimer();
return;
}
int64_t ts = taosGetTimestampUs(); int64_t ts = taosGetTimestampUs();
char * sql = tsMonitorConn.sql; char * sql = tsMonitor.sql;
int32_t pos = snprintf(sql, SQL_LENGTH, "insert into %s.dn%d values(%" PRId64, tsMonitorDbName, dnodeGetDnodeId(), ts); int32_t pos = snprintf(sql, SQL_LENGTH, "insert into %s.dn%d values(%" PRId64, tsMonitorDbName, dnodeGetDnodeId(), ts);
pos += monitorBuildCpuSql(sql + pos); pos += monitorBuildCpuSql(sql + pos);
@ -354,16 +327,31 @@ static void monitorSaveSystemInfo() {
pos += monitorBuildIoSql(sql + pos); pos += monitorBuildIoSql(sql + pos);
pos += monitorBuildReqSql(sql + pos); pos += monitorBuildReqSql(sql + pos);
monitorDebug("monitor:%p, save system info, sql:%s", tsMonitorConn.conn, sql); void *res = taos_query(tsMonitor.conn, tsMonitor.sql);
taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "sys"); int code = taos_errno(res);
taos_free_result(res);
if (tsMonitorConn.timer != NULL && tsMonitorConn.state != MONITOR_STATE_STOPPED) { if (code != 0) {
monitorStartTimer(); monitorError("failed to save system info, reason:%s, sql:%s", tstrerror(code), tsMonitor.sql);
} else {
monitorDebug("successfully to save system info, sql:%s", tsMonitor.sql);
} }
} }
static void montiorExecSqlCb(void *param, TAOS_RES *result, int32_t code) {
int32_t c = taos_errno(result);
if (c != TSDB_CODE_SUCCESS) {
monitorError("save %s failed, reason:%s", (char *)param, tstrerror(c));
} else {
int32_t rows = taos_affected_rows(result);
monitorDebug("save %s succ, rows:%d", (char *)param, rows);
}
taos_free_result(result);
}
void monitorSaveAcctLog(SAcctMonitorObj *pMon) { void monitorSaveAcctLog(SAcctMonitorObj *pMon) {
if (tsMonitorConn.state != MONITOR_STATE_INITIALIZED) return; if (tsMonitor.state != MON_STATE_INITED) return;
char sql[1024] = {0}; char sql[1024] = {0};
sprintf(sql, sprintf(sql,
@ -392,19 +380,16 @@ void monitorSaveAcctLog(SAcctMonitorObj *pMon) {
pMon->totalConns, pMon->maxConns, pMon->totalConns, pMon->maxConns,
pMon->accessState); pMon->accessState);
monitorDebug("monitor:%p, save account info, sql %s", tsMonitorConn.conn, sql); monitorDebug("save account info, sql:%s", sql);
taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "account"); taos_query_a(tsMonitor.conn, sql, montiorExecSqlCb, "account info");
} }
void monitorSaveLog(int32_t level, const char *const format, ...) { void monitorSaveLog(int32_t level, const char *const format, ...) {
if (tsMonitorConn.state != MONITOR_STATE_INITIALIZED) return; if (tsMonitor.state != MON_STATE_INITED) return;
va_list argpointer; va_list argpointer;
char sql[SQL_LENGTH] = {0}; char sql[SQL_LENGTH] = {0};
int32_t max_length = SQL_LENGTH - 30; int32_t max_length = SQL_LENGTH - 30;
if (tsMonitorConn.state != MONITOR_STATE_INITIALIZED) return;
int32_t len = snprintf(sql, (size_t)max_length, "insert into %s.log values(%" PRId64 ", %d,'", tsMonitorDbName, int32_t len = snprintf(sql, (size_t)max_length, "insert into %s.log values(%" PRId64 ", %d,'", tsMonitorDbName,
taosGetTimestampUs(), level); taosGetTimestampUs(), level);
@ -416,12 +401,13 @@ void monitorSaveLog(int32_t level, const char *const format, ...) {
len += sprintf(sql + len, "', '%s')", tsLocalEp); len += sprintf(sql + len, "', '%s')", tsLocalEp);
sql[len++] = 0; sql[len++] = 0;
monitorDebug("monitor:%p, save log, sql: %s", tsMonitorConn.conn, sql); monitorDebug("save log, sql: %s", sql);
taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "log"); taos_query_a(tsMonitor.conn, sql, montiorExecSqlCb, "log");
} }
void monitorExecuteSQL(char *sql) { void monitorExecuteSQL(char *sql) {
if (tsMonitorConn.state != MONITOR_STATE_INITIALIZED) return; if (tsMonitor.state != MON_STATE_INITED) return;
monitorDebug("monitor:%p, execute sql: %s", tsMonitorConn.conn, sql);
taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "sql"); monitorDebug("execute sql:%s", sql);
taos_query_a(tsMonitor.conn, sql, montiorExecSqlCb, "sql");
} }

View File

@ -2,21 +2,19 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MQTT-C/include)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MQTT-C/examples/templates)
AUX_SOURCE_DIRECTORY(src SRC) AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX) IF (TD_LINUX)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MQTT-C/include)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MQTT-C/examples/templates)
ADD_LIBRARY(mqtt ${SRC}) ADD_LIBRARY(mqtt ${SRC})
TARGET_LINK_LIBRARIES(mqtt cJson mqttc)
IF (TD_SOMODE_STATIC) IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(mqtt taos_static cJson mqttc) TARGET_LINK_LIBRARIES(mqtt taos_static)
ELSE () ELSE ()
TARGET_LINK_LIBRARIES(mqtt taos cJson mqttc) TARGET_LINK_LIBRARIES(mqtt taos)
ENDIF ()
IF (TD_ADMIN)
TARGET_LINK_LIBRARIES(mqtt admin cJson)
ENDIF () ENDIF ()
ENDIF () ENDIF ()

View File

@ -23,11 +23,12 @@ extern "C" {
* @file * @file
* A simple subscriber program that performs automatic reconnections. * A simple subscriber program that performs automatic reconnections.
*/ */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "mqtt.h" #include "mqtt.h"
#include "taos.h"
#define QOS 1
#define TIMEOUT 10000L
#define MQTT_SEND_BUF_SIZE 102400
#define MQTT_RECV_BUF_SIZE 102400
/** /**
* @brief A structure that I will use to keep track of some data needed * @brief A structure that I will use to keep track of some data needed
@ -36,18 +37,12 @@ extern "C" {
* An instance of this struct will be created in my \c main(). Then, whenever * An instance of this struct will be created in my \c main(). Then, whenever
* \ref mqttReconnectClient is called, this instance will be passed. * \ref mqttReconnectClient is called, this instance will be passed.
*/ */
struct reconnect_state_t { typedef struct SMqttReconnectState {
char* hostname;
char* port;
char* topic;
char* client_id;
char* user_name;
char* password;
uint8_t* sendbuf; uint8_t* sendbuf;
size_t sendbufsz; size_t sendbufsz;
uint8_t* recvbuf; uint8_t* recvbuf;
size_t recvbufsz; size_t recvbufsz;
}; } SMqttReconnectState;
/** /**
* @brief My reconnect callback. It will reestablish the connection whenever * @brief My reconnect callback. It will reestablish the connection whenever
@ -58,7 +53,7 @@ void mqttReconnectClient(struct mqtt_client* client, void** reconnect_state_vptr
/** /**
* @brief The function will be called whenever a PUBLISH message is received. * @brief The function will be called whenever a PUBLISH message is received.
*/ */
void mqtt_PublishCallback(void** unused, struct mqtt_response_publish* published); void mqttPublishCallback(void** unused, struct mqtt_response_publish* published);
/** /**
* @brief The client's refresher. This function triggers back-end routines to * @brief The client's refresher. This function triggers back-end routines to
@ -73,12 +68,7 @@ void* mqttClientRefresher(void* client);
/** /**
* @brief Safely closes the \p sockfd and cancels the \p client_daemon before \c exit. * @brief Safely closes the \p sockfd and cancels the \p client_daemon before \c exit.
*/ */
void mqttCleanupRes(int status, int sockfd, pthread_t* client_daemon);
void mqttCleanup(int status, int sockfd, pthread_t* client_daemon);
void mqttInitConnCb(void* param, TAOS_RES* result, int32_t code);
void mqttQueryInsertCallback(void* param, TAOS_RES* result, int32_t code);
#define QOS 1
#define TIMEOUT 10000L
#ifdef __cplusplus #ifdef __cplusplus
} }

View File

@ -15,11 +15,13 @@
#ifndef TDENGINE_MQTT_PLYLOAD_H #ifndef TDENGINE_MQTT_PLYLOAD_H
#define TDENGINE_MQTT_PLYLOAD_H #define TDENGINE_MQTT_PLYLOAD_H
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
char split(char str[], char delims[], char** p_p_cmd_part, int max);
char* converJsonToSql(char* json, char* _dbname, char* _tablename); char* mqttConverJsonToSql(char* json, int maxSize);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -14,52 +14,146 @@
*/ */
#define _DEFAULT_SOURCE #define _DEFAULT_SOURCE
#include "mqttPayload.h"
#include "cJSON.h"
#include "string.h"
#include "taos.h"
#include "mqttLog.h"
#include "os.h" #include "os.h"
char split(char str[], char delims[], char** p_p_cmd_part, int max) { #include "cJSON.h"
char* token = strtok(str, delims); #include "mqttLog.h"
char part_index = 0; #include "mqttPayload.h"
char** tmp_part = p_p_cmd_part;
while (token) {
*tmp_part++ = token;
token = strtok(NULL, delims);
part_index++;
if (part_index >= max) break;
}
return part_index;
}
char* converJsonToSql(char* json, char* _dbname, char* _tablename) { // subscribe message like this
cJSON* jPlayload = cJSON_Parse(json);
char _names[102400] = {0}; /*
char _values[102400] = {0}; /test {
int i = 0; "timestamp": 1599121290,
int count = cJSON_GetArraySize(jPlayload); "gateway": {
for (; i < count; i++) "name": "AcuLink 810 Gateway",
{ "model": "AcuLink810-868",
cJSON* item = cJSON_GetArrayItem(jPlayload, i); "serial": "S8P20200207"
if (cJSON_Object == item->type) { },
mqttInfo("The item '%s' is not supported", item->string); "device": {
} else { "name": "Acuvim L V3 .221",
strcat(_names, item->string); "model": "Acuvim-L-V3",
if (i < count - 1) { "serial": "221",
strcat(_names, ","); "online": true,
} "readings": [
char* __value_json = cJSON_Print(item); {
strcat(_values, __value_json); "param": "Freq_Hz",
free(__value_json); "value": "59.977539",
if (i < count - 1) { "unit": "Hz"
strcat(_values, ","); },
} {
"param": "Va_V",
"value": "122.002907",
"unit": "V"
},
{
"param": "DI4",
"value": "5.000000",
"unit": ""
}
]
} }
}
*/
// send msg cmd
// mosquitto_pub -h test.mosquitto.org -t "/test" -m '{"timestamp": 1599121290,"gateway": {"name": "AcuLink 810 Gateway","model": "AcuLink810-868","serial": "S8P20200207"},"device": {"name": "Acuvim L V3 .221","model": "Acuvim-L-V3","serial": "221","online": true,"readings": [{"param": "Freq_Hz","value": "59.977539","unit": "Hz"},{"param": "Va_V","value": "122.002907","unit": "V"},{"param": "DI4","value": "5.000000","unit": ""}]}}'
/*
* This is an example; the function needs to be adapted to parse your own JSON payload into a SQL statement.
* Note that the database and super table below must be created before any data is written.
* In this case:
* create database mqttdb;
* create table mqttdb.devices(ts timestamp, value bigint) tags(name binary(32), model binary(32), serial binary(16), param binary(16), unit binary(16));
*/
char* mqttConverJsonToSql(char* json, int maxSize) {
// const int32_t maxSize = 10240;
maxSize *= 5;
char* sql = malloc(maxSize);
cJSON* root = cJSON_Parse(json);
if (root == NULL) {
mqttError("failed to parse msg, invalid json format");
goto MQTT_PARSE_OVER;
} }
cJSON_free(jPlayload);
int sqllen = strlen(_names) + strlen(_values) + strlen(_dbname) + strlen(_tablename) + 1024; cJSON* timestamp = cJSON_GetObjectItem(root, "timestamp");
char* _sql = calloc(1, sqllen); if (!timestamp || timestamp->type != cJSON_Number) {
sprintf(_sql, "INSERT INTO %s.%s (%s) VALUES(%s);", _dbname, _tablename, _names, _values); mqttError("failed to parse msg, timestamp not found");
return _sql; goto MQTT_PARSE_OVER;
}
cJSON* device = cJSON_GetObjectItem(root, "device");
if (!device) {
mqttError("failed to parse msg, device not found");
goto MQTT_PARSE_OVER;
}
cJSON* name = cJSON_GetObjectItem(device, "name");
if (!name || name->type != cJSON_String) {
mqttError("failed to parse msg, name not found");
goto MQTT_PARSE_OVER;
}
cJSON* model = cJSON_GetObjectItem(device, "model");
if (!model || model->type != cJSON_String) {
mqttError("failed to parse msg, model not found");
goto MQTT_PARSE_OVER;
}
cJSON* serial = cJSON_GetObjectItem(device, "serial");
if (!serial || serial->type != cJSON_String) {
mqttError("failed to parse msg, serial not found");
goto MQTT_PARSE_OVER;
}
cJSON* readings = cJSON_GetObjectItem(device, "readings");
if (!readings || readings->type != cJSON_Array) {
mqttError("failed to parse msg, readings not found");
goto MQTT_PARSE_OVER;
}
int count = cJSON_GetArraySize(readings);
if (count <= 0) {
mqttError("failed to parse msg, readings size smaller than 0");
goto MQTT_PARSE_OVER;
}
int len = snprintf(sql, maxSize, "insert into");
for (int i = 0; i < count; ++i) {
cJSON* reading = cJSON_GetArrayItem(readings, i);
if (reading == NULL) continue;
cJSON* param = cJSON_GetObjectItem(reading, "param");
if (!param || param->type != cJSON_String) {
mqttError("failed to parse msg, param not found");
goto MQTT_PARSE_OVER;
}
cJSON* value = cJSON_GetObjectItem(reading, "value");
if (!value || value->type != cJSON_String) {
mqttError("failed to parse msg, value not found");
goto MQTT_PARSE_OVER;
}
cJSON* unit = cJSON_GetObjectItem(reading, "unit");
if (!unit || unit->type != cJSON_String) {
mqttError("failed to parse msg, unit not found");
goto MQTT_PARSE_OVER;
}
len += snprintf(sql + len, maxSize - len,
" mqttdb.serial_%s_%s using mqttdb.devices tags('%s', '%s', '%s', '%s', '%s') values(%" PRId64 ", %s)",
serial->valuestring, param->valuestring, name->valuestring, model->valuestring, serial->valuestring,
param->valuestring, unit->valuestring, timestamp->valueint * 1000, value->valuestring);
}
cJSON_free(root);
return sql;
MQTT_PARSE_OVER:
cJSON_free(root);
free(sql);
return NULL;
} }
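For the sample payload in the comment above, mqttConverJsonToSql() emits one child-table clause per reading, all appended to a single "insert into"; wrapped here for readability, the generated statement looks roughly like:

insert into
  mqttdb.serial_221_Freq_Hz using mqttdb.devices tags('Acuvim L V3 .221', 'Acuvim-L-V3', '221', 'Freq_Hz', 'Hz') values(1599121290000, 59.977539)
  mqttdb.serial_221_Va_V using mqttdb.devices tags('Acuvim L V3 .221', 'Acuvim-L-V3', '221', 'Va_V', 'V') values(1599121290000, 122.002907)
  mqttdb.serial_221_DI4 using mqttdb.devices tags('Acuvim L V3 .221', 'Acuvim-L-V3', '221', 'DI4', '') values(1599121290000, 5.000000)

The timestamp is multiplied by 1000 (seconds to milliseconds), serial and param are embedded in the child-table name, and the caller is expected to free() the returned buffer.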

View File

@ -14,244 +14,131 @@
*/ */
#define _DEFAULT_SOURCE #define _DEFAULT_SOURCE
#include "os.h"
#include "cJSON.h"
#include "mqtt.h" #include "mqtt.h"
#include "mqttInit.h" #include "mqttInit.h"
#include "mqttLog.h" #include "mqttLog.h"
#include "mqttPayload.h" #include "mqttPayload.h"
#include "os.h" #include "tmqtt.h"
#include "posix_sockets.h" #include "posix_sockets.h"
#include "string.h"
#include "taos.h" #include "taos.h"
#include "tglobal.h" #include "tglobal.h"
#include "tmqtt.h" #include "taoserror.h"
#include "tsclient.h"
#include "tsocket.h"
#include "ttimer.h"
#include "mqttSystem.h"
struct mqtt_client mqttClient = {0};
pthread_t clientDaemonThread = {0};
void* mqttConnect=NULL;
struct reconnect_state_t recntStatus = {0};
char* topicPath=NULL;
int mttIsRuning = 1;
int32_t mqttInitSystem() { struct SMqttReconnectState tsMqttStatus = {0};
int rc = 0; struct mqtt_client tsMqttClient = {0};
#if 0 static pthread_t tsMqttClientDaemonThread = {0};
uint8_t sendbuf[2048]; static void* tsMqttConnect = NULL;
uint8_t recvbuf[1024]; static bool tsMqttIsRuning = false;
recntStatus.sendbuf = sendbuf;
recntStatus.sendbufsz = sizeof(sendbuf);
recntStatus.recvbuf = recvbuf;
recntStatus.recvbufsz = sizeof(recvbuf);
char* url = tsMqttBrokerAddress;
recntStatus.user_name = strstr(url, "@") != NULL ? strbetween(url, "//", ":") : NULL;
char * passStr = strstr(url, recntStatus.user_name);
if (passStr != NULL) {
recntStatus.password = strstr(url, "@") != NULL ? strbetween(passStr, ":", "@") : NULL;
}
if (strlen(url) == 0) { int32_t mqttInitSystem() { return 0; }
mqttDebug("mqtt module not init, url is null");
return rc;
}
if (strstr(url, "@") != NULL) {
recntStatus.hostname = strbetween(url, "@", ":");
} else if (strstr(strstr(url, "://") + 3, ":") != NULL) {
recntStatus.hostname = strbetween(url, "//", ":");
} else {
recntStatus.hostname = strbetween(url, "//", "/");
}
char* _begin_hostname = strstr(url, recntStatus.hostname);
if (_begin_hostname != NULL && strstr(_begin_hostname, ":") != NULL) {
recntStatus.port = strbetween(_begin_hostname, ":", "/");
} else {
recntStatus.port = strbetween("'1883'", "'", "'");
}
char* portStr = recntStatus.hostname;
if (_begin_hostname != NULL) {
char* colonStr = strstr(_begin_hostname, ":");
if (colonStr != NULL) {
portStr = recntStatus.port;
}
}
char* topicStr = strstr(url, portStr);
if (topicStr != NULL) {
topicPath = strbetween(topicStr, "/", "/");
char* _topic = "+/+/+/";
int _tpsize = strlen(topicPath) + strlen(_topic) + 1;
recntStatus.topic = calloc(1, _tpsize);
sprintf(recntStatus.topic, "/%s/%s", topicPath, _topic);
recntStatus.client_id = strlen(tsMqttBrokerClientId) < 3 ? tsMqttBrokerClientId : "taos_mqtt";
mqttConnect = NULL;
} else {
topicPath = NULL;
}
#endif
return rc;
}
int32_t mqttStartSystem() { int32_t mqttStartSystem() {
int rc = 0; tsMqttStatus.sendbufsz = MQTT_SEND_BUF_SIZE;
#if 0 tsMqttStatus.recvbufsz = MQTT_RECV_BUF_SIZE;
if (recntStatus.user_name != NULL && recntStatus.password != NULL) { tsMqttStatus.sendbuf = malloc(MQTT_SEND_BUF_SIZE);
mqttInfo("connecting to mqtt://%s:%s@%s:%s/%s/", recntStatus.user_name, recntStatus.password, tsMqttStatus.recvbuf = malloc(MQTT_RECV_BUF_SIZE);
recntStatus.hostname, recntStatus.port, topicPath); tsMqttIsRuning = true;
} else if (recntStatus.user_name != NULL && recntStatus.password == NULL) {
mqttInfo("connecting to mqtt://%s@%s:%s/%s/", recntStatus.user_name, recntStatus.hostname, recntStatus.port, mqtt_init_reconnect(&tsMqttClient, mqttReconnectClient, &tsMqttStatus, mqttPublishCallback);
topicPath); if (pthread_create(&tsMqttClientDaemonThread, NULL, mqttClientRefresher, &tsMqttClient)) {
mqttError("mqtt failed to start daemon.");
mqttCleanupRes(EXIT_FAILURE, -1, NULL);
return -1;
} }
mqtt_init_reconnect(&mqttClient, mqttReconnectClient, &recntStatus, mqtt_PublishCallback); mqttInfo("mqtt listening for topic:%s messages", tsMqttTopic);
if (pthread_create(&clientDaemonThread, NULL, mqttClientRefresher, &mqttClient)) { return 0;
mqttError("Failed to start client daemon.");
mqttCleanup(EXIT_FAILURE, -1, NULL);
rc = -1;
} else {
mqttInfo("listening for '%s' messages.", recntStatus.topic);
}
#endif
return rc;
} }
void mqttStopSystem() { void mqttStopSystem() {
#if 0 if (tsMqttIsRuning) {
mqttClient.error = MQTT_ERROR_SOCKET_ERROR; tsMqttIsRuning = false;
mttIsRuning = 0; tsMqttClient.error = MQTT_ERROR_SOCKET_ERROR;
usleep(300000U);
mqttCleanup(EXIT_SUCCESS, mqttClient.socketfd, &clientDaemonThread); taosMsleep(300);
mqttInfo("mqtt is stoped"); mqttCleanupRes(EXIT_SUCCESS, tsMqttClient.socketfd, &tsMqttClientDaemonThread);
#endif
mqttInfo("mqtt is stopped");
}
} }
void mqttCleanUpSystem() { void mqttCleanUpSystem() {
#if 0 mqttStopSystem();
mqttInfo("starting to cleanup mqtt");
free(recntStatus.user_name);
free(recntStatus.password);
free(recntStatus.hostname);
free(recntStatus.port);
free(recntStatus.topic);
free(topicPath);
mqttInfo("mqtt is cleaned up"); mqttInfo("mqtt is cleaned up");
#endif
} }
void mqtt_PublishCallback(void** unused, struct mqtt_response_publish* published) { void mqttPublishCallback(void** unused, struct mqtt_response_publish* published) {
/* note that published->topic_name is NOT null-terminated (here we'll change it to a c-string) */ const char* content = published->application_message;
char* topic_name = (char*)malloc(published->topic_name_size + 1); mqttDebug("receive mqtt message, size:%d", (int)published->application_message_size);
memcpy(topic_name, published->topic_name, published->topic_name_size);
topic_name[published->topic_name_size] = '\0';
mqttInfo("received publish('%s'): %s", topic_name, (const char*)published->application_message);
char _token[128] = {0};
char _dbname[128] = {0};
char _tablename[128] = {0};
if (mqttConnect == NULL) {
mqttInfo("connect database");
taos_connect_a(NULL, "_root", tsInternalPass, "", 0, mqttInitConnCb, &mqttClient, &mqttConnect);
}
if (topic_name[1]=='/' && strncmp((char*)&topic_name[1], topicPath, strlen(topicPath)) == 0) {
char* p_p_cmd_part[5] = {0};
char copystr[1024] = {0};
strncpy(copystr, topic_name, MIN(1024, published->topic_name_size));
char part_index = split(copystr, "/", p_p_cmd_part, 10);
if (part_index < 4) {
mqttError("The topic %s is't format '/path/token/dbname/table name/'. for expmle: '/taos/token/db/t'", topic_name);
} else {
strncpy(_token, p_p_cmd_part[1], 127);
strncpy(_dbname, p_p_cmd_part[2], 127);
strncpy(_tablename, p_p_cmd_part[3], 127);
mqttInfo("part count=%d,access token:%s,database name:%s, table name:%s", part_index, _token, _dbname,
_tablename);
if (mqttConnect != NULL) { if (tsMqttConnect == NULL) {
char* _sql = converJsonToSql((char*)published->application_message, _dbname, _tablename); tsMqttConnect = taos_connect(NULL, "_root", tsInternalPass, "", 0);
mqttInfo("query:%s", _sql); if (tsMqttConnect == NULL) {
taos_query_a(mqttConnect, _sql, mqttQueryInsertCallback, &mqttClient); mqttError("failed to connect to tdengine, reason:%s", tstrerror(terrno));
mqttInfo("free sql:%s", _sql); return;
free(_sql); } else {
} mqttInfo("successfully connected to the tdengine");
} }
} }
free(topic_name);
mqttTrace("receive mqtt message, content:%s", content);
char* sql = mqttConverJsonToSql((char*)content, (int)published->application_message_size);
if (sql != NULL) {
void* res = taos_query(tsMqttConnect, sql);
int code = taos_errno(res);
if (code != 0) {
mqttError("failed to exec sql, reason:%s sql:%s", tstrerror(code), sql);
} else {
mqttTrace("successfully to exec sql:%s", sql);
}
taos_free_result(res);
} else {
mqttError("failed to parse mqtt message");
}
} }
void* mqttClientRefresher(void* client) { void* mqttClientRefresher(void* client) {
while (mttIsRuning) { while (tsMqttIsRuning) {
mqtt_sync((struct mqtt_client*)client); mqtt_sync((struct mqtt_client*)client);
taosMsleep(100); taosMsleep(100);
} }
mqttDebug("quit refresher");
mqttDebug("mqtt quit refresher");
return NULL; return NULL;
} }
void mqttCleanup(int status, int sockfd, pthread_t* client_daemon) { void mqttCleanupRes(int status, int sockfd, pthread_t* client_daemon) {
#if 0
mqttInfo("clean up mqtt module"); mqttInfo("clean up mqtt module");
if (sockfd != -1) close(sockfd); if (sockfd != -1) {
if (client_daemon != NULL) pthread_cancel(*client_daemon); close(sockfd);
#endif
}
void mqttInitConnCb(void* param, TAOS_RES* result, int32_t code) {
if (code < 0) {
mqttError("mqtt:%d, connect to database failed, reason:%s", code, tstrerror(code));
taos_close(mqttConnect);
mqttConnect = NULL;
return;
} }
mqttDebug("mqtt:%d, connect to database success, reason:%s", code, tstrerror(code));
}
void mqttQueryInsertCallback(void* param, TAOS_RES* result, int32_t code) { if (client_daemon != NULL) {
if (code < 0) { pthread_cancel(*client_daemon);
mqttError("mqtt:%d, save data failed, code:%s", code, tstrerror(code));
} else if (code == 0) {
mqttError("mqtt:%d, save data failed, affect rows:%d", code, code);
} else {
mqttInfo("mqtt:%d, save data success, code:%s", code, tstrerror(code));
} }
} }
void mqttReconnectClient(struct mqtt_client* client, void** reconnect_state_vptr) { void mqttReconnectClient(struct mqtt_client* client, void** unused) {
mqttInfo("reconnect client"); mqttInfo("mqtt tries to connect to the mqtt server");
struct reconnect_state_t* reconnect_state = *((struct reconnect_state_t**)reconnect_state_vptr);
/* Close the clients socket if this isn't the initial reconnect call */
if (client->error != MQTT_ERROR_INITIAL_RECONNECT) { if (client->error != MQTT_ERROR_INITIAL_RECONNECT) {
close(client->socketfd); close(client->socketfd);
} }
/* Perform error handling here. */
if (client->error != MQTT_ERROR_INITIAL_RECONNECT) { if (client->error != MQTT_ERROR_INITIAL_RECONNECT) {
mqttError("mqttReconnectClient: called while client was in error state \"%s\"", mqtt_error_str(client->error)); mqttError("mqtt client was in error state %s", mqtt_error_str(client->error));
} }
/* Open a new socket. */ int sockfd = open_nb_socket(tsMqttHostName, tsMqttPort);
int sockfd = open_nb_socket(reconnect_state->hostname, reconnect_state->port); if (sockfd < 0) {
if (sockfd == -1) { mqttError("mqtt client failed to open socket %s:%s", tsMqttHostName, tsMqttPort);
mqttError("failed to open socket: "); //mqttCleanupRes(EXIT_FAILURE, sockfd, NULL);
mqttCleanup(EXIT_FAILURE, sockfd, NULL); return;
} }
/* Reinitialize the client. */ mqtt_reinit(client, sockfd, tsMqttStatus.sendbuf, tsMqttStatus.sendbufsz, tsMqttStatus.recvbuf, tsMqttStatus.recvbufsz);
mqtt_reinit(client, sockfd, reconnect_state->sendbuf, reconnect_state->sendbufsz, reconnect_state->recvbuf, mqtt_connect(client, tsMqttClientId, NULL, NULL, 0, tsMqttUser, tsMqttPass, MQTT_CONNECT_CLEAN_SESSION, 400);
reconnect_state->recvbufsz); mqtt_subscribe(client, tsMqttTopic, 0);
/* Ensure we have a clean session */
uint8_t connect_flags = MQTT_CONNECT_CLEAN_SESSION;
/* Send connection request to the broker. */
mqtt_connect(client, reconnect_state->client_id, NULL, NULL, 0, reconnect_state->user_name, reconnect_state->password,connect_flags, 400);
/* Subscribe to the topic. */
mqtt_subscribe(client, reconnect_state->topic, 0);
} }
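For context, the rewritten module above leans on MQTT-C's reconnect-callback API: the callback (re)opens the socket and re-subscribes, while a daemon thread drives mqtt_sync() in a loop. A minimal sketch of that pattern follows; everything prefixed example_, and the host, port and topic literals, are illustrative rather than taken from the module.

#include <pthread.h>
#include <stdint.h>
#include <unistd.h>
#include "mqtt.h"
#include "posix_sockets.h"

static struct mqtt_client example_client;
static uint8_t example_sendbuf[2048];
static uint8_t example_recvbuf[1024];

static void example_on_publish(void** unused, struct mqtt_response_publish* published) {
  /* published->application_message holds the payload; convert and store it here */
}

static void example_reconnect(struct mqtt_client* client, void** unused) {
  if (client->error != MQTT_ERROR_INITIAL_RECONNECT) {
    close(client->socketfd);                          /* drop the broken socket first */
  }
  int sockfd = open_nb_socket("localhost", "1883");   /* non-blocking TCP connect */
  if (sockfd < 0) return;                             /* try again on the next sync */
  mqtt_reinit(client, sockfd, example_sendbuf, sizeof(example_sendbuf),
              example_recvbuf, sizeof(example_recvbuf));
  mqtt_connect(client, "example_client", NULL, NULL, 0, NULL, NULL,
               MQTT_CONNECT_CLEAN_SESSION, 400);
  mqtt_subscribe(client, "/taos/+/+/+/", 0);
}

static void* example_refresher(void* client) {
  for (;;) {
    mqtt_sync((struct mqtt_client*)client);           /* send/receive pending packets */
    usleep(100 * 1000);
  }
  return NULL;
}

/* wiring it together:
 *   mqtt_init_reconnect(&example_client, example_reconnect, NULL, example_on_publish);
 *   pthread_create(&tid, NULL, example_refresher, &example_client);
 */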

@@ -8,10 +8,9 @@ INCLUDE_DIRECTORIES(inc)
 AUX_SOURCE_DIRECTORY(src SRC)
 ADD_LIBRARY(query ${SRC})
 SET_SOURCE_FILES_PROPERTIES(src/sql.c PROPERTIES COMPILE_FLAGS -w)
+TARGET_LINK_LIBRARIES(query tsdb tutil)
 IF (TD_LINUX)
-  TARGET_LINK_LIBRARIES(query tsdb tutil m rt)
+  TARGET_LINK_LIBRARIES(query m rt)
   ADD_SUBDIRECTORY(tests)
-ELSEIF (TD_WINDOWS)
-  TARGET_LINK_LIBRARIES(query tsdb tutil)
 ENDIF ()

@@ -217,7 +217,6 @@ acct_optr(Y) ::= pps(C) tseries(D) storage(P) streams(F) qtime(Q) dbs(E) users(K
 %destructor keep {tVariantListDestroy($$);}
 keep(Y) ::= KEEP tagitemlist(X). { Y = X; }

-tables(Y) ::= MAXTABLES INTEGER(X). { Y = X; }
 cache(Y) ::= CACHE INTEGER(X). { Y = X; }
 replica(Y) ::= REPLICA INTEGER(X). { Y = X; }
 quorum(Y) ::= QUORUM INTEGER(X). { Y = X; }
@@ -234,7 +233,6 @@ prec(Y) ::= PRECISION STRING(X). { Y = X; }
 %type db_optr {SCreateDBInfo}
 db_optr(Y) ::= . {setDefaultCreateDbOption(&Y);}

-db_optr(Y) ::= db_optr(Z) tables(X). { Y = Z; Y.maxTablesPerVnode = strtol(X.z, NULL, 10); }
 db_optr(Y) ::= db_optr(Z) cache(X). { Y = Z; Y.cacheBlockSize = strtol(X.z, NULL, 10); }
 db_optr(Y) ::= db_optr(Z) replica(X). { Y = Z; Y.replica = strtol(X.z, NULL, 10); }
 db_optr(Y) ::= db_optr(Z) quorum(X). { Y = Z; Y.quorum = strtol(X.z, NULL, 10); }
@@ -254,7 +252,6 @@ alter_db_optr(Y) ::= . { setDefaultCreateDbOption(&Y);}
 alter_db_optr(Y) ::= alter_db_optr(Z) replica(X). { Y = Z; Y.replica = strtol(X.z, NULL, 10); }
 alter_db_optr(Y) ::= alter_db_optr(Z) quorum(X). { Y = Z; Y.quorum = strtol(X.z, NULL, 10); }
-alter_db_optr(Y) ::= alter_db_optr(Z) tables(X). { Y = Z; Y.maxTablesPerVnode = strtol(X.z, NULL, 10); }
 alter_db_optr(Y) ::= alter_db_optr(Z) keep(X). { Y = Z; Y.keep = X; }
 alter_db_optr(Y) ::= alter_db_optr(Z) blocks(X). { Y = Z; Y.numOfBlocks = strtol(X.z, NULL, 10); }
 alter_db_optr(Y) ::= alter_db_optr(Z) comp(X). { Y = Z; Y.compressionLevel = strtol(X.z, NULL, 10); }

@@ -121,7 +121,6 @@ static SKeyword keywordTable[] = {
     {"MINROWS", TK_MINROWS},
     {"MAXROWS", TK_MAXROWS},
     {"BLOCKS", TK_BLOCKS},
-    {"MAXTABLES", TK_MAXTABLES},
     {"CACHE", TK_CACHE},
     {"CTIME", TK_CTIME},
     {"WAL", TK_WAL},

(File diff suppressed because it is too large.)
@@ -709,21 +709,21 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
     }
       if (terrno != 0) {
         taosFreeId(pRpc->idPool, sid);   // sid shall be released
         pConn = NULL;
       }
     }
   }

   if (pConn) {
     if (pRecv->connType == RPC_CONN_UDPS && pRpc->numOfThreads > 1) {
       // UDP server, assign to new connection
-      pRpc->index = (pRpc->index+1) % pRpc->numOfThreads;
+      pRpc->index = (pRpc->index + 1) % pRpc->numOfThreads;
       pConn->localPort = (pRpc->localPort + pRpc->index);
     }

     taosHashPut(pRpc->hash, hashstr, size, (char *)&pConn, POINTER_BYTES);
-    tDebug("%s %p server connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid);
+    tDebug("%s %p server connection is allocated, uid:0x%x sid:%d key:%s", pRpc->label, pConn, pConn->linkUid, sid, hashstr);
   }

   return pConn;

@@ -797,9 +797,11 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) {
   }

   taosTmrStopA(&pPeer->timer);
-  if (tsSyncNum >= tsMaxSyncNum) {
+
+  // Ensure the sync of mnode not interrupted
+  if (pNode->vgId != 1 && tsSyncNum >= tsMaxSyncNum) {
     sInfo("%s, %d syncs are in process, try later", pPeer->id, tsSyncNum);
-    taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId*10)%200, pPeer, syncTmrCtrl, &pPeer->timer);
+    taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId * 10) % 200, pPeer, syncTmrCtrl, &pPeer->timer);
     return;
   }

@@ -3,13 +3,10 @@ PROJECT(TDengine)
 INCLUDE_DIRECTORIES(inc)
 AUX_SOURCE_DIRECTORY(src SRC)
+ADD_LIBRARY(tsdb ${SRC})
+TARGET_LINK_LIBRARIES(tsdb common tutil)

 IF (TD_LINUX)
-  ADD_LIBRARY(tsdb ${SRC})
-  TARGET_LINK_LIBRARIES(tsdb common tutil)
   # Someone has no gtest directory, so comment it
   # ADD_SUBDIRECTORY(tests)
-ELSEIF (TD_WINDOWS)
-  ADD_LIBRARY(tsdb ${SRC})
-  TARGET_LINK_LIBRARIES(tsdb common tutil)
 ENDIF ()

@ -45,6 +45,8 @@ extern int tsdbDebugFlag;
#define TSDB_FILE_DELIMITER 0xF00AFA0F #define TSDB_FILE_DELIMITER 0xF00AFA0F
#define TSDB_FILE_INIT_MAGIC 0xFFFFFFFF #define TSDB_FILE_INIT_MAGIC 0xFFFFFFFF
#define TAOS_IN_RANGE(key, keyMin, keyMax) (((key) >= (keyMin)) && ((key) <= (keyMax)))
// NOTE: Any file format change must increase this version number by 1 // NOTE: Any file format change must increase this version number by 1
// Also, implement the convert function // Also, implement the convert function
#define TSDB_FILE_VERSION ((uint32_t)0) #define TSDB_FILE_VERSION ((uint32_t)0)
@ -318,6 +320,16 @@ typedef struct {
void* compBuffer; // Buffer for temperary compress/decompress purpose void* compBuffer; // Buffer for temperary compress/decompress purpose
} SRWHelper; } SRWHelper;
// ------------------ tsdbScan.c
typedef struct {
SFileGroup fGroup;
int numOfIdx;
SCompIdx* pCompIdx;
SCompInfo* pCompInfo;
void* pBuf;
FILE* tLogStream;
} STsdbScanHandle;
// Operations // Operations
// ------------------ tsdbMeta.c // ------------------ tsdbMeta.c
#define TSDB_INIT_NTABLES 1024 #define TSDB_INIT_NTABLES 1024
@ -475,6 +487,7 @@ int tsdbUpdateFileHeader(SFile* pFile);
int tsdbEncodeSFileInfo(void** buf, const STsdbFileInfo* pInfo); int tsdbEncodeSFileInfo(void** buf, const STsdbFileInfo* pInfo);
void* tsdbDecodeSFileInfo(void* buf, STsdbFileInfo* pInfo); void* tsdbDecodeSFileInfo(void* buf, STsdbFileInfo* pInfo);
void tsdbRemoveFileGroup(STsdbRepo* pRepo, SFileGroup* pFGroup); void tsdbRemoveFileGroup(STsdbRepo* pRepo, SFileGroup* pFGroup);
int tsdbLoadFileHeader(SFile* pFile, uint32_t* version);
void tsdbGetFileInfoImpl(char* fname, uint32_t* magic, int64_t* size); void tsdbGetFileInfoImpl(char* fname, uint32_t* magic, int64_t* size);
void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey); void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey);
@ -513,7 +526,10 @@ int tsdbCommitTableData(SRWHelper* pHelper, SCommitIter* pCommitIter, SDataCols
int tsdbMoveLastBlockIfNeccessary(SRWHelper* pHelper); int tsdbMoveLastBlockIfNeccessary(SRWHelper* pHelper);
int tsdbWriteCompInfo(SRWHelper* pHelper); int tsdbWriteCompInfo(SRWHelper* pHelper);
int tsdbWriteCompIdx(SRWHelper* pHelper); int tsdbWriteCompIdx(SRWHelper* pHelper);
int tsdbLoadCompIdxImpl(SFile* pFile, uint32_t offset, uint32_t len, void* buffer);
int tsdbDecodeSCompIdxImpl(void* buffer, uint32_t len, SCompIdx** ppCompIdx, int* numOfIdx);
int tsdbLoadCompIdx(SRWHelper* pHelper, void* target); int tsdbLoadCompIdx(SRWHelper* pHelper, void* target);
int tsdbLoadCompInfoImpl(SFile* pFile, SCompIdx* pIdx, SCompInfo** ppCompInfo);
int tsdbLoadCompInfo(SRWHelper* pHelper, void* target); int tsdbLoadCompInfo(SRWHelper* pHelper, void* target);
int tsdbLoadCompData(SRWHelper* phelper, SCompBlock* pcompblock, void* target); int tsdbLoadCompData(SRWHelper* phelper, SCompBlock* pcompblock, void* target);
void tsdbGetDataStatis(SRWHelper* pHelper, SDataStatis* pStatis, int numOfCols); void tsdbGetDataStatis(SRWHelper* pHelper, SDataStatis* pStatis, int numOfCols);
@ -537,7 +553,7 @@ static FORCE_INLINE int compTSKEY(const void* key1, const void* key2) {
#define TSDB_SUBMIT_MSG_HEAD_SIZE sizeof(SSubmitMsg) #define TSDB_SUBMIT_MSG_HEAD_SIZE sizeof(SSubmitMsg)
char* tsdbGetMetaFileName(char* rootDir); char* tsdbGetMetaFileName(char* rootDir);
void tsdbGetDataFileName(STsdbRepo* pRepo, int fid, int type, char* fname); void tsdbGetDataFileName(char* rootDir, int vid, int fid, int type, char* fname);
int tsdbLockRepo(STsdbRepo* pRepo); int tsdbLockRepo(STsdbRepo* pRepo);
int tsdbUnlockRepo(STsdbRepo* pRepo); int tsdbUnlockRepo(STsdbRepo* pRepo);
char* tsdbGetDataDirName(char* rootDir); char* tsdbGetDataDirName(char* rootDir);
@ -546,6 +562,16 @@ STsdbMeta* tsdbGetMeta(TSDB_REPO_T* pRepo);
STsdbFileH* tsdbGetFile(TSDB_REPO_T* pRepo); STsdbFileH* tsdbGetFile(TSDB_REPO_T* pRepo);
int tsdbCheckCommit(STsdbRepo* pRepo); int tsdbCheckCommit(STsdbRepo* pRepo);
// ------------------ tsdbScan.c
int tsdbScanFGroup(STsdbScanHandle* pScanHandle, char* rootDir, int fid);
STsdbScanHandle* tsdbNewScanHandle();
void tsdbSetScanLogStream(STsdbScanHandle* pScanHandle, FILE* fLogStream);
int tsdbSetAndOpenScanFile(STsdbScanHandle* pScanHandle, char* rootDir, int fid);
int tsdbScanSCompIdx(STsdbScanHandle* pScanHandle);
int tsdbScanSCompBlock(STsdbScanHandle* pScanHandle, int idx);
int tsdbCloseScanFile(STsdbScanHandle* pScanHandle);
void tsdbFreeScanHandle(STsdbScanHandle* pScanHandle);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

@@ -11,20 +11,4 @@
  *
  * You should have received a copy of the GNU Affero General Public License
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
-
-#ifndef TDENGINE_MQTT_SYSTEM_H
-#define TDENGINE_MQTT_SYSTEM_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-#include <stdint.h>
-
-int32_t mqttInitSystem();
-int32_t mqttStartSystem();
-void mqttStopSystem();
-void mqttCleanUpSystem();
-
-#ifdef __cplusplus
-}
-#endif
-#endif

@ -128,7 +128,7 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
if (fid < mfid) { if (fid < mfid) {
for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) { for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) {
tsdbGetDataFileName(pRepo, fid, type, fname); tsdbGetDataFileName(pRepo->rootDir, pCfg->tsdbId, fid, type, fname);
(void)remove(fname); (void)remove(fname);
} }
continue; continue;
@ -345,7 +345,7 @@ int tsdbCreateFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type) {
memset((void *)pFile, 0, sizeof(SFile)); memset((void *)pFile, 0, sizeof(SFile));
pFile->fd = -1; pFile->fd = -1;
tsdbGetDataFileName(pRepo, fid, type, pFile->fname); tsdbGetDataFileName(pRepo->rootDir, REPO_ID(pRepo), fid, type, pFile->fname);
if (access(pFile->fname, F_OK) == 0) { if (access(pFile->fname, F_OK) == 0) {
tsdbError("vgId:%d file %s already exists", REPO_ID(pRepo), pFile->fname); tsdbError("vgId:%d file %s already exists", REPO_ID(pRepo), pFile->fname);
@ -466,33 +466,57 @@ void tsdbRemoveFileGroup(STsdbRepo *pRepo, SFileGroup *pFGroup) {
} }
} }
void tsdbGetFileInfoImpl(char *fname, uint32_t *magic, int64_t *size) { int tsdbLoadFileHeader(SFile *pFile, uint32_t *version) {
char buf[TSDB_FILE_HEAD_SIZE] = "\0"; char buf[TSDB_FILE_HEAD_SIZE] = "\0";
uint32_t version = 0;
STsdbFileInfo info = {0};
int fd = open(fname, O_RDONLY); if (lseek(pFile->fd, 0, SEEK_SET) < 0) {
if (fd < 0) goto _err; tsdbError("failed to lseek file %s to start since %s", pFile->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
if (taosTRead(fd, buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) goto _err; if (taosTRead(pFile->fd, buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) {
tsdbError("failed to read file %s header part with %d bytes, reason:%s", pFile->fname, TSDB_FILE_HEAD_SIZE,
strerror(errno));
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return -1;
}
if (!taosCheckChecksumWhole((uint8_t *)buf, TSDB_FILE_HEAD_SIZE)) goto _err; if (!taosCheckChecksumWhole((uint8_t *)buf, TSDB_FILE_HEAD_SIZE)) {
tsdbError("file %s header part is corrupted with failed checksum", pFile->fname);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return -1;
}
void *pBuf = (void *)buf; void *pBuf = (void *)buf;
pBuf = taosDecodeFixedU32(pBuf, &version); pBuf = taosDecodeFixedU32(pBuf, version);
pBuf = tsdbDecodeSFileInfo(pBuf, &info); pBuf = tsdbDecodeSFileInfo(pBuf, &(pFile->info));
off_t offset = lseek(fd, 0, SEEK_END); return 0;
}
void tsdbGetFileInfoImpl(char *fname, uint32_t *magic, int64_t *size) {
uint32_t version = 0;
SFile file;
SFile * pFile = &file;
strncpy(pFile->fname, fname, TSDB_FILENAME_LEN);
pFile->fd = -1;
if (tsdbOpenFile(pFile, O_RDONLY) < 0) goto _err;
if (tsdbLoadFileHeader(pFile, &version) < 0) goto _err;
off_t offset = lseek(pFile->fd, 0, SEEK_END);
if (offset < 0) goto _err; if (offset < 0) goto _err;
close(fd); tsdbCloseFile(pFile);
*magic = info.magic; *magic = pFile->info.magic;
*size = offset; *size = offset;
return; return;
_err: _err:
if (fd >= 0) close(fd); tsdbCloseFile(pFile);
*magic = TSDB_FILE_INIT_MAGIC; *magic = TSDB_FILE_INIT_MAGIC;
*size = 0; *size = 0;
} }
@ -500,34 +524,23 @@ _err:
// ---------------- LOCAL FUNCTIONS ---------------- // ---------------- LOCAL FUNCTIONS ----------------
static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type) { static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type) {
uint32_t version; uint32_t version;
char buf[512] = "\0";
tsdbGetDataFileName(pRepo, fid, type, pFile->fname); tsdbGetDataFileName(pRepo->rootDir, REPO_ID(pRepo), fid, type, pFile->fname);
pFile->fd = -1; pFile->fd = -1;
if (tsdbOpenFile(pFile, O_RDONLY) < 0) goto _err; if (tsdbOpenFile(pFile, O_RDONLY) < 0) goto _err;
if (taosTRead(pFile->fd, buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { if (tsdbLoadFileHeader(pFile, &version) < 0) {
tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pRepo), TSDB_FILE_HEAD_SIZE, tsdbError("vgId:%d failed to load file %s header part since %s", REPO_ID(pRepo), pFile->fname, tstrerror(terrno));
pFile->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err; goto _err;
} }
if (!taosCheckChecksumWhole((uint8_t *)buf, TSDB_FILE_HEAD_SIZE)) {
tsdbError("vgId:%d file %s head part is corrupted", REPO_ID(pRepo), pFile->fname);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
goto _err;
}
void *pBuf = buf;
pBuf = taosDecodeFixedU32(pBuf, &version);
pBuf = tsdbDecodeSFileInfo(pBuf, &(pFile->info));
if (pFile->info.size == TSDB_FILE_HEAD_SIZE) { if (pFile->info.size == TSDB_FILE_HEAD_SIZE) {
pFile->info.size = lseek(pFile->fd, 0, SEEK_END); pFile->info.size = lseek(pFile->fd, 0, SEEK_END);
} }
if (version != TSDB_FILE_VERSION) { if (version != TSDB_FILE_VERSION) {
// TODO: deal with error
tsdbError("vgId:%d file %s version %u is not the same as program version %u which may cause problem", tsdbError("vgId:%d file %s version %u is not the same as program version %u which may cause problem",
REPO_ID(pRepo), pFile->fname, version, TSDB_FILE_VERSION); REPO_ID(pRepo), pFile->fname, version, TSDB_FILE_VERSION);
} }
@ -571,6 +584,7 @@ static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo) {
memset(&pFGroup->files[type].info, 0, sizeof(STsdbFileInfo)); memset(&pFGroup->files[type].info, 0, sizeof(STsdbFileInfo));
pFGroup->files[type].info.magic = TSDB_FILE_INIT_MAGIC; pFGroup->files[type].info.magic = TSDB_FILE_INIT_MAGIC;
pFGroup->state = 1; pFGroup->state = 1;
pRepo->state = TSDB_STATE_BAD_FILE;
terrno = TSDB_CODE_TDB_FILE_CORRUPTED; terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
} }
} }
@ -581,5 +595,5 @@ static TSKEY tsdbGetCurrMinKey(int8_t precision, int32_t keep) {
} }
static int tsdbGetCurrMinFid(int8_t precision, int32_t keep, int32_t days) { static int tsdbGetCurrMinFid(int8_t precision, int32_t keep, int32_t days) {
return (int32_t)(TSDB_KEY_FILEID(tsdbGetCurrMinKey(precision, keep), days, precision)); return (int)(TSDB_KEY_FILEID(tsdbGetCurrMinKey(precision, keep), days, precision));
} }

@ -142,7 +142,6 @@ TSDB_REPO_T *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH) {
} }
tsdbStartStream(pRepo); tsdbStartStream(pRepo);
// pRepo->state = TSDB_REPO_STATE_ACTIVE;
tsdbDebug("vgId:%d open tsdb repository succeed!", REPO_ID(pRepo)); tsdbDebug("vgId:%d open tsdb repository succeed!", REPO_ID(pRepo));
@ -341,6 +340,10 @@ void tsdbReportStat(void *repo, int64_t *totalPoints, int64_t *totalStorage, int
*compStorage = pRepo->stat.compStorage; *compStorage = pRepo->stat.compStorage;
} }
int tsdbGetState(TSDB_REPO_T *repo) {
return ((STsdbRepo *)repo)->state;
}
// ----------------- INTERNAL FUNCTIONS ----------------- // ----------------- INTERNAL FUNCTIONS -----------------
char *tsdbGetMetaFileName(char *rootDir) { char *tsdbGetMetaFileName(char *rootDir) {
int tlen = (int)(strlen(rootDir) + strlen(TSDB_META_FILE_NAME) + 2); int tlen = (int)(strlen(rootDir) + strlen(TSDB_META_FILE_NAME) + 2);
@ -354,8 +357,8 @@ char *tsdbGetMetaFileName(char *rootDir) {
return fname; return fname;
} }
void tsdbGetDataFileName(STsdbRepo *pRepo, int fid, int type, char *fname) { void tsdbGetDataFileName(char *rootDir, int vid, int fid, int type, char *fname) {
snprintf(fname, TSDB_FILENAME_LEN, "%s/%s/v%df%d%s", pRepo->rootDir, TSDB_DATA_DIR_NAME, REPO_ID(pRepo), fid, tsdbFileSuffix[type]); snprintf(fname, TSDB_FILENAME_LEN, "%s/%s/v%df%d%s", rootDir, TSDB_DATA_DIR_NAME, vid, fid, tsdbFileSuffix[type]);
} }
int tsdbLockRepo(STsdbRepo *pRepo) { int tsdbLockRepo(STsdbRepo *pRepo) {
@ -661,6 +664,8 @@ static STsdbRepo *tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg) {
goto _err; goto _err;
} }
pRepo->state = TSDB_STATE_OK;
int code = pthread_mutex_init(&pRepo->mutex, NULL); int code = pthread_mutex_init(&pRepo->mutex, NULL);
if (code != 0) { if (code != 0) {
terrno = TAOS_SYSTEM_ERROR(code); terrno = TAOS_SYSTEM_ERROR(code);

@ -102,7 +102,8 @@ void tsdbResetHelper(SRWHelper *pHelper) {
int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) { int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) {
ASSERT(pHelper != NULL && pGroup != NULL); ASSERT(pHelper != NULL && pGroup != NULL);
SFile *pFile = NULL; SFile * pFile = NULL;
STsdbRepo *pRepo = pHelper->pRepo;
// Clear the helper object // Clear the helper object
tsdbResetHelper(pHelper); tsdbResetHelper(pHelper);
@ -112,8 +113,10 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) {
// Set the files // Set the files
pHelper->files.fGroup = *pGroup; pHelper->files.fGroup = *pGroup;
if (helperType(pHelper) == TSDB_WRITE_HELPER) { if (helperType(pHelper) == TSDB_WRITE_HELPER) {
tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NHEAD, helperNewHeadF(pHelper)->fname); tsdbGetDataFileName(pRepo->rootDir, REPO_ID(pRepo), pGroup->fileId, TSDB_FILE_TYPE_NHEAD,
tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NLAST, helperNewLastF(pHelper)->fname); helperNewHeadF(pHelper)->fname);
tsdbGetDataFileName(pRepo->rootDir, REPO_ID(pRepo), pGroup->fileId, TSDB_FILE_TYPE_NLAST,
helperNewLastF(pHelper)->fname);
} }
// Open the files // Open the files
@ -443,10 +446,64 @@ int tsdbWriteCompIdx(SRWHelper *pHelper) {
return 0; return 0;
} }
int tsdbLoadCompIdxImpl(SFile *pFile, uint32_t offset, uint32_t len, void *buffer) {
const char *prefixMsg = "failed to load SCompIdx part";
if (lseek(pFile->fd, offset, SEEK_SET) < 0) {
tsdbError("%s: seek to file %s offset %u failed since %s", prefixMsg, pFile->fname, offset, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
if (taosTRead(pFile->fd, buffer, len) < len) {
tsdbError("%s: read file %s offset %u len %u failed since %s", prefixMsg, pFile->fname, offset, len,
strerror(errno));
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return -1;
}
if (!taosCheckChecksumWhole((uint8_t *)buffer, len)) {
tsdbError("%s: file %s corrupted, offset %u len %u", prefixMsg, pFile->fname, offset, len);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return -1;
}
return 0;
}
int tsdbDecodeSCompIdxImpl(void *buffer, uint32_t len, SCompIdx **ppCompIdx, int *numOfIdx) {
int nIdx = 0;
void *pPtr = buffer;
while (POINTER_DISTANCE(pPtr, buffer) < (int)(len - sizeof(TSCKSUM))) {
size_t tlen = taosTSizeof(*ppCompIdx);
if (tlen < sizeof(SCompIdx) * (nIdx + 1)) {
*ppCompIdx = (SCompIdx *)taosTRealloc(*ppCompIdx, (tlen == 0) ? 1024 : tlen * 2);
if (*ppCompIdx == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
}
pPtr = tsdbDecodeSCompIdx(pPtr, &((*ppCompIdx)[nIdx]));
if (pPtr == NULL) {
tsdbError("failed to decode SCompIdx part, idx:%d", nIdx);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return -1;
}
nIdx++;
ASSERT(nIdx == 1 || (*ppCompIdx)[nIdx - 1].tid > (*ppCompIdx)[nIdx - 2].tid);
ASSERT(POINTER_DISTANCE(pPtr, buffer) <= (int)(len - sizeof(TSCKSUM)));
}
*numOfIdx = nIdx;
return 0;
}
int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) { int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
ASSERT(pHelper->state == TSDB_HELPER_FILE_SET_AND_OPEN); ASSERT(pHelper->state == TSDB_HELPER_FILE_SET_AND_OPEN);
SFile *pFile = helperHeadF(pHelper); SFile *pFile = helperHeadF(pHelper);
int fd = pFile->fd;
if (!helperHasState(pHelper, TSDB_HELPER_IDX_LOAD)) { if (!helperHasState(pHelper, TSDB_HELPER_IDX_LOAD)) {
// If not load from file, just load it in object // If not load from file, just load it in object
@ -456,54 +513,18 @@ int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
return -1; return -1;
} }
if (lseek(fd, pFile->info.offset, SEEK_SET) < 0) { // Load SCompIdx binary from file
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno)); if (tsdbLoadCompIdxImpl(pFile, pFile->info.offset, pFile->info.len, (void *)(pHelper->pBuffer)) < 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
return -1; return -1;
} }
if (taosTRead(fd, (void *)(pHelper->pBuffer), pFile->info.len) < (int)pFile->info.len) { // Decode the SCompIdx part
tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pHelper->pRepo), pFile->info.len, if (tsdbDecodeSCompIdxImpl(pHelper->pBuffer, pFile->info.len, &(pHelper->idxH.pIdxArray),
pFile->fname, strerror(errno)); &(pHelper->idxH.numOfIdx)) < 0) {
terrno = TAOS_SYSTEM_ERROR(errno); tsdbError("vgId:%d failed to decode SCompIdx part from file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname,
tstrerror(errno));
return -1; return -1;
} }
if (!taosCheckChecksumWhole((uint8_t *)(pHelper->pBuffer), pFile->info.len)) {
tsdbError("vgId:%d file %s SCompIdx part is corrupted. len %u", REPO_ID(pHelper->pRepo), pFile->fname,
pFile->info.len);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return -1;
}
// Decode it
pHelper->idxH.numOfIdx = 0;
void *ptr = pHelper->pBuffer;
while (POINTER_DISTANCE(ptr, pHelper->pBuffer) < (int)(pFile->info.len - sizeof(TSCKSUM))) {
size_t tlen = taosTSizeof(pHelper->idxH.pIdxArray);
pHelper->idxH.numOfIdx++;
if (tlen < pHelper->idxH.numOfIdx * sizeof(SCompIdx)) {
pHelper->idxH.pIdxArray = (SCompIdx *)taosTRealloc(pHelper->idxH.pIdxArray, (tlen == 0) ? 1024 : tlen * 2);
if (pHelper->idxH.pIdxArray == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
}
ptr = tsdbDecodeSCompIdx(ptr, &(pHelper->idxH.pIdxArray[pHelper->idxH.numOfIdx - 1]));
if (ptr == NULL) {
tsdbError("vgId:%d file %s SCompIdx part is corrupted. len %u", REPO_ID(pHelper->pRepo), pFile->fname,
pFile->info.len);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return -1;
}
ASSERT(pHelper->idxH.numOfIdx == 1 || pHelper->idxH.pIdxArray[pHelper->idxH.numOfIdx - 1].tid >
pHelper->idxH.pIdxArray[pHelper->idxH.numOfIdx - 2].tid);
ASSERT(POINTER_DISTANCE(ptr, pHelper->pBuffer) <= (int)(pFile->info.len - sizeof(TSCKSUM)));
}
} }
} }
helperSetState(pHelper, TSDB_HELPER_IDX_LOAD); helperSetState(pHelper, TSDB_HELPER_IDX_LOAD);
@ -515,36 +536,49 @@ int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
return 0; return 0;
} }
int tsdbLoadCompInfoImpl(SFile *pFile, SCompIdx *pIdx, SCompInfo **ppCompInfo) {
const char *prefixMsg = "failed to load SCompInfo/SCompBlock part";
if (lseek(pFile->fd, pIdx->offset, SEEK_SET) < 0) {
tsdbError("%s: seek to file %s offset %u failed since %s", prefixMsg, pFile->fname, pIdx->offset, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
*ppCompInfo = taosTRealloc((void *)(*ppCompInfo), pIdx->len);
if (*ppCompInfo == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
if (taosTRead(pFile->fd, (void *)(*ppCompInfo), pIdx->len) < (int)pIdx->len) {
tsdbError("%s: read file %s offset %u len %u failed since %s", prefixMsg, pFile->fname, pIdx->offset, pIdx->len,
strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
if (!taosCheckChecksumWhole((uint8_t *)(*ppCompInfo), pIdx->len)) {
tsdbError("%s: file %s corrupted, offset %u len %u", prefixMsg, pFile->fname, pIdx->offset, pIdx->len);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return -1;
}
return 0;
}
int tsdbLoadCompInfo(SRWHelper *pHelper, void *target) { int tsdbLoadCompInfo(SRWHelper *pHelper, void *target) {
ASSERT(helperHasState(pHelper, TSDB_HELPER_TABLE_SET)); ASSERT(helperHasState(pHelper, TSDB_HELPER_TABLE_SET));
SCompIdx *pIdx = &(pHelper->curCompIdx); SCompIdx *pIdx = &(pHelper->curCompIdx);
int fd = helperHeadF(pHelper)->fd; SFile *pFile = helperHeadF(pHelper);
if (!helperHasState(pHelper, TSDB_HELPER_INFO_LOAD)) { if (!helperHasState(pHelper, TSDB_HELPER_INFO_LOAD)) {
if (pIdx->offset > 0) { if (pIdx->offset > 0) {
ASSERT(pIdx->uid == pHelper->tableInfo.uid); ASSERT(pIdx->uid == pHelper->tableInfo.uid);
if (lseek(fd, pIdx->offset, SEEK_SET) < 0) {
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), helperHeadF(pHelper)->fname,
strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
pHelper->pCompInfo = taosTRealloc((void *)pHelper->pCompInfo, pIdx->len); if (tsdbLoadCompInfoImpl(pFile, pIdx, &(pHelper->pCompInfo)) < 0) return -1;
if (taosTRead(fd, (void *)(pHelper->pCompInfo), pIdx->len) < (int)pIdx->len) {
tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pHelper->pRepo), pIdx->len,
helperHeadF(pHelper)->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
if (!taosCheckChecksumWhole((uint8_t *)pHelper->pCompInfo, pIdx->len)) {
tsdbError("vgId:%d file %s SCompInfo part is corrupted, tid %d uid %" PRIu64, REPO_ID(pHelper->pRepo),
helperHeadF(pHelper)->fname, pHelper->tableInfo.tid, pHelper->tableInfo.uid);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return -1;
}
ASSERT(pIdx->uid == pHelper->pCompInfo->uid && pIdx->tid == pHelper->pCompInfo->tid); ASSERT(pIdx->uid == pHelper->pCompInfo->uid && pIdx->tid == pHelper->pCompInfo->tid);
} }
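The newly added tsdbLoadCompIdxImpl() and tsdbDecodeSCompIdxImpl() split raw I/O plus checksum verification from decoding, so code outside the read/write helper (for instance a scan tool) can reuse them. A hypothetical combination is sketched below; readAllCompIdx is not part of the source, and buffer handling is simplified.

static int readAllCompIdx(SFile *pFile, uint32_t offset, uint32_t len,
                          SCompIdx **ppCompIdx, int *numOfIdx) {
  void *buf = malloc(len);
  if (buf == NULL) return -1;

  if (tsdbLoadCompIdxImpl(pFile, offset, len, buf) < 0 ||            /* read + verify checksum */
      tsdbDecodeSCompIdxImpl(buf, len, ppCompIdx, numOfIdx) < 0) {    /* decode into the array */
    free(buf);
    return -1;
  }

  free(buf);
  return 0;
}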

@ -734,6 +734,11 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* p
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end);
static void moveDataToFront(STsdbQueryHandle* pQueryHandle, int32_t numOfRows, int32_t numOfCols);
static void doCheckGeneratedBlockRange(STsdbQueryHandle* pQueryHandle);
static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SDataBlockInfo* pBlockInfo, int32_t endPos);
static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo){ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo){
SQueryFilePos* cur = &pQueryHandle->cur; SQueryFilePos* cur = &pQueryHandle->cur;
SDataBlockInfo binfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock); SDataBlockInfo binfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock);
@ -742,11 +747,11 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBloc
/*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo); /*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo);
SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order); SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order);
assert(cur->pos >= 0 && cur->pos <= binfo.rows);
TSKEY key = (row != NULL)? dataRowKey(row):TSKEY_INITIAL_VAL; TSKEY key = (row != NULL)? dataRowKey(row):TSKEY_INITIAL_VAL;
tsdbDebug("%p key in mem:%"PRId64", %p", pQueryHandle, key, pQueryHandle->qinfo); tsdbDebug("%p key in mem:%"PRId64", %p", pQueryHandle, key, pQueryHandle->qinfo);
cur->pos = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:(binfo.rows-1);
if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) || if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) ||
(!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) { (!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) {
@ -785,14 +790,32 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBloc
* Here the buffer is not enough, so only part of file block can be loaded into memory buffer * Here the buffer is not enough, so only part of file block can be loaded into memory buffer
*/ */
assert(pQueryHandle->outputCapacity >= binfo.rows); assert(pQueryHandle->outputCapacity >= binfo.rows);
pQueryHandle->realNumOfRows = binfo.rows;
cur->rows = binfo.rows; if ((cur->pos == 0 && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
cur->win = binfo.window; (cur->pos == (binfo.rows - 1) && (!ASCENDING_TRAVERSE(pQueryHandle->order)))) {
cur->mixBlock = false; pQueryHandle->realNumOfRows = binfo.rows;
cur->blockCompleted = true;
cur->lastKey = binfo.window.ekey + (ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1); cur->rows = binfo.rows;
pCheckInfo->lastKey = cur->lastKey; cur->win = binfo.window;
cur->mixBlock = false;
cur->blockCompleted = true;
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
cur->lastKey = binfo.window.ekey + 1;
cur->pos = binfo.rows;
} else {
cur->lastKey = binfo.window.skey - 1;
cur->pos = -1;
}
} else { // partially copy to dest buffer
int32_t endPos = ASCENDING_TRAVERSE(pQueryHandle->order)? (binfo.rows - 1): 0;
copyAllRemainRowsFromFileBlock(pQueryHandle, pCheckInfo, &binfo, endPos);
cur->mixBlock = true;
}
assert(cur->blockCompleted);
tsdbDebug("create data block from remain file block, brange:%"PRId64"-%"PRId64", rows:%d, lastKey:%"PRId64", %p",
cur->win.skey, cur->win.ekey, cur->rows, cur->lastKey, pQueryHandle);
} }
return code; return code;
@ -823,6 +846,7 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBl
assert(pCheckInfo->lastKey <= pBlock->keyLast); assert(pCheckInfo->lastKey <= pBlock->keyLast);
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock);
} else { // the whole block is loaded in to buffer } else { // the whole block is loaded in to buffer
cur->pos = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:(pBlock->numOfRows - 1);
code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo);
} }
} else { //desc order, query ended in current block } else { //desc order, query ended in current block
@ -842,6 +866,7 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBl
assert(pCheckInfo->lastKey >= pBlock->keyFirst); assert(pCheckInfo->lastKey >= pBlock->keyFirst);
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock);
} else { } else {
cur->pos = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:(pBlock->numOfRows-1);
code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo);
} }
} }
@ -912,7 +937,7 @@ static int doBinarySearchKey(char* pValue, int num, TSKEY key, int order) {
return midPos; return midPos;
} }
static int32_t copyDataFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end) { int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end) {
char* pData = NULL; char* pData = NULL;
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1 : -1; int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1 : -1;
@ -1137,6 +1162,47 @@ static void doCheckGeneratedBlockRange(STsdbQueryHandle* pQueryHandle) {
} }
} }
static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SDataBlockInfo* pBlockInfo, int32_t endPos) {
SQueryFilePos* cur = &pQueryHandle->cur;
SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
TSKEY* tsArray = pCols->cols[0].pData;
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1;
int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle));
int32_t pos = cur->pos;
int32_t start = cur->pos;
int32_t end = endPos;
if (!ASCENDING_TRAVERSE(pQueryHandle->order)) {
assert(start >= end);
SWAP(start, end, int32_t);
}
assert(pQueryHandle->outputCapacity >= (end - start + 1));
int32_t numOfRows = doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, 0, start, end);
// the time window should always be ascending order: skey <= ekey
cur->win = (STimeWindow) {.skey = tsArray[start], .ekey = tsArray[end]};
cur->mixBlock = (numOfRows != pBlockInfo->rows);
cur->lastKey = tsArray[endPos] + step;
cur->blockCompleted = true;
// if the buffer is not full in case of descending order query, move the data in the front of the buffer
moveDataToFront(pQueryHandle, numOfRows, numOfCols);
// The value of pos may be -1 or pBlockInfo->rows, and it is invalid in both cases.
pos = endPos + step;
updateInfoAfterMerge(pQueryHandle, pCheckInfo, numOfRows, pos);
doCheckGeneratedBlockRange(pQueryHandle);
tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %p",
pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, cur->mixBlock, cur->win.skey,
cur->win.ekey, cur->rows, pQueryHandle->qinfo);
}
// only return the qualified data to client in terms of query time window, data rows in the same block but do not // only return the qualified data to client in terms of query time window, data rows in the same block but do not
// be included in the query time window will be discarded // be included in the query time window will be discarded
static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock) { static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock) {
@ -1179,37 +1245,13 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
// compared with the data from in-memory buffer, to generate the correct timestamp array list // compared with the data from in-memory buffer, to generate the correct timestamp array list
int32_t numOfRows = 0; int32_t numOfRows = 0;
int32_t pos = cur->pos; int32_t pos = cur->pos;
cur->win = TSWINDOW_INITIALIZER; cur->win = TSWINDOW_INITIALIZER;
// no data in buffer, load data from file directly // no data in buffer, load data from file directly
if (pCheckInfo->iiter == NULL && pCheckInfo->iter == NULL) { if (pCheckInfo->iiter == NULL && pCheckInfo->iter == NULL) {
int32_t start = cur->pos; copyAllRemainRowsFromFileBlock(pQueryHandle, pCheckInfo, &blockInfo, endPos);
int32_t end = endPos;
if (!ASCENDING_TRAVERSE(pQueryHandle->order)) {
SWAP(start, end, int32_t);
}
numOfRows = copyDataFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, start, end);
// the time window should always be right order: skey <= ekey
cur->win = (STimeWindow) {.skey = tsArray[start], .ekey = tsArray[end]};
cur->lastKey = tsArray[endPos];
pos += (end - start + 1) * step;
cur->blockCompleted =
(((pos >= endPos || cur->lastKey > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
((pos <= endPos || cur->lastKey < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order)));
// if the buffer is not full in case of descending order query, move the data in the front of the buffer
moveDataToFront(pQueryHandle, numOfRows, numOfCols);
updateInfoAfterMerge(pQueryHandle, pCheckInfo, numOfRows, pos);
doCheckGeneratedBlockRange(pQueryHandle);
tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %p",
pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, cur->mixBlock, cur->win.skey,
cur->win.ekey, cur->rows, pQueryHandle->qinfo);
return; return;
} else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) { } else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) {
SSkipListNode* node = NULL; SSkipListNode* node = NULL;
@ -1261,7 +1303,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
int32_t qstart = 0, qend = 0; int32_t qstart = 0, qend = 0;
getQualifiedRowsPos(pQueryHandle, pos, end, numOfRows, &qstart, &qend); getQualifiedRowsPos(pQueryHandle, pos, end, numOfRows, &qstart, &qend);
numOfRows = copyDataFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, qstart, qend); numOfRows = doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, qstart, qend);
pos += (qend - qstart + 1) * step; pos += (qend - qstart + 1) * step;
cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? tsArray[qend]:tsArray[qstart]; cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? tsArray[qend]:tsArray[qstart];
@ -1285,7 +1327,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
int32_t start = -1, end = -1; int32_t start = -1, end = -1;
getQualifiedRowsPos(pQueryHandle, pos, endPos, numOfRows, &start, &end); getQualifiedRowsPos(pQueryHandle, pos, endPos, numOfRows, &start, &end);
numOfRows = copyDataFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, start, end); numOfRows = doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, start, end);
pos += (end - start + 1) * step; pos += (end - start + 1) * step;
cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? tsArray[end]:tsArray[start]; cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? tsArray[end]:tsArray[start];
@ -1658,7 +1700,7 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists
return loadFileDataBlock(pQueryHandle, pNext->compBlock, pNext->pTableCheckInfo, exists); return loadFileDataBlock(pQueryHandle, pNext->compBlock, pNext->pTableCheckInfo, exists);
} }
} else { } else {
tsdbDebug("%p continue in current data block, index:%d, %p", pQueryHandle, cur->slot, pQueryHandle->qinfo); tsdbDebug("%p continue in current data block, index:%d, pos:%d, %p", pQueryHandle, cur->slot, cur->pos, pQueryHandle->qinfo);
int32_t code = handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo); int32_t code = handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo);
*exists = pQueryHandle->realNumOfRows > 0; *exists = pQueryHandle->realNumOfRows > 0;
@ -1689,12 +1731,13 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle) {
assert(pQueryHandle->window.skey == pQueryHandle->window.ekey); assert(pQueryHandle->window.skey == pQueryHandle->window.ekey);
// starts from the buffer in case of descending timestamp order check data blocks // starts from the buffer in case of descending timestamp order check data blocks
// todo consider the query time window, current last_row does not apply the query time window
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
int32_t i = 0; int32_t i = 0;
while(i < numOfTables) { while(i < numOfTables) {
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i); STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
// the first qualified table for interpolation query
if (pQueryHandle->window.skey <= pCheckInfo->pTableObj->lastKey && if (pQueryHandle->window.skey <= pCheckInfo->pTableObj->lastKey &&
pCheckInfo->pTableObj->lastKey != TSKEY_INITIAL_VAL) { pCheckInfo->pTableObj->lastKey != TSKEY_INITIAL_VAL) {
break; break;
@ -2156,7 +2199,7 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) {
} }
// todo refactor // todo refactor
int32_t numOfRows = copyDataFromFileBlock(pHandle, pHandle->outputCapacity, 0, 0, pBlock->numOfRows - 1); int32_t numOfRows = doCopyRowsFromFileBlock(pHandle, pHandle->outputCapacity, 0, 0, pBlock->numOfRows - 1);
// if the buffer is not full in case of descending order query, move the data in the front of the buffer // if the buffer is not full in case of descending order query, move the data in the front of the buffer
if (!ASCENDING_TRAVERSE(pHandle->order) && numOfRows < pHandle->outputCapacity) { if (!ASCENDING_TRAVERSE(pHandle->order) && numOfRows < pHandle->outputCapacity) {

@@ -11,21 +11,4 @@
  *
  * You should have received a copy of the GNU Affero General Public License
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
-
-#define _DEFAULT_SOURCE
-#include "os.h"
-#include "tulog.h"
-
-void taosRemoveDir(char *rootDir) {
-  uError("%s not implemented yet", __FUNCTION__);
-}
-
-int taosMkDir(const char *path, mode_t mode) {
-  uError("%s not implemented yet", __FUNCTION__);
-  return 0;
-}
-
-void taosMvDir(char* destDir, char *srcDir) {
-  uError("%s not implemented yet", __FUNCTION__);
-}

src/tsdb/src/tsdbScan.c (new file, 36 lines)
@ -0,0 +1,36 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tsdbMain.h"
#ifndef _TSDB_PLUGINS
int tsdbScanFGroup(STsdbScanHandle* pScanHandle, char* rootDir, int fid) { return 0; }
STsdbScanHandle* tsdbNewScanHandle() { return NULL; }
void tsdbSetScanLogStream(STsdbScanHandle* pScanHandle, FILE* fLogStream) {}
int tsdbSetAndOpenScanFile(STsdbScanHandle* pScanHandle, char* rootDir, int fid) { return 0; }
int tsdbScanSCompIdx(STsdbScanHandle* pScanHandle) { return 0; }
int tsdbScanSCompBlock(STsdbScanHandle* pScanHandle, int idx) { return 0; }
int tsdbCloseScanFile(STsdbScanHandle* pScanHandle) { return 0; }
void tsdbFreeScanHandle(STsdbScanHandle* pScanHandle) {}
#endif
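These stubs keep the non-plugin build linking while the real scanner ships separately. A hypothetical driver for the scan API is sketched below; the call order is inferred from the declarations and the STsdbScanHandle fields added to tsdbMain.h above, not taken from the plugin code.

#include <stdio.h>
#include "tsdbMain.h"

static int exampleScanFileGroup(char* rootDir, int fid) {
  STsdbScanHandle* h = tsdbNewScanHandle();
  if (h == NULL) return -1;                       // non-plugin build: the stub returns NULL

  tsdbSetScanLogStream(h, stdout);
  if (tsdbSetAndOpenScanFile(h, rootDir, fid) < 0) goto _err;
  if (tsdbScanSCompIdx(h) < 0) goto _err;         // verify the SCompIdx part first
  for (int i = 0; i < h->numOfIdx; i++) {         // then every table's block chain
    if (tsdbScanSCompBlock(h, i) < 0) goto _err;
  }

  tsdbCloseScanFile(h);
  tsdbFreeScanHandle(h);
  return 0;

_err:
  tsdbCloseScanFile(h);
  tsdbFreeScanHandle(h);
  return -1;
}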

@@ -3,9 +3,10 @@ PROJECT(TDengine)
 AUX_SOURCE_DIRECTORY(src SRC)
 ADD_LIBRARY(tutil ${SRC})
+TARGET_LINK_LIBRARIES(tutil pthread osdetail lz4)

 IF (TD_LINUX)
-  TARGET_LINK_LIBRARIES(tutil pthread osdetail m rt lz4)
+  TARGET_LINK_LIBRARIES(tutil m rt)
   ADD_SUBDIRECTORY(tests)

   FIND_PATH(ICONV_INCLUDE_EXIST iconv.h /usr/include/ /usr/local/include/)
@@ -24,7 +25,7 @@ IF (TD_LINUX)
   ENDIF ()
 ELSEIF (TD_WINDOWS)
-  TARGET_LINK_LIBRARIES(tutil iconv regex pthread osdetail winmm IPHLPAPI ws2_32 lz4 wepoll)
+  TARGET_LINK_LIBRARIES(tutil iconv regex winmm IPHLPAPI ws2_32 wepoll)
 ELSEIF(TD_DARWIN)
-  TARGET_LINK_LIBRARIES(tutil iconv pthread osdetail lz4)
+  TARGET_LINK_LIBRARIES(tutil iconv)
 ENDIF()

@@ -110,7 +110,17 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da
  */
 void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen);

-void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void(*fp)(void*));
+/**
+ * get the value of the key, applying a callback before the value is returned
+ * @param pHashObj  hash table object
+ * @param key       key
+ * @param keyLen    length of the key
+ * @param fp        callback applied to the matched entry while the bucket is locked
+ * @param d         if not NULL, the value is copied into this caller-owned buffer
+ * @param dsize     size of the buffer pointed to by d
+ * @return          pointer to the value when d is NULL, otherwise NULL
+ */
+void* taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize);

 /**
  * remove item with the specified key

@@ -42,7 +42,7 @@ typedef struct SCacheDataNode {
   uint64_t signature;
   struct STrashElem *pTNodeHeader; // point to trash node head
   uint16_t keySize: 15; // max key size: 32kb
-  bool inTrashCan: 1; // denote if it is in trash or not
+  bool inTrashcan: 1; // denote if it is in trash or not
   uint32_t size; // allocated size for current SCacheDataNode
   T_REF_DECLARE()
   char *key;

@@ -255,10 +255,10 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da
 }

 void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) {
-  return taosHashGetCB(pHashObj, key, keyLen, NULL);
+  return taosHashGetCB(pHashObj, key, keyLen, NULL, NULL, 0);
 }

-void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *)) {
+void* taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize) {
   if (pHashObj->size <= 0 || keyLen == 0 || key == NULL) {
     return NULL;
   }
@@ -273,7 +273,6 @@ void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*f
   // no data, return directly
   if (atomic_load_32(&pe->num) == 0) {
     __rd_unlock(&pHashObj->lock, pHashObj->type);
-
     return NULL;
   }
@@ -297,7 +296,11 @@ void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*f
       fp(pNode->data);
     }

-    data = pNode->data;
+    if (d != NULL) {
+      memcpy(d, pNode->data, dsize);
+    } else {
+      data = pNode->data;
+    }
   }

   if (pHashObj->type == HASH_ENTRY_LOCK) {
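With the extra d/dsize arguments the lookup can copy the stored value into a caller-owned buffer while the bucket is still locked, instead of handing out a pointer into the table. A small sketch of the copy-out style follows; the SCacheDataNode-valued table and the incRefFn callback mirror the cache module's usage below, and exampleGetNode itself is hypothetical.

/* hypothetical helper; pHashObj is assumed to store SCacheDataNode* values */
static SCacheDataNode* exampleGetNode(SHashObj* pHashObj, const void* key, size_t keyLen,
                                      void (*incRefFn)(void*)) {
  SCacheDataNode* pNode = NULL;
  // the copy into pNode and the incRefFn call both happen under the bucket lock,
  // so the reference is taken before any other thread can remove the entry
  taosHashGetCB(pHashObj, key, keyLen, incRefFn, &pNode, sizeof(void*));
  return pNode;  // NULL when the key is absent
}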

@@ -90,7 +90,6 @@ static void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force);
 static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
   if (pNode->signature != (uint64_t)pNode) {
     uError("key:%s, %p data is invalid, or has been released", pNode->key, pNode);
-    assert(0);
     return;
   }
@@ -110,7 +109,7 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo
 static FORCE_INLINE void doRemoveElemInTrashcan(SCacheObj* pCacheObj, STrashElem *pElem) {
   if (pElem->pData->signature != (uint64_t) pElem->pData) {
-    uError("key:sig:0x%" PRIx64 " %p data has been released, ignore", pElem->pData->signature, pElem->pData);
+    uWarn("key:sig:0x%" PRIx64 " %p data has been released, ignore", pElem->pData->signature, pElem->pData);
     return;
   }
@@ -224,7 +223,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v
       taosTFree(p);
     } else {
       taosAddToTrash(pCacheObj, p);
-      uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode1->data, p);
+      uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode1->data, p->data);
     }
   }
@@ -265,17 +264,14 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
     return NULL;
   }

-  SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGetCB(pCacheObj->pHashTable, key, keyLen, incRefFn);
-  if (ptNode != NULL) {
-    assert ((*ptNode) != NULL && (int64_t) ((*ptNode)->data) != 0x40);
-  }
+  SCacheDataNode* ptNode = NULL;
+  taosHashGetCB(pCacheObj->pHashTable, key, keyLen, incRefFn, &ptNode, sizeof(void*));

-  void* pData = (ptNode != NULL)? (*ptNode)->data:NULL;
-  assert((int64_t)pData != 0x40);
+  void* pData = (ptNode != NULL)? ptNode->data:NULL;

   if (pData != NULL) {
     atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
-    uDebug("cache:%s, key:%p, %p is retrieved from cache, refcnt:%d", pCacheObj->name, key, pData, T_REF_VAL_GET(*ptNode));
+    uDebug("cache:%s, key:%p, %p is retrieved from cache, refcnt:%d", pCacheObj->name, key, pData, T_REF_VAL_GET(ptNode));
   } else {
     atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
     uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key);
@@ -292,7 +288,7 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) {
   SCacheDataNode *ptNode = (SCacheDataNode *)((char *)data - offset);
   if (ptNode->signature != (uint64_t)ptNode) {
-    uError("key: %p the data from cache is invalid", ptNode);
+    uError("cache:%s, key: %p the data from cache is invalid", pCacheObj->name, ptNode);
     return NULL;
   }
@@ -311,7 +307,7 @@ void *taosCacheTransfer(SCacheObj *pCacheObj, void **data) {
   SCacheDataNode *ptNode = (SCacheDataNode *)((char *)(*data) - offset);
   if (ptNode->signature != (uint64_t)ptNode) {
-    uError("key: %p the data from cache is invalid", ptNode);
+    uError("cache:%s, key: %p the data from cache is invalid", pCacheObj->name, ptNode);
     return NULL;
   }
@@ -334,16 +330,16 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
   SCacheDataNode *pNode = (SCacheDataNode *)((char *)(*data) - offset);
   if (pNode->signature != (uint64_t)pNode) {
-    uError("%p, release invalid cache data", pNode);
+    uError("cache:%s, %p, release invalid cache data", pCacheObj->name, pNode);
     return;
   }

   *data = NULL;

   // note: extend lifespan before dec ref count
-  bool inTrashCan = pNode->inTrashCan;
+  bool inTrashcan = pNode->inTrashcan;

-  if (pCacheObj->extendLifespan && (!inTrashCan) && (!_remove)) {
+  if (pCacheObj->extendLifespan && (!inTrashcan) && (!_remove)) {
     atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs());
     uDebug("cache:%s data:%p extend expire time: %"PRId64, pCacheObj->name, pNode->data, pNode->expireTime);
   }
@@ -354,7 +350,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
   char* d = pNode->data;
   int32_t ref = T_REF_VAL_GET(pNode);
uDebug("cache:%s, key:%p, %p is released, refcnt:%d, intrash:%d", pCacheObj->name, key, d, ref - 1, inTrashCan); uDebug("cache:%s, key:%p, %p is released, refcnt:%d, in trashcan:%d", pCacheObj->name, key, d, ref - 1, inTrashcan);
/* /*
* If it is not referenced by other users, remove it immediately. Otherwise move this node to trashcan wait for all users * If it is not referenced by other users, remove it immediately. Otherwise move this node to trashcan wait for all users
@ -363,17 +359,25 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
* NOTE: previous ref is 0, and current ref is still 0, remove it. If previous is not 0, there is another thread * NOTE: previous ref is 0, and current ref is still 0, remove it. If previous is not 0, there is another thread
* that tries to do the same thing. * that tries to do the same thing.
*/ */
if (inTrashCan) { if (inTrashcan) {
ref = T_REF_DEC(pNode); ref = T_REF_VAL_GET(pNode);
if (ref == 0) { if (ref == 1) {
// If this is the last ref, remove the node from the trashcan linked list first, and only then destroy it.
// Otherwise the refresh worker may destroy it if the ref count is decremented before it is unlinked.
assert(pNode->pTNodeHeader->pData == pNode); assert(pNode->pTNodeHeader->pData == pNode);
__cache_wr_lock(pCacheObj); __cache_wr_lock(pCacheObj);
doRemoveElemInTrashcan(pCacheObj, pNode->pTNodeHeader); doRemoveElemInTrashcan(pCacheObj, pNode->pTNodeHeader);
__cache_unlock(pCacheObj); __cache_unlock(pCacheObj);
ref = T_REF_DEC(pNode);
assert(ref == 0);
doDestroyTrashcanElem(pCacheObj, pNode->pTNodeHeader); doDestroyTrashcanElem(pCacheObj, pNode->pTNodeHeader);
} else {
ref = T_REF_DEC(pNode);
assert(ref >= 0);
} }
} else { } else {
// NOTE: remove it from hash in the first place, otherwise, the pNode may have been released by other thread // NOTE: remove it from hash in the first place, otherwise, the pNode may have been released by other thread
@ -413,7 +417,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
} }
} }
} else { } else {
uDebug("cache:%s, key:%p, %p has been removed from hash table by other thread already, refcnt:%d", uDebug("cache:%s, key:%p, %p has been removed from hash table by others already, refcnt:%d",
pCacheObj->name, pNode->key, pNode->data, ref); pCacheObj->name, pNode->key, pNode->data, ref);
} }
} }
@ -424,7 +428,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
char* p = pNode->data; char* p = pNode->data;
int32_t ref = T_REF_DEC(pNode); int32_t ref = T_REF_DEC(pNode);
uDebug("cache:%s, key:%p, %p released, refcnt:%d, data in trancan:%d", pCacheObj->name, key, p, ref, inTrashCan); uDebug("cache:%s, key:%p, %p released, refcnt:%d, data in trashcan:%d", pCacheObj->name, key, p, ref, inTrashcan);
} }
} }
@ -495,7 +499,7 @@ SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *
} }
void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) { void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
if (pNode->inTrashCan) { /* node is already in trash */ if (pNode->inTrashcan) { /* node is already in trash */
assert(pNode->pTNodeHeader != NULL && pNode->pTNodeHeader->pData == pNode); assert(pNode->pTNodeHeader != NULL && pNode->pTNodeHeader->pData == pNode);
return; return;
} }
@ -503,7 +507,7 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
STrashElem *pElem = calloc(1, sizeof(STrashElem)); STrashElem *pElem = calloc(1, sizeof(STrashElem));
pElem->pData = pNode; pElem->pData = pNode;
pElem->prev = NULL; pElem->prev = NULL;
pNode->inTrashCan = true; pNode->inTrashcan = true;
pNode->pTNodeHeader = pElem; pNode->pTNodeHeader = pElem;
__cache_wr_lock(pCacheObj); __cache_wr_lock(pCacheObj);
@ -525,7 +529,7 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) {
if (pCacheObj->numOfElemsInTrash == 0) { if (pCacheObj->numOfElemsInTrash == 0) {
if (pCacheObj->pTrash != NULL) { if (pCacheObj->pTrash != NULL) {
uError("key:inconsistency data in cache, numOfElem in trash:%d", pCacheObj->numOfElemsInTrash); uError("cache:%s, key:inconsistency data in cache, numOfElem in trashcan:%d", pCacheObj->name, pCacheObj->numOfElemsInTrash);
} }
pCacheObj->pTrash = NULL; pCacheObj->pTrash = NULL;
@ -542,7 +546,7 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) {
} }
if (force || (T_REF_VAL_GET(pElem->pData) == 0)) { if (force || (T_REF_VAL_GET(pElem->pData) == 0)) {
uDebug("key:%p, %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData->data, uDebug("cache:%s, key:%p, %p removed from trashcan. numOfElem in trashcan:%d", pCacheObj->name, pElem->pData->key, pElem->pData->data,
pCacheObj->numOfElemsInTrash - 1); pCacheObj->numOfElemsInTrash - 1);
STrashElem *p = pElem; STrashElem *p = pElem;


@ -270,7 +270,7 @@ void taosReadGlobalLogCfg() {
} }
wordfree(&full_path); wordfree(&full_path);
taosReadLogOption("tsLogDir", tsLogDir); taosReadLogOption("logDir", tsLogDir);
sprintf(fileName, "%s/taos.cfg", configDir); sprintf(fileName, "%s/taos.cfg", configDir);
fp = fopen(fileName, "r"); fp = fopen(fileName, "r");
@ -288,9 +288,9 @@ void taosReadGlobalLogCfg() {
option = value = NULL; option = value = NULL;
olen = vlen = 0; olen = vlen = 0;
taosGetline(&line, &len, fp); taosGetline(&line, &len, fp);
line[len - 1] = 0; line[len - 1] = 0;
paGetToken(line, &option, &olen); paGetToken(line, &option, &olen);
if (olen == 0) continue; if (olen == 0) continue;
option[olen] = 0; option[olen] = 0;


@ -62,6 +62,7 @@ typedef struct {
pthread_mutex_t logMutex; pthread_mutex_t logMutex;
} SLogObj; } SLogObj;
int32_t tsLogKeepDays = 0;
int32_t tsAsyncLog = 1; int32_t tsAsyncLog = 1;
float tsTotalLogDirGB = 0; float tsTotalLogDirGB = 0;
float tsAvailLogDirGB = 0; float tsAvailLogDirGB = 0;
@ -78,6 +79,7 @@ static int32_t taosPushLogBuffer(SLogBuff *tLogBuff, char *msg, int32_t msgLen
static SLogBuff *taosLogBuffNew(int32_t bufSize); static SLogBuff *taosLogBuffNew(int32_t bufSize);
static void taosCloseLogByFd(int32_t oldFd); static void taosCloseLogByFd(int32_t oldFd);
static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum); static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum);
extern void taosPrintGlobalCfg();
static int32_t taosStartLog() { static int32_t taosStartLog() {
pthread_attr_t threadAttr; pthread_attr_t threadAttr;
@ -136,11 +138,24 @@ static void taosUnLockFile(int32_t fd) {
} }
} }
static void taosKeepOldLog(char *oldName) {
if (tsLogKeepDays <= 0) return;
int64_t fileSec = taosGetTimestampSec();
char fileName[LOG_FILE_NAME_LEN + 20];
snprintf(fileName, LOG_FILE_NAME_LEN + 20, "%s.%" PRId64, tsLogObj.logName, fileSec);
taosRename(oldName, fileName);
taosRemoveOldLogFiles(tsLogDir, tsLogKeepDays);
}
static void *taosThreadToOpenNewFile(void *param) { static void *taosThreadToOpenNewFile(void *param) {
char name[LOG_FILE_NAME_LEN + 20]; char keepName[LOG_FILE_NAME_LEN + 20];
sprintf(keepName, "%s.%d", tsLogObj.logName, tsLogObj.flag);
tsLogObj.flag ^= 1; tsLogObj.flag ^= 1;
tsLogObj.lines = 0; tsLogObj.lines = 0;
char name[LOG_FILE_NAME_LEN + 20];
sprintf(name, "%s.%d", tsLogObj.logName, tsLogObj.flag); sprintf(name, "%s.%d", tsLogObj.logName, tsLogObj.flag);
umask(0); umask(0);
@ -150,6 +165,7 @@ static void *taosThreadToOpenNewFile(void *param) {
uError("open new log file fail! fd:%d reason:%s", fd, strerror(errno)); uError("open new log file fail! fd:%d reason:%s", fd, strerror(errno));
return NULL; return NULL;
} }
taosLockFile(fd); taosLockFile(fd);
(void)lseek(fd, 0, SEEK_SET); (void)lseek(fd, 0, SEEK_SET);
@ -157,9 +173,13 @@ static void *taosThreadToOpenNewFile(void *param) {
tsLogObj.logHandle->fd = fd; tsLogObj.logHandle->fd = fd;
tsLogObj.lines = 0; tsLogObj.lines = 0;
tsLogObj.openInProgress = 0; tsLogObj.openInProgress = 0;
uInfo("new log file is opened!!!");
taosCloseLogByFd(oldFd); taosCloseLogByFd(oldFd);
uInfo(" new log file:%d is opened", tsLogObj.flag);
uInfo("==================================");
taosPrintGlobalCfg();
taosKeepOldLog(keepName);
return NULL; return NULL;
} }
@ -264,20 +284,23 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) {
strcat(name, ".0"); strcat(name, ".0");
} }
if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) {
strcpy(name, fn);
strcat(name, ".1");
}
bool log0Exist = stat(name, &logstat0) >= 0;
bool log1Exist = stat(name, &logstat1) >= 0;
// if none of the log files exist, open 0, if both exists, open the old one // if none of the log files exist, open 0, if both exists, open the old one
if (stat(name, &logstat0) < 0) { if (!log0Exist && !log1Exist) {
tsLogObj.flag = 0; tsLogObj.flag = 0;
} else if (!log1Exist) {
tsLogObj.flag = 0;
} else if (!log0Exist) {
tsLogObj.flag = 1;
} else { } else {
if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { tsLogObj.flag = (logstat0.st_mtime > logstat1.st_mtime) ? 0 : 1;
strcpy(name, fn);
strcat(name, ".1");
}
if (stat(name, &logstat1) < 0) {
tsLogObj.flag = 1;
} else {
tsLogObj.flag = (logstat0.st_mtime > logstat1.st_mtime) ? 0 : 1;
}
} }
char fileName[LOG_FILE_NAME_LEN + 50] = "\0"; char fileName[LOG_FILE_NAME_LEN + 50] = "\0";


@ -253,7 +253,7 @@ void taosNotePrint(taosNoteInfo * pNote, const char * const format, ...)
ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, taosGetPthreadId()); ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, taosGetPthreadId());
#else #else
len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min,
ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); ptm->tm_sec, (int)timeSecs.tv_usec, (unsigned long int)pthread_self());
#endif #endif
va_start(argpointer, format); va_start(argpointer, format);
len += vsnprintf(buffer + len, MAX_NOTE_LINE_SIZE - len, format, argpointer); len += vsnprintf(buffer + len, MAX_NOTE_LINE_SIZE - len, format, argpointer);
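The cast added above is there because pthread_t is an opaque type, and feeding it straight to a %lx conversion is only safe by accident. A standalone sketch of the logging form the note module now uses (on Linux pthread_t is an unsigned long, so the cast is well defined there; the value is only good for telling threads apart in a log):

#include <pthread.h>
#include <stdio.h>

int main(void) {
    /* Cast explicitly before formatting, as taosNotePrint now does. */
    printf("thread id: %lx\n", (unsigned long int)pthread_self());
    return 0;
}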


@ -47,6 +47,15 @@ static void vnodeNotifyRole(void *ahandle, int8_t role);
static void vnodeCtrlFlow(void *handle, int32_t mseconds); static void vnodeCtrlFlow(void *handle, int32_t mseconds);
static int vnodeNotifyFileSynced(void *ahandle, uint64_t fversion); static int vnodeNotifyFileSynced(void *ahandle, uint64_t fversion);
#ifndef _SYNC
tsync_h syncStart(const SSyncInfo *info) { return NULL; }
int32_t syncForwardToPeer(tsync_h shandle, void *pHead, void *mhandle, int qtype) { return 0; }
void syncStop(tsync_h shandle) {}
int32_t syncReconfig(tsync_h shandle, const SSyncCfg * cfg) { return 0; }
int syncGetNodesRole(tsync_h shandle, SNodesRole * cfg) { return 0; }
void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code) {}
#endif
int32_t vnodeInitResources() { int32_t vnodeInitResources() {
vnodeInitWriteFp(); vnodeInitWriteFp();
vnodeInitReadFp(); vnodeInitReadFp();
@ -289,12 +298,16 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
syncInfo.notifyFileSynced = vnodeNotifyFileSynced; syncInfo.notifyFileSynced = vnodeNotifyFileSynced;
pVnode->sync = syncStart(&syncInfo); pVnode->sync = syncStart(&syncInfo);
#ifndef _SYNC
pVnode->role = TAOS_SYNC_ROLE_MASTER;
#else
if (pVnode->sync == NULL) { if (pVnode->sync == NULL) {
vError("vgId:%d, failed to open sync module, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica, vError("vgId:%d, failed to open sync module, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica,
tstrerror(terrno)); tstrerror(terrno));
vnodeCleanUp(pVnode); vnodeCleanUp(pVnode);
return terrno; return terrno;
} }
#endif
pVnode->qMgmt = qOpenQueryMgmt(pVnode->vgId); pVnode->qMgmt = qOpenQueryMgmt(pVnode->vgId);
if (pVnode->qMgmt == NULL) { if (pVnode->qMgmt == NULL) {
@ -369,7 +382,13 @@ void vnodeRelease(void *pVnodeRaw) {
char newDir[TSDB_FILENAME_LEN] = {0}; char newDir[TSDB_FILENAME_LEN] = {0};
sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId); sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId);
sprintf(newDir, "%s/vnode%d", tsVnodeBakDir, vgId); sprintf(newDir, "%s/vnode%d", tsVnodeBakDir, vgId);
taosRename(rootDir, newDir);
if (0 == tsEnableVnodeBak) {
vInfo("vgId:%d, vnode backup not enabled", pVnode->vgId);
} else {
taosRename(rootDir, newDir);
}
taosRemoveDir(rootDir); taosRemoveDir(rootDir);
dnodeSendStatusMsgToMnode(); dnodeSendStatusMsgToMnode();
} }
@ -658,9 +677,13 @@ static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pVnodeCfg->cfg.quorum); len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pVnodeCfg->cfg.quorum);
len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n"); len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n");
vInfo("vgId:%d, save vnode cfg, replica:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.replications);
for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) { for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) {
len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", pVnodeCfg->nodes[i].nodeId); len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", pVnodeCfg->nodes[i].nodeId);
len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", pVnodeCfg->nodes[i].nodeEp); len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", pVnodeCfg->nodes[i].nodeEp);
vInfo("vgId:%d, save vnode cfg, nodeId:%d nodeEp:%s", pVnodeCfg->cfg.vgId, pVnodeCfg->nodes[i].nodeId,
pVnodeCfg->nodes[i].nodeEp);
if (i < pVnodeCfg->cfg.replications - 1) { if (i < pVnodeCfg->cfg.replications - 1) {
len += snprintf(content + len, maxLen - len, " },{\n"); len += snprintf(content + len, maxLen - len, " },{\n");


@ -7,6 +7,5 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/src SRC)
IF (TD_LINUX) IF (TD_LINUX)
ADD_LIBRARY(twal ${SRC}) ADD_LIBRARY(twal ${SRC})
TARGET_LINK_LIBRARIES(twal tutil common) TARGET_LINK_LIBRARIES(twal tutil common)
ADD_SUBDIRECTORY(test) ADD_SUBDIRECTORY(test)
ENDIF () ENDIF ()


@ -63,7 +63,7 @@
<dependency> <dependency>
<groupId>com.taosdata.jdbc</groupId> <groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId> <artifactId>taos-jdbcdriver</artifactId>
<version>2.0.2</version> <version>2.0.4</version>
</dependency> </dependency>
</dependencies> </dependencies>
</project> </project>


@ -63,7 +63,7 @@
<dependency> <dependency>
<groupId>com.taosdata.jdbc</groupId> <groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId> <artifactId>taos-jdbcdriver</artifactId>
<version>2.0.2</version> <version>2.0.4</version>
</dependency> </dependency>
<dependency> <dependency>
@ -76,6 +76,24 @@
</dependencies> </dependencies>
<build> <build>
<resources>
<resource>
<directory>src/main/resources</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
</includes>
<filtering>true</filtering>
</resource>
<resource>
<directory>src/main/java</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
</includes>
</resource>
</resources>
<plugins> <plugins>
<plugin> <plugin>
<groupId>org.springframework.boot</groupId> <groupId>org.springframework.boot</groupId>


@ -0,0 +1,28 @@
package com.taosdata.jdbc.springbootdemo.controller;
import com.taosdata.jdbc.springbootdemo.domain.Rainfall;
import com.taosdata.jdbc.springbootdemo.service.RainStationService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
@RestController
@RequestMapping("/rainstation")
public class RainStationController {
@Autowired
private RainStationService service;
@GetMapping("/init")
public boolean init() {
service.init();
service.createTable();
return true;
}
@PostMapping("/insert")
public int insert(@RequestBody Rainfall rainfall){
return service.insert(rainfall);
}
}


@ -16,43 +16,47 @@ public class WeatherController {
/** /**
* create database and table * create database and table
*
* @return * @return
*/ */
@GetMapping("/init") @GetMapping("/init")
public boolean init(){ public boolean init() {
return weatherService.init(); return weatherService.init();
} }
/** /**
* Pagination Query * Pagination Query
*
* @param limit * @param limit
* @param offset * @param offset
* @return * @return
*/ */
@GetMapping("/{limit}/{offset}") @GetMapping("/{limit}/{offset}")
public List<Weather> queryWeather(@PathVariable Long limit, @PathVariable Long offset){ public List<Weather> queryWeather(@PathVariable Long limit, @PathVariable Long offset) {
return weatherService.query(limit, offset); return weatherService.query(limit, offset);
} }
/** /**
* upload single weather info * upload single weather info
*
* @param temperature * @param temperature
* @param humidity * @param humidity
* @return * @return
*/ */
@PostMapping("/{temperature}/{humidity}") @PostMapping("/{temperature}/{humidity}")
public int saveWeather(@PathVariable int temperature, @PathVariable float humidity){ public int saveWeather(@PathVariable int temperature, @PathVariable float humidity) {
return weatherService.save(temperature, humidity); return weatherService.save(temperature, humidity);
} }
/** /**
* upload multi weather info * upload multi weather info
*
* @param weatherList * @param weatherList
* @return * @return
*/ */
@PostMapping("/batch") @PostMapping("/batch")
public int batchSaveWeather(@RequestBody List<Weather> weatherList){ public int batchSaveWeather(@RequestBody List<Weather> weatherList) {
return weatherService.save(weatherList); return weatherService.save(weatherList);
} }


@ -0,0 +1,15 @@
package com.taosdata.jdbc.springbootdemo.dao;
import java.util.Map;
public interface DatabaseMapper {
int createDatabase(String dbname);
int dropDatabase(String dbname);
int createDatabaseWithParameters(Map<String, String> map);
int useDatabase(String dbname);
}


@ -0,0 +1,44 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.jdbc.springbootdemo.dao.DatabaseMapper">
<update id="createDatabase" parameterType="java.lang.String">
create database if not exists ${dbname}
</update>
<update id="dropDatabase" parameterType="java.lang.String">
DROP database if exists ${dbname}
</update>
<update id="creatDatabaseWithParameters" parameterType="map">
CREATE database if not EXISTS ${dbname}
<if test="keep != null">
KEEP ${keep}
</if>
<if test="days != null">
DAYS ${days}
</if>
<if test="replica != null">
REPLICA ${replica}
</if>
<if test="cache != null">
cache ${cache}
</if>
<if test="blocks != null">
blocks ${blocks}
</if>
<if test="minrows != null">
minrows ${minrows}
</if>
<if test="maxrows != null">
maxrows ${maxrows}
</if>
</update>
<update id="useDatabase" parameterType="java.lang.String">
use ${dbname}
</update>
</mapper>


@ -0,0 +1,9 @@
package com.taosdata.jdbc.springbootdemo.dao;
import java.util.Map;
public interface RainfallMapper {
int save(Map<String, Object> map);
}


@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.jdbc.springbootdemo.dao.RainfallMapper">
<insert id="save" parameterType="map">
INSERT INTO ${table} using ${dbname}.${stable} tags(#{values.station_code}, #{values.station_name}) (ts, name, code, rainfall) values (#{values.ts}, #{values.name}, #{values.code}, #{values.rainfall})
</insert>
</mapper>


@ -0,0 +1,8 @@
package com.taosdata.jdbc.springbootdemo.dao;
import com.taosdata.jdbc.springbootdemo.domain.TableMetadata;
public interface TableMapper {
boolean createSTable(TableMetadata tableMetadata);
}


@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.jdbc.springbootdemo.dao.TableMapper">
<update id="createSTable" parameterType="com.taosdata.jdbc.springbootdemo.domain.TableMetadata">
create table if not exists ${dbname}.${tablename}
<foreach collection="fields" item="field" index="index" open="(" close=")" separator=",">
${field.name} ${field.type}
</foreach>
TAGS
<foreach collection="tags" item="tag" index="index" open="(" close=")" separator=",">
${tag.name} ${tag.type}
</foreach>
</update>
<update id="dropTable" parameterType="java.lang.String">
drop ${tablename}
</update>
</mapper>


@ -0,0 +1,28 @@
package com.taosdata.jdbc.springbootdemo.domain;
public class FieldMetadata {
private String name;
private String type;
public FieldMetadata(String name, String type) {
this.name = name;
this.type = type;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
}


@ -0,0 +1,64 @@
package com.taosdata.jdbc.springbootdemo.domain;
import com.fasterxml.jackson.annotation.JsonFormat;
import java.sql.Timestamp;
public class Rainfall {
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8")
private Timestamp ts;
private String name;
private String code;
private float rainfall;
private String station_code;
private String station_name;
public Timestamp getTs() {
return ts;
}
public void setTs(Timestamp ts) {
this.ts = ts;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getCode() {
return code;
}
public void setCode(String code) {
this.code = code;
}
public float getRainfall() {
return rainfall;
}
public void setRainfall(float rainfall) {
this.rainfall = rainfall;
}
public String getStation_code() {
return station_code;
}
public void setStation_code(String station_code) {
this.station_code = station_code;
}
public String getStation_name() {
return station_name;
}
public void setStation_name(String station_name) {
this.station_name = station_name;
}
}


@ -0,0 +1,43 @@
package com.taosdata.jdbc.springbootdemo.domain;
import java.util.List;
public class TableMetadata {
private String dbname;
private String tablename;
private List<FieldMetadata> fields;
private List<TagMetadata> tags;
public String getDbname() {
return dbname;
}
public void setDbname(String dbname) {
this.dbname = dbname;
}
public String getTablename() {
return tablename;
}
public void setTablename(String tablename) {
this.tablename = tablename;
}
public List<FieldMetadata> getFields() {
return fields;
}
public void setFields(List<FieldMetadata> fields) {
this.fields = fields;
}
public List<TagMetadata> getTags() {
return tags;
}
public void setTags(List<TagMetadata> tags) {
this.tags = tags;
}
}


@ -0,0 +1,27 @@
package com.taosdata.jdbc.springbootdemo.domain;
public class TagMetadata {
private String name;
private String type;
public TagMetadata(String name, String type) {
this.name = name;
this.type = type;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
}


@ -1,9 +1,12 @@
package com.taosdata.jdbc.springbootdemo.domain; package com.taosdata.jdbc.springbootdemo.domain;
import com.fasterxml.jackson.annotation.JsonFormat;
import java.sql.Timestamp; import java.sql.Timestamp;
public class Weather { public class Weather {
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8")
private Timestamp ts; private Timestamp ts;
private int temperature; private int temperature;


@ -0,0 +1,72 @@
package com.taosdata.jdbc.springbootdemo.service;
import com.taosdata.jdbc.springbootdemo.dao.DatabaseMapper;
import com.taosdata.jdbc.springbootdemo.dao.RainfallMapper;
import com.taosdata.jdbc.springbootdemo.dao.TableMapper;
import com.taosdata.jdbc.springbootdemo.domain.FieldMetadata;
import com.taosdata.jdbc.springbootdemo.domain.Rainfall;
import com.taosdata.jdbc.springbootdemo.domain.TableMetadata;
import com.taosdata.jdbc.springbootdemo.domain.TagMetadata;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Service
public class RainStationService {
@Autowired
private DatabaseMapper databaseMapper;
@Autowired
private TableMapper tableMapper;
@Autowired
private RainfallMapper rainfallMapper;
public boolean init() {
databaseMapper.dropDatabase("rainstation");
Map<String, String> map = new HashMap<>();
map.put("dbname", "rainstation");
map.put("keep", "36500");
map.put("days", "30");
map.put("blocks", "4");
databaseMapper.createDatabaseWithParameters(map);
databaseMapper.useDatabase("rainstation");
return true;
}
public boolean createTable() {
TableMetadata tableMetadata = new TableMetadata();
tableMetadata.setDbname("rainstation");
tableMetadata.setTablename("monitoring");
List<FieldMetadata> fields = new ArrayList<>();
fields.add(new FieldMetadata("ts", "timestamp"));
fields.add(new FieldMetadata("name", "NCHAR(10)"));
fields.add(new FieldMetadata("code", " BINARY(8)"));
fields.add(new FieldMetadata("rainfall", "float"));
tableMetadata.setFields(fields);
List<TagMetadata> tags = new ArrayList<>();
tags.add(new TagMetadata("station_code", "BINARY(8)"));
tags.add(new TagMetadata("station_name", "NCHAR(10)"));
tableMetadata.setTags(tags);
tableMapper.createSTable(tableMetadata);
return true;
}
public int insert(Rainfall rainfall) {
Map<String, Object> map = new HashMap<>();
map.put("dbname", "rainstation");
map.put("table", "S_53646");
map.put("stable", "monitoring");
map.put("values", rainfall);
return rainfallMapper.save(map);
}
}

Some files were not shown because too many files have changed in this diff.