merge with develop
This commit is contained in:
commit
9382e4d85b
|
@ -26,6 +26,13 @@ SET(CMAKE_VERBOSE_MAKEFILE ON)
|
|||
# open the file named TDengine.sln
|
||||
#
|
||||
|
||||
SET(TD_GODLL FALSE)
|
||||
IF (${DLLTYPE} MATCHES "go")
|
||||
ADD_DEFINITIONS(-D_TD_GO_DLL_)
|
||||
MESSAGE(STATUS "input dll type: " ${DLLTYPE})
|
||||
SET(TD_GODLL TRUE)
|
||||
ENDIF ()
|
||||
|
||||
IF (NOT DEFINED TD_CLUSTER)
|
||||
MESSAGE(STATUS "Build the Lite Version")
|
||||
SET(TD_CLUSTER FALSE)
|
||||
|
@ -41,34 +48,43 @@ IF (NOT DEFINED TD_CLUSTER)
|
|||
SET(TD_ARM FALSE)
|
||||
SET(TD_ARM_64 FALSE)
|
||||
SET(TD_ARM_32 FALSE)
|
||||
SET(TD_MIPS FALSE)
|
||||
SET(TD_MIPS_64 FALSE)
|
||||
SET(TD_MIPS_32 FALSE)
|
||||
SET(TD_DARWIN_64 FALSE)
|
||||
SET(TD_WINDOWS_64 FALSE)
|
||||
|
||||
# if generate ARM version:
|
||||
# cmake -DARMVER=arm32 .. or cmake -DARMVER=arm64
|
||||
IF (${ARMVER} MATCHES "arm32")
|
||||
# cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
|
||||
IF (${CPUTYPE} MATCHES "aarch32")
|
||||
SET(TD_ARM TRUE)
|
||||
SET(TD_ARM_32 TRUE)
|
||||
ADD_DEFINITIONS(-D_TD_ARM_)
|
||||
ADD_DEFINITIONS(-D_TD_ARM_32_)
|
||||
ELSEIF (${ARMVER} MATCHES "arm64")
|
||||
ELSEIF (${CPUTYPE} MATCHES "aarch64")
|
||||
SET(TD_ARM TRUE)
|
||||
SET(TD_ARM_64 TRUE)
|
||||
ADD_DEFINITIONS(-D_TD_ARM_)
|
||||
ADD_DEFINITIONS(-D_TD_ARM_64_)
|
||||
ELSEIF (${CPUTYPE} MATCHES "mips64")
|
||||
SET(TD_MIPS TRUE)
|
||||
SET(TD_MIPS_64 TRUE)
|
||||
ADD_DEFINITIONS(-D_TD_MIPS_)
|
||||
ADD_DEFINITIONS(-D_TD_MIPS_64_)
|
||||
ELSEIF (${CPUTYPE} MATCHES "x64")
|
||||
MESSAGE(STATUS "input cpuType: " ${CPUTYPE})
|
||||
ELSEIF (${CPUTYPE} MATCHES "x86")
|
||||
MESSAGE(STATUS "input cpuType: " ${CPUTYPE})
|
||||
ELSE ()
|
||||
MESSAGE(STATUS "input cpuType: " ${CPUTYPE})
|
||||
ENDIF ()
|
||||
|
||||
IF (TD_ARM)
|
||||
ADD_DEFINITIONS(-D_TD_ARM_)
|
||||
IF (TD_ARM_32)
|
||||
ADD_DEFINITIONS(-D_TD_ARM_32_)
|
||||
ELSEIF (TD_ARM_64)
|
||||
ADD_DEFINITIONS(-D_TD_ARM_64_)
|
||||
ELSE ()
|
||||
EXIT ()
|
||||
ENDIF ()
|
||||
ENDIF ()
|
||||
#
|
||||
# Get OS information and store in variable TD_OS_INFO.
|
||||
#
|
||||
execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh)
|
||||
execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO)
|
||||
MESSAGE(STATUS "The current os is " ${TD_OS_INFO})
|
||||
|
||||
IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
|
||||
IF (${CMAKE_SIZEOF_VOID_P} MATCHES 8)
|
||||
|
@ -150,6 +166,12 @@ IF (NOT DEFINED TD_CLUSTER)
|
|||
ENDIF ()
|
||||
ADD_DEFINITIONS(-DLINUX)
|
||||
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
|
||||
IF (${TD_OS_INFO} MATCHES "Alpine")
|
||||
MESSAGE(STATUS "The current OS is Alpine, append extra flags")
|
||||
SET(COMMON_FLAGS "${COMMON_FLAGS} -largp")
|
||||
link_libraries(/usr/lib/libargp.a)
|
||||
ADD_DEFINITIONS(-D_ALPINE)
|
||||
ENDIF ()
|
||||
ELSEIF (TD_LINUX_32)
|
||||
IF (NOT TD_ARM)
|
||||
EXIT ()
|
||||
|
@ -160,11 +182,19 @@ IF (NOT DEFINED TD_CLUSTER)
|
|||
ADD_DEFINITIONS(-DLINUX)
|
||||
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
|
||||
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
||||
IF (${TD_OS_INFO} MATCHES "Alpine")
|
||||
MESSAGE(STATUS "The current OS is Alpine, add extra flags")
|
||||
SET(COMMON_FLAGS "${COMMON_FLAGS} -largp")
|
||||
link_library(/usr/lib/libargp.a)
|
||||
ADD_DEFINITIONS(-D_ALPINE)
|
||||
ENDIF ()
|
||||
ELSEIF (TD_WINDOWS_64)
|
||||
SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE)
|
||||
IF (NOT TD_GODLL)
|
||||
SET(COMMON_FLAGS "/nologo /WX- /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
|
||||
SET(DEBUG_FLAGS "/Zi /W3 /GL")
|
||||
SET(RELEASE_FLAGS "/W0 /GL")
|
||||
ENDIF ()
|
||||
ADD_DEFINITIONS(-DWINDOWS)
|
||||
ADD_DEFINITIONS(-D__CLEANUP_C)
|
||||
ADD_DEFINITIONS(-DPTW32_STATIC_LIB)
|
||||
|
@ -230,6 +260,7 @@ IF (NOT DEFINED TD_CLUSTER)
|
|||
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})")
|
||||
ELSEIF (TD_WINDOWS_64)
|
||||
SET(CMAKE_INSTALL_PREFIX C:/TDengine)
|
||||
IF (NOT TD_GODLL)
|
||||
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
|
||||
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector)
|
||||
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
|
||||
|
@ -245,6 +276,10 @@ IF (NOT DEFINED TD_CLUSTER)
|
|||
IF (TD_MVN_INSTALLED)
|
||||
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-1.0.2-dist.jar DESTINATION connector/jdbc)
|
||||
ENDIF ()
|
||||
ELSE ()
|
||||
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll DESTINATION driver)
|
||||
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll.a DESTINATION driver)
|
||||
ENDIF ()
|
||||
ENDIF ()
|
||||
ENDIF ()
|
||||
|
||||
|
|
|
@ -45,10 +45,10 @@ mkdir build && cd build
|
|||
cmake .. && cmake --build .
|
||||
```
|
||||
|
||||
if compiling on an arm64 processor, you need add one parameter:
|
||||
if compiling on an aarch64 processor, you need add one parameter:
|
||||
|
||||
```cmd
|
||||
cmake .. -DARMVER=arm64 && cmake --build .
|
||||
cmake .. -DCPUTYPE=aarch64 && cmake --build .
|
||||
```
|
||||
|
||||
# Quick Run
|
||||
|
|
|
@ -175,7 +175,10 @@ static const struct alias sysdep_aliases[] = {
|
|||
#ifdef __GNUC__
|
||||
__inline
|
||||
#endif
|
||||
const struct alias *
|
||||
// gcc -o0 bug fix
|
||||
// see http://git.savannah.gnu.org/gitweb/?p=libiconv.git;a=blobdiff;f=lib/iconv.c;h=31853a7f1c47871221189dbf597473a16d8a8da7;hp=5a1a32597fa3efc5f69624d37a2eb96f308cd241;hb=b29089d8b43abc8fba073da7e6dccaeba56b2b70;hpb=0a04404c90d6a725b8b6bbcd65e10c5fcf5993e9
|
||||
|
||||
static const struct alias *
|
||||
aliases2_lookup (register const char *str)
|
||||
{
|
||||
const struct alias * ptr;
|
||||
|
|
|
@ -46,11 +46,11 @@ TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafana目录
|
|||
|
||||
#### 配置数据源
|
||||
|
||||
用户可以直接通过localhost:3000的网址,登录Grafana服务器(用户名/密码:admin/admin),通过左侧`Configuration -> Data Sources`可以添加数据源,如下图所示:
|
||||
用户可以直接通过 localhost:3000 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示:
|
||||
|
||||

|
||||
|
||||
点击 `Add data source` 可进入新增数据源页面,在查询框中输入TDengine 可选择添加,如下图所示:
|
||||
点击 `Add data source` 可进入新增数据源页面,在查询框中输入 TDengine 可选择添加,如下图所示:
|
||||
|
||||

|
||||
|
||||
|
@ -58,7 +58,7 @@ TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafana目录
|
|||
|
||||

|
||||
|
||||
* HostTDengine:集群的中任意一台服务器的IP地址与TDengine RESTful接口的端口号(6020),默认http://localhost:6020。
|
||||
* Host: TDengine 集群的中任意一台服务器的 IP 地址与 TDengine RESTful 接口的端口号(6020),默认 http://localhost:6020。
|
||||
* User:TDengine 用户名。
|
||||
* Password:TDengine 用户密码。
|
||||
|
||||
|
@ -83,7 +83,7 @@ TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafana目录
|
|||

|
||||
|
||||
> 关于如何使用Grafana创建相应的监测界面以及更多有关使用Grafana的信息,请参考Grafana官方的[文档](https://grafana.com/docs/)。
|
||||
>
|
||||
|
||||
#### 导入 Dashboard
|
||||
|
||||
在 Grafana 插件目录 /usr/local/taos/connector/grafana/tdengine/dashboard/ 下提供了一个 `tdengine-grafana.json` 可导入的 dashboard。
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
# TAOS SQL
|
||||
|
||||
TDengine提供类似SQL语法,用户可以在TDengine Shell中使用SQL语句操纵数据库,也可以通过C/C++, Java(JDBC), Python, Go等各种程序来执行SQL语句。
|
||||
本文档说明TAOS SQL支持的语法规则、主要查询功能、支持的SQL查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的SQL语言的基础。
|
||||
|
||||
TAOS SQL是用户对TDengine进行数据写入和查询的主要工具。TAOS SQL为了便于用户快速上手,在一定程度上提供类似于标准SQL类似的风格和模式。严格意义上,TAOS SQL并不是也不试图提供SQL标准的语法。此外,由于TDengine针对的时序性结构化数据不提供修改和更新功能,因此在TAO SQL中不提供数据更新和数据删除的相关功能。
|
||||
|
||||
本章节SQL语法遵循如下约定:
|
||||
|
||||
|
@ -9,11 +11,41 @@ TDengine提供类似SQL语法,用户可以在TDengine Shell中使用SQL语句
|
|||
- | 表示多选一,选择其中一个即可,但不能输入|本身
|
||||
- … 表示前面的项可重复多个
|
||||
|
||||
为更好地说明SQL语法的规则及其特点,本文假设存在一个数据集。该数据集是针对两种类型的设备温度(湿度)传感器、气压(海拔)传感器建立的数据模型。
|
||||
针对温度传感器,具有超级表(super table) temp_stable。其数据模型如下:
|
||||
```
|
||||
taos> describe temp_stable;
|
||||
Field | Type | Length | Note |
|
||||
=======================================================================================================
|
||||
ts |TIMESTAMP | 8 | |
|
||||
temperature |FLOAT | 4 | |
|
||||
humidity |TINYINT | 1 | |
|
||||
status |TINYINT | 1 | |
|
||||
deviceid |BIGINT | 12 |tag |
|
||||
location |BINARY | 20 |tag |
|
||||
```
|
||||
数据集包含2个温度传感器的数据,按照TDengine的建模规则,对应2个子表,其名称分别是 temp_tb_1,temp_tb_2 。
|
||||
针对压力(海拔)传感器,具有超级表(super table) pressure_stable。其数据模型如下:
|
||||
数据集包含2个压力传感器数据,对应2个子表,分别是 press_tb_1,press_tb_2。
|
||||
|
||||
```text
|
||||
taos> describe pressure_stable;
|
||||
Field | Type | Length | Note |
|
||||
=======================================================================================================
|
||||
ts |TIMESTAMP | 8 | |
|
||||
height |FLOAT | 4 | |
|
||||
pressure |FLOAT | 4 | |
|
||||
devstat |TINYINT | 1 | |
|
||||
id |BIGINT | 8 |tag |
|
||||
city |NCHAR | 20 |tag |
|
||||
longitude |FLOAT | 4 |tag |
|
||||
latitude |FLOAT | 4 |tag |
|
||||
```
|
||||
## 支持的数据类型
|
||||
|
||||
使用TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则:
|
||||
|
||||
- 时间格式为YYYY-MM-DD HH:mm:ss.MS, 默认时间分辨率为毫秒。比如:2017-08-12 18:25:58.128
|
||||
- 时间格式为```YYYY-MM-DD HH:mm:ss.MS```, 默认时间分辨率为毫秒。比如:```2017-08-12 18:25:58.128```
|
||||
- 内部函数now是服务器的当前时间
|
||||
- 插入记录时,如果时间戳为0,插入数据时使用服务器当前时间
|
||||
- Epoch Time: 时间戳也可以是一个长整数,表示从1970-01-01 08:00:00.000开始的毫秒数
|
||||
|
@ -27,13 +59,13 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
| | 类型 | Bytes | 说明 |
|
||||
| ---- | :-------: | ------ | ------------------------------------------------------------ |
|
||||
| 1 | TIMESTAMP | 8 | 时间戳。最小精度毫秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 |
|
||||
| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31被用作Null值 |
|
||||
| 3 | BIGINT | 8 | 长整型,范围 [-2^59, 2^59] |
|
||||
| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31用作Null |
|
||||
| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63用于NULL |
|
||||
| 4 | FLOAT | 4 | 浮点型,有效位数6-7,范围 [-3.4E38, 3.4E38] |
|
||||
| 5 | DOUBLE | 8 | 双精度浮点型,有效位数15-16,范围 [-1.7E308, 1.7E308] |
|
||||
| 6 | BINARY | 自定义 | 用于记录字符串,最长不能超过504 bytes。binary仅支持字符串输入,字符串两端使用单引号引用,否则英文全部自动转化为小写。使用时须指定大小,如binary(20)定义了最长为20个字符的字符串,每个字符占1byte的存储空间。如果用户字符串超出20字节,将被自动截断。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示, 即 **\’**。 |
|
||||
| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767] |
|
||||
| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127] |
|
||||
| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768用于NULL |
|
||||
| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128用于NULL |
|
||||
| 9 | BOOL | 1 | 布尔型,{true, false} |
|
||||
| 10 | NCHAR | 自定义 | 用于记录非ASCII字符串,如中文字符。每个nchar字符占用4bytes的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 **\’**。nchar使用时须指定字符串大小,类型为nchar(10)的列表示此列的字符串最多存储10个nchar字符,会固定占用40bytes的空间。如用户字符串长度超出声明长度,则将被自动截断。 |
|
||||
|
||||
|
@ -165,19 +197,172 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
|
||||
## 数据查询
|
||||
|
||||
###查询语法是:
|
||||
### 查询语法:
|
||||
|
||||
```mysql
|
||||
SELECT {* | expr_list} FROM tb_name
|
||||
[WHERE where_condition]
|
||||
[ORDER BY _c0 { DESC | ASC }]
|
||||
[LIMIT limit [, OFFSET offset]]
|
||||
[>> export_file]
|
||||
SELECT [DISTINCT] select_expr [, select_expr ...]
|
||||
FROM {tb_name_list}
|
||||
[WHERE where_condition]
|
||||
[INTERVAL [interval_offset,] interval_val]
|
||||
[FILL fill_val]
|
||||
[SLIDING fill_val]
|
||||
[GROUP BY col_list]
|
||||
[ORDER BY col_list { DESC | ASC }]
|
||||
[HAVING expr_list]
|
||||
[SLIMIT limit_val [, SOFFSET offset_val]]
|
||||
[LIMIT limit_val [, OFFSET offset_val]]
|
||||
[>> export_file]
|
||||
```
|
||||
#### SELECT子句
|
||||
一个选择子句可以是联合查询(UNION)和另一个查询的子查询(SUBQUERY)。
|
||||
|
||||
SELECT function_list FROM tb_name
|
||||
[WHERE where_condition]
|
||||
[LIMIT limit [, OFFSET offset]]
|
||||
[>> export_file]
|
||||
##### 通配符
|
||||
通配符 * 可以用于代指全部列。对于普通表,结果中只有普通列。
|
||||
```
|
||||
taos> select * from temp_tb_1;
|
||||
ts | temperature |humidity|status|
|
||||
============================================================
|
||||
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |
|
||||
19-04-28 14:22:08.000| 21.50000 | 38 | 1 |
|
||||
19-04-28 14:22:09.000| 21.30000 | 38 | 1 |
|
||||
19-04-28 14:22:10.000| 21.20000 | 38 | 1 |
|
||||
19-04-28 14:22:11.000| 21.30000 | 35 | 0 |
|
||||
19-04-28 14:22:12.000| 22.00000 | 34 | 0 |
|
||||
```
|
||||
在针对超级表,通配符包含 _标签列_ 。
|
||||
```
|
||||
taos> select * from temp_stable;
|
||||
ts | temperature |humidity|status| deviceid | location |
|
||||
==============================================================================================
|
||||
19-04-28 14:22:07.000| 21.00000 | 37 | 1 |54197 |beijing |
|
||||
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |91234 |beijing |
|
||||
19-04-28 14:22:08.000| 21.50000 | 38 | 1 |91234 |beijing |
|
||||
19-04-28 14:22:09.000| 21.30000 | 38 | 1 |91234 |beijing |
|
||||
19-04-28 14:22:10.000| 21.20000 | 38 | 1 |91234 |beijing |
|
||||
19-04-28 14:22:11.000| 21.30000 | 35 | 0 |91234 |beijing |
|
||||
19-04-28 14:22:12.000| 22.00000 | 34 | 0 |91234 |beijing |
|
||||
```
|
||||
通配符支持表名前缀,以下两个SQL语句均为返回全部的列:
|
||||
```
|
||||
select * from temp_tb_1;
|
||||
select temp_tb_1.* from temp_tb_1;
|
||||
```
|
||||
在Join查询中,带前缀的\*和不带前缀\*返回的结果有差别, \*返回全部表的所有列数据(不包含标签),带前缀的通配符,则只返回该表的列数据。
|
||||
```
|
||||
taos> select * from temp_tb_1,temp_tb_2 where temp_tb_1.ts=temp_tb_2.ts;
|
||||
ts | temperature |humidity|status| ts | temperature |humidity|status|
|
||||
========================================================================================================================
|
||||
19-04-28 14:22:07.000| 20.00000 | 34 | 1 | 19-04-28 14:22:07.000| 21.00000 | 37 | 1 |
|
||||
```
|
||||
|
||||
```
|
||||
taos> select temp_tb_1.* from temp_tb_1,temp_tb_2 where temp_tb_1.ts=temp_tb_2.ts;
|
||||
ts | temperature |humidity|status|
|
||||
============================================================
|
||||
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |
|
||||
```
|
||||
|
||||
在使用SQL函数来进行查询过程中,部分SQL函数支持通配符操作。其中的区别在于:
|
||||
```count(\*)```函数只返回一列。```first```、```last```、```last_row```函数则是返回全部列。
|
||||
|
||||
```
|
||||
taos> select count(*) from temp_tb_1;
|
||||
count(*) |
|
||||
======================
|
||||
1 |
|
||||
```
|
||||
|
||||
```
|
||||
taos> select first(*) from temp_tb_1;
|
||||
first(ts) | first(temperature) |first(humidity)|first(status)|
|
||||
==========================================================================
|
||||
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |
|
||||
```
|
||||
|
||||
#### 结果集列名
|
||||
|
||||
```SELECT```子句中,如果不指定返回结果集合的列名,结果集列名称默认使用```SELECT```子句中的表达式名称作为列名称。此外,用户可使用```AS```来重命名返回结果集合中列的名称。例如:
|
||||
```
|
||||
taos> select ts, ts as primary_key_ts from temp_tb_1;
|
||||
ts | primary_key_ts |
|
||||
==============================================
|
||||
19-04-28 14:22:07.000| 19-04-28 14:22:07.000|
|
||||
```
|
||||
但是针对```first(*)```、```last(*)```、```last_row(*)```不支持针对单列的重命名。
|
||||
|
||||
#### DISTINCT修饰符*
|
||||
只能用于修饰标签列(TAGS)的结果,不能用于修饰普通列来获得去重后的结果。并且应用```DISTINCT```以后,只能进行单列的标签输出。
|
||||
```count(distinct column_name)```用以返回近似的不重复结果的数量,该结果是近似值。
|
||||
|
||||
#### 隐式结果列
|
||||
```Select_exprs```可以是表所属列的列名,也可以是基于列的函数表达式或计算式,数量的上限256个。当用户使用了```interval```或```group by tags```的子句以后,在最后返回结果中会强制返回时间戳列(第一列)和group by子句中的标签列。后续的版本中可以支持关闭group by子句中隐式列的输出,列输出完全由select子句控制。
|
||||
|
||||
#### 表(超级表)列表
|
||||
|
||||
FROM关键字后面可以是若干个表(超级表)列表,也可以是子查询的结果。
|
||||
如果没有指定用户的当前数据库,可以在表名称之前使用数据库的名称来指定表所属的数据库。例如:```sample.temp_tb_1``` 方式来跨库使用表。
|
||||
```
|
||||
SELECT * FROM sample.temp_tb_1;
|
||||
------------------------------
|
||||
use sample;
|
||||
SELECT * FROM temp_tb_1;
|
||||
```
|
||||
From子句中列表可以使用别名来让SQL整体更加简单。
|
||||
```
|
||||
SELECT t.ts FROM temp_tb_1 t ;
|
||||
```
|
||||
> 暂不支持FROM子句的表别名
|
||||
|
||||
#### 特殊功能
|
||||
部分特殊的查询功能可以不使用FROM子句执行。获取当前所在的数据库 database()
|
||||
```
|
||||
taos> SELECT database();
|
||||
database() |
|
||||
=================================
|
||||
sample |
|
||||
```
|
||||
如果登录的时候没有指定默认数据库,且没有使用```use``命令切换数据,则返回NULL。
|
||||
```
|
||||
taos> select database();
|
||||
database() |
|
||||
=================================
|
||||
NULL |
|
||||
```
|
||||
获取服务器和客户端版本号:
|
||||
```
|
||||
SELECT client_version()
|
||||
SELECT server_version()
|
||||
```
|
||||
服务器状态检测语句。如果服务器正常,返回一个数字(例如 1)。如果服务器异常,返回error code。该SQL语法能兼容连接池对于TDengine状态的检查及第三方工具对于数据库服务器状态的检查。并可以避免出现使用了错误的心跳检测SQL语句导致的连接池连接丢失的问题。
|
||||
```
|
||||
SELECT server_status()
|
||||
SELECT server_status() AS result
|
||||
```
|
||||
#### TAOS SQL中特殊关键词
|
||||
|
||||
> TBNAME: 在超级表查询中可视为一个特殊的标签,代表查询涉及的子表名<br>
|
||||
\_c0: 表示表(超级表)的第一列
|
||||
|
||||
#### 小技巧
|
||||
获取一个超级表所有的子表名及相关的标签信息:
|
||||
```
|
||||
SELECT TBNAME, location FROM temp_stable
|
||||
```
|
||||
统计超级表下辖子表数量:
|
||||
```
|
||||
SELECT COUNT(TBNAME) FROM temp_stable
|
||||
```
|
||||
以上两个查询均只支持在Where条件子句中添加针对标签(TAGS)的过滤条件。例如:
|
||||
```
|
||||
taos> select count(tbname) from temp_stable;
|
||||
count(tbname) |
|
||||
======================
|
||||
2 |
|
||||
|
||||
taos> select count(tbname) from temp_stable where deviceid > 60000;
|
||||
count(tbname) |
|
||||
======================
|
||||
1 |
|
||||
```
|
||||
|
||||
- 可以使用* 返回所有列,或指定列名。可以对数字列进行四则运算,可以给输出的列取列名
|
||||
|
|
|
@ -33,11 +33,26 @@ taosd -c /home/user
|
|||
|
||||
下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节。**注意:配置修改后,需要重启*taosd*服务才能生效。**
|
||||
|
||||
**internalIp**
|
||||
- 默认值:操作配置的IP地址列表中的第一个IP地址
|
||||
**privateIp**
|
||||
- 默认值:物理节点IP地址列表中的第一个IP地址
|
||||
|
||||
对外提供服务的IP地址。
|
||||
|
||||
**publicIp**
|
||||
- 默认值:与privateIp相同
|
||||
|
||||
对于阿里等云平台,此为公网IP地址,publicIp在内部映射为对应的privateIP地址,仅对企业版有效。
|
||||
|
||||
**masterIp**
|
||||
- 默认值:与privateIp相同
|
||||
|
||||
集群内第一个物理节点的privateIp地址,仅对企业版有效。
|
||||
|
||||
**secondIp**
|
||||
- 默认值:与privateIp相同
|
||||
|
||||
集群内第二个物理节点的privateIp地址,仅对企业版有效。
|
||||
|
||||
**mgmtShellPort**
|
||||
- 默认值: _6030_
|
||||
|
||||
|
@ -50,6 +65,28 @@ taosd -c /home/user
|
|||
数据节点与客户端通信使用的TCP/UDP端口号。
|
||||
> 端口范围 _6035_ - _6039_ 的5个端口用于UDP通信。此外,还使用端口 _6035_ 用于TCP通讯。
|
||||
|
||||
**mgmtVnodePort**
|
||||
- 默认值: _6040_
|
||||
|
||||
管理节点与数据节点通信使用的TCP/UDP端口号,仅对企业版有效。
|
||||
> 端口范围 _6040_ - _6044_ 的5个端口用于UDP通信。此外,还使用端口 _6040_ 用于TCP通讯。
|
||||
|
||||
**vnodeVnodePort**
|
||||
- 默认值: _6045_
|
||||
|
||||
数据节点与数据节点通信使用的TCP/UDP端口号,仅对企业版有效。
|
||||
> 端口范围 _6045_ - _6049_ 的5个端口用于UDP通信。此外,还使用端口 _6045_ 用于TCP通讯。
|
||||
|
||||
**mgmtMgmtPort**
|
||||
- 默认值: _6050_
|
||||
|
||||
管理节点与管理节点通信使用的UDP端口号,仅对企业版有效。
|
||||
|
||||
**mgmtSyncPort**
|
||||
- 默认值: _6050_
|
||||
|
||||
管理节点与管理节点同步使用的TCP端口号,仅对企业版有效。
|
||||
|
||||
**httpPort**
|
||||
- 默认值: _6020_
|
||||
|
||||
|
|
|
@ -880,7 +880,7 @@ npm install td-connector
|
|||
```
|
||||
我们建议用户使用npm 安装node.js连接器。如果您没有安装npm, 可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下
|
||||
|
||||
To interact with TDengine, we make use of the [node-gyp](https://github.com/nodejs/node-gyp) library. To install, you will need to install the following depending on platform (the following instructions are quoted from node-gyp)我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件:
|
||||
我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件:
|
||||
|
||||
### Unix
|
||||
|
||||
|
@ -939,7 +939,7 @@ To interact with TDengine, we make use of the [node-gyp](https://github.com/node
|
|||
|
||||
#### 连接
|
||||
|
||||
使用node.js连接器时,必须先require```td-connector```,然后使用 ```taos.connect``` 函数。```taos.connect``` 函数必须提供的参数是```host```,其它参数在没有提供的情况下会使用如下的默认值。最后需要初始化```cursor``` 来和TDengine服务端通信
|
||||
使用node.js连接器时,必须先<em>require</em> ```td-connector```,然后使用 ```taos.connect``` 函数。```taos.connect``` 函数必须提供的参数是```host```,其它参数在没有提供的情况下会使用如下的默认值。最后需要初始化```cursor``` 来和TDengine服务端通信
|
||||
|
||||
```javascript
|
||||
const taos = require('td-connector');
|
||||
|
@ -1043,7 +1043,7 @@ https://gitee.com/maikebing/Maikebing.EntityFrameworkCore.Taos
|
|||
|
||||
### 客户端安装
|
||||
|
||||
在Windows操作系统下,TDengine提供64位的Windows客户端,客户端安装程序为.exe文件,运行该文件即可安装,安装路径为C:\TDengine。Windows的客户端可运行在主流的64位Windows平台之上,客户端目录结构如下:
|
||||
在Windows操作系统下,TDengine提供64位的Windows客户端([点击下载](https://www.taosdata.com/cn/all-downloads/#tdengine_win-list)),客户端安装程序为.exe文件,运行该文件即可安装,安装路径为C:\TDengine。Windows的客户端可运行在主流的64位Windows平台之上,客户端目录结构如下:
|
||||
|
||||
```
|
||||
├── cfg
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
# vendor/
|
||||
.idea/
|
||||
.vscode/
|
|
@ -0,0 +1,661 @@
|
|||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published
|
||||
by the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
|
@ -0,0 +1,241 @@
|
|||
## 样例数据导入
|
||||
|
||||
该工具可以根据用户提供的 `json` 或 `csv` 格式样例数据文件快速导入 `TDengine`,目前仅能在 Linux 上运行。
|
||||
|
||||
为了体验写入和查询性能,可以对样例数据进行横向、纵向扩展。横向扩展是指将一个表(监测点)的数据克隆到多张表,纵向扩展是指将样例数据中的一段时间范围内的数据在时间轴上复制。该工具还支持历史数据导入至当前时间后持续导入,这样可以测试插入和查询并行进行的场景,以模拟真实环境。
|
||||
|
||||
## 下载安装
|
||||
|
||||
### 下载可执行文件
|
||||
|
||||
由于该工具使用 go 语言开发,为了方便使用,项目中已经提供了编译好的可执行文件 `bin/taosimport`。通过 `git clone https://github.com/taosdata/TDengine.git` 命令或者直接下载 `ZIP` 文件解压进入样例导入程序目录 `cd importSampleData`,执行 `bin/taosimport`。
|
||||
|
||||
### go 源码编译
|
||||
|
||||
由于该工具使用 go 语言开发,编译之前需要先安装 go,具体请参考 [Getting Started][2],而且需要安装 TDengine 的 Go Connector, 具体请参考[TDengine 连接器文档][3]。安装完成之后,执行以下命令即可编译成可执行文件 `bin/taosimport`。
|
||||
```shell
go get github.com/taosdata/TDengine/importSampleData
cd $GOPATH/src/github.com/taosdata/TDengine/importSampleData
go build -o bin/taosimport app/main.go
```
|
||||
|
||||
> 注:由于目前 TDengine 的 go connector 只支持 linux 环境,所以该工具暂时只能在 linux 系统中运行。
|
||||
> 如果 go get 失败可以下载之后复制 `github.com/taosdata/TDengine/importSampleData` 文件夹到 $GOPATH 的 src 目录下再执行 `go build -o bin/taosimport app/main.go`。
|
||||
|
||||
## 使用
|
||||
|
||||
### 快速体验
|
||||
|
||||
执行命令 `bin/taosimport` 会根据默认配置执行以下操作:
|
||||
1. 创建数据库
|
||||
|
||||
自动创建名称为 `test_yyyyMMdd` 的数据库。
|
||||
|
||||
2. 创建超级表
|
||||
|
||||
根据配置文件 `config/cfg.toml` 中指定的 `sensor_info` 场景信息创建相应的超级表。
|
||||
> 建表语句: create table s_sensor_info(ts timestamp, temperature int, humidity float) tags(location binary(20), color binary(16), devgroup int);
|
||||
|
||||
3. 自动建立子表并插入数据
|
||||
|
||||
根据配置文件 `config/cfg.toml` 中 `sensor_info` 场景指定的 `data/sensor_info.csv` 样例数据进行横向扩展 `100` 倍(可通过 hnum 参数指定),即自动创建 `10*100=1000` 张子表(默认样例数据中有 10 张子表,每张表 100 条数据),启动 `10` 个线程(可通过 thread 参数指定)对每张子表循环导入 `1000` 次(可通过 vnum 参数指定)。
|
||||
|
||||
进入 `taos shell`,可运行如下查询验证:
|
||||
|
||||
* 查询记录数
|
||||
|
||||
```shell
|
||||
taos> use test_yyyyMMdd;
|
||||
taos> select count(*) from s_sensor_info;
|
||||
```
|
||||
* 查询各个分组的记录数
|
||||
|
||||
```shell
|
||||
taos> select count(*) from s_sensor_info group by devgroup;
|
||||
```
|
||||
* 按 1h 间隔查询各聚合指标
|
||||
|
||||
```shell
|
||||
taos> select count(temperature), sum(temperature), avg(temperature) from s_sensor_info interval(1h);
|
||||
```
|
||||
* 查询指定位置最新上传指标
|
||||
|
||||
```shell
|
||||
taos> select last(*) from s_sensor_info where location = 'beijing';
|
||||
```
|
||||
> 更多查询及函数使用请参考 [数据查询][4]
|
||||
|
||||
### 详细使用说明
|
||||
|
||||
执行命令 `bin/taosimport -h` 可以查看详细参数使用说明:
|
||||
|
||||
* -cfg string
|
||||
|
||||
导入配置文件路径,包含样例数据文件相关描述及对应 TDengine 配置信息。默认使用 `config/cfg.toml`。
|
||||
|
||||
* -cases string
|
||||
|
||||
需要导入的场景名称,该名称可从 -cfg 指定的配置文件中 `[usecase]` 查看,可同时导入多个场景,中间使用逗号分隔,如:`sensor_info,camera_detection`,默认为 `sensor_info`。
|
||||
|
||||
* -hnum int
|
||||
|
||||
需要将样例数据进行横向扩展的倍数,假设原有样例数据包含 1 张子表 `t_0` 数据,指定 hnum 为 2 时会根据原有表名创建 `t_0、t_1` 两张子表。默认为 100。
|
||||
|
||||
* -vnum int
|
||||
|
||||
    需要将样例数据进行纵向扩展的次数,如果设置为 0 代表将历史数据导入至当前时间后持续按照指定间隔导入。默认为 1000,表示将样例数据在时间轴上纵向复制 1000 次。
|
||||
|
||||
* -delay int
|
||||
|
||||
当 vnum 设置为 0 时持续导入的时间间隔,默认为所有场景中最小记录间隔时间的一半,单位 ms。
|
||||
|
||||
* -tick int
|
||||
|
||||
打印统计信息的时间间隔,默认 2000 ms。
|
||||
|
||||
* -save int
|
||||
|
||||
是否保存统计信息到 tdengine 的 statistic 表中,1 是,0 否, 默认 0。
|
||||
|
||||
* -auto int
|
||||
|
||||
是否自动生成样例数据中的主键时间戳,1 是,0 否, 默认 0。
|
||||
|
||||
* -start string
|
||||
|
||||
导入的记录开始时间,格式为 `"yyyy-MM-dd HH:mm:ss.SSS"`,不设置会使用样例数据中最小时间,设置后会忽略样例数据中的主键时间,会按照指定的 start 进行导入。如果 auto 为 1,则必须设置 start,默认为空。
|
||||
|
||||
* -interval int
|
||||
|
||||
导入的记录时间间隔,该设置只会在指定 `auto=1` 之后生效,否则会根据样例数据自动计算间隔时间。单位为毫秒,默认 1000。
|
||||
|
||||
* -thread int
|
||||
|
||||
执行导入数据的线程数目,默认为 10。
|
||||
|
||||
* -batch int
|
||||
|
||||
执行导入数据时的批量大小,默认为 100。批量是指一次写操作时,包含多少条记录。
|
||||
|
||||
* -host string
|
||||
|
||||
导入的 TDengine 服务器 IP,默认为 127.0.0.1。
|
||||
|
||||
* -port int
|
||||
|
||||
导入的 TDengine 服务器端口,默认为 6030。
|
||||
|
||||
* -user string
|
||||
|
||||
导入的 TDengine 用户名,默认为 root。
|
||||
|
||||
* -password string
|
||||
|
||||
导入的 TDengine 用户密码,默认为 taosdata。
|
||||
|
||||
* -dropdb int
|
||||
|
||||
导入数据之前是否删除数据库,1 是,0 否, 默认 0。
|
||||
|
||||
* -db string
|
||||
|
||||
导入的 TDengine 数据库名称,默认为 test_yyyyMMdd。
|
||||
|
||||
* -dbparam string
|
||||
|
||||
当指定的数据库不存在时,自动创建数据库时可选项配置参数,如 `days 10 cache 16000 ablocks 4`,默认为空。
|
||||
|
||||
### 常见使用示例
|
||||
|
||||
* `bin/taosimport -cfg config/cfg.toml -cases sensor_info,camera_detection -hnum 1 -vnum 10`
|
||||
|
||||
执行上述命令后会将 sensor_info、camera_detection 两个场景的数据各导入 10 次。
|
||||
|
||||
* `bin/taosimport -cfg config/cfg.toml -cases sensor_info -hnum 2 -vnum 0 -start "2019-12-12 00:00:00.000" -interval 5000`
|
||||
|
||||
    执行上述命令后会将 sensor_info 场景的数据横向扩展 2 倍,从指定时间 `2019-12-12 00:00:00.000` 开始、以 5000 毫秒的记录间隔进行导入;导入至当前时间后会自动持续导入。
|
||||
|
||||
### config/cfg.toml 配置文件说明
|
||||
|
||||
``` toml
|
||||
# 传感器场景
|
||||
[sensor_info] # 场景名称
|
||||
format = "csv" # 样例数据文件格式,可以是 json 或 csv,具体字段应至少包含 subTableName、tags、fields 指定的字段。
|
||||
filePath = "data/sensor_info.csv" # 样例数据文件路径,程序会循环使用该文件数据
|
||||
separator = "," # csv 样例文件中字段分隔符,默认逗号
|
||||
|
||||
stname = "sensor_info" # 超级表名称
|
||||
subTableName = "devid" # 使用样例数据中指定字段当作子表名称一部分,子表名称格式为 t_subTableName_stname,扩展表名为 t_subTableName_stname_i。
|
||||
timestamp = "ts" # 使用 fields 中哪个字段当作主键,类型必须为 timestamp
|
||||
timestampType="millisecond" # 样例数据中主键时间字段是 millisecond 还是 dateTime 格式
|
||||
#timestampTypeFormat = "2006-01-02 15:04:05.000" # 主键日期时间格式,timestampType 为 dateTime 时需要指定
|
||||
tags = [
|
||||
# 标签列表,name 为标签名称,type 为标签类型
|
||||
{ name = "location", type = "binary(20)" },
|
||||
{ name = "color", type = "binary(16)" },
|
||||
{ name = "devgroup", type = "int" },
|
||||
]
|
||||
|
||||
fields = [
|
||||
# 字段列表,name 为字段名称,type 为字段类型
|
||||
{ name = "ts", type = "timestamp" },
|
||||
{ name = "temperature", type = "int" },
|
||||
{ name = "humidity", type = "float" },
|
||||
]
|
||||
|
||||
# 摄像头检测场景
|
||||
[camera_detection] # 场景名称
|
||||
format = "json" # 样例数据文件格式,可以是 json 或 csv,具体字段应至少包含 subTableName、tags、fields 指定的字段。
|
||||
filePath = "data/camera_detection.json" # 样例数据文件路径,程序会循环使用该文件数据
|
||||
#separator = "," # csv 样例文件中字段分隔符,默认逗号, 如果是 json 文件可以不用配置
|
||||
|
||||
stname = "camera_detection" # 超级表名称
|
||||
subTableName = "sensor_id" # 使用样例数据中指定字段当作子表名称一部分,子表名称格式为 t_subTableName_stname,扩展表名为 t_subTableName_stname_i。
|
||||
timestamp = "ts" # 使用 fields 中哪个字段当作主键,类型必须为 timestamp
|
||||
timestampType="dateTime" # 样例数据中主键时间字段是 millisecond 还是 dateTime 格式
|
||||
timestampTypeFormat = "2006-01-02 15:04:05.000" # 主键日期时间格式,timestampType 为 dateTime 时需要指定
|
||||
tags = [
|
||||
# 标签列表,name 为标签名称,type 为标签类型
|
||||
{ name = "home_id", type = "binary(30)" },
|
||||
{ name = "object_type", type = "int" },
|
||||
{ name = "object_kind", type = "binary(20)" },
|
||||
]
|
||||
|
||||
fields = [
|
||||
# 字段列表,name 为字段名称,type 为字段类型
|
||||
{ name = "ts", type = "timestamp" },
|
||||
{ name = "states", type = "tinyint" },
|
||||
{ name = "battery_voltage", type = "float" },
|
||||
]
|
||||
|
||||
# other cases
|
||||
|
||||
```
|
||||
|
||||
### 样例数据格式说明
|
||||
|
||||
#### json
|
||||
|
||||
当配置文件 `config/cfg.toml` 中各场景的 format="json" 时,样例数据文件需要提供 tags 和 fields 字段列表中的字段值。样例数据格式如下:
|
||||
|
||||
```json
|
||||
{"home_id": "603", "sensor_id": "s100", "ts": "2019-01-01 00:00:00.000", "object_type": 1, "object_kind": "night", "battery_voltage": 0.8, "states": 1}
|
||||
{"home_id": "604", "sensor_id": "s200", "ts": "2019-01-01 00:00:00.000", "object_type": 2, "object_kind": "day", "battery_voltage": 0.6, "states": 0}
|
||||
```
|
||||
|
||||
#### csv
|
||||
|
||||
当配置文件 `config/cfg.toml` 中各场景的 format="csv" 时,样例数据文件需要提供表头和对应的数据,其中字段分隔符由使用场景中 `separator` 指定,默认逗号。具体格式如下:
|
||||
|
||||
```csv
|
||||
devid,location,color,devgroup,ts,temperature,humidity
|
||||
0, beijing, white, 0, 1575129600000, 16, 19.405091
|
||||
0, beijing, white, 0, 1575129601000, 22, 14.377142
|
||||
```
|
||||
|
||||
|
||||
|
||||
[1]: https://github.com/taosdata/TDengine
|
||||
[2]: https://golang.org/doc/install
|
||||
[3]: https://www.taosdata.com/cn/documentation/connector/#Go-Connector
|
||||
[4]: https://www.taosdata.com/cn/documentation/taos-sql/#%E6%95%B0%E6%8D%AE%E6%9F%A5%E8%AF%A2
|
File diff suppressed because it is too large
Load Diff
Binary file not shown.
|
@ -0,0 +1,51 @@
|
|||
# 传感器场景
|
||||
[sensor_info] # 场景名称
|
||||
format = "csv" # 样例数据文件格式,可以是 json 或 csv,具体字段应至少包含 subTableName、tags、fields 指定的字段。
|
||||
filePath = "data/sensor_info.csv" # 样例数据文件路径,程序会循环使用该文件数据
|
||||
separator = "," # csv 样例文件中字段分隔符,默认逗号
|
||||
|
||||
stname = "sensor_info" # 超级表名称
|
||||
subTableName = "devid" # 使用样例数据中指定字段当作子表名称一部分,子表名称格式为 t_subTableName_stname,扩展表名为 t_subTableName_stname_i。
|
||||
timestamp = "ts" # 使用 fields 中哪个字段当作主键,类型必须为 timestamp
|
||||
timestampType="millisecond" # 样例数据中主键时间字段是 millisecond 还是 dateTime 格式
|
||||
#timestampTypeFormat = "2006-01-02 15:04:05.000" # 主键日期时间格式,timestampType 为 dateTime 时需要指定
|
||||
tags = [
|
||||
# 标签列表,name 为标签名称,type 为标签类型
|
||||
{ name = "location", type = "binary(20)" },
|
||||
{ name = "color", type = "binary(16)" },
|
||||
{ name = "devgroup", type = "int" },
|
||||
]
|
||||
|
||||
fields = [
|
||||
# 字段列表,name 为字段名称,type 为字段类型
|
||||
{ name = "ts", type = "timestamp" },
|
||||
{ name = "temperature", type = "int" },
|
||||
{ name = "humidity", type = "float" },
|
||||
]
|
||||
|
||||
# 摄像头检测场景
|
||||
[camera_detection] # 场景名称
|
||||
format = "json" # 样例数据文件格式,可以是 json 或 csv,具体字段应至少包含 subTableName、tags、fields 指定的字段。
|
||||
filePath = "data/camera_detection.json" # 样例数据文件路径,程序会循环使用该文件数据
|
||||
#separator = "," # csv 样例文件中字段分隔符,默认逗号, 如果是 json 文件可以不用配置
|
||||
|
||||
stname = "camera_detection" # 超级表名称
|
||||
subTableName = "sensor_id" # 使用样例数据中指定字段当作子表名称一部分,子表名称格式为 t_subTableName_stname,扩展表名为 t_subTableName_stname_i。
|
||||
timestamp = "ts" # 使用 fields 中哪个字段当作主键,类型必须为 timestamp
|
||||
timestampType="dateTime" # 样例数据中主键时间字段是 millisecond 还是 dateTime 格式
|
||||
timestampTypeFormat = "2006-01-02 15:04:05.000" # 主键日期时间格式,timestampType 为 dateTime 时需要指定
|
||||
tags = [
|
||||
# 标签列表,name 为标签名称,type 为标签类型
|
||||
{ name = "home_id", type = "binary(30)" },
|
||||
{ name = "object_type", type = "int" },
|
||||
{ name = "object_kind", type = "binary(20)" },
|
||||
]
|
||||
|
||||
fields = [
|
||||
# 字段列表,name 为字段名称,type 为字段类型
|
||||
{ name = "ts", type = "timestamp" },
|
||||
{ name = "states", type = "tinyint" },
|
||||
{ name = "battery_voltage", type = "float" },
|
||||
]
|
||||
|
||||
# other case
|
|
@ -0,0 +1,380 @@
|
|||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": 7,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"colorBackground": false,
|
||||
"colorValue": true,
|
||||
"colors": [
|
||||
"#299c46",
|
||||
"rgba(237, 129, 40, 0.89)",
|
||||
"#d44a3a"
|
||||
],
|
||||
"datasource": null,
|
||||
"format": "celsius",
|
||||
"gauge": {
|
||||
"maxValue": 100,
|
||||
"minValue": 0,
|
||||
"show": false,
|
||||
"thresholdLabels": false,
|
||||
"thresholdMarkers": true
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 6,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"mappingType": 1,
|
||||
"mappingTypes": [
|
||||
{
|
||||
"name": "value to text",
|
||||
"value": 1
|
||||
},
|
||||
{
|
||||
"name": "range to text",
|
||||
"value": 2
|
||||
}
|
||||
],
|
||||
"maxDataPoints": 100,
|
||||
"nullPointMode": "connected",
|
||||
"nullText": null,
|
||||
"options": {},
|
||||
"postfix": "",
|
||||
"postfixFontSize": "50%",
|
||||
"prefix": "",
|
||||
"prefixFontSize": "50%",
|
||||
"rangeMaps": [
|
||||
{
|
||||
"from": "null",
|
||||
"text": "N/A",
|
||||
"to": "null"
|
||||
}
|
||||
],
|
||||
"sparkline": {
|
||||
"fillColor": "rgba(31, 118, 189, 0.18)",
|
||||
"full": false,
|
||||
"lineColor": "rgb(31, 120, 193)",
|
||||
"show": true,
|
||||
"ymax": null,
|
||||
"ymin": null
|
||||
},
|
||||
"tableColumn": "",
|
||||
"targets": [
|
||||
{
|
||||
"alias": "lastest_temperature",
|
||||
"refId": "A",
|
||||
"sql": "select ts, temp from test.stream_temp_last where ts >= $from and ts < $to",
|
||||
"target": "select metric",
|
||||
"type": "timeserie"
|
||||
}
|
||||
],
|
||||
"thresholds": "20,30",
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "最新温度",
|
||||
"type": "singlestat",
|
||||
"valueFontSize": "80%",
|
||||
"valueMaps": [
|
||||
{
|
||||
"op": "=",
|
||||
"text": "N/A",
|
||||
"value": "null"
|
||||
}
|
||||
],
|
||||
"valueName": "current"
|
||||
},
|
||||
{
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"id": 8,
|
||||
"options": {
|
||||
"fieldOptions": {
|
||||
"calcs": [
|
||||
"last"
|
||||
],
|
||||
"defaults": {
|
||||
"decimals": 2,
|
||||
"mappings": [],
|
||||
"max": 100,
|
||||
"min": 0,
|
||||
"thresholds": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
],
|
||||
"title": ""
|
||||
},
|
||||
"override": {},
|
||||
"values": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"showThresholdLabels": false,
|
||||
"showThresholdMarkers": true
|
||||
},
|
||||
"pluginVersion": "6.4.3",
|
||||
"targets": [
|
||||
{
|
||||
"alias": "maxHumidity",
|
||||
"refId": "A",
|
||||
"sql": "select ts, humidity from test.stream_humidity_max where ts >= $from and ts < $to",
|
||||
"target": "select metric",
|
||||
"type": "timeserie"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "最大湿度",
|
||||
"type": "gauge"
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": true,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": null,
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 10,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 8
|
||||
},
|
||||
"id": 4,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": false,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"alias": "avgTemperature",
|
||||
"refId": "A",
|
||||
"sql": "select ts, temp from test.stream_temp_avg where ts >= $from and ts < $to",
|
||||
"target": "select metric",
|
||||
"type": "timeserie"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "平均温度",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "celsius",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": null,
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 10,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 8
|
||||
},
|
||||
"id": 10,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"alias": "max",
|
||||
"refId": "A",
|
||||
"sql": "select ts, max_temp from test.stream_sensor where ts >= $from and ts < $to",
|
||||
"target": "select metric",
|
||||
"type": "timeserie"
|
||||
},
|
||||
{
|
||||
"alias": "avg",
|
||||
"refId": "B",
|
||||
"sql": "select ts, avg_temp from test.stream_sensor where ts >= $from and ts < $to",
|
||||
"target": "select metric",
|
||||
"type": "timeserie"
|
||||
},
|
||||
{
|
||||
"alias": "min",
|
||||
"refId": "C",
|
||||
"sql": "select ts, min_temp from test.stream_sensor where ts >= $from and ts < $to",
|
||||
"target": "select metric",
|
||||
"type": "timeserie"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "某传感器",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "celsius",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
}
|
||||
],
|
||||
"refresh": "5s",
|
||||
"schemaVersion": 20,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-5m",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"refresh_intervals": [
|
||||
"5s",
|
||||
"10s",
|
||||
"30s",
|
||||
"1m",
|
||||
"5m",
|
||||
"15m",
|
||||
"30m",
|
||||
"1h",
|
||||
"2h",
|
||||
"1d"
|
||||
]
|
||||
},
|
||||
"timezone": "",
|
||||
"title": "sensor_info",
|
||||
"uid": "dGSoaTLWz",
|
||||
"version": 2
|
||||
}
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,66 @@
|
|||
package dataimport
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/pelletier/go-toml"
|
||||
)
|
||||
|
||||
var (
|
||||
cfg Config
|
||||
once sync.Once
|
||||
)
|
||||
|
||||
// Config inclue all scene import config
|
||||
type Config struct {
|
||||
UserCases map[string]CaseConfig
|
||||
}
|
||||
|
||||
// CaseConfig include the sample data config and tdengine config
|
||||
type CaseConfig struct {
|
||||
Format string
|
||||
FilePath string
|
||||
Separator string
|
||||
Stname string
|
||||
SubTableName string
|
||||
Timestamp string
|
||||
TimestampType string
|
||||
TimestampTypeFormat string
|
||||
Tags []FieldInfo
|
||||
Fields []FieldInfo
|
||||
}
|
||||
|
||||
// FieldInfo is field or tag info
|
||||
type FieldInfo struct {
|
||||
Name string
|
||||
Type string
|
||||
}
|
||||
|
||||
// LoadConfig will load the specified file config
|
||||
func LoadConfig(filePath string) Config {
|
||||
once.Do(func() {
|
||||
filePath, err := filepath.Abs(filePath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("parse toml file once. filePath: %s\n", filePath)
|
||||
tree, err := toml.LoadFile(filePath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
bytes, err := json.Marshal(tree.ToMap())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = json.Unmarshal(bytes, &cfg.UserCases)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
})
|
||||
return cfg
|
||||
}
|
|
@ -5,14 +5,39 @@
|
|||
# #
|
||||
########################################################
|
||||
|
||||
# master IP for TDengine system
|
||||
# masterIp 127.0.0.1
|
||||
|
||||
# Internal IP address of the server, which can be acquired by using ifconfig command.
|
||||
# internalIp 127.0.0.1
|
||||
# second IP for TDengine system, for cluster version only
|
||||
# secondIp 127.0.0.1
|
||||
|
||||
# IP address of the server
|
||||
# privateIp 127.0.0.1
|
||||
|
||||
# public IP of server, on which the tdengine are deployed
|
||||
# this IP is assigned by cloud service provider, for cluster version only
|
||||
# publicIp 127.0.0.1
|
||||
|
||||
# network is bound to 0.0.0.0
|
||||
# anyIp 1
|
||||
|
||||
# set socket type ("udp" and "tcp")
|
||||
# the server and client should have the same socket type. Otherwise, connect will fail
|
||||
# sockettype udp
|
||||
|
||||
# client local IP
|
||||
# localIp 127.0.0.1
|
||||
|
||||
# data file's directory
|
||||
# for the cluster version, data file's directory is configured this way
|
||||
# option mount_path tier_level
|
||||
# dataDir /mnt/disk1/taos 0
|
||||
# dataDir /mnt/disk2/taos 0
|
||||
# dataDir /mnt/disk3/taos 0
|
||||
# dataDir /mnt/disk4/taos 0
|
||||
# dataDir /mnt/disk5/taos 0
|
||||
# dataDir /mnt/disk6/taos 1
|
||||
# dataDir /mnt/disk7/taos 1
|
||||
# for the stand-alone version, data file's directory is configured this way
|
||||
# dataDir /var/lib/taos
|
||||
|
||||
# log file's directory
|
||||
|
@ -27,6 +52,18 @@
|
|||
# port for DNode connect to Client, default udp[6035-6039] tcp[6035]
|
||||
# vnodeShellPort 6035
|
||||
|
||||
# port for MNode connect to VNode, default udp[6040-6044] tcp[6040], for cluster version only
|
||||
# mgmtVnodePort 6040
|
||||
|
||||
# port for DNode connect to DNode, default tcp[6045], for cluster version only
|
||||
# vnodeVnodePort 6045
|
||||
|
||||
# port for MNode connect to MNode, default udp[6050], for cluster version only
|
||||
# mgmtMgmtPort 6050
|
||||
|
||||
# port sync file MNode and MNode, default tcp[6050], for cluster version only
|
||||
# mgmtSyncPort 6050
|
||||
|
||||
# number of threads per CPU core
|
||||
# numOfThreadsPerCore 1
|
||||
|
||||
|
@ -54,11 +91,7 @@
|
|||
# interval of system monitor
|
||||
# monitorInterval 60
|
||||
|
||||
# set socket type("udp" and "tcp").
|
||||
# The server and client should have the same socket type. Otherwise, connect will fail.
|
||||
# sockettype udp
|
||||
|
||||
# The compressed rpc message, option:
|
||||
# the compressed rpc message, option:
|
||||
# -1 (no compression)
|
||||
# 0 (all message compressed),
|
||||
# > 0 (rpc message body which larger than this value will be compressed)
|
||||
|
@ -73,12 +106,18 @@
|
|||
# commit interval,unit is second
|
||||
# ctime 3600
|
||||
|
||||
# interval of DNode report status to MNode, unit is Second
|
||||
# interval of DNode report status to MNode, unit is Second, for cluster version only
|
||||
# statusInterval 1
|
||||
|
||||
# interval of Shell send HB to MNode, unit is Second
|
||||
# shellActivityTimer 3
|
||||
|
||||
# interval of DNode send HB to DNode, unit is Second, for cluster version only
|
||||
# vnodePeerHBTimer 1
|
||||
|
||||
# interval of MNode send HB to MNode, unit is Second, for cluster version only
|
||||
# mgmtPeerHBTimer 1
|
||||
|
||||
# time to keep MeterMeta in Cache, seconds
|
||||
# meterMetaKeepTimer 7200
|
||||
|
||||
|
@ -94,6 +133,12 @@
|
|||
# max number of tables
|
||||
# maxTables 650000
|
||||
|
||||
# max number of Dnodes, for cluster version only
|
||||
# maxDnodes 1000
|
||||
|
||||
# Max number of VGroups, for cluster version only
|
||||
# maxVGroups 1000
|
||||
|
||||
# system locale
|
||||
# locale en_US.UTF-8
|
||||
|
||||
|
@ -118,6 +163,9 @@
|
|||
# number of days to keep DB file
|
||||
# keep 3650
|
||||
|
||||
# number of replications, for cluster version only
|
||||
# replications 1
|
||||
|
||||
# client default database(database should be created)
|
||||
# defaultDB
|
||||
|
||||
|
@ -139,18 +187,30 @@
|
|||
# max connection to Vnode
|
||||
# maxVnodeConnections 10000
|
||||
|
||||
# start http service in the cluster
|
||||
# mnode take into account while balance, for cluster version only
|
||||
# mgmtEqualVnodeNum 4
|
||||
|
||||
# number of seconds allowed for a dnode to be offline, for cluster version only
|
||||
# offlineThreshold 864000
|
||||
|
||||
# start http service
|
||||
# http 1
|
||||
|
||||
# start system monitor module in the cluster
|
||||
# start system monitor module
|
||||
# monitor 1
|
||||
|
||||
# maximum number of rows returned by the restful interface
|
||||
# restfulRowLimit 10240
|
||||
|
||||
# number of threads used to process http requests
|
||||
# httpMaxThreads 2
|
||||
|
||||
# pre-allocated number of http sessions
|
||||
# httpCacheSessions 100
|
||||
|
||||
# whether to enable HTTP compression transmission
|
||||
# httpEnableCompress 0
|
||||
|
||||
# whether the telegraf table name contains the number of tags and the number of fields
|
||||
# telegrafUseFieldNum 0
|
||||
|
||||
|
|
|
@ -1,16 +1,20 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate deb package for ubuntu
|
||||
# set -x
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
#curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
output_dir=$2
|
||||
tdengine_ver=$3
|
||||
armver=$4
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -m ${script_dir}/../..)"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
pkg_dir="${top_dir}/debworkroom"
|
||||
|
||||
#echo "curr_dir: ${curr_dir}"
|
||||
|
@ -64,15 +68,24 @@ debver="Version: "$tdengine_ver
|
|||
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
|
||||
|
||||
#get taos version, then set deb name
|
||||
if [ -z "$armver" ]; then
|
||||
debname="TDengine-"${tdengine_ver}".deb"
|
||||
elif [ "$armver" == "arm64" ]; then
|
||||
debname="TDengine-"${tdengine_ver}"-arm64.deb"
|
||||
elif [ "$armver" == "arm32" ]; then
|
||||
debname="TDengine-"${tdengine_ver}"-arm32.deb"
|
||||
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
|
||||
elif [ "$verMode" == "lite" ]; then
|
||||
debname="TDengine-server-edge"-${tdengine_ver}-${osType}-${cpuType}
|
||||
else
|
||||
echo "input parameter error!!!"
|
||||
return
|
||||
echo "unknow verMode, nor cluster or lite"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$verType" == "beta" ]; then
|
||||
debname=${debname}-${verType}".deb"
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
debname=${debname}".deb"
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# make deb package
|
||||
|
|
|
@ -5,11 +5,49 @@
|
|||
set -e
|
||||
# set -x
|
||||
|
||||
armver=$1
|
||||
# releash.sh -v [cluster | lite] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta]
|
||||
|
||||
# set parameters by default value
|
||||
verMode=lite # [cluster, lite]
|
||||
verType=stable # [stable, beta]
|
||||
cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
|
||||
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...]
|
||||
|
||||
while getopts "hv:V:c:o:" arg
|
||||
do
|
||||
case $arg in
|
||||
v)
|
||||
#echo "verMode=$OPTARG"
|
||||
verMode=$( echo $OPTARG )
|
||||
;;
|
||||
V)
|
||||
#echo "verType=$OPTARG"
|
||||
verType=$(echo $OPTARG)
|
||||
;;
|
||||
c)
|
||||
#echo "cpuType=$OPTARG"
|
||||
cpuType=$(echo $OPTARG)
|
||||
;;
|
||||
o)
|
||||
#echo "osType=$OPTARG"
|
||||
osType=$(echo $OPTARG)
|
||||
;;
|
||||
h)
|
||||
echo "Usage: `basename $0` -v [cluster | lite] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta]"
|
||||
exit 0
|
||||
;;
|
||||
?) #unknow option
|
||||
echo "unkonw argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType}"
|
||||
|
||||
curr_dir=$(pwd)
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -m ${script_dir}/..)"
|
||||
top_dir="$(readlink -f ${script_dir}/..)"
|
||||
versioninfo="${top_dir}/src/util/src/version.c"
|
||||
|
||||
csudo=""
|
||||
|
@ -111,9 +149,19 @@ echo "char compatible_version[64] = \"${compatible_version}\";" >> ${versioninfo
|
|||
echo "char gitinfo[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo}
|
||||
echo "char gitinfoOfInternal[128] = \"\";" >> ${versioninfo}
|
||||
echo "char buildinfo[512] = \"Built by ${USER} at ${build_time}\";" >> ${versioninfo}
|
||||
echo "" >> ${versioninfo}
|
||||
tmp_version=$(echo $version | tr -s "." "_")
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
libtaos_info=${tmp_version}_${osType}_${cpuType}
|
||||
else
|
||||
libtaos_info=edge_${tmp_version}_${osType}_${cpuType}
|
||||
fi
|
||||
if [ "$verType" == "beta" ]; then
|
||||
libtaos_info=${libtaos_info}_${verType}
|
||||
fi
|
||||
echo "void libtaos_${libtaos_info}() {};" >> ${versioninfo}
|
||||
|
||||
# 2. cmake executable file
|
||||
|
||||
compile_dir="${top_dir}/debug"
|
||||
if [ -d ${compile_dir} ]; then
|
||||
${csudo} rm -rf ${compile_dir}
|
||||
|
@ -122,16 +170,12 @@ fi
|
|||
${csudo} mkdir -p ${compile_dir}
|
||||
cd ${compile_dir}
|
||||
|
||||
# arm only support lite ver
|
||||
if [ -z "$armver" ]; then
|
||||
cmake ../
|
||||
elif [ "$armver" == "arm64" ]; then
|
||||
cmake ../ -DARMVER=arm64
|
||||
elif [ "$armver" == "arm32" ]; then
|
||||
cmake ../ -DARMVER=arm32
|
||||
# check support cpu type
|
||||
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
|
||||
cmake ../ -DCPUTYPE=${cpuType}
|
||||
else
|
||||
echo "input parameter error!!!"
|
||||
return
|
||||
echo "input cpuType=${cpuType} error!!!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
make
|
||||
|
@ -143,28 +187,28 @@ cd ${curr_dir}
|
|||
#osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
|
||||
#echo "osinfo: ${osinfo}"
|
||||
|
||||
echo "do deb package for the ubuntu system"
|
||||
echo "====do deb package for the ubuntu system===="
|
||||
output_dir="${top_dir}/debs"
|
||||
if [ -d ${output_dir} ]; then
|
||||
${csudo} rm -rf ${output_dir}
|
||||
fi
|
||||
${csudo} mkdir -p ${output_dir}
|
||||
cd ${script_dir}/deb
|
||||
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${armver}
|
||||
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
|
||||
echo "do rpm package for the centos system"
|
||||
echo "====do rpm package for the centos system===="
|
||||
output_dir="${top_dir}/rpms"
|
||||
if [ -d ${output_dir} ]; then
|
||||
${csudo} rm -rf ${output_dir}
|
||||
fi
|
||||
${csudo} mkdir -p ${output_dir}
|
||||
cd ${script_dir}/rpm
|
||||
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${armver}
|
||||
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
|
||||
echo "do tar.gz package for all systems"
|
||||
echo "====do tar.gz package for all systems===="
|
||||
cd ${script_dir}/tools
|
||||
${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${armver}
|
||||
${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${armver}
|
||||
${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
|
||||
${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
|
||||
|
||||
# 4. Clean up temporary compile directories
|
||||
#${csudo} rm -rf ${compile_dir}
|
||||
|
|
|
@ -9,10 +9,13 @@
|
|||
compile_dir=$1
|
||||
output_dir=$2
|
||||
tdengine_ver=$3
|
||||
armver=$4
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -m ${script_dir}/../..)"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
pkg_dir="${top_dir}/rpmworkroom"
|
||||
spec_file="${script_dir}/tdengine.spec"
|
||||
|
||||
|
@ -55,15 +58,30 @@ ${csudo} mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
|
|||
|
||||
${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
|
||||
|
||||
# copy rpm package to output_dir, then clean temp dir
|
||||
# copy rpm package to output_dir, and modify package name, then clean temp dir
|
||||
#${csudo} cp -rf RPMS/* ${output_dir}
|
||||
cp_rpm_package ${pkg_dir}/RPMS
|
||||
|
||||
if [ "$armver" == "arm64" ]; then
|
||||
mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/TDengine-${tdengine_ver}-arm64.rpm
|
||||
elif [ "$armver" == "arm32" ]; then
|
||||
mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/TDengine-${tdengine_ver}-arm32.rpm
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
rpmname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
|
||||
elif [ "$verMode" == "lite" ]; then
|
||||
rpmname="TDengine-server-edge"-${tdengine_ver}-${osType}-${cpuType}
|
||||
else
|
||||
echo "unknow verMode, nor cluster or lite"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$verType" == "beta" ]; then
|
||||
rpmname=${rpmname}-${verType}".rpm"
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
rpmname=${rpmname}".rpm"
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname}
|
||||
|
||||
cd ..
|
||||
${csudo} rm -rf ${pkg_dir}
|
||||
|
|
|
@ -0,0 +1,14 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install TAOS time-series database on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
|
||||
len=$(echo ${#OS})
|
||||
len=$((len-2))
|
||||
retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
|
||||
echo -ne $retval
|
|
@ -7,7 +7,7 @@ set -e
|
|||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir=$(dirname $(readlink -m "$0"))
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
# Dynamic directory
|
||||
data_dir="/var/lib/taos"
|
||||
log_dir="/var/log/taos"
|
||||
|
|
|
@ -7,7 +7,7 @@ set -e
|
|||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir=$(dirname $(readlink -m "$0"))
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
# Dynamic directory
|
||||
data_dir="/var/lib/taos"
|
||||
log_dir="/var/log/taos"
|
||||
|
|
|
@ -9,7 +9,7 @@ set -e
|
|||
# -----------------------Variables definition---------------------
|
||||
source_dir=$1
|
||||
binary_dir=$2
|
||||
script_dir=$(dirname $(readlink -m "$0"))
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
# Dynamic directory
|
||||
data_dir="/var/lib/taos"
|
||||
log_dir="/var/log/taos"
|
||||
|
|
|
@ -1,17 +1,20 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate tar.gz package for linux client
|
||||
# Generate tar.gz package for linux client in all os system
|
||||
set -e
|
||||
set -x
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
armver=$4
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -m ${script_dir}/../..)"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
|
@ -19,7 +22,7 @@ code_dir="${top_dir}/src"
|
|||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
install_dir="${release_dir}/TDengine-client-${version}"
|
||||
install_dir="${release_dir}/TDengine-client"
|
||||
|
||||
# Directories and files.
|
||||
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh"
|
||||
|
@ -42,12 +45,13 @@ cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install*
|
|||
|
||||
# Copy example code
|
||||
mkdir -p ${install_dir}/examples
|
||||
cp -r ${top_dir}/tests/examples/c ${install_dir}/examples
|
||||
cp -r ${top_dir}/tests/examples/JDBC ${install_dir}/examples
|
||||
cp -r ${top_dir}/tests/examples/matlab ${install_dir}/examples
|
||||
cp -r ${top_dir}/tests/examples/python ${install_dir}/examples
|
||||
cp -r ${top_dir}/tests/examples/R ${install_dir}/examples
|
||||
cp -r ${top_dir}/tests/examples/go ${install_dir}/examples
|
||||
examples_dir="${top_dir}/tests/examples"
|
||||
cp -r ${examples_dir}/c ${install_dir}/examples
|
||||
cp -r ${examples_dir}/JDBC ${install_dir}/examples
|
||||
cp -r ${examples_dir}/matlab ${install_dir}/examples
|
||||
cp -r ${examples_dir}/python ${install_dir}/examples
|
||||
cp -r ${examples_dir}/R ${install_dir}/examples
|
||||
cp -r ${examples_dir}/go ${install_dir}/examples
|
||||
|
||||
# Copy driver
|
||||
mkdir -p ${install_dir}/driver
|
||||
|
@ -67,12 +71,25 @@ cp -r ${connector_dir}/go ${install_dir}/connector
|
|||
# exit 1
|
||||
|
||||
cd ${release_dir}
|
||||
if [ -z "$armver" ]; then
|
||||
tar -zcv -f "$(basename ${install_dir}).tar.gz" $(basename ${install_dir}) --remove-files
|
||||
elif [ "$armver" == "arm64" ]; then
|
||||
tar -zcv -f "$(basename ${install_dir})-arm64.tar.gz" $(basename ${install_dir}) --remove-files
|
||||
elif [ "$armver" == "arm32" ]; then
|
||||
tar -zcv -f "$(basename ${install_dir})-arm32.tar.gz" $(basename ${install_dir}) --remove-files
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
|
||||
elif [ "$verMode" == "lite" ]; then
|
||||
pkg_name=${install_dir}-edge-${version}-${osType}-${cpuType}
|
||||
else
|
||||
echo "unknow verMode, nor cluster or lite"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$verType" == "beta" ]; then
|
||||
pkg_name=${pkg_name}-${verType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stable or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files
|
||||
|
||||
cd ${curr_dir}
|
||||
|
|
|
@ -1,15 +1,21 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate deb package for other os system (no unbutu or centos)
|
||||
# Generate tar.gz package for all os system
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
armver=$4
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -m ${script_dir}/../..)"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
|
@ -17,7 +23,7 @@ code_dir="${top_dir}/src"
|
|||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
install_dir="${release_dir}/TDengine-${version}"
|
||||
install_dir="${release_dir}/TDengine-server"
|
||||
|
||||
# Directories and files.
|
||||
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdump ${script_dir}/remove.sh"
|
||||
|
@ -78,12 +84,25 @@ cp -r ${connector_dir}/go ${install_dir}/connector
|
|||
# exit 1
|
||||
|
||||
cd ${release_dir}
|
||||
if [ -z "$armver" ]; then
|
||||
tar -zcv -f "$(basename ${install_dir}).tar.gz" $(basename ${install_dir}) --remove-files
|
||||
elif [ "$armver" == "arm64" ]; then
|
||||
tar -zcv -f "$(basename ${install_dir})-arm64.tar.gz" $(basename ${install_dir}) --remove-files
|
||||
elif [ "$armver" == "arm32" ]; then
|
||||
tar -zcv -f "$(basename ${install_dir})-arm32.tar.gz" $(basename ${install_dir}) --remove-files
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
|
||||
elif [ "$verMode" == "lite" ]; then
|
||||
pkg_name=${install_dir}-edge-${version}-${osType}-${cpuType}
|
||||
else
|
||||
echo "unknow verMode, nor cluster or lite"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$verType" == "beta" ]; then
|
||||
pkg_name=${pkg_name}-${verType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files
|
||||
|
||||
cd ${curr_dir}
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
# is required to use systemd to manage services at boot
|
||||
#set -x
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir=$(dirname $(readlink -m "$0"))
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
# Dynamic directory
|
||||
data_dir="/var/lib/taos"
|
||||
log_dir="/var/log/taos"
|
||||
|
|
|
@ -17,7 +17,7 @@ done
|
|||
declare -A dirHash
|
||||
|
||||
for linkFile in $(find -L $linkDir -xtype l); do
|
||||
targetFile=$(readlink -m $linkFile)
|
||||
targetFile=$(readlink -f $linkFile)
|
||||
echo "targetFile: ${targetFile}"
|
||||
# TODO : Extract directory part and basename part
|
||||
dirName=$(dirname $(dirname ${targetFile}))
|
||||
|
|
|
@ -51,7 +51,9 @@ ELSEIF (TD_WINDOWS_64)
|
|||
|
||||
# generate dynamic library (*.dll)
|
||||
ADD_LIBRARY(taos SHARED ${SRC})
|
||||
IF (NOT TD_GODLL)
|
||||
SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def)
|
||||
ENDIF ()
|
||||
TARGET_LINK_LIBRARIES(taos trpc)
|
||||
|
||||
ELSEIF (TD_DARWIN_64)
|
||||
|
|
|
@ -466,11 +466,17 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
|
|||
case TSDB_DATA_TYPE_BIGINT:
|
||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i]));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat) * ((float *)row[i]));
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
float fv = 0;
|
||||
fv = GET_FLOAT_VAL(row[i]);
|
||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat)fv);
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble) * ((double *)row[i]));
|
||||
case TSDB_DATA_TYPE_DOUBLE: {
|
||||
double dv = 0;
|
||||
dv = GET_DOUBLE_VAL(row[i]);
|
||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv);
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BINARY: {
|
||||
strncpy(tmp, row[i], (size_t) fields[i].bytes); // handle the case that terminated does not exist
|
||||
|
@ -615,11 +621,17 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI
|
|||
case TSDB_DATA_TYPE_BIGINT:
|
||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i]));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat) * ((float *)row[i]));
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
float fv = 0;
|
||||
fv = GET_FLOAT_VAL(row[i]);
|
||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat)fv);
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble) * ((double *)row[i]));
|
||||
case TSDB_DATA_TYPE_DOUBLE:{
|
||||
double dv = 0;
|
||||
dv = GET_DOUBLE_VAL(row[i]);
|
||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv);
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BINARY: {
|
||||
strncpy(tmp, row[i], (size_t) fields[i].bytes); // handle the case that terminated does not exist
|
||||
|
|
1411
src/client/src/sql.c
1411
src/client/src/sql.c
File diff suppressed because it is too large
Load Diff
|
@ -3817,9 +3817,9 @@ static void getStatics_f(int64_t *primaryKey, float *data, int32_t numOfRow, dou
|
|||
int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull) {
|
||||
float fmin = DBL_MAX;
|
||||
float fmax = -DBL_MAX;
|
||||
float fminIndex = 0;
|
||||
float fmaxIndex = 0;
|
||||
double dsum = 0;
|
||||
*minIndex = 0;
|
||||
*maxIndex = 0;
|
||||
|
||||
assert(numOfRow <= INT16_MAX);
|
||||
|
||||
|
@ -3830,18 +3830,16 @@ static void getStatics_f(int64_t *primaryKey, float *data, int32_t numOfRow, dou
|
|||
}
|
||||
|
||||
float fv = 0;
|
||||
*(int32_t*)(&fv) = *(int32_t*)(&(data[i]));
|
||||
|
||||
//*sum += data[i];
|
||||
fv = GET_FLOAT_VAL(&(data[i]));
|
||||
dsum += fv;
|
||||
if (fmin > fv) {
|
||||
fmin = fv;
|
||||
fminIndex = i;
|
||||
*minIndex = i;
|
||||
}
|
||||
|
||||
if (fmax < fv) {
|
||||
fmax = fv;
|
||||
fmaxIndex = i;
|
||||
*maxIndex = i;
|
||||
}
|
||||
|
||||
// if (isNull(&lastVal, TSDB_DATA_TYPE_FLOAT)) {
|
||||
|
@ -3855,24 +3853,26 @@ static void getStatics_f(int64_t *primaryKey, float *data, int32_t numOfRow, dou
|
|||
}
|
||||
|
||||
double csum = 0;
|
||||
*(int64_t*)(&csum) = *(int64_t*)sum;
|
||||
csum = GET_DOUBLE_VAL(sum);
|
||||
csum += dsum;
|
||||
*(int64_t*)(sum) = *(int64_t*)(&csum);
|
||||
|
||||
*(int32_t*)max = *(int32_t*)(&fmax);
|
||||
*(int32_t*)min = *(int32_t*)(&fmin);
|
||||
*(int32_t*)minIndex = *(int32_t*)(&fminIndex);
|
||||
*(int32_t*)maxIndex = *(int32_t*)(&fmaxIndex);
|
||||
|
||||
#ifdef _TD_ARM_32_
|
||||
SET_DOUBLE_VAL_ALIGN(sum, &csum);
|
||||
SET_DOUBLE_VAL_ALIGN(max, &fmax);
|
||||
SET_DOUBLE_VAL_ALIGN(min, &fmin);
|
||||
#else
|
||||
*sum = csum;
|
||||
*max = fmax;
|
||||
*min = fmin;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void getStatics_d(int64_t *primaryKey, double *data, int32_t numOfRow, double *min, double *max, double *sum,
|
||||
int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull) {
|
||||
double dmin = DBL_MAX;
|
||||
double dmax = -DBL_MAX;
|
||||
double dminIndex = 0;
|
||||
double dmaxIndex = 0;
|
||||
double dsum = 0;
|
||||
*minIndex = 0;
|
||||
*maxIndex = 0;
|
||||
|
||||
assert(numOfRow <= INT16_MAX);
|
||||
|
||||
|
@ -3883,18 +3883,16 @@ static void getStatics_d(int64_t *primaryKey, double *data, int32_t numOfRow, do
|
|||
}
|
||||
|
||||
double dv = 0;
|
||||
*(int64_t*)(&dv) = *(int64_t*)(&(data[i]));
|
||||
|
||||
//*sum += data[i];
|
||||
dv = GET_DOUBLE_VAL(&(data[i]));
|
||||
dsum += dv;
|
||||
if (dmin > dv) {
|
||||
dmin = dv;
|
||||
dminIndex = i;
|
||||
*minIndex = i;
|
||||
}
|
||||
|
||||
if (dmax < dv) {
|
||||
dmax = dv;
|
||||
dmaxIndex = i;
|
||||
*maxIndex = i;
|
||||
}
|
||||
|
||||
// if (isNull(&lastVal, TSDB_DATA_TYPE_DOUBLE)) {
|
||||
|
@ -3908,14 +3906,19 @@ static void getStatics_d(int64_t *primaryKey, double *data, int32_t numOfRow, do
|
|||
}
|
||||
|
||||
double csum = 0;
|
||||
*(int64_t*)(&csum) = *(int64_t*)sum;
|
||||
csum = GET_DOUBLE_VAL(sum);
|
||||
csum += dsum;
|
||||
*(int64_t*)(sum) = *(int64_t*)(&csum);
|
||||
|
||||
*(int64_t*)max = *(int64_t*)(&dmax);
|
||||
*(int64_t*)min = *(int64_t*)(&dmin);
|
||||
*(int64_t*)minIndex = *(int64_t*)(&dminIndex);
|
||||
*(int64_t*)maxIndex = *(int64_t*)(&dmaxIndex);
|
||||
|
||||
#ifdef _TD_ARM_32_
|
||||
SET_DOUBLE_VAL_ALIGN(sum, &csum);
|
||||
SET_DOUBLE_VAL_ALIGN(max, &dmax);
|
||||
SET_DOUBLE_VAL_ALIGN(min, &dmin);
|
||||
#else
|
||||
*sum = csum;
|
||||
*max = dmax;
|
||||
*min = dmin;
|
||||
#endif
|
||||
}
|
||||
|
||||
void getStatistics(char *priData, char *data, int32_t size, int32_t numOfRow, int32_t type, int64_t *min, int64_t *max,
|
||||
|
|
|
@ -39,25 +39,17 @@ static int32_t getToStringLength(const char *pData, int32_t length, int32_t type
|
|||
case TSDB_DATA_TYPE_NCHAR:
|
||||
return length;
|
||||
case TSDB_DATA_TYPE_DOUBLE: {
|
||||
#ifdef _TD_ARM_32_
|
||||
double dv = 0;
|
||||
*(int64_t *)(&dv) = *(int64_t *)pData;
|
||||
len = sprintf(buf, "%f", dv);
|
||||
#else
|
||||
len = sprintf(buf, "%lf", *(double *)pData);
|
||||
#endif
|
||||
dv = GET_DOUBLE_VAL(pData);
|
||||
len = sprintf(buf, "%lf", dv);
|
||||
if (strncasecmp("nan", buf, 3) == 0) {
|
||||
len = 4;
|
||||
}
|
||||
} break;
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
#ifdef _TD_ARM_32_
|
||||
float fv = 0;
|
||||
*(int32_t *)(&fv) = *(int32_t *)pData;
|
||||
fv = GET_FLOAT_VAL(pData);
|
||||
len = sprintf(buf, "%f", fv);
|
||||
#else
|
||||
len = sprintf(buf, "%f", *(float *)pData);
|
||||
#endif
|
||||
if (strncasecmp("nan", buf, 3) == 0) {
|
||||
len = 4;
|
||||
}
|
||||
|
@ -202,22 +194,14 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
|
|||
taosUcs4ToMbs(pTagValue, pSchema[i].bytes, target);
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
#ifdef _TD_ARM_32_
|
||||
float fv = 0;
|
||||
*(int32_t *)(&fv) = *(int32_t *)pTagValue;
|
||||
fv = GET_FLOAT_VAL(pTagValue);
|
||||
sprintf(target, "%f", fv);
|
||||
#else
|
||||
sprintf(target, "%f", *(float *)pTagValue);
|
||||
#endif
|
||||
} break;
|
||||
case TSDB_DATA_TYPE_DOUBLE: {
|
||||
#ifdef _TD_ARM_32_
|
||||
double dv = 0;
|
||||
*(int64_t *)(&dv) = *(int64_t *)pTagValue;
|
||||
dv = GET_DOUBLE_VAL(pTagValue);
|
||||
sprintf(target, "%lf", dv);
|
||||
#else
|
||||
sprintf(target, "%lf", *(double *)pTagValue);
|
||||
#endif
|
||||
} break;
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
sprintf(target, "%d", *(int8_t *)pTagValue);
|
||||
|
|
|
@ -36,7 +36,7 @@ enum {
|
|||
TSDB_USE_CLI_TS = 1,
|
||||
};
|
||||
|
||||
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize);
|
||||
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
|
||||
|
||||
static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) {
|
||||
int32_t numType = isValidNumber(pToken);
|
||||
|
@ -309,6 +309,10 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload,
|
|||
}
|
||||
|
||||
strncpy(payload, pToken->z, pToken->n);
|
||||
|
||||
if (pToken->n < pSchema->bytes) {
|
||||
payload[pToken->n] = 0; // add the null-terminated char if the length of the string is shorter than the available space
|
||||
}
|
||||
}
|
||||
|
||||
break;
|
||||
|
@ -515,14 +519,16 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
|
|||
|
||||
*str += index;
|
||||
if (numOfRows >= maxRows || pDataBlock->size + pMeterMeta->rowSize >= pDataBlock->nAllocSize) {
|
||||
int32_t tSize = tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize);
|
||||
if (0 == tSize) { // TODO pass the correct error code to client
|
||||
int32_t tSize;
|
||||
int32_t retcode = tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize, &tSize);
|
||||
if (retcode != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
|
||||
strcpy(error, "client out of memory");
|
||||
*code = TSDB_CODE_CLI_OUT_OF_MEMORY;
|
||||
*code = retcode;
|
||||
return -1;
|
||||
}
|
||||
|
||||
maxRows += tSize;
|
||||
assert(tSize > maxRows);
|
||||
maxRows = tSize;
|
||||
}
|
||||
|
||||
int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision, code, tmpTokenBuf);
|
||||
|
@ -567,7 +573,7 @@ static void tscSetAssignedColumnInfo(SParsedDataColInfo *spd, SSchema *pSchema,
|
|||
}
|
||||
}
|
||||
|
||||
int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize) {
|
||||
int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) {
|
||||
size_t remain = pDataBlock->nAllocSize - pDataBlock->size;
|
||||
const int factor = 5;
|
||||
uint32_t nAllocSizeOld = pDataBlock->nAllocSize;
|
||||
|
@ -587,11 +593,13 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize) {
|
|||
// assert(false);
|
||||
// do nothing
|
||||
pDataBlock->nAllocSize = nAllocSizeOld;
|
||||
return 0;
|
||||
*numOfRows = (int32_t)(pDataBlock->nAllocSize) / rowSize;
|
||||
return TSDB_CODE_CLI_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
return (int32_t)(pDataBlock->nAllocSize - pDataBlock->size) / rowSize;
|
||||
*numOfRows = (int32_t)(pDataBlock->nAllocSize) / rowSize;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void tsSetBlockInfo(SShellSubmitBlock *pBlocks, const SMeterMeta *pMeterMeta, int32_t numOfRows) {
|
||||
|
@ -658,8 +666,9 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char
|
|||
return ret;
|
||||
}
|
||||
|
||||
int32_t maxNumOfRows = tscAllocateMemIfNeed(dataBuf, pMeterMeta->rowSize);
|
||||
if (0 == maxNumOfRows) {
|
||||
int32_t maxNumOfRows;
|
||||
ret = tscAllocateMemIfNeed(dataBuf, pMeterMeta->rowSize, &maxNumOfRows);
|
||||
if (TSDB_CODE_SUCCESS != ret) {
|
||||
return TSDB_CODE_CLI_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -987,6 +996,9 @@ int doParseInsertSql(SSqlObj *pSql, char *str) {
|
|||
return code;
|
||||
}
|
||||
|
||||
ASSERT(((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList))
|
||||
|| ((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList)));
|
||||
|
||||
if ((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList)) {
|
||||
pSql->pTableHashList = taosInitHashTable(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
|
||||
|
||||
|
@ -996,6 +1008,7 @@ int doParseInsertSql(SSqlObj *pSql, char *str) {
|
|||
goto _error_clean;
|
||||
}
|
||||
} else {
|
||||
ASSERT((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList));
|
||||
str = pSql->asyncTblPos;
|
||||
}
|
||||
|
||||
|
@ -1048,10 +1061,16 @@ int doParseInsertSql(SSqlObj *pSql, char *str) {
|
|||
* interrupted position.
|
||||
*/
|
||||
if (fp != NULL) {
|
||||
if (TSDB_CODE_ACTION_IN_PROGRESS == code) {
|
||||
tscTrace("async insert and waiting to get meter meta, then continue parse sql: %s", pSql->asyncTblPos);
|
||||
return code;
|
||||
} else {
|
||||
goto _error_clean;
|
||||
}
|
||||
|
||||
tscTrace("async insert parse error, code:%d, %s", code, tsError[code]);
|
||||
pSql->asyncTblPos = NULL;
|
||||
}
|
||||
|
||||
goto _error_clean; // TODO: should _clean or _error_clean to async flow ????
|
||||
}
|
||||
|
||||
if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
|
||||
|
@ -1282,6 +1301,7 @@ int tsParseSql(SSqlObj *pSql, bool multiVnodeInsertion) {
|
|||
tscTrace("continue parse sql: %s", pSql->asyncTblPos);
|
||||
}
|
||||
|
||||
|
||||
if (tscIsInsertOrImportData(pSql->sqlstr)) {
|
||||
/*
|
||||
* only for async multi-vnode insertion
|
||||
|
@ -1350,7 +1370,7 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp, char *tmpTokenBuf) {
|
|||
char * line = NULL;
|
||||
size_t n = 0;
|
||||
int len = 0;
|
||||
uint32_t maxRows = 0;
|
||||
int32_t maxRows = 0;
|
||||
SSqlCmd * pCmd = &pSql->cmd;
|
||||
int numOfRows = 0;
|
||||
int32_t code = 0;
|
||||
|
@ -1369,8 +1389,8 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp, char *tmpTokenBuf) {
|
|||
|
||||
tscAppendDataBlock(pCmd->pDataBlocks, pTableDataBlock);
|
||||
|
||||
maxRows = tscAllocateMemIfNeed(pTableDataBlock, rowSize);
|
||||
if (maxRows < 1) return -1;
|
||||
code = tscAllocateMemIfNeed(pTableDataBlock, rowSize, &maxRows);
|
||||
if (TSDB_CODE_SUCCESS != code) return -1;
|
||||
|
||||
int count = 0;
|
||||
SParsedDataColInfo spd = {.numOfCols = pMeterMeta->numOfColumns};
|
||||
|
@ -1386,14 +1406,7 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp, char *tmpTokenBuf) {
|
|||
char *lineptr = line;
|
||||
strtolower(line, line);
|
||||
|
||||
if (numOfRows >= maxRows || pTableDataBlock->size + rowSize >= pTableDataBlock->nAllocSize) {
|
||||
uint32_t tSize = tscAllocateMemIfNeed(pTableDataBlock, rowSize);
|
||||
if (0 == tSize) return (-TSDB_CODE_CLI_OUT_OF_MEMORY);
|
||||
maxRows += tSize;
|
||||
}
|
||||
|
||||
len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision, &code,
|
||||
tmpTokenBuf);
|
||||
len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision, &code, tmpTokenBuf);
|
||||
if (len <= 0 || pTableDataBlock->numOfParams > 0) {
|
||||
pSql->res.code = code;
|
||||
return (-code);
|
||||
|
|
|
@ -121,11 +121,11 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
|
|||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
var->dKey = *(float*)tb->buffer;
|
||||
var->dKey = GET_FLOAT_VAL(tb->buffer);
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
var->dKey = *(double*)tb->buffer;
|
||||
var->dKey = GET_DOUBLE_VAL(tb->buffer);
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
|
|
|
@ -1698,7 +1698,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIdx, tSQLExprIt
|
|||
if (optr == TK_PERCENTILE || optr == TK_APERCENTILE) {
|
||||
tVariantDump(pVariant, val, TSDB_DATA_TYPE_DOUBLE);
|
||||
|
||||
double dp = *((double*)val);
|
||||
double dp = GET_DOUBLE_VAL(val);
|
||||
if (dp < 0 || dp > TOP_BOTTOM_QUERY_LIMIT) {
|
||||
return invalidSqlErrMsg(pQueryInfo->msg, msg5);
|
||||
}
|
||||
|
|
|
@ -188,7 +188,7 @@ void tscGetConnToMgmt(SSqlObj *pSql, uint8_t *pCode) {
|
|||
#ifdef CLUSTER
|
||||
connInit.peerIp = tscMgmtIpList.ipstr[pSql->index];
|
||||
#else
|
||||
connInit.peerIp = tsServerIpStr;
|
||||
connInit.peerIp = tsMasterIp;
|
||||
#endif
|
||||
thandle = taosOpenRpcConn(&connInit, pCode);
|
||||
}
|
||||
|
@ -296,7 +296,7 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) {
|
|||
connInit.peerId = htonl((pVPeersDesc[0].vnode << TSDB_SHELL_VNODE_BITS));
|
||||
connInit.shandle = pVnodeConn;
|
||||
connInit.ahandle = pSql;
|
||||
connInit.peerIp = tsServerIpStr;
|
||||
connInit.peerIp = tsMasterIp;
|
||||
connInit.peerPort = tsVnodeShellPort;
|
||||
thandle = taosOpenRpcConn(&connInit, pCode);
|
||||
vidIndex = (vidIndex + 1) % tscNumOfThreads;
|
||||
|
@ -485,8 +485,19 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) {
|
|||
if (code == 0) return pSql;
|
||||
msg = NULL;
|
||||
} else if (rspCode == TSDB_CODE_NOT_ACTIVE_TABLE || rspCode == TSDB_CODE_INVALID_TABLE_ID ||
|
||||
rspCode == TSDB_CODE_NOT_ACTIVE_VNODE || rspCode == TSDB_CODE_INVALID_VNODE_ID ||
|
||||
rspCode == TSDB_CODE_TABLE_ID_MISMATCH || rspCode == TSDB_CODE_NETWORK_UNAVAIL) {
|
||||
rspCode == TSDB_CODE_INVALID_VNODE_ID || rspCode == TSDB_CODE_NOT_ACTIVE_VNODE ||
|
||||
rspCode == TSDB_CODE_NETWORK_UNAVAIL || rspCode == TSDB_CODE_NOT_ACTIVE_SESSION ||
|
||||
rspCode == TSDB_CODE_TABLE_ID_MISMATCH) {
|
||||
/*
|
||||
* not_active_table: 1. the virtual node may fail to create table, since the procedure of create table is asynchronized,
|
||||
* the virtual node may have not create table till now, so try again by using the new metermeta.
|
||||
* 2. this requested table may have been removed by other client, so we need to renew the
|
||||
* metermeta here.
|
||||
*
|
||||
* not_active_vnode: current vnode is move to other node due to node balance procedure or virtual node have been
|
||||
* removed. So, renew metermeta and try again.
|
||||
* not_active_session: db has been move to other node, the vnode does not exist on this dnode anymore.
|
||||
*/
|
||||
#else
|
||||
if (rspCode == TSDB_CODE_NOT_ACTIVE_TABLE || rspCode == TSDB_CODE_INVALID_TABLE_ID ||
|
||||
rspCode == TSDB_CODE_NOT_ACTIVE_VNODE || rspCode == TSDB_CODE_INVALID_VNODE_ID ||
|
||||
|
@ -881,28 +892,26 @@ int tscProcessSql(SSqlObj *pSql) {
|
|||
return doProcessSql(pSql);
|
||||
}
|
||||
|
||||
static void doCleanupSubqueries(SSqlObj *pSql, int32_t vnodeIndex, int32_t numOfVnodes, SRetrieveSupport *pTrs,
|
||||
tOrderDescriptor *pDesc, tColModel *pModel, tExtMemBuffer **pMemoryBuf,
|
||||
SSubqueryState *pState) {
|
||||
pSql->cmd.command = TSDB_SQL_RETRIEVE_METRIC;
|
||||
pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY;
|
||||
static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs, SSubqueryState* pState) {
|
||||
assert(numOfSubs <= pSql->numOfSubs && numOfSubs >= 0 && pState != NULL);
|
||||
|
||||
/*
|
||||
* if i > 0, at least one sub query is issued, the allocated resource is
|
||||
* freed by it when subquery completed.
|
||||
*/
|
||||
if (vnodeIndex == 0) {
|
||||
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, numOfVnodes);
|
||||
tfree(pState);
|
||||
for(int32_t i = 0; i < numOfSubs; ++i) {
|
||||
SSqlObj* pSub = pSql->pSubs[i];
|
||||
assert(pSub != NULL);
|
||||
|
||||
if (pTrs != NULL) {
|
||||
tfree(pTrs->localBuffer);
|
||||
SRetrieveSupport* pSupport = pSub->param;
|
||||
|
||||
pthread_mutex_unlock(&pTrs->queryMutex);
|
||||
pthread_mutex_destroy(&pTrs->queryMutex);
|
||||
tfree(pTrs);
|
||||
}
|
||||
tfree(pSupport->localBuffer);
|
||||
|
||||
pthread_mutex_unlock(&pSupport->queryMutex);
|
||||
pthread_mutex_destroy(&pSupport->queryMutex);
|
||||
|
||||
tfree(pSupport);
|
||||
|
||||
tscFreeSqlObj(pSub);
|
||||
}
|
||||
|
||||
free(pState);
|
||||
}
|
||||
|
||||
int tscLaunchMetricSubQueries(SSqlObj *pSql) {
|
||||
|
@ -925,8 +934,8 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) {
|
|||
|
||||
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
|
||||
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
|
||||
int32_t numOfVnodes = pMeterMetaInfo->pMetricMeta->numOfVnodes;
|
||||
assert(numOfVnodes > 0);
|
||||
int32_t numOfSubQueries = pMeterMetaInfo->pMetricMeta->numOfVnodes;
|
||||
assert(numOfSubQueries > 0);
|
||||
|
||||
int32_t ret = tscLocalReducerEnvCreate(pSql, &pMemoryBuf, &pDesc, &pModel, nBufferSize);
|
||||
if (ret != 0) {
|
||||
|
@ -937,36 +946,33 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) {
|
|||
return pRes->code;
|
||||
}
|
||||
|
||||
pSql->pSubs = malloc(POINTER_BYTES * numOfVnodes);
|
||||
pSql->numOfSubs = numOfVnodes;
|
||||
pSql->pSubs = calloc(numOfSubQueries, POINTER_BYTES);
|
||||
pSql->numOfSubs = numOfSubQueries;
|
||||
|
||||
tscTrace("%p retrieved query data from %d vnode(s)", pSql, numOfVnodes);
|
||||
tscTrace("%p retrieved query data from %d vnode(s)", pSql, numOfSubQueries);
|
||||
SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
|
||||
pState->numOfTotal = numOfVnodes;
|
||||
pState->numOfTotal = numOfSubQueries;
|
||||
pRes->code = TSDB_CODE_SUCCESS;
|
||||
|
||||
for (int32_t i = 0; i < numOfVnodes; ++i) {
|
||||
if (pRes->code == TSDB_CODE_QUERY_CANCELLED || pRes->code == TSDB_CODE_CLI_OUT_OF_MEMORY) {
|
||||
/*
|
||||
* during launch sub queries, if the master query is cancelled. the remain is ignored and set the retrieveDoneRec
|
||||
* to the value of remaining not built sub-queries. So, the already issued sub queries can successfully free
|
||||
* allocated resources.
|
||||
*/
|
||||
pState->numOfCompleted = (numOfVnodes - i);
|
||||
doCleanupSubqueries(pSql, i, numOfVnodes, NULL, pDesc, pModel, pMemoryBuf, pState);
|
||||
|
||||
if (i == 0) {
|
||||
return pSql->res.code;
|
||||
}
|
||||
|
||||
int32_t i = 0;
|
||||
for (; i < numOfSubQueries; ++i) {
|
||||
SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport));
|
||||
if (trs == NULL) {
|
||||
tscError("%p failed to malloc buffer for SRetrieveSupport, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
|
||||
break;
|
||||
}
|
||||
|
||||
SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport));
|
||||
trs->pExtMemBuffer = pMemoryBuf;
|
||||
trs->pOrderDescriptor = pDesc;
|
||||
trs->pState = pState;
|
||||
|
||||
trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
|
||||
if (trs->localBuffer == NULL) {
|
||||
tscError("%p failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
|
||||
tfree(trs);
|
||||
break;
|
||||
}
|
||||
|
||||
trs->subqueryIndex = i;
|
||||
trs->pParentSqlObj = pSql;
|
||||
trs->pFinalColModel = pModel;
|
||||
|
@ -977,15 +983,10 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) {
|
|||
pthread_mutexattr_destroy(&mutexattr);
|
||||
|
||||
SSqlObj *pNew = tscCreateSqlObjForSubquery(pSql, trs, NULL);
|
||||
|
||||
if (pNew == NULL) {
|
||||
pState->numOfCompleted = (numOfVnodes - i);
|
||||
doCleanupSubqueries(pSql, i, numOfVnodes, trs, pDesc, pModel, pMemoryBuf, pState);
|
||||
|
||||
if (i == 0) {
|
||||
return pRes->code;
|
||||
}
|
||||
|
||||
tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
|
||||
tfree(trs->localBuffer);
|
||||
tfree(trs);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -995,8 +996,30 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) {
|
|||
pNewQueryInfo->tsBuf = tsBufClone(pQueryInfo->tsBuf);
|
||||
}
|
||||
|
||||
tscTrace("%p sub:%p launch subquery.orderOfSub:%d", pSql, pNew, trs->subqueryIndex);
|
||||
tscProcessSql(pNew);
|
||||
tscTrace("%p sub:%p create subquery success. orderOfSub:%d", pSql, pNew, trs->subqueryIndex);
|
||||
}
|
||||
|
||||
if (i < numOfSubQueries) {
|
||||
tscError("%p failed to prepare subquery structure and launch subqueries", pSql);
|
||||
pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
|
||||
|
||||
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, numOfSubQueries);
|
||||
doCleanupSubqueries(pSql, i, pState);
|
||||
return pRes->code; // free all allocated resource
|
||||
}
|
||||
|
||||
if (pRes->code == TSDB_CODE_QUERY_CANCELLED) {
|
||||
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, numOfSubQueries);
|
||||
doCleanupSubqueries(pSql, i, pState);
|
||||
return pRes->code;
|
||||
}
|
||||
|
||||
for(int32_t j = 0; j < numOfSubQueries; ++j) {
|
||||
SSqlObj* pSub = pSql->pSubs[j];
|
||||
SRetrieveSupport* pSupport = pSub->param;
|
||||
|
||||
tscTrace("%p sub:%p launch subquery, orderOfSub:%d.", pSql, pSub, pSupport->subqueryIndex);
|
||||
tscProcessSql(pSub);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -1047,10 +1070,13 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq
|
|||
int32_t subqueryIndex = trsupport->subqueryIndex;
|
||||
|
||||
assert(pSql != NULL);
|
||||
SSubqueryState* pState = trsupport->pState;
|
||||
assert(pState->numOfCompleted < pState->numOfTotal && pState->numOfCompleted >= 0 &&
|
||||
pPObj->numOfSubs == pState->numOfTotal);
|
||||
|
||||
/* retrieved in subquery failed. OR query cancelled in retrieve phase. */
|
||||
if (trsupport->pState->code == TSDB_CODE_SUCCESS && pPObj->res.code != TSDB_CODE_SUCCESS) {
|
||||
trsupport->pState->code = -(int)pPObj->res.code;
|
||||
if (pState->code == TSDB_CODE_SUCCESS && pPObj->res.code != TSDB_CODE_SUCCESS) {
|
||||
pState->code = -(int)pPObj->res.code;
|
||||
|
||||
/*
|
||||
* kill current sub-query connection, which may retrieve data from vnodes;
|
||||
|
@ -1059,15 +1085,15 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq
|
|||
pSql->res.numOfRows = 0;
|
||||
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; // disable retry efforts
|
||||
tscTrace("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%d", trsupport->pParentSqlObj, pSql,
|
||||
subqueryIndex, trsupport->pState->code);
|
||||
subqueryIndex, pState->code);
|
||||
}
|
||||
|
||||
if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query.
|
||||
tscTrace("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pPObj, pSql, numOfRows, subqueryIndex);
|
||||
tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pPObj, pSql,
|
||||
subqueryIndex, trsupport->pState->code);
|
||||
subqueryIndex, pState->code);
|
||||
} else {
|
||||
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && trsupport->pState->code == TSDB_CODE_SUCCESS) {
|
||||
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pState->code == TSDB_CODE_SUCCESS) {
|
||||
/*
|
||||
* current query failed, and the retry count is less than the available
|
||||
* count, retry query clear previous retrieved data, then launch a new sub query
|
||||
|
@ -1086,7 +1112,7 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq
|
|||
tscError("%p sub:%p failed to create new subquery sqlobj due to out of memory, abort retry",
|
||||
trsupport->pParentSqlObj, pSql);
|
||||
|
||||
trsupport->pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
|
||||
pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
|
||||
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
|
||||
return;
|
||||
}
|
||||
|
@ -1094,24 +1120,26 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq
|
|||
tscProcessSql(pNew);
|
||||
return;
|
||||
} else { // reach the maximum retry count, abort
|
||||
atomic_val_compare_exchange_32(&trsupport->pState->code, TSDB_CODE_SUCCESS, numOfRows);
|
||||
atomic_val_compare_exchange_32(&pState->code, TSDB_CODE_SUCCESS, numOfRows);
|
||||
tscError("%p sub:%p retrieve failed,code:%d,orderOfSub:%d failed.no more retry,set global code:%d", pPObj, pSql,
|
||||
numOfRows, subqueryIndex, trsupport->pState->code);
|
||||
numOfRows, subqueryIndex, pState->code);
|
||||
}
|
||||
}
|
||||
|
||||
if (atomic_add_fetch_32(&trsupport->pState->numOfCompleted, 1) < trsupport->pState->numOfTotal) {
|
||||
int32_t finished = atomic_add_fetch_32(&pState->numOfCompleted, 1);
|
||||
if (finished < pState->numOfTotal) {
|
||||
tscTrace("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pPObj, pSql, trsupport->subqueryIndex, finished);
|
||||
return tscFreeSubSqlObj(trsupport, pSql);
|
||||
}
|
||||
|
||||
// all subqueries are failed
|
||||
tscError("%p retrieve from %d vnode(s) completed,code:%d.FAILED.", pPObj, trsupport->pState->numOfTotal,
|
||||
trsupport->pState->code);
|
||||
pPObj->res.code = -(trsupport->pState->code);
|
||||
tscError("%p retrieve from %d vnode(s) completed,code:%d.FAILED.", pPObj, pState->numOfTotal,
|
||||
pState->code);
|
||||
pPObj->res.code = -(pState->code);
|
||||
|
||||
// release allocated resource
|
||||
tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, trsupport->pFinalColModel,
|
||||
trsupport->pState->numOfTotal);
|
||||
pState->numOfTotal);
|
||||
|
||||
tfree(trsupport->pState);
|
||||
tscFreeSubSqlObj(trsupport, pSql);
|
||||
|
@ -1151,10 +1179,14 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
|
|||
return;
|
||||
}
|
||||
|
||||
SSubqueryState* pState = trsupport->pState;
|
||||
assert(pState->numOfCompleted < pState->numOfTotal && pState->numOfCompleted >= 0 &&
|
||||
pPObj->numOfSubs == pState->numOfTotal);
|
||||
|
||||
// query process and cancel query process may execute at the same time
|
||||
pthread_mutex_lock(&trsupport->queryMutex);
|
||||
|
||||
if (numOfRows < 0 || trsupport->pState->code < 0 || pPObj->res.code != TSDB_CODE_SUCCESS) {
|
||||
if (numOfRows < 0 || pState->code < 0 || pPObj->res.code != TSDB_CODE_SUCCESS) {
|
||||
return tscHandleSubRetrievalError(trsupport, pSql, numOfRows);
|
||||
}
|
||||
|
||||
|
@ -1168,10 +1200,10 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
|
|||
|
||||
if (numOfRows > 0) {
|
||||
assert(pRes->numOfRows == numOfRows);
|
||||
atomic_add_fetch_64(&trsupport->pState->numOfRetrievedRows, numOfRows);
|
||||
atomic_add_fetch_64(&pState->numOfRetrievedRows, numOfRows);
|
||||
|
||||
tscTrace("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%d from ip:%u,vid:%d,orderOfSub:%d", pPObj, pSql,
|
||||
pRes->numOfRows, trsupport->pState->numOfRetrievedRows, pSvd->ip, pSvd->vnode, idx);
|
||||
pRes->numOfRows, pState->numOfRetrievedRows, pSvd->ip, pSvd->vnode, idx);
|
||||
|
||||
#ifdef _DEBUG_VIEW
|
||||
printf("received data from vnode: %d rows\n", pRes->numOfRows);
|
||||
|
@ -1230,7 +1262,9 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
|
|||
return tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_CLI_NO_DISKSPACE);
|
||||
}
|
||||
|
||||
if (atomic_add_fetch_32(&trsupport->pState->numOfCompleted, 1) < trsupport->pState->numOfTotal) {
|
||||
int32_t finished = atomic_add_fetch_32(&pState->numOfCompleted, 1);
|
||||
if (finished < pState->numOfTotal) {
|
||||
tscTrace("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pPObj, pSql, trsupport->subqueryIndex, finished);
|
||||
return tscFreeSubSqlObj(trsupport, pSql);
|
||||
}
|
||||
|
||||
|
@ -1238,12 +1272,12 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
|
|||
pDesc->pSchema->maxCapacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage;
|
||||
|
||||
tscTrace("%p retrieve from %d vnodes completed.final NumOfRows:%d,start to build loser tree", pPObj,
|
||||
trsupport->pState->numOfTotal, trsupport->pState->numOfCompleted);
|
||||
pState->numOfTotal, pState->numOfCompleted);
|
||||
|
||||
SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pPObj->cmd, 0);
|
||||
tscClearInterpInfo(pPQueryInfo);
|
||||
|
||||
tscCreateLocalReducer(trsupport->pExtMemBuffer, trsupport->pState->numOfTotal, pDesc, trsupport->pFinalColModel,
|
||||
tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfTotal, pDesc, trsupport->pFinalColModel,
|
||||
&pPObj->cmd, &pPObj->res);
|
||||
tscTrace("%p build loser tree completed", pPObj);
|
||||
|
||||
|
@ -1252,7 +1286,8 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
|
|||
pPObj->res.row = 0;
|
||||
|
||||
// only free once
|
||||
free(trsupport->pState);
|
||||
tfree(trsupport->pState);
|
||||
|
||||
tscFreeSubSqlObj(trsupport, pSql);
|
||||
|
||||
if (pPObj->fp == NULL) {
|
||||
|
@ -1346,13 +1381,17 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
|
|||
pSvd = &vnodeInfo->vpeerDesc[vnodeInfo->index];
|
||||
}
|
||||
|
||||
if (trsupport->pParentSqlObj->res.code != TSDB_CODE_SUCCESS || trsupport->pState->code != TSDB_CODE_SUCCESS) {
|
||||
SSubqueryState* pState = trsupport->pState;
|
||||
assert(pState->numOfCompleted < pState->numOfTotal && pState->numOfCompleted >= 0 &&
|
||||
trsupport->pParentSqlObj->numOfSubs == pState->numOfTotal);
|
||||
|
||||
if (trsupport->pParentSqlObj->res.code != TSDB_CODE_SUCCESS || pState->code != TSDB_CODE_SUCCESS) {
|
||||
// metric query is killed, Note: code must be less than 0
|
||||
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
|
||||
if (trsupport->pParentSqlObj->res.code != TSDB_CODE_SUCCESS) {
|
||||
code = -(int)(trsupport->pParentSqlObj->res.code);
|
||||
} else {
|
||||
code = trsupport->pState->code;
|
||||
code = pState->code;
|
||||
}
|
||||
tscTrace("%p query cancelled or failed, sub:%p, orderOfSub:%d abort, code:%d", trsupport->pParentSqlObj, pSql,
|
||||
trsupport->subqueryIndex, code);
|
||||
|
@ -1368,7 +1407,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
|
|||
if (code != TSDB_CODE_SUCCESS) {
|
||||
if (trsupport->numOfRetry++ >= MAX_NUM_OF_SUBQUERY_RETRY) {
|
||||
tscTrace("%p sub:%p reach the max retry count,set global code:%d", trsupport->pParentSqlObj, pSql, code);
|
||||
atomic_val_compare_exchange_32(&trsupport->pState->code, 0, code);
|
||||
atomic_val_compare_exchange_32(&pState->code, 0, code);
|
||||
} else { // does not reach the maximum retry count, go on
|
||||
tscTrace("%p sub:%p failed code:%d, retry:%d", trsupport->pParentSqlObj, pSql, code, trsupport->numOfRetry);
|
||||
|
||||
|
@ -1377,7 +1416,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
|
|||
tscError("%p sub:%p failed to create new subquery due to out of memory, abort retry, vid:%d, orderOfSub:%d",
|
||||
trsupport->pParentSqlObj, pSql, pSvd->vnode, trsupport->subqueryIndex);
|
||||
|
||||
trsupport->pState->code = -TSDB_CODE_CLI_OUT_OF_MEMORY;
|
||||
pState->code = -TSDB_CODE_CLI_OUT_OF_MEMORY;
|
||||
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
|
||||
} else {
|
||||
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
|
||||
|
@ -1388,17 +1427,17 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
|
|||
}
|
||||
}
|
||||
|
||||
if (trsupport->pState->code != TSDB_CODE_SUCCESS) { // failed, abort
|
||||
if (pState->code != TSDB_CODE_SUCCESS) { // failed, abort
|
||||
if (vnodeInfo != NULL) {
|
||||
tscTrace("%p sub:%p query failed,ip:%u,vid:%d,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql,
|
||||
vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode,
|
||||
trsupport->subqueryIndex, trsupport->pState->code);
|
||||
trsupport->subqueryIndex, pState->code);
|
||||
} else {
|
||||
tscTrace("%p sub:%p query failed,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql,
|
||||
trsupport->subqueryIndex, trsupport->pState->code);
|
||||
trsupport->subqueryIndex, pState->code);
|
||||
}
|
||||
|
||||
tscRetrieveFromVnodeCallBack(param, tres, trsupport->pState->code);
|
||||
tscRetrieveFromVnodeCallBack(param, tres, pState->code);
|
||||
} else { // success, proceed to retrieve data from dnode
|
||||
tscTrace("%p sub:%p query complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql,
|
||||
vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode,
|
||||
|
|
|
@ -70,8 +70,8 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const
|
|||
}
|
||||
#else
|
||||
if (ip && ip[0]) {
|
||||
if (ip != tsServerIpStr) {
|
||||
strcpy(tsServerIpStr, ip);
|
||||
if (ip != tsMasterIp) {
|
||||
strcpy(tsMasterIp, ip);
|
||||
}
|
||||
tsServerIp = inet_addr(ip);
|
||||
}
|
||||
|
@ -152,11 +152,7 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const
|
|||
|
||||
TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) {
|
||||
if (ip == NULL || (ip != NULL && (strcmp("127.0.0.1", ip) == 0 || strcasecmp("localhost", ip) == 0))) {
|
||||
#ifdef CLUSTER
|
||||
ip = tsMasterIp;
|
||||
#else
|
||||
ip = tsServerIpStr;
|
||||
#endif
|
||||
}
|
||||
tscTrace("try to create a connection to %s", ip);
|
||||
|
||||
|
@ -180,7 +176,7 @@ TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port,
|
|||
void *param, void **taos) {
|
||||
#ifndef CLUSTER
|
||||
if (ip == NULL) {
|
||||
ip = tsServerIpStr;
|
||||
ip = tsMasterIp;
|
||||
}
|
||||
#endif
|
||||
return taos_connect_imp(ip, user, pass, db, port, fp, param, taos);
|
||||
|
@ -613,9 +609,6 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
|
|||
TAOS_ROW rows = taos_fetch_row_impl(res);
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
if (rows == NULL) {
|
||||
int32_t k = 1;
|
||||
}
|
||||
while (rows == NULL && tscProjectionQueryOnSTable(pCmd, 0)) {
|
||||
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
|
||||
|
||||
|
@ -918,12 +911,18 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
|
|||
len += sprintf(str + len, "%" PRId64 " ", *((int64_t *)row[i]));
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
len += sprintf(str + len, "%f ", *((float *)row[i]));
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
float fv = 0;
|
||||
fv = GET_FLOAT_VAL(row[i]);
|
||||
len += sprintf(str + len, "%f ", fv);
|
||||
}
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
len += sprintf(str + len, "%lf ", *((double *)row[i]));
|
||||
case TSDB_DATA_TYPE_DOUBLE:{
|
||||
double dv = 0;
|
||||
dv = GET_DOUBLE_VAL(row[i]);
|
||||
len += sprintf(str + len, "%lf ", dv);
|
||||
}
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
|
|
|
@ -71,7 +71,7 @@ var GenericDatasource = exports.GenericDatasource = function () {
|
|||
var targets = _lodash2.default.map(options.targets, function (target) {
|
||||
return {
|
||||
refId: target.refId,
|
||||
alias: target.alias || "",
|
||||
alias: _this.generateAlias(options, target),
|
||||
sql: _this.generateSql(options, target)
|
||||
};
|
||||
});
|
||||
|
@ -112,6 +112,13 @@ var GenericDatasource = exports.GenericDatasource = function () {
|
|||
|
||||
return "Basic " + this.encode(defaultUser + ":" + defaultPassword);
|
||||
}
|
||||
}, {
|
||||
key: 'generateAlias',
|
||||
value: function generateAlias(options, target) {
|
||||
var alias = target.alias || "";
|
||||
alias = this.templateSrv.replace(alias, options.scopedVars, 'csv');
|
||||
return alias;
|
||||
}
|
||||
}, {
|
||||
key: 'generateSql',
|
||||
value: function generateSql(options, target) {
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -49,7 +49,7 @@ export class GenericDatasource {
|
|||
var targets = _.map(options.targets, target => {
|
||||
return {
|
||||
refId: target.refId,
|
||||
alias: target.alias || "",
|
||||
alias: this.generateAlias(options, target),
|
||||
sql: this.generateSql(options, target)
|
||||
};
|
||||
});
|
||||
|
@ -89,6 +89,12 @@ export class GenericDatasource {
|
|||
return "Basic " + this.encode(defaultUser + ":" + defaultPassword);
|
||||
}
|
||||
|
||||
generateAlias(options, target){
|
||||
var alias = target.alias || "";
|
||||
alias = this.templateSrv.replace(alias, options.scopedVars, 'csv');
|
||||
return alias;
|
||||
}
|
||||
|
||||
generateSql(options, target) {
|
||||
var sql = target.sql;
|
||||
if (sql == null || sql == ""){
|
||||
|
|
|
@ -108,6 +108,12 @@ cmd ::= SHOW dbPrefix(X) VGROUPS. {
|
|||
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0);
|
||||
}
|
||||
|
||||
cmd ::= SHOW dbPrefix(X) VGROUPS ids(Y). {
|
||||
SSQLToken token;
|
||||
setDBName(&token, &X);
|
||||
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &Y);
|
||||
}
|
||||
|
||||
//drop configure for tables
|
||||
cmd ::= DROP TABLE ifexists(Y) ids(X) cpxName(Z). {
|
||||
X.n += Z.n;
|
||||
|
|
|
@ -57,10 +57,16 @@ typedef struct taosField {
|
|||
char type;
|
||||
} TAOS_FIELD;
|
||||
|
||||
void taos_init();
|
||||
int taos_options(TSDB_OPTION option, const void *arg, ...);
|
||||
TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port);
|
||||
void taos_close(TAOS *taos);
|
||||
#ifdef _TD_GO_DLL_
|
||||
#define DLL_EXPORT __declspec(dllexport)
|
||||
#else
|
||||
#define DLL_EXPORT
|
||||
#endif
|
||||
|
||||
DLL_EXPORT void taos_init();
|
||||
DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...);
|
||||
DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port);
|
||||
DLL_EXPORT void taos_close(TAOS *taos);
|
||||
|
||||
typedef struct TAOS_BIND {
|
||||
int buffer_type;
|
||||
|
@ -80,18 +86,18 @@ int taos_stmt_execute(TAOS_STMT *stmt);
|
|||
TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt);
|
||||
int taos_stmt_close(TAOS_STMT *stmt);
|
||||
|
||||
int taos_query(TAOS *taos, const char *sql);
|
||||
TAOS_RES *taos_use_result(TAOS *taos);
|
||||
TAOS_ROW taos_fetch_row(TAOS_RES *res);
|
||||
int taos_result_precision(TAOS_RES *res); // get the time precision of result
|
||||
void taos_free_result(TAOS_RES *res);
|
||||
int taos_field_count(TAOS *taos);
|
||||
int taos_num_fields(TAOS_RES *res);
|
||||
int taos_affected_rows(TAOS *taos);
|
||||
TAOS_FIELD *taos_fetch_fields(TAOS_RES *res);
|
||||
int taos_select_db(TAOS *taos, const char *db);
|
||||
int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
|
||||
void taos_stop_query(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_query(TAOS *taos, const char *sql);
|
||||
DLL_EXPORT TAOS_RES *taos_use_result(TAOS *taos);
|
||||
DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_result_precision(TAOS_RES *res); // get the time precision of result
|
||||
DLL_EXPORT void taos_free_result(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_field_count(TAOS *taos);
|
||||
DLL_EXPORT int taos_num_fields(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_affected_rows(TAOS *taos);
|
||||
DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_select_db(TAOS *taos, const char *db);
|
||||
DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
|
||||
DLL_EXPORT void taos_stop_query(TAOS_RES *res);
|
||||
|
||||
int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows);
|
||||
int taos_validate_sql(TAOS *taos, const char *sql);
|
||||
|
@ -100,27 +106,27 @@ int taos_validate_sql(TAOS *taos, const char *sql);
|
|||
// TAOS_RES *taos_list_dbs(TAOS *mysql, const char *wild);
|
||||
|
||||
// TODO: the return value should be `const`
|
||||
char *taos_get_server_info(TAOS *taos);
|
||||
char *taos_get_client_info();
|
||||
char *taos_errstr(TAOS *taos);
|
||||
DLL_EXPORT char *taos_get_server_info(TAOS *taos);
|
||||
DLL_EXPORT char *taos_get_client_info();
|
||||
DLL_EXPORT char *taos_errstr(TAOS *taos);
|
||||
|
||||
int taos_errno(TAOS *taos);
|
||||
DLL_EXPORT int taos_errno(TAOS *taos);
|
||||
|
||||
void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param);
|
||||
void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
|
||||
void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);
|
||||
DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param);
|
||||
DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
|
||||
DLL_EXPORT void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);
|
||||
|
||||
TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, const char *db, const char *table, int64_t time, int mseconds);
|
||||
TAOS_ROW taos_consume(TAOS_SUB *tsub);
|
||||
void taos_unsubscribe(TAOS_SUB *tsub);
|
||||
int taos_subfields_count(TAOS_SUB *tsub);
|
||||
TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub);
|
||||
DLL_EXPORT TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, const char *db, const char *table, int64_t time, int mseconds);
|
||||
DLL_EXPORT TAOS_ROW taos_consume(TAOS_SUB *tsub);
|
||||
DLL_EXPORT void taos_unsubscribe(TAOS_SUB *tsub);
|
||||
DLL_EXPORT int taos_subfields_count(TAOS_SUB *tsub);
|
||||
DLL_EXPORT TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub);
|
||||
|
||||
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
|
||||
DLL_EXPORT TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
|
||||
int64_t stime, void *param, void (*callback)(void *));
|
||||
void taos_close_stream(TAOS_STREAM *tstr);
|
||||
DLL_EXPORT void taos_close_stream(TAOS_STREAM *tstr);
|
||||
|
||||
int taos_load_table_info(TAOS *taos, const char* tableNameList);
|
||||
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char* tableNameList);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -75,7 +75,6 @@ extern float tsNumOfThreadsPerCore;
|
|||
extern float tsRatioOfQueryThreads;
|
||||
extern char tsPublicIp[];
|
||||
extern char tsPrivateIp[];
|
||||
extern char tsServerIpStr[];
|
||||
extern short tsNumOfVnodesPerCore;
|
||||
extern short tsNumOfTotalVnodes;
|
||||
extern short tsCheckHeaderFile;
|
||||
|
@ -148,7 +147,6 @@ extern int tsHttpMaxThreads;
|
|||
extern int tsHttpEnableCompress;
|
||||
extern int tsHttpEnableRecordSql;
|
||||
extern int tsTelegrafUseFieldNum;
|
||||
extern int tsAdminRowLimit;
|
||||
|
||||
extern int tsTscEnableRecordSql;
|
||||
extern int tsAnyIp;
|
||||
|
|
|
@ -182,6 +182,10 @@ extern "C" {
|
|||
#define TSDB_MAX_AVG_BLOCKS 2048
|
||||
#define TSDB_DEFAULT_AVG_BLOCKS 4
|
||||
|
||||
/*
|
||||
* There is a bug in function taosAllocateId.
|
||||
* When "create database tables 1" is executed, the wrong sid is assigned, so the minimum value is set to 2.
|
||||
*/
|
||||
#define TSDB_MIN_TABLES_PER_VNODE 2
|
||||
#define TSDB_MAX_TABLES_PER_VNODE 220000
|
||||
|
||||
|
|
|
@ -164,6 +164,8 @@ int32_t taosInitTimer(void (*callback)(int), int32_t ms);
|
|||
|
||||
bool taosMbsToUcs4(char *mbs, int32_t mbs_len, char *ucs4, int32_t ucs4_max_len);
|
||||
|
||||
int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes);
|
||||
|
||||
bool taosUcs4ToMbs(void *ucs4, int32_t ucs4_max_len, char *mbs);
|
||||
|
||||
bool taosValidateEncodec(const char *encodec);
|
||||
|
|
|
@ -457,25 +457,15 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) {
|
|||
printf("%*" PRId64 "|", l[i], *((int64_t *)row[i]));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
#ifdef _TD_ARM_32_
|
||||
float fv = 0;
|
||||
//memcpy(&fv, row[i], sizeof(float));
|
||||
*(int32_t*)(&fv) = *(int32_t*)row[i];
|
||||
fv = GET_FLOAT_VAL(row[i]);
|
||||
printf("%*.5f|", l[i], fv);
|
||||
#else
|
||||
printf("%*.5f|", l[i], *((float *)row[i]));
|
||||
#endif
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_DOUBLE: {
|
||||
#ifdef _TD_ARM_32_
|
||||
double dv = 0;
|
||||
//memcpy(&dv, row[i], sizeof(double));
|
||||
*(int64_t*)(&dv) = *(int64_t*)row[i];
|
||||
dv = GET_DOUBLE_VAL(row[i]);
|
||||
printf("%*.9f|", l[i], dv);
|
||||
#else
|
||||
printf("%*.9f|", l[i], *((double *)row[i]));
|
||||
#endif
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
|
@ -542,25 +532,15 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) {
|
|||
printf("%" PRId64 "\n", *((int64_t *)row[i]));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
#ifdef _TD_ARM_32_
|
||||
float fv = 0;
|
||||
//memcpy(&fv, row[i], sizeof(float));
|
||||
*(int32_t*)(&fv) = *(int32_t*)row[i];
|
||||
fv = GET_FLOAT_VAL(row[i]);
|
||||
printf("%.5f\n", fv);
|
||||
#else
|
||||
printf("%.5f\n", *((float *)row[i]));
|
||||
#endif
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_DOUBLE: {
|
||||
#ifdef _TD_ARM_32_
|
||||
double dv = 0;
|
||||
//memcpy(&dv, row[i], sizeof(double));
|
||||
*(int64_t*)(&dv) = *(int64_t*)row[i];
|
||||
dv = GET_DOUBLE_VAL(row[i]);
|
||||
printf("%.9f\n", dv);
|
||||
#else
|
||||
printf("%.9f\n", *((double *)row[i]));
|
||||
#endif
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
|
@ -630,25 +610,15 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) {
|
|||
fprintf(fp, "%" PRId64, *((int64_t *)row[i]));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
#ifdef _TD_ARM_32_
|
||||
float fv = 0;
|
||||
//memcpy(&fv, row[i], sizeof(float));
|
||||
*(int32_t*)(&fv) = *(int32_t*)row[i];
|
||||
fv = GET_FLOAT_VAL(row[i]);
|
||||
fprintf(fp, "%.5f", fv);
|
||||
#else
|
||||
fprintf(fp, "%.5f", *((float *)row[i]));
|
||||
#endif
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_DOUBLE: {
|
||||
#ifdef _TD_ARM_32_
|
||||
double dv = 0;
|
||||
//memcpy(&dv, row[i], sizeof(double));
|
||||
*(int64_t*)(&dv) = *(int64_t*)row[i];
|
||||
dv = GET_DOUBLE_VAL(row[i]);
|
||||
fprintf(fp, "%.9f", dv);
|
||||
#else
|
||||
fprintf(fp, "%.9f", *((double *)row[i]));
|
||||
#endif
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
|
|
|
@ -130,7 +130,11 @@ void shellParseArgument(int argc, char *argv[], struct arguments *arguments) {
|
|||
|
||||
argp_parse(&argp, argc, argv, 0, 0, arguments);
|
||||
if (arguments->abort) {
|
||||
#ifndef _ALPINE
|
||||
error(10, 0, "ABORTED");
|
||||
#else
|
||||
abort();
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -17,7 +17,10 @@
|
|||
|
||||
#include <argp.h>
|
||||
#include <assert.h>
|
||||
|
||||
#ifndef _ALPINE
|
||||
#include <error.h>
|
||||
#endif
|
||||
#include <pthread.h>
|
||||
#include <semaphore.h>
|
||||
#include <stdbool.h>
|
||||
|
@ -309,7 +312,13 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
argp_parse(&argp, argc, argv, 0, 0, &arguments);
|
||||
|
||||
if (arguments.abort) error(10, 0, "ABORTED");
|
||||
if (arguments.abort) {
|
||||
#ifndef _ALPINE
|
||||
error(10, 0, "ABORTED");
|
||||
#else
|
||||
abort();
|
||||
#endif
|
||||
}
|
||||
|
||||
enum MODE query_mode = arguments.mode;
|
||||
char *ip_addr = arguments.host;
|
||||
|
@ -342,6 +351,11 @@ int main(int argc, char *argv[]) {
|
|||
}
|
||||
|
||||
FILE *fp = fopen(arguments.output_file, "a");
|
||||
if (NULL == fp) {
|
||||
fprintf(stderr, "Failed to open %s for writing\n", arguments.output_file);
|
||||
return 1;
|
||||
};
|
||||
|
||||
time_t tTime = time(NULL);
|
||||
struct tm tm = *localtime(&tTime);
|
||||
|
||||
|
@ -833,7 +847,7 @@ void generateData(char *res, char **data_type, int num_of_cols, int64_t timestam
|
|||
} else if (strcasecmp(data_type[i % c], "binary") == 0) {
|
||||
char s[len_of_binary];
|
||||
rand_string(s, len_of_binary);
|
||||
pstr += sprintf(pstr, ", %s", s);
|
||||
pstr += sprintf(pstr, ", \"%s\"", s);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -15,7 +15,9 @@
|
|||
|
||||
#include <argp.h>
|
||||
#include <assert.h>
|
||||
#include <error.h>
|
||||
#ifndef _ALPINE
|
||||
#include <error.h>
|
||||
#endif
|
||||
#include <fcntl.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
|
@ -335,7 +337,13 @@ int main(int argc, char *argv[]) {
|
|||
reflected in arguments. */
|
||||
argp_parse(&argp, argc, argv, 0, 0, &arguments);
|
||||
|
||||
if (arguments.abort) error(10, 0, "ABORTED");
|
||||
if (arguments.abort) {
|
||||
#ifndef _ALPINE
|
||||
error(10, 0, "ABORTED");
|
||||
#else
|
||||
abort();
|
||||
#endif
|
||||
}
|
||||
|
||||
if (taosCheckParam(&arguments) < 0) {
|
||||
exit(EXIT_FAILURE);
|
||||
|
|
|
@ -209,9 +209,14 @@ typedef int(*__compar_fn_t)(const void *, const void *);
|
|||
#define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE
|
||||
#endif
|
||||
|
||||
#ifndef _TD_ARM_32_
|
||||
#define BUILDIN_CLZL(val) __builtin_clzl(val)
|
||||
#define BUILDIN_CLZ(val) __builtin_clz(val)
|
||||
#define BUILDIN_CTZL(val) __builtin_ctzl(val)
|
||||
#else
|
||||
#define BUILDIN_CLZL(val) __builtin_clzll(val)
|
||||
#define BUILDIN_CTZL(val) __builtin_ctzll(val)
|
||||
#endif
|
||||
#define BUILDIN_CLZ(val) __builtin_clz(val)
|
||||
#define BUILDIN_CTZ(val) __builtin_ctz(val)
|
||||
|
||||
#endif
|
|
@ -23,7 +23,10 @@ extern "C" {
|
|||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include <error.h>
|
||||
#ifndef _ALPINE
|
||||
#include <error.h>
|
||||
#endif
|
||||
|
||||
#include <argp.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <assert.h>
|
||||
|
@ -229,9 +232,22 @@ void taosSetCoreDump();
|
|||
|
||||
void taosBlockSIGPIPE();
|
||||
|
||||
#ifdef _ALPINE
|
||||
typedef int(*__compar_fn_t)(const void *, const void *);
|
||||
void error (int, int, const char *);
|
||||
#ifndef PTHREAD_MUTEX_RECURSIVE_NP
|
||||
#define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef _TD_ARM_32_
|
||||
#define BUILDIN_CLZL(val) __builtin_clzl(val)
|
||||
#define BUILDIN_CLZ(val) __builtin_clz(val)
|
||||
#define BUILDIN_CTZL(val) __builtin_ctzl(val)
|
||||
#else
|
||||
#define BUILDIN_CLZL(val) __builtin_clzll(val)
|
||||
#define BUILDIN_CTZL(val) __builtin_ctzll(val)
|
||||
#endif
|
||||
#define BUILDIN_CLZ(val) __builtin_clz(val)
|
||||
#define BUILDIN_CTZ(val) __builtin_ctz(val)
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -234,8 +234,15 @@ void *taosProcessAlarmSignal(void *tharg) {
|
|||
|
||||
timer_t timerId;
|
||||
struct sigevent sevent;
|
||||
|
||||
#ifdef _ALPINE
|
||||
sevent.sigev_notify = SIGEV_THREAD;
|
||||
sevent.sigev_value.sival_int = syscall(__NR_gettid);
|
||||
#else
|
||||
sevent.sigev_notify = SIGEV_THREAD_ID;
|
||||
sevent._sigev_un._tid = syscall(__NR_gettid);
|
||||
#endif
|
||||
|
||||
sevent.sigev_signo = SIGALRM;
|
||||
|
||||
if (timer_create(CLOCK_REALTIME, &sevent, &timerId) == -1) {
|
||||
|
|
|
@ -27,12 +27,15 @@
|
|||
#include <unistd.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#ifdef _ALPINE
|
||||
#include <linux/sysctl.h>
|
||||
#else
|
||||
#include <sys/sysctl.h>
|
||||
#endif
|
||||
|
||||
#include "tglobalcfg.h"
|
||||
#include "tlog.h"
|
||||
|
|
|
@ -38,9 +38,12 @@
|
|||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <time.h>
|
||||
#include <inttypes.h>
|
||||
#include "winsock2.h"
|
||||
#include <WS2tcpip.h>
|
||||
|
||||
#include <winbase.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
@ -74,7 +77,13 @@ extern "C" {
|
|||
#define strncasecmp _strnicmp
|
||||
#define wcsncasecmp _wcsnicmp
|
||||
#define strtok_r strtok_s
|
||||
#define str2int64 _atoi64
|
||||
#ifdef _TD_GO_DLL_
|
||||
int64_t str2int64(char *str);
|
||||
uint64_t htonll(uint64_t val);
|
||||
#else
|
||||
#define str2int64 _atoi64
|
||||
#endif
|
||||
|
||||
#define snprintf _snprintf
|
||||
#define in_addr_t unsigned long
|
||||
#define socklen_t int
|
||||
|
@ -135,7 +144,12 @@ extern "C" {
|
|||
#define atomic_exchange_64(ptr, val) _InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val))
|
||||
#define atomic_exchange_ptr(ptr, val) _InterlockedExchangePointer((void* volatile*)(ptr), (void*)(val))
|
||||
|
||||
#define atomic_val_compare_exchange_8(ptr, oldval, newval) _InterlockedCompareExchange8((char volatile*)(ptr), (char)(newval), (char)(oldval))
|
||||
#ifdef _TD_GO_DLL_
|
||||
#define atomic_val_compare_exchange_8 __sync_val_compare_and_swap
|
||||
#else
|
||||
#define atomic_val_compare_exchange_8(ptr, oldval, newval) _InterlockedCompareExchange8((char volatile*)(ptr), (char)(newval), (char)(oldval))
|
||||
#endif
|
||||
|
||||
#define atomic_val_compare_exchange_16(ptr, oldval, newval) _InterlockedCompareExchange16((short volatile*)(ptr), (short)(newval), (short)(oldval))
|
||||
#define atomic_val_compare_exchange_32(ptr, oldval, newval) _InterlockedCompareExchange((long volatile*)(ptr), (long)(newval), (long)(oldval))
|
||||
#define atomic_val_compare_exchange_64(ptr, oldval, newval) _InterlockedCompareExchange64((__int64 volatile*)(ptr), (__int64)(newval), (__int64)(oldval))
|
||||
|
@ -155,9 +169,14 @@ __int64 interlocked_add_fetch_64(__int64 volatile *ptr, __int64 val);
|
|||
#else
|
||||
#define atomic_add_fetch_ptr atomic_add_fetch_32
|
||||
#endif
|
||||
#ifdef _TD_GO_DLL_
|
||||
#define atomic_fetch_add_8 __sync_fetch_and_ad
|
||||
#define atomic_fetch_add_16 __sync_fetch_and_add
|
||||
#else
|
||||
#define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val))
|
||||
#endif
|
||||
|
||||
#define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val))
|
||||
#define atomic_fetch_add_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), (long)(val))
|
||||
#define atomic_fetch_add_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), (__int64)(val))
|
||||
#ifdef _WIN64
|
||||
|
@ -185,14 +204,17 @@ __int64 interlocked_add_fetch_64(__int64 volatile *ptr, __int64 val);
|
|||
#else
|
||||
#define atomic_fetch_sub_ptr atomic_fetch_sub_32
|
||||
#endif
|
||||
|
||||
char interlocked_and_fetch_8(char volatile* ptr, char val);
|
||||
short interlocked_and_fetch_16(short volatile* ptr, short val);
|
||||
#ifndef _TD_GO_DLL_
|
||||
char interlocked_and_fetch_8(char volatile* ptr, char val);
|
||||
short interlocked_and_fetch_16(short volatile* ptr, short val);
|
||||
#endif
|
||||
long interlocked_and_fetch_32(long volatile* ptr, long val);
|
||||
__int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val);
|
||||
|
||||
#define atomic_and_fetch_8(ptr, val) interlocked_and_fetch_8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_and_fetch_16(ptr, val) interlocked_and_fetch_16((short volatile*)(ptr), (short)(val))
|
||||
#ifndef _TD_GO_DLL_
|
||||
#define atomic_and_fetch_8(ptr, val) interlocked_and_fetch_8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_and_fetch_16(ptr, val) interlocked_and_fetch_16((short volatile*)(ptr), (short)(val))
|
||||
#endif
|
||||
#define atomic_and_fetch_32(ptr, val) interlocked_and_fetch_32((long volatile*)(ptr), (long)(val))
|
||||
#define atomic_and_fetch_64(ptr, val) interlocked_and_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
|
||||
#ifdef _WIN64
|
||||
|
@ -200,9 +222,10 @@ __int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val);
|
|||
#else
|
||||
#define atomic_and_fetch_ptr atomic_and_fetch_32
|
||||
#endif
|
||||
|
||||
#define atomic_fetch_and_8(ptr, val) _InterlockedAnd8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_fetch_and_16(ptr, val) _InterlockedAnd16((short volatile*)(ptr), (short)(val))
|
||||
#ifndef _TD_GO_DLL_
|
||||
#define atomic_fetch_and_8(ptr, val) _InterlockedAnd8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_fetch_and_16(ptr, val) _InterlockedAnd16((short volatile*)(ptr), (short)(val))
|
||||
#endif
|
||||
#define atomic_fetch_and_32(ptr, val) _InterlockedAnd((long volatile*)(ptr), (long)(val))
|
||||
|
||||
#ifdef _M_IX86
|
||||
|
@ -217,14 +240,17 @@ __int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val);
|
|||
#else
|
||||
#define atomic_fetch_and_ptr atomic_fetch_and_32
|
||||
#endif
|
||||
|
||||
char interlocked_or_fetch_8(char volatile* ptr, char val);
|
||||
short interlocked_or_fetch_16(short volatile* ptr, short val);
|
||||
#ifndef _TD_GO_DLL_
|
||||
char interlocked_or_fetch_8(char volatile* ptr, char val);
|
||||
short interlocked_or_fetch_16(short volatile* ptr, short val);
|
||||
#endif
|
||||
long interlocked_or_fetch_32(long volatile* ptr, long val);
|
||||
__int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val);
|
||||
|
||||
#define atomic_or_fetch_8(ptr, val) interlocked_or_fetch_8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_or_fetch_16(ptr, val) interlocked_or_fetch_16((short volatile*)(ptr), (short)(val))
|
||||
#ifndef _TD_GO_DLL_
|
||||
#define atomic_or_fetch_8(ptr, val) interlocked_or_fetch_8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_or_fetch_16(ptr, val) interlocked_or_fetch_16((short volatile*)(ptr), (short)(val))
|
||||
#endif
|
||||
#define atomic_or_fetch_32(ptr, val) interlocked_or_fetch_32((long volatile*)(ptr), (long)(val))
|
||||
#define atomic_or_fetch_64(ptr, val) interlocked_or_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
|
||||
#ifdef _WIN64
|
||||
|
@ -232,9 +258,10 @@ __int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val);
|
|||
#else
|
||||
#define atomic_or_fetch_ptr atomic_or_fetch_32
|
||||
#endif
|
||||
|
||||
#define atomic_fetch_or_8(ptr, val) _InterlockedOr8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_fetch_or_16(ptr, val) _InterlockedOr16((short volatile*)(ptr), (short)(val))
|
||||
#ifndef _TD_GO_DLL_
|
||||
#define atomic_fetch_or_8(ptr, val) _InterlockedOr8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_fetch_or_16(ptr, val) _InterlockedOr16((short volatile*)(ptr), (short)(val))
|
||||
#endif
|
||||
#define atomic_fetch_or_32(ptr, val) _InterlockedOr((long volatile*)(ptr), (long)(val))
|
||||
|
||||
#ifdef _M_IX86
|
||||
|
@ -250,13 +277,17 @@ __int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val);
|
|||
#define atomic_fetch_or_ptr atomic_fetch_or_32
|
||||
#endif
|
||||
|
||||
char interlocked_xor_fetch_8(char volatile* ptr, char val);
|
||||
short interlocked_xor_fetch_16(short volatile* ptr, short val);
|
||||
#ifndef _TD_GO_DLL_
|
||||
char interlocked_xor_fetch_8(char volatile* ptr, char val);
|
||||
short interlocked_xor_fetch_16(short volatile* ptr, short val);
|
||||
#endif
|
||||
long interlocked_xor_fetch_32(long volatile* ptr, long val);
|
||||
__int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val);
|
||||
|
||||
#define atomic_xor_fetch_8(ptr, val) interlocked_xor_fetch_8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_xor_fetch_16(ptr, val) interlocked_xor_fetch_16((short volatile*)(ptr), (short)(val))
|
||||
#ifndef _TD_GO_DLL_
|
||||
#define atomic_xor_fetch_8(ptr, val) interlocked_xor_fetch_8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_xor_fetch_16(ptr, val) interlocked_xor_fetch_16((short volatile*)(ptr), (short)(val))
|
||||
#endif
|
||||
#define atomic_xor_fetch_32(ptr, val) interlocked_xor_fetch_32((long volatile*)(ptr), (long)(val))
|
||||
#define atomic_xor_fetch_64(ptr, val) interlocked_xor_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
|
||||
#ifdef _WIN64
|
||||
|
@ -265,8 +296,10 @@ __int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val);
|
|||
#define atomic_xor_fetch_ptr atomic_xor_fetch_32
|
||||
#endif
|
||||
|
||||
#define atomic_fetch_xor_8(ptr, val) _InterlockedXor8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_fetch_xor_16(ptr, val) _InterlockedXor16((short volatile*)(ptr), (short)(val))
|
||||
#ifndef _TD_GO_DLL_
|
||||
#define atomic_fetch_xor_8(ptr, val) _InterlockedXor8((char volatile*)(ptr), (char)(val))
|
||||
#define atomic_fetch_xor_16(ptr, val) _InterlockedXor16((short volatile*)(ptr), (short)(val))
|
||||
#endif
|
||||
#define atomic_fetch_xor_32(ptr, val) _InterlockedXor((long volatile*)(ptr), (long)(val))
|
||||
|
||||
#ifdef _M_IX86
|
||||
|
@ -292,7 +325,11 @@ __int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val);
|
|||
#define MAX(a,b) (((a)>(b))?(a):(b))
|
||||
#define MIN(a,b) (((a)<(b))?(a):(b))
|
||||
|
||||
#define MILLISECOND_PER_SECOND (1000i64)
|
||||
#ifdef _TD_GO_DLL_
|
||||
#define MILLISECOND_PER_SECOND (1000LL)
|
||||
#else
|
||||
#define MILLISECOND_PER_SECOND (1000i64)
|
||||
#endif
|
||||
|
||||
#define tsem_t sem_t
|
||||
#define tsem_init sem_init
|
||||
|
|
|
@ -28,6 +28,10 @@
|
|||
#include "tsdb.h"
|
||||
#include "tglobalcfg.h"
|
||||
|
||||
#include <intrin.h>
|
||||
#include <winbase.h>
|
||||
#include <Winsock2.h>
|
||||
|
||||
char configDir[TSDB_FILENAME_LEN] = "C:/TDengine/cfg";
|
||||
char tsDirectory[TSDB_FILENAME_LEN] = "C:/TDengine/data";
|
||||
char logDir[TSDB_FILENAME_LEN] = "C:/TDengine/log";
|
||||
|
@ -68,11 +72,19 @@ int taosSetSockOpt(int socketfd, int level, int optname, void *optval, int optle
|
|||
|
||||
// add
|
||||
char interlocked_add_fetch_8(char volatile* ptr, char val) {
|
||||
#ifdef _TD_GO_DLL_
|
||||
return __sync_fetch_and_add(ptr, val) + val;
|
||||
#else
|
||||
return _InterlockedExchangeAdd8(ptr, val) + val;
|
||||
#endif
|
||||
}
|
||||
|
||||
short interlocked_add_fetch_16(short volatile* ptr, short val) {
|
||||
#ifdef _TD_GO_DLL_
|
||||
return __sync_fetch_and_add(ptr, val) + val;
|
||||
#else
|
||||
return _InterlockedExchangeAdd16(ptr, val) + val;
|
||||
#endif
|
||||
}
|
||||
|
||||
long interlocked_add_fetch_32(long volatile* ptr, long val) {
|
||||
|
@ -84,6 +96,7 @@ __int64 interlocked_add_fetch_64(__int64 volatile* ptr, __int64 val) {
|
|||
}
|
||||
|
||||
// and
|
||||
#ifndef _TD_GO_DLL_
|
||||
char interlocked_and_fetch_8(char volatile* ptr, char val) {
|
||||
return _InterlockedAnd8(ptr, val) & val;
|
||||
}
|
||||
|
@ -91,6 +104,7 @@ char interlocked_and_fetch_8(char volatile* ptr, char val) {
|
|||
short interlocked_and_fetch_16(short volatile* ptr, short val) {
|
||||
return _InterlockedAnd16(ptr, val) & val;
|
||||
}
|
||||
#endif
|
||||
|
||||
long interlocked_and_fetch_32(long volatile* ptr, long val) {
|
||||
return _InterlockedAnd(ptr, val) & val;
|
||||
|
@ -124,6 +138,7 @@ __int64 interlocked_fetch_and_64(__int64 volatile* ptr, __int64 val) {
|
|||
#endif
|
||||
|
||||
// or
|
||||
#ifndef _TD_GO_DLL_
|
||||
char interlocked_or_fetch_8(char volatile* ptr, char val) {
|
||||
return _InterlockedOr8(ptr, val) | val;
|
||||
}
|
||||
|
@ -131,7 +146,7 @@ char interlocked_or_fetch_8(char volatile* ptr, char val) {
|
|||
short interlocked_or_fetch_16(short volatile* ptr, short val) {
|
||||
return _InterlockedOr16(ptr, val) | val;
|
||||
}
|
||||
|
||||
#endif
|
||||
long interlocked_or_fetch_32(long volatile* ptr, long val) {
|
||||
return _InterlockedOr(ptr, val) | val;
|
||||
}
|
||||
|
@ -164,6 +179,7 @@ __int64 interlocked_fetch_or_64(__int64 volatile* ptr, __int64 val) {
|
|||
#endif
|
||||
|
||||
// xor
|
||||
#ifndef _TD_GO_DLL_
|
||||
char interlocked_xor_fetch_8(char volatile* ptr, char val) {
|
||||
return _InterlockedXor8(ptr, val) ^ val;
|
||||
}
|
||||
|
@ -171,7 +187,7 @@ char interlocked_xor_fetch_8(char volatile* ptr, char val) {
|
|||
short interlocked_xor_fetch_16(short volatile* ptr, short val) {
|
||||
return _InterlockedXor16(ptr, val) ^ val;
|
||||
}
|
||||
|
||||
#endif
|
||||
long interlocked_xor_fetch_32(long volatile* ptr, long val) {
|
||||
return _InterlockedXor(ptr, val) ^ val;
|
||||
}
|
||||
|
@ -397,3 +413,15 @@ char *strndup(const char *s, size_t n) {
|
|||
}
|
||||
|
||||
void taosSetCoreDump() {}
|
||||
|
||||
#ifdef _TD_GO_DLL_
|
||||
int64_t str2int64(char *str) {
|
||||
char *endptr = NULL;
|
||||
return strtoll(str, &endptr, 10);
|
||||
}
|
||||
|
||||
uint64_t htonll(uint64_t val)
|
||||
{
|
||||
return (((uint64_t) htonl(val)) << 32) + htonl(val >> 32);
|
||||
}
|
||||
#endif
|
|
@ -906,7 +906,10 @@ int taosProcessMsgHeader(STaosHeader *pHeader, SRpcConn **ppConn, STaosRpc *pSer
|
|||
}
|
||||
|
||||
if (taosAuthenticateMsg((uint8_t *)pHeader, dataLen - TSDB_AUTH_LEN, pDigest->auth, pConn->secret) < 0) {
|
||||
tTrace("%s cid:%d sid:%d id:%s, authentication failed, msg discarded pConn:%p", pServer->label, chann, sid,
|
||||
char ipstr[24];
|
||||
tinet_ntoa(ipstr, ip);
|
||||
mLError("user:%s login from %s, authentication failed", pHeader->meterId, ipstr);
|
||||
tError("%s cid:%d sid:%d id:%s, authentication failed, msg discarded pConn:%p", pServer->label, chann, sid,
|
||||
pConn->meterId, pConn);
|
||||
code = TSDB_CODE_AUTH_FAILURE;
|
||||
goto _exit;
|
||||
|
|
|
@ -447,6 +447,7 @@ void *taosInitTcpServer(char *ip, uint16_t port, char *label, int numOfThreads,
|
|||
return (void *)pServerObj;
|
||||
}
|
||||
|
||||
#if 0
|
||||
void taosListTcpConnection(void *handle, char *buffer) {
|
||||
SServerObj *pServerObj;
|
||||
SThreadObj *pThreadObj;
|
||||
|
@ -468,7 +469,7 @@ void taosListTcpConnection(void *handle, char *buffer) {
|
|||
msg = msg + strlen(msg);
|
||||
pFdObj = pThreadObj->pHead;
|
||||
while (pFdObj) {
|
||||
sprintf(" ip:%s port:%hu\n", pFdObj->ipstr, pFdObj->port);
|
||||
sprintf(msg, " ip:%s port:%hu\n", pFdObj->ipstr, pFdObj->port);
|
||||
msg = msg + strlen(msg);
|
||||
numOfFds++;
|
||||
numOfConns++;
|
||||
|
@ -486,6 +487,7 @@ void taosListTcpConnection(void *handle, char *buffer) {
|
|||
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
int taosSendTcpServerData(uint32_t ip, uint16_t port, char *data, int len, void *chandle) {
|
||||
SFdObj *pFdObj = (SFdObj *)chandle;
|
||||
|
|
|
@ -807,20 +807,22 @@ void sdbResetTable(SSdbTable *pTable) {
|
|||
int bytes;
|
||||
int total_size = 0;
|
||||
int real_size = 0;
|
||||
int64_t oldId;
|
||||
SRowHead *rowHead = NULL;
|
||||
void * pMetaRow = NULL;
|
||||
int64_t oldId = pTable->id;
|
||||
int oldNumOfRows = pTable->numOfRows;
|
||||
|
||||
oldId = pTable->id;
|
||||
if (sdbOpenSdbFile(pTable) < 0) return;
|
||||
pTable->numOfRows = oldNumOfRows;
|
||||
|
||||
total_size = sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM);
|
||||
rowHead = (SRowHead *)malloc(total_size);
|
||||
if (rowHead == NULL) {
|
||||
sdbError("failed to allocate row head memory for reset, sdb:%s", pTable->name);
|
||||
return;
|
||||
}
|
||||
|
||||
sdbTrace("open sdb file:%s for update", pTable->fn);
|
||||
sdbPrint("open sdb file:%s for reset table", pTable->fn);
|
||||
|
||||
while (1) {
|
||||
memset(rowHead, 0, total_size);
|
||||
|
@ -841,15 +843,15 @@ void sdbResetTable(SSdbTable *pTable) {
|
|||
}
|
||||
|
||||
if (rowHead->rowSize < 0 || rowHead->rowSize > pTable->maxRowSize) {
|
||||
sdbError("error row size in sdb file:%s rowSize:%d maxRowSize:%d", pTable->fn, rowHead->rowSize,
|
||||
pTable->maxRowSize);
|
||||
sdbError("error row size in sdb file:%s for reset, id:%d rowSize:%d maxRowSize:%d",
|
||||
pTable->fn, rowHead->id, rowHead->rowSize, pTable->maxRowSize);
|
||||
pTable->size += sizeof(SRowHead);
|
||||
continue;
|
||||
}
|
||||
|
||||
bytes = read(pTable->fd, rowHead->data, rowHead->rowSize + sizeof(TSCKSUM));
|
||||
if (bytes < rowHead->rowSize + sizeof(TSCKSUM)) {
|
||||
sdbError("failed to read sdb file:%s id:%d rowSize:%d", pTable->fn, rowHead->id, rowHead->rowSize);
|
||||
sdbError("failed to read sdb file:%s for reset, id:%d rowSize:%d", pTable->fn, rowHead->id, rowHead->rowSize);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -897,7 +899,7 @@ void sdbResetTable(SSdbTable *pTable) {
|
|||
|
||||
tfree(rowHead);
|
||||
|
||||
sdbTrace("table:%s is updated, sdbVerion:%ld id:%ld", pTable->name, sdbVersion, pTable->id);
|
||||
sdbPrint("table:%s is updated, sdbVerion:%ld id:%ld", pTable->name, sdbVersion, pTable->id);
|
||||
}
|
||||
|
||||
// TODO:A problem here :use snapshot file to sync another node will cause
|
||||
|
|
|
@ -21,6 +21,7 @@ extern "C" {
|
|||
#endif
|
||||
|
||||
#include <stdint.h>
|
||||
#include <pthread.h>
|
||||
|
||||
#define tsetModuleStatus(mod) \
|
||||
{ tsModuleStatus |= (1 << mod); }
|
||||
|
|
|
@ -419,7 +419,7 @@ int mgmtRetrieveVnodes(SShowObj *pShow, char *data, int rows, SConnObj *pConn);
|
|||
int mgmtInitBalance();
|
||||
void mgmtCleanupBalance();
|
||||
int mgmtAllocVnodes(SVgObj *pVgroup);
|
||||
void mgmtSetDnodeShellRemoving(SDnodeObj *pDnode);
|
||||
int mgmtSetDnodeShellRemoving(SDnodeObj *pDnode);
|
||||
void mgmtSetDnodeUnRemove(SDnodeObj *pDnode);
|
||||
void mgmtStartBalanceTimer(int64_t mseconds);
|
||||
void mgmtSetDnodeOfflineOnSdbChanged();
|
||||
|
|
|
@ -668,6 +668,11 @@ int mgmtRetrieveDbs(SShowObj *pShow, char *data, int rows, SConnObj *pConn) {
|
|||
pDb = (SDbObj *)pShow->pNode;
|
||||
if (pDb == NULL) break;
|
||||
pShow->pNode = (void *)pDb->next;
|
||||
if (mgmtCheckIsMonitorDB(pDb->name, tsMonitorDbName)) {
|
||||
if (strcmp(pConn->pUser->user, "root") != 0 && strcmp(pConn->pUser->user, "_root") != 0 && strcmp(pConn->pUser->user, "monitor") != 0 ) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
cols = 0;
|
||||
|
||||
|
|
|
@ -435,9 +435,16 @@ int mgmtGetVnodeMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) {
|
|||
return TSDB_CODE_NODE_OFFLINE;
|
||||
}
|
||||
|
||||
pShow->numOfRows = pDnode->openVnodes;
|
||||
pShow->pNode = pDnode;
|
||||
SVnodeLoad* pVnode;
|
||||
pShow->numOfRows = 0;
|
||||
for (int i = 0 ; i < TSDB_MAX_VNODES; i++) {
|
||||
pVnode = &pDnode->vload[i];
|
||||
if (0 != pVnode->vgId) {
|
||||
pShow->numOfRows++;
|
||||
}
|
||||
}
|
||||
|
||||
pShow->pNode = pDnode;
|
||||
} else {
|
||||
while (true) {
|
||||
pShow->pNode = mgmtGetNextDnode(pShow, (SDnodeObj **)&pDnode);
|
||||
|
|
|
@ -488,6 +488,7 @@ int mgmtSendCfgDnodeMsg(char *cont) {
|
|||
return code;
|
||||
}
|
||||
|
||||
#ifdef CLUSTER
|
||||
pStart = taosBuildReqMsg(pDnode->thandle, TSDB_MSG_TYPE_CFG_PNODE);
|
||||
if (pStart == NULL) return TSDB_CODE_NODE_OFFLINE;
|
||||
pMsg = pStart;
|
||||
|
@ -497,6 +498,8 @@ int mgmtSendCfgDnodeMsg(char *cont) {
|
|||
|
||||
msgLen = pMsg - pStart;
|
||||
taosSendMsgToDnode(pDnode, pStart, msgLen);
|
||||
|
||||
#else
|
||||
(void)tsCfgDynamicOptions(pCfg->config);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1270,6 +1270,11 @@ int mgmtRetrieveMeters(SShowObj *pShow, char *data, int rows, SConnObj *pConn) {
|
|||
if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name);
|
||||
|
||||
if (pDb == NULL) return 0;
|
||||
if (mgmtCheckIsMonitorDB(pDb->name, tsMonitorDbName)) {
|
||||
if (strcmp(pConn->pUser->user, "root") != 0 && strcmp(pConn->pUser->user, "_root") != 0 && strcmp(pConn->pUser->user, "monitor") != 0 ) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
strcpy(prefix, pDb->name);
|
||||
strcat(prefix, TS_PATH_DELIMITER);
|
||||
|
@ -1387,6 +1392,16 @@ int mgmtRetrieveMetrics(SShowObj *pShow, char *data, int rows, SConnObj *pConn)
|
|||
char * pWrite;
|
||||
int cols = 0;
|
||||
|
||||
SDbObj *pDb = NULL;
|
||||
if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name);
|
||||
|
||||
if (pDb == NULL) return 0;
|
||||
if (mgmtCheckIsMonitorDB(pDb->name, tsMonitorDbName)) {
|
||||
if (strcmp(pConn->pUser->user, "root") != 0 && strcmp(pConn->pUser->user, "_root") != 0 && strcmp(pConn->pUser->user, "monitor") != 0 ) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;
|
||||
|
||||
char metricName[TSDB_METER_NAME_LEN] = {0};
|
||||
|
|
|
@ -734,8 +734,11 @@ int mgmtProcessAlterUserMsg(char *pMsg, int msgLen, SConnObj *pConn) {
|
|||
|
||||
if ((pAlter->flag & TSDB_ALTER_USER_PRIVILEGES) != 0) {
|
||||
bool hasRight = false;
|
||||
|
||||
if (strcmp(pUser->user, "root") == 0) {
|
||||
hasRight = false;
|
||||
} else if (strcmp(pUser->user, pUser->acct) == 0) {
|
||||
hasRight = false;
|
||||
} else if (strcmp(pOperUser->user, "root") == 0) {
|
||||
hasRight = true;
|
||||
} else if (strcmp(pUser->user, pOperUser->user) == 0) {
|
||||
|
@ -750,12 +753,15 @@ int mgmtProcessAlterUserMsg(char *pMsg, int msgLen, SConnObj *pConn) {
|
|||
}
|
||||
}
|
||||
|
||||
if (hasRight) {
|
||||
if ((pAlter->flag & TSDB_ALTER_USER_PRIVILEGES) != 0) {
|
||||
if (pAlter->privilege == 1) { // super
|
||||
pUser->superAuth = 1;
|
||||
pUser->writeAuth = 1;
|
||||
hasRight = false;
|
||||
}
|
||||
|
||||
if (hasRight) {
|
||||
//if (pAlter->privilege == 1) { // super
|
||||
// pUser->superAuth = 1;
|
||||
// pUser->writeAuth = 1;
|
||||
//}
|
||||
if (pAlter->privilege == 2) { // read
|
||||
pUser->superAuth = 0;
|
||||
pUser->writeAuth = 0;
|
||||
|
@ -764,7 +770,7 @@ int mgmtProcessAlterUserMsg(char *pMsg, int msgLen, SConnObj *pConn) {
|
|||
pUser->superAuth = 0;
|
||||
pUser->writeAuth = 1;
|
||||
}
|
||||
}
|
||||
|
||||
code = mgmtUpdateUser(pUser);
|
||||
mLPrint("user:%s privilege is altered by %s, code:%d", pAlter->user, pConn->pUser->user, code);
|
||||
} else {
|
||||
|
|
|
@ -238,11 +238,25 @@ int mgmtGetVgroupMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) {
|
|||
cols++;
|
||||
|
||||
int maxReplica = 0;
|
||||
SVgObj *pVgroup = NULL;
|
||||
STabObj *pMeter = NULL;
|
||||
if (pShow->payloadLen > 0 ) {
|
||||
pMeter = mgmtGetMeter(pShow->payload);
|
||||
if (NULL == pMeter) {
|
||||
return TSDB_CODE_INVALID_METER_ID;
|
||||
}
|
||||
|
||||
pVgroup = mgmtGetVgroup(pMeter->gid.vgId);
|
||||
if (NULL == pVgroup) return TSDB_CODE_INVALID_METER_ID;
|
||||
|
||||
maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica;
|
||||
} else {
|
||||
SVgObj *pVgroup = pDb->pHead;
|
||||
while (pVgroup != NULL) {
|
||||
maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica;
|
||||
pVgroup = pVgroup->next;
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < maxReplica; ++i) {
|
||||
pShow->bytes[cols] = 16;
|
||||
|
@ -276,9 +290,15 @@ int mgmtGetVgroupMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) {
|
|||
pShow->offset[0] = 0;
|
||||
for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1];
|
||||
|
||||
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
|
||||
|
||||
if (NULL == pMeter) {
|
||||
pShow->numOfRows = pDb->numOfVgroups;
|
||||
pShow->pNode = pDb->pHead;
|
||||
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
|
||||
} else {
|
||||
pShow->numOfRows = 1;
|
||||
pShow->pNode = pVgroup;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -579,7 +579,12 @@ static int vnodeCloseImportFiles(SMeterObj *pObj, SImportHandle *pHandle) {
|
|||
SVnodeObj *pVnode = vnodeList + pObj->vnode;
|
||||
char dpath[TSDB_FILENAME_LEN] = "\0";
|
||||
SCompInfo compInfo;
|
||||
|
||||
#ifdef _ALPINE
|
||||
off_t offset = 0;
|
||||
#else
|
||||
__off_t offset = 0;
|
||||
#endif
|
||||
|
||||
if (pVnode->nfd > 0) {
|
||||
offset = lseek(pVnode->nfd, 0, SEEK_CUR);
|
||||
|
|
|
@ -563,8 +563,8 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi
|
|||
|
||||
if (numOfPoints >= (pVnode->cfg.blocksPerMeter - 2) * pObj->pointsPerBlock) {
|
||||
code = TSDB_CODE_BATCH_SIZE_TOO_BIG;
|
||||
dError("vid:%d sid:%d id:%s, batch size too big, it shall be smaller than:%d", pObj->vnode, pObj->sid,
|
||||
pObj->meterId, (pVnode->cfg.blocksPerMeter - 2) * pObj->pointsPerBlock);
|
||||
dError("vid:%d sid:%d id:%s, batch size too big, insert points:%d, it shall be smaller than:%d", pObj->vnode, pObj->sid,
|
||||
pObj->meterId, numOfPoints, (pVnode->cfg.blocksPerMeter - 2) * pObj->pointsPerBlock);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
|
|
@ -3380,10 +3380,8 @@ void forwardQueryStartPosition(SQueryRuntimeEnv *pRuntimeEnv) {
|
|||
updateOffsetVal(pRuntimeEnv, &blockInfo, pBlock);
|
||||
} else {
|
||||
pQuery->limit.offset -= maxReads;
|
||||
|
||||
// update the lastkey, since the following skip operation may traverse to another media. update the lastkey first.
|
||||
pQuery->lastKey = (QUERY_IS_ASC_QUERY(pQuery))? blockInfo.keyLast+1:blockInfo.keyFirst-1;
|
||||
|
||||
doSkipDataBlock(pRuntimeEnv);
|
||||
}
|
||||
}
|
||||
|
@ -4289,8 +4287,8 @@ static int32_t moveToNextBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __bl
|
|||
|
||||
return DISK_DATA_LOADED;
|
||||
} else {
|
||||
pQuery->pos = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->pBlock[pQuery->slot].numOfPoints - 1;
|
||||
pQuery->slot = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->numOfBlocks - 1;
|
||||
pQuery->pos = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->pBlock[pQuery->slot].numOfPoints - 1;
|
||||
}
|
||||
} else { // next block in the same file
|
||||
int32_t fid = pQuery->fileId;
|
||||
|
|
|
@ -52,7 +52,7 @@ const char* taosGetVnodeStatusStr(int32_t vnodeStatus) {
|
|||
|
||||
const char* taosGetVnodeSyncStatusStr(int32_t vnodeSyncStatus) {
|
||||
switch (vnodeSyncStatus) {
|
||||
case TSDB_VN_SYNC_STATUS_INIT: return "init";
|
||||
case TSDB_VN_SYNC_STATUS_INIT: return "ready";
|
||||
case TSDB_VN_SYNC_STATUS_SYNCING: return "syncing";
|
||||
case TSDB_VN_SYNC_STATUS_SYNC_CACHE: return "sync_cache";
|
||||
case TSDB_VN_SYNC_STATUS_SYNC_FILE: return "sync_file";
|
||||
|
|
|
@ -57,7 +57,6 @@ ELSEIF (TD_WINDOWS_64)
|
|||
LIST(APPEND SRC ./src/tsched.c)
|
||||
LIST(APPEND SRC ./src/tskiplist.c)
|
||||
LIST(APPEND SRC ./src/tsocket.c)
|
||||
LIST(APPEND SRC ./src/tstatus.c)
|
||||
LIST(APPEND SRC ./src/tstrbuild.c)
|
||||
LIST(APPEND SRC ./src/ttime.c)
|
||||
LIST(APPEND SRC ./src/ttimer.c)
|
||||
|
|
|
@ -536,4 +536,6 @@ int32_t taosCheckHashTable(HashObj *pObj) {
|
|||
assert(num == pEntry->num);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -895,8 +895,7 @@ static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, i
|
|||
return (ret < 0) ? -1 : 1;
|
||||
};
|
||||
case TSDB_DATA_TYPE_NCHAR: {
|
||||
int32_t b = bytes / TSDB_NCHAR_SIZE;
|
||||
int32_t ret = wcsncmp((wchar_t *)f1, (wchar_t *)f2, b);
|
||||
int32_t ret = tasoUcs4Compare(f1, f2, bytes);
|
||||
if (ret == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -56,7 +56,11 @@ int tscEmbedded = 0;
|
|||
*/
|
||||
int64_t tsMsPerDay[] = {86400000L, 86400000000L};
|
||||
|
||||
#ifdef CLUSTER
|
||||
char tsMasterIp[TSDB_IPv4ADDR_LEN] = {0};
|
||||
#else
|
||||
char tsMasterIp[TSDB_IPv4ADDR_LEN] = "127.0.0.1";
|
||||
#endif
|
||||
char tsSecondIp[TSDB_IPv4ADDR_LEN] = {0};
|
||||
uint16_t tsMgmtShellPort = 6030; // udp[6030-6034] tcp[6030]
|
||||
uint16_t tsVnodeShellPort = 6035; // udp[6035-6039] tcp[6035]
|
||||
|
@ -76,7 +80,6 @@ float tsNumOfThreadsPerCore = 1.0;
|
|||
float tsRatioOfQueryThreads = 0.5;
|
||||
char tsPublicIp[TSDB_IPv4ADDR_LEN] = {0};
|
||||
char tsPrivateIp[TSDB_IPv4ADDR_LEN] = {0};
|
||||
char tsServerIpStr[TSDB_IPv4ADDR_LEN] = "127.0.0.1";
|
||||
short tsNumOfVnodesPerCore = 8;
|
||||
short tsNumOfTotalVnodes = 0;
|
||||
short tsCheckHeaderFile = 0;
|
||||
|
@ -118,7 +121,7 @@ int tsBalanceMonitorInterval = 2; // seconds
|
|||
int tsBalanceStartInterval = 300; // seconds
|
||||
int tsBalancePolicy = 0; // 1-use sys.montor
|
||||
int tsOfflineThreshold = 864000; // seconds 10days
|
||||
int tsMgmtEqualVnodeNum = 0;
|
||||
int tsMgmtEqualVnodeNum = 4;
|
||||
|
||||
int tsEnableHttpModule = 1;
|
||||
int tsEnableMonitorModule = 1;
|
||||
|
@ -160,7 +163,6 @@ int tsHttpMaxThreads = 2;
|
|||
int tsHttpEnableCompress = 0;
|
||||
int tsHttpEnableRecordSql = 0;
|
||||
int tsTelegrafUseFieldNum = 0;
|
||||
int tsAdminRowLimit = 10240;
|
||||
|
||||
int tsTscEnableRecordSql = 0;
|
||||
int tsEnableCoreFile = 0;
|
||||
|
@ -447,9 +449,6 @@ static void doInitGlobalConfig() {
|
|||
tsInitConfigOption(cfg++, "secondIp", tsSecondIp, TSDB_CFG_VTYPE_IPSTR,
|
||||
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_CLUSTER,
|
||||
0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE);
|
||||
tsInitConfigOption(cfg++, "serverIp", tsServerIpStr, TSDB_CFG_VTYPE_IPSTR,
|
||||
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_LITE,
|
||||
0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE);
|
||||
tsInitConfigOption(cfg++, "publicIp", tsPublicIp, TSDB_CFG_VTYPE_IPSTR,
|
||||
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLUSTER,
|
||||
0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE);
|
||||
|
|
|
@ -401,6 +401,46 @@ int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char **dstP
|
|||
return rename(fullPath, *dstPath);
|
||||
}
|
||||
|
||||
int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes) {
|
||||
#if defined WINDOWS
|
||||
for (int i = 0; i < bytes; ++i) {
|
||||
int32_t f1 = *(int32_t*)((char*)f1_ucs4 + i * 4);
|
||||
int32_t f2 = *(int32_t*)((char*)f2_ucs4 + i * 4);
|
||||
|
||||
if ((f1 == 0 && f2 != 0) || (f1 != 0 && f2 == 0)) {
|
||||
return f1 - f2;
|
||||
}
|
||||
else if (f1 == 0 && f2 == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (f1 != f2) {
|
||||
return f1 - f2;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
||||
#if 0
|
||||
int32_t ucs4_max_len = bytes + 4;
|
||||
char *f1_mbs = calloc(bytes, 1);
|
||||
char *f2_mbs = calloc(bytes, 1);
|
||||
if (!taosUcs4ToMbs(f1_ucs4, ucs4_max_len, f1_mbs)) {
|
||||
return -1;
|
||||
}
|
||||
if (!taosUcs4ToMbs(f2_ucs4, ucs4_max_len, f2_mbs)) {
|
||||
return -1;
|
||||
}
|
||||
int32_t ret = strcmp(f1_mbs, f2_mbs);
|
||||
free(f1_mbs);
|
||||
free(f2_mbs);
|
||||
return ret;
|
||||
#endif
|
||||
|
||||
#else
|
||||
return wcsncmp((wchar_t *)f1_ucs4, (wchar_t *)f2_ucs4, bytes / TSDB_NCHAR_SIZE);
|
||||
#endif
|
||||
}
|
||||
|
||||
bool taosUcs4ToMbs(void *ucs4, int32_t ucs4_max_len, char *mbs) {
|
||||
#ifdef USE_LIBICONV
|
||||
iconv_t cd = iconv_open(tsCharset, DEFAULT_UNICODE_ENCODEC);
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
char version[64] = "1.6.4.4";
|
||||
char version[64] = "1.6.5.1";
|
||||
char compatible_version[64] = "1.6.1.0";
|
||||
char gitinfo[128] = "d62c5c30231d04a736d437cf428af6e12599bd9f";
|
||||
char gitinfoOfInternal[128] = "8094a32d78dc519bd883d01ac2ba6ec49ac57a80";
|
||||
char buildinfo[512] = "Built by ubuntu at 2019-12-16 21:40";
|
||||
char gitinfo[128] = "2ea714387009421beb35e7f03b94c6a87d22529a";
|
||||
char gitinfoOfInternal[128] = "950f54ac026bc05bcec5cff356f4964a18d635bd";
|
||||
char buildinfo[512] = "Built by ubuntu at 2019-12-21 11:14";
|
||||
|
|
Loading…
Reference in New Issue