Merge branch 'develop' into feature/sangshuduo/snap
commit 2a23fa0a1a
@@ -66,6 +66,8 @@ CMakeError.log
/test/cfg
/src/.vs
*.o
version.c
taos.rc
src/connector/jdbc/.settings/
tests/comparisonTest/cassandra/cassandratest/.classpath
tests/comparisonTest/cassandra/cassandratest/.project
@@ -18,6 +18,7 @@ SET(TD_COVER FALSE)
SET(TD_MEM_CHECK FALSE)

SET(TD_PAGMODE_LITE FALSE)
SET(TD_SOMODE_STATIC FALSE)
SET(TD_GODLL FALSE)

SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})

@@ -27,6 +28,7 @@ INCLUDE(cmake/input.inc)
INCLUDE(cmake/platform.inc)
INCLUDE(cmake/define.inc)
INCLUDE(cmake/env.inc)
INCLUDE(cmake/version.inc)
INCLUDE(cmake/install.inc)

ADD_SUBDIRECTORY(deps)
@@ -2,6 +2,7 @@
[](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[](https://bestpractices.coreinfrastructure.org/projects/4201)
[](https://hub.docker.com/repository/docker/tdengine/tdengine)

[](https://www.taosdata.com)
@@ -77,6 +77,11 @@ IF (TD_LINUX)
ADD_DEFINITIONS(-D_LINUX)
ADD_DEFINITIONS(-D_TD_LINUX)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)

IF (TD_NINGSI_60)
ADD_DEFINITIONS(-D_TD_NINGSI_60_)
MESSAGE(STATUS "set ningsi macro to true")
ENDIF ()

SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")

@@ -110,7 +115,7 @@ IF (TD_WINDOWS)
ADD_DEFINITIONS(-D_MBCS -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE)
SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE)
IF (NOT TD_GODLL)
SET(COMMON_FLAGS "/nologo /WX /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd2220 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
SET(DEBUG_FLAGS "/Zi /W3 /GL")
SET(RELEASE_FLAGS "/W0 /GL")
ENDIF ()
@@ -22,6 +22,11 @@ IF (${PAGMODE} MATCHES "lite")
MESSAGE(STATUS "Build with pagmode lite")
ENDIF ()

IF (${SOMODE} MATCHES "static")
SET(TD_SOMODE_STATIC TRUE)
MESSAGE(STATUS "Link so using static mode")
ENDIF ()

IF (${DLLTYPE} MATCHES "go")
SET(TD_GODLL TRUE)
MESSAGE(STATUS "input dll type: " ${DLLTYPE})
@@ -22,6 +22,9 @@ SET(TD_LINUX FALSE)
SET(TD_MIPS_64 FALSE)
SET(TD_MIPS_32 FALSE)
SET(TD_APLHINE FALSE)
SET(TD_NINGSI FALSE)
SET(TD_NINGSI_60 FALSE)
SET(TD_NINGSI_80 FALSE)
SET(TD_WINDOWS FALSE)
SET(TD_WINDOWS_64 FALSE)
SET(TD_WINDOWS_32 FALSE)

@@ -99,3 +102,18 @@ ELSEIF (${CPUTYPE} MATCHES "x86")
ELSE ()
MESSAGE(STATUS "input cpuType unknown " ${CPUTYPE})
ENDIF ()

# cmake -DOSTYPE=Ningsi
IF (${OSTYPE} MATCHES "Ningsi60")
SET(TD_NINGSI TRUE)
SET(TD_NINGSI_60 TRUE)
MESSAGE(STATUS "input osType: Ningsi60")
ELSEIF (${OSTYPE} MATCHES "Ningsi80")
SET(TD_NINGSI TRUE)
SET(TD_NINGSI_80 TRUE)
MESSAGE(STATUS "input osType: Ningsi80")
ELSEIF (${OSTYPE} MATCHES "Linux")
MESSAGE(STATUS "input osType: Linux")
ELSE ()
MESSAGE(STATUS "input osType unknown: " ${OSTYPE})
ENDIF ()
@@ -0,0 +1,69 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)

IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.2.0")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
SET(TD_VER_COMPATIBLE ${VERCOMPATIBLE})
ELSE ()
SET(TD_VER_COMPATIBLE "2.0.0.0")
ENDIF ()

IF (DEFINED GITINFO)
SET(TD_VER_GIT ${GITINFO})
ELSE ()
SET(TD_VER_GIT "community")
ENDIF ()

IF (DEFINED GITINFOI)
SET(TD_VER_GIT_INTERNAL ${GITINFOI})
ELSE ()
SET(TD_VER_GIT_INTERNAL "internal")
ENDIF ()

IF (DEFINED VERDATE)
SET(TD_VER_DATE ${VERDATE})
ELSE ()
STRING(TIMESTAMP TD_VER_DATE "%Y-%m-%d %H:%M:%S")
ENDIF ()

IF (DEFINED VERTYPE)
SET(TD_VER_VERTYPE ${VERTYPE})
ELSE ()
SET(TD_VER_VERTYPE "stable")
ENDIF ()

IF (DEFINED CPUTYPE)
SET(TD_VER_CPUTYPE ${CPUTYPE})
ELSE ()
IF (TD_WINDOWS_32)
SET(TD_VER_CPUTYPE "x86")
ELSE ()
SET(TD_VER_CPUTYPE "x64")
ENDIF ()
ENDIF ()

IF (DEFINED OSTYPE)
SET(TD_VER_OSTYPE ${OSTYPE})
ELSE ()
SET(TD_VER_OSTYPE "Linux")
ENDIF ()

MESSAGE(STATUS "============= compile version parameter information start ============= ")
MESSAGE(STATUS "ver number:" ${TD_VER_NUMBER})
MESSAGE(STATUS "compatible ver number:" ${TD_VER_COMPATIBLE})
MESSAGE(STATUS "communit commit id:" ${TD_VER_GIT})
MESSAGE(STATUS "internal commit id:" ${TD_VER_GIT_INTERNAL})
MESSAGE(STATUS "build date:" ${TD_VER_DATE})
MESSAGE(STATUS "ver type:" ${TD_VER_VERTYPE})
MESSAGE(STATUS "ver cpu:" ${TD_VER_CPUTYPE})
MESSAGE(STATUS "os type:" ${TD_VER_OSTYPE})
MESSAGE(STATUS "============= compile version parameter information end ============= ")

STRING(REPLACE "." "_" TD_LIB_VER_NUMBER ${TD_VER_NUMBER})

CONFIGURE_FILE("${TD_COMMUNITY_DIR}/src/util/src/version.c.in" "${TD_COMMUNITY_DIR}/src/util/src/version.c")
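A sketch of feeding the version metadata consumed by this new cmake/version.inc from the command line; every -D value below is illustrative and falls back to the defaults shown above when omitted:

```sh
cmake .. -DVERNUMBER=2.0.2.0 -DVERCOMPATIBLE=2.0.0.0 \
         -DVERTYPE=stable -DVERDATE="2020-09-01 12:00:00" \
         -DGITINFO=$(git rev-parse --verify HEAD) \
         -DCPUTYPE=x64 -DOSTYPE=Linux
```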
@@ -4,5 +4,5 @@ PROJECT(TDengine)
IF (TD_WINDOWS)
INCLUDE_DIRECTORIES(include)
AUX_SOURCE_DIRECTORY(src SRC)
ADD_LIBRARY(MsvcLibXw64 ${SRC})
ADD_LIBRARY(MsvcLibXw ${SRC})
ENDIF ()
@@ -59,7 +59,7 @@
/* Generate the OS-and-debug-mode-specific library name */
#define _MSVCLIBX_LIB "MsvcLibX" _MSVCLIBX_LIB_OS_SUFFIX _MSVCLIBX_LIB_DBG_SUFFIX ".lib"
//#pragma message("Adding pragma comment(lib, \"" _MSVCLIBX_LIB "\")")
#pragma comment(lib, _MSVCLIBX_LIB)
//#pragma comment(lib, _MSVCLIBX_LIB)

/* Library-specific routine used internally by many standard routines */
#if defined(_WIN32)
@@ -9,9 +9,9 @@ TDengine uses a relational data model, so databases and tables must be created. Therefore, for a
Different types of data collection points often have different data characteristics: collection frequency, retention period, number of replicas, data block size, and so on. To let TDengine work at maximum efficiency in every scenario, TDengine recommends creating tables with different data characteristics in different databases, because each database can be configured with its own storage policy. When creating a database, in addition to the standard SQL options, the application can also specify the retention period, number of replicas, number of memory blocks, time precision, maximum and minimum numbers of records per file block, whether to compress, the number of days covered by one data file, and other parameters. For example:

```cmd
CREATE DATABASE power KEEP 365 DAYS 10 REPLICA 3 BLOCKS 4;
CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4;
```
The statement above creates a database named power whose data is kept for 365 days (data older than 365 days is deleted automatically), with one data file every 10 days, 3 replicas, and 4 memory blocks. For detailed syntax and parameters see <a href="https://www.taosdata.com/cn/documentation20/taos-sql/">TAOS SQL</a>
The statement above creates a database named power whose data is kept for 365 days (data older than 365 days is deleted automatically), with one data file every 10 days and 4 memory blocks. For detailed syntax and parameters see <a href="https://www.taosdata.com/cn/documentation20/taos-sql/">TAOS SQL</a>

After creating the database, use the SQL command USE to switch the current database to it, for example:
@@ -27,13 +27,15 @@ USE power;
- Tables in two different databases cannot be JOINed.

## Create a Super Table
An IoT system usually contains several types of devices; a power grid, for example, has smart meters, transformers, bus bars, switches, and so on. To make aggregation across tables easy, TDengine requires one super table per type of device. Taking the smart meters of Table 1 as an example, the super table can be created with the following SQL command:
An IoT system usually contains several types of devices; a power grid, for example, has smart meters, transformers, bus bars, switches, and so on. To make aggregation across tables easy, TDengine requires one super table per type of data collection point. Taking the smart meters of Table 1 as an example, the super table can be created with the following SQL command:
```cmd
CREATE TABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);
```
As when creating an ordinary table, you must provide the table name (meters in the example) and the table schema, i.e. the definition of the data columns, which are the collected metrics (ts, current, voltage, phase in the example); their data types can be integer, float, string, and so on. In addition, you must provide the tag schema (location, groupId in the example); tag data types can also be integer, float, string, and so on. Static attributes of a collection point, such as its location, device model, device group ID, or administrator ID, usually make good tags. The tag schema can be added to, deleted from, or modified later. For the exact definition and details see the section <a href="https://www.taosdata.com/cn/documentation20/taos-sql/">TAOS SQL</a>.
As when creating an ordinary table, you must provide the table name (meters in the example) and the table schema, i.e. the definition of the data columns. The first column must be a timestamp (ts in the example); the other columns are the collected metrics (current, voltage, phase in the example), whose data types can be integer, float, string, and so on. In addition, you must provide the tag schema (location, groupId in the example); tag data types can also be integer, float, string, and so on. Static attributes of a collection point, such as its location, device model, device group ID, or administrator ID, usually make good tags. The tag schema can be added to, deleted from, or modified later. For the exact definition and details see the section <a href="https://www.taosdata.com/cn/documentation20/taos-sql/">TAOS SQL</a>.

One super table is needed for each type of data collection point, so an IoT system usually has several super tables. A system can have several DBs, and a DB can contain one or more super tables.
One super table is needed for each type of data collection point, so an IoT system usually has several super tables. For the power grid we would create super tables for smart meters, transformers, bus bars, switches, and so on. In IoT a single device may carry several data collection points (on a wind turbine, for example, one collection point samples electrical quantities such as current and voltage while another samples environmental quantities such as temperature, humidity, and wind direction); in that case several super tables are needed for that type of device. The metrics contained in one super table must be sampled at the same time (with the same timestamp).

A super table allows at most 1024 columns; if a collection point samples more than 1024 metrics, several super tables are needed. A system can have several DBs, and a DB can contain one or more super tables.

## Create Tables
TDengine requires a separate table for every data collection point. As in a standard relational database, a table has a name and a schema, but in addition it can carry one or more tags. When creating the table, a super table is used as the template and the concrete tag values are specified. Taking the smart meters of Table 1 as an example, the table can be created with the following SQL command:
@@ -51,5 +53,7 @@ INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 21
```
The SQL statement above inserts the record (now, 10.2, 219, 0.32) into table d1001. If table d1001 does not exist yet, it is created automatically from the super table meters as template, with the tag values "Beijing.Chaoyang" and 2.

**Multi-column model**: TDengine supports the multi-column model: as long as the metrics are sampled at the same time, they can be stored as different columns of the same table. Some collection points have several groups of metrics sampled at different times; in that case several tables must be created for the same collection point. There is also an extreme design, the single-column model, in which every sampled metric gets its own table, whether or not it is sampled together with others. TDengine recommends the multi-column model whenever the sampling times are identical, because insertion and storage are more efficient. TDengine supports at most 1024 columns.
## Multi-Column Model vs Single-Column Model
TDengine supports the multi-column model: as long as the metrics are sampled at the same time by one data collection point (i.e. the timestamps are identical), they can be stored as different columns of one super table. There is also an extreme design, the single-column model, in which every sampled metric gets its own table and therefore every type of metric gets its own super table; for current, voltage, and phase, for example, three super tables would be built.

TDengine recommends using the multi-column model whenever possible, because insertion and storage are more efficient. For some scenarios, however, the set of metrics sampled by a collection point changes frequently; with the multi-column model that means frequent changes to the super table schema and a more complex application, and in such cases the single-column model is simpler.
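To illustrate the trade-off just described, a hedged sketch using the taos CLI; the single-column table names are hypothetical and only contrast the two modelling styles:

```sh
# Multi-column model: metrics sampled together share one super table.
taos -s "CREATE TABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);"

# Single-column model: one (illustrative) super table per metric.
taos -s "CREATE TABLE meters_current (ts timestamp, current float) TAGS (location binary(64), groupId int);"
taos -s "CREATE TABLE meters_voltage (ts timestamp, voltage int) TAGS (location binary(64), groupId int);"
taos -s "CREATE TABLE meters_phase (ts timestamp, phase float) TAGS (location binary(64), groupId int);"
```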
@@ -82,7 +82,7 @@ The TDengine background service is provided by taosd and can be configured in the file taos.cfg

- firstEp: the end point of the first dnode in the cluster that taosd actively connects to at startup; default localhost:6030.
- secondEp: the end point of the second dnode in the cluster that taosd tries when the first one cannot be reached at startup; default empty.
- fqdn: the FQDN of the data node. If empty, the first one configured in the operating system is picked up automatically; default empty.
- fqdn: the FQDN of the data node, by default the first hostname configured in the operating system. If you prefer to reach the node by IP address, this can be set to the node's IP address.
- serverPort: the port taosd serves on after startup; default 6030.
- httpPort: the port used by the RESTful service; all HTTP requests (TCP) send query/write requests to this port; default 6041.
- dataDir: the data directory; all data files are written here. Default: /var/lib/taos.

@@ -94,7 +94,7 @@ The TDengine background service is provided by taosd and can be configured in the file taos.cfg
- maxSQLLength: the maximum allowed length of a single SQL statement. Default: 65380 bytes.
- telemetryReporting: whether TDengine may collect and report basic usage information; 0 means not allowed, 1 means allowed. Default: 1.

**Note:** for ports, TDengine uses 12 consecutive TCP and UDP port numbers starting from serverPort; they must be opened in the firewall. With the default configuration this means opening the 12 ports 6030 through 6041, for both TCP and UDP.
**Note:** for ports, TDengine uses 13 consecutive TCP and UDP port numbers starting from serverPort; they must be opened in the firewall. With the default configuration this means opening the 13 ports 6030 through 6042, for both TCP and UDP.
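As an illustration of the port note above, one way to open the default range on a host that happens to use ufw (the tool choice is an assumption; firewalld or raw iptables rules work equally well, and the port range comes from the note):

```sh
sudo ufw allow 6030:6042/tcp   # serverPort .. serverPort+12 over TCP
sudo ufw allow 6030:6042/udp   # the same range must also be open over UDP
```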
Data in different application scenarios often has different characteristics: retention period, replica count, collection frequency, record size, number of collection points, compression, and so on can all differ. To get the highest storage efficiency, TDengine provides the following storage-related system configuration parameters:
@@ -153,10 +153,10 @@ The interactive front-end client application of TDengine is taos, which shares the same
The system administrator can add and delete users and change passwords from the CLI. The SQL syntax in the CLI is:

```
CREATE USER <user_name> PASS <‘password’>;
CREATE USER <user_name> PASS <'password'>;
```

Create a user with the given user name and password; the password must be enclosed in single quotes
Create a user with the given user name and password; the password must be enclosed in single quotes, and the quotes must be plain ASCII single quotes

```
DROP USER <user_name>;
@@ -165,10 +165,10 @@ DROP USER <user_name>;
Delete a user; only the root user may do this

```
ALTER USER <user_name> PASS <‘password’>;
ALTER USER <user_name> PASS <'password'>;
```

Change a user's password; to keep it from being converted to lower case, the password must be enclosed in single quotes
Change a user's password; to keep it from being converted to lower case, the password must be enclosed in single quotes, and the quotes must be plain ASCII single quotes

```
SHOW USERS;
@@ -82,7 +82,7 @@ The logical structure of the TDengine distributed architecture is shown below:
### Communication Between Nodes
**Communication method:** the nodes of a TDengine system communicate over TCP/UDP. Since write packets in IoT scenarios are usually small, TDengine uses UDP in addition to TCP, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, and acknowledgement mechanisms to make UDP transport reliable. Packets below 15K are transferred over UDP; larger packets, and query-type operations, automatically use TCP. Depending on the configuration and the packet, TDengine also compresses/decompresses and signs/authenticates the data automatically. Data replication between data nodes uses TCP only.

**FQDN configuration**: a data node has one or more FQDNs, which can be specified with the option "fqdn" in the configuration file taos.cfg; if it is not specified, the system obtains the FQDN automatically. If a node has no FQDN configured, its IP address can be used directly as the FQDN, but this is not recommended, because IP addresses may change, and once they do the cluster no longer works. A data node's EP (End Point) is FQDN + Port.
**FQDN configuration**: a data node has one or more FQDNs, which can be specified with the parameter "fqdn" in the configuration file taos.cfg; if it is not specified, the system obtains the FQDN automatically. If a node has no FQDN configured, the configuration parameter fqdn of that node can simply be set to its IP address. Using an IP is not recommended, though, because IP addresses may change, and once they do the cluster no longer works. A data node's EP (End Point) is FQDN + Port. When FQDNs are used, DNS must be working, or the hosts file must be set up on the nodes and on the machines where the applications run.

**Port configuration:** the external port of a data node is set by the TDengine system configuration parameter serverPort; the port for internal cluster communication is serverPort+5. Data replication between data nodes in the cluster occupies one more TCP port, serverPort+10. To handle UDP data efficiently with multiple threads, each internal and external UDP connection occupies 5 consecutive ports. A data node therefore uses the range serverPort to serverPort+10, 11 TCP/UDP ports in total. Make sure the firewall keeps these ports open. Each data node can be configured with a different serverPort.
@@ -1,16 +1,46 @@
#TDengine Cluster Installation and Management
# TDengine Cluster Installation and Management

Multiple running instances of taosd can form a cluster, ensuring highly reliable operation of TDengine and providing horizontal scalability. To understand cluster management in TDengine 2.0 you need the basic cluster concepts; see the chapter on the overall TDengine 2.0 architecture.
Multiple running instances of taosd can form a cluster, ensuring highly reliable operation of TDengine and providing horizontal scalability. To understand cluster management in TDengine 2.0 you need the basic cluster concepts; see the chapter on the overall TDengine 2.0 architecture. Also, before installing a cluster, please install and try out a single node first by following the chapter [Getting Started](https://www.taosdata.com/cn/getting-started20/).

Each node of the cluster is uniquely identified by its End Point, which is an FQDN (Fully Qualified Domain Name) plus a port, e.g. h1.taosdata.com:6030. The FQDN is usually the server's hostname, which can be obtained with the Linux command "hostname". The port is the port this node serves on, 6030 by default, configurable via the parameter serverPort in taos.cfg.
Each node of the cluster is uniquely identified by its End Point, which is an FQDN (Fully Qualified Domain Name) plus a port, e.g. h1.taosdata.com:6030. The FQDN is usually the server's hostname, which can be obtained with the Linux command `hostname -f`. The port is the port this node serves on, 6030 by default, configurable via the parameter serverPort in taos.cfg. A host may be configured with several hostnames; TDengine automatically picks the first one, but a specific one can be set with the parameter fqdn in taos.cfg. If you prefer direct access by IP address, set the parameter fqdn to the node's IP address.

Cluster management in TDengine is extremely simple: apart from adding and removing nodes, which require manual intervention, everything is automatic, keeping the operations workload to a minimum. This chapter describes cluster management in detail.

##Install and Create the First Node
## Preparation

A cluster is made up of dnodes and starts from the creation of a single dnode. Creating the first node is simple: just install and start it following the chapter [Getting Started](https://www.taosdata.com/cn/getting-started/).
**Step 1**: if a node that will join the cluster still holds earlier test data, or had version 1.X or any other TDengine version installed, remove it first and wipe all its data; for the detailed steps see the blog post [Install and Uninstall of the Various TDengine Packages](https://www.taosdata.com/blog/2019/08/09/566.html )

After starting it, run taos to open the taos shell and run the command "show dnodes;" in it, as shown below:
**Step 2**: it is recommended to disable the firewall, or at least to make sure the TCP and UDP ports 6030 - 6042 are open. It is **strongly recommended** to disable the firewall first and to configure the ports only after the cluster has been set up;

**Step 3**: install TDengine on all nodes, all with exactly the same version, **but do not start taosd**;

**Step 4**: check and configure the FQDN of every node:

1. Run `hostname -f` on each node and confirm that every node has a distinct hostname;
2. On each node run `ping host`, where host is the hostname of another node, to check that the other nodes are reachable; if not, check the network settings, the /etc/hosts file, or the DNS configuration. If the nodes cannot ping each other, the cluster cannot be formed.
3. The FQDN of a node is the printed hostname plus the port number, e.g. h1.taosdata.com:6030

**Step 5**: edit the TDengine configuration file (/etc/taos/taos.cfg must be changed on every node). Assuming the first node to be started has the End Point h1.taosdata.com:6030, the following parameters are cluster-related:

```
// firstEp is the first node each node connects to after startup
firstEp h1.taosdata.com:6030

// the FQDN of this node; if the host has only one hostname, this can be left unset
fqdn h1.taosdata.com

// the port of this node, 6030 by default
serverPort 6030

// required when the replica count is even; see the section on using an Arbitrator
arbitrator ha.taosdata.com:6042
```

The only parameter that must be changed is firstEp; leave the others alone unless you know exactly why you are changing them.

## Start the First Node

Following the instructions in [Getting Started](https://www.taosdata.com/cn/getting-started20/), start the first node h1.taosdata.com, then run taos to open the taos shell and run the command "show dnodes;" in it, as shown below:
```
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
@@ -25,71 +55,64 @@ taos>
```
The output of the command above shows that the End Point of this newly started node is h1.taos.com:6030

## Install and Create Subsequent Nodes
## Start Subsequent Nodes

To add a new node to an existing cluster, follow these steps:
To add the remaining nodes to the existing cluster, follow these steps:

1. Install following the chapter [Getting Started](https://www.taosdata.com/cn/getting-started/), **but do not start taosd**
1. Start taosd on every node following the chapter [Getting Started](https://www.taosdata.com/cn/getting-started/).

2. If you installed from the official TAOS Data packages, the installer asks for the cluster's End Point at the end of installation; enter the End Point of the first node. If you installed from source, edit the configuration file taos.cfg (by default in /etc/taos/) and add a line:

```
firstEp h1.taos.com:6030
```

Note: replace the example "h1.taos.com:6030" with the End Point of your own first node

3. Start taosd following the chapter [Getting Started](https://www.taosdata.com/cn/getting-started/)

4. In a Linux shell run the command "hostname" to find this machine's FQDN, say h2.taos.com. If it cannot be determined, look at the first few lines of the taosd log file taosdlog.0 (usually in /var/log/taos); the fqdn and port are printed there.

5. On the first node, log in to TDengine with the CLI program taos and run the command:
2. On the first node, log in to TDengine with the CLI program taos and run the command:

```
CREATE DNODE "h2.taos.com:6030";
```

to add the new node's End Point to the cluster's EP list. **"fqdn:port" must be enclosed in double quotes**, otherwise an error occurs. Note: replace the example "h2.taos.com:6030" with the End Point of your own first node
to add the new node's End Point (obtained in step 4 of the preparation) to the cluster's EP list. **"fqdn:port" must be enclosed in double quotes**, otherwise an error occurs. Note: replace the example "h2.taos.com:6030" with the End Point of this new node.

6. Use the command
3. Then run the command

```
SHOW DNODES;
```

to check whether the new node was added successfully.
to check whether the new node was added successfully. If the newly added node is shown as offline, make two checks

- check whether taosd on that node is running properly; if it is not, find out why first
- look at the first few lines of the taosd log file taosdlog.0 on that node (usually in /var/log/taos) and check whether the fqdn and port printed there match the End Point that was just added. If they differ, add the correct End Point.

By repeating the steps above you can keep adding new nodes to the cluster.

**Tips:**

- The parameters firstEp and secondEp take effect only when a node joins the cluster for the first time; after joining, the node stores the latest list of mnode End Points and no longer relies on these two parameters.
- Two dnodes started without the firstEp and secondEp parameters each run independently. Once that happens, one of them cannot be joined to the other to form a cluster. **Two independent clusters cannot be merged into a new cluster**.
- The parameter firstEp takes effect only when a node joins the cluster for the first time; after joining, the node stores the latest list of mnode End Points and no longer relies on this parameter.
- Two dnodes started without the firstEp parameter each run independently. Once that happens, one of them cannot be joined to the other to form a cluster. **Two independent clusters cannot be merged into a new cluster**.

##Node Management
## Node Management

###Add a Node
### Add a Node
Run the CLI program taos, log in to the system as root, and execute:
```
CREATE DNODE "fqdn:port";
```
to add the new node's End Point to the cluster's EP list. **"fqdn:port" must be enclosed in double quotes**, otherwise an error occurs. The fqdn and port a node serves on can be configured in taos.cfg; by default they are obtained automatically.

###Remove a Node
### Remove a Node
Run the CLI program taos, log in to TDengine as root, and execute:

```
DROP DNODE "fqdn:port";
```
where fqdn is the FQDN of the node to be removed and port is its external service port

###List Nodes
### List Nodes
Run the CLI program taos, log in to TDengine as root, and execute:

```
SHOW DNODES;
```
This lists every dnode in the cluster together with its fqdn:port, its status (ready, offline, etc.), the number of vnodes, the number of still unused vnodes, and so on. You can run this command after adding or removing a node.

###List Virtual Node Groups
### List Virtual Node Groups

To make full use of multi-core hardware and to provide scalability, data needs to be sharded. TDengine therefore splits the data of a DB into multiple parts stored in multiple vnodes. The vnodes may be spread over multiple dnodes, which is how horizontal scaling is achieved. A vnode belongs to exactly one DB, but a DB can have several vnodes. vnodes are allocated automatically by the mnode according to the current system resources, without any manual intervention.
@@ -97,7 +120,7 @@ SHOW DNODES;
```
SHOW VGROUPS;
```
##High Availability of vnodes
## High Availability of vnodes
TDengine provides high availability of the system, for both vnodes and mnodes, through a multi-replica mechanism.

The number of vnode replicas is bound to a DB; a cluster can contain several DBs, and each DB can be configured with its own replica count according to operational needs. When creating a database, the replica count is set with the parameter replica (default 1). With a single replica the system's reliability cannot be guaranteed: as soon as the node holding the data goes down, the service becomes unavailable. The number of nodes in the cluster must be greater than or equal to the replica count, otherwise creating a table returns the error "more dnodes are needed". For example, the following command creates database demo with 3 replicas:

@@ -111,7 +134,7 @@ CREATE DATABASE demo replica 3;

Because of vnodes, one cannot simply conclude that "the cluster works as long as more than half of the dnodes are up". For simple cases, though, the conclusion is easy: with a replica count of 3 and only three dnodes, the cluster still works when exactly one node is down, but it stops working when two nodes are down.

##High Availability of mnodes
## High Availability of mnodes
A TDengine cluster is managed by mnodes (a module of taosd, a logical node). To keep the mnode highly available, multiple mnode replicas can be configured; their number is set by the system configuration parameter numOfMnodes, with a valid range of 1-3. To guarantee strong consistency of the metadata, the mnode replicas replicate data synchronously.

A cluster has multiple dnodes, but a dnode runs at most one mnode instance. With multiple dnodes, which dnode acts as an mnode? That is decided entirely automatically by the system according to the overall resource situation. The user can check with the CLI program taos by running the following command in the TDengine console:

@@ -125,7 +148,7 @@ SHOW MNODES;

**Note:** in a highly available TDengine system, both vnodes and mnodes must be configured with multiple replicas.

##Load Balancing
## Load Balancing

There are three situations that trigger load balancing, and none of them needs manual intervention.

@@ -142,8 +165,9 @@ SHOW MNODES;

**Note:** if every node of a virtual node group (including the mnode group) is offline or in the unsynced state, a Master can be elected, and the group can serve requests again, only after all nodes of the group are back online and able to exchange status information. For example, with a cluster of 3 nodes and a replica count of 3, if all 3 nodes go down and then 2 of them restart, the group cannot work; it can serve requests only once all 3 nodes have restarted successfully.

##Using an Arbitrator
## Using an Arbitrator

If the replica count is even, no master can be elected in a vnode group when half or more of its vnodes are down. Likewise, no mnode master can be elected when half or more of the mnodes are down, because of the "split brain" problem. To solve this, TDengine introduces the concept of an arbitrator. The arbitrator pretends to be a working vnode or mnode, but it only maintains the network connection and does not handle any data insertion or access. As long as more than half of the vnodes or mnodes, the arbitrator included, are working, the vnode group or the mnode group can keep providing data insertion and query services. For a replica count of 2, for example, if node A goes offline but node B is healthy and can reach the arbitrator, node B keeps working.

The TDengine package ships with an executable named tarbitrator; simply run it on any Linux server. It has almost no resource requirements and only needs network connectivity. Its command-line option `-p` sets the port it serves on, 6030 by default. When configuring each taosd instance, the parameter arbitrator in taos.cfg can be set to the arbitrator's End Point; if it is configured, the system automatically connects to the configured arbitrator whenever the replica count is even.
The TDengine package ships with an executable named tarbitrator; simply run it on any Linux server. It has almost no resource requirements and only needs network connectivity. Its command-line option `-p` sets the port it serves on, 6042 by default. When configuring each taosd instance, the parameter arbitrator in taos.cfg can be set to the arbitrator's End Point; if it is configured, the system automatically connects to the configured arbitrator whenever the replica count is even.
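A minimal sketch of the arbitrator setup described above; the host name is an example, and the binary path matches the init scripts later in this commit:

```sh
# On any spare Linux host, run the arbitrator on the (new) default port 6042.
/usr/local/taos/bin/tarbitrator -p 6042 &

# Then point every taosd instance at it in /etc/taos/taos.cfg:
#   arbitrator ha.taosdata.com:6042
```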
@@ -1,6 +1,6 @@
#FAQ
# FAQ

#### 1. What should I pay attention to when upgrading from a TDengine version earlier than 2.0 to 2.0 or later? ☆☆☆
## 1. What should I pay attention to when upgrading from a TDengine version earlier than 2.0 to 2.0 or later? ☆☆☆

Version 2.0 is a complete rewrite of the earlier versions, and the configuration files and data files are not compatible. Be sure to do the following before upgrading:

@@ -10,23 +10,23 @@
4. Install the latest stable version of TDengine
5. If data has to be migrated or the data files are damaged, contact the official TAOS Data technical support team for assistance

#### 2. The JDBC driver cannot find the dynamic link library on Windows. What should I do?
## 2. The JDBC driver cannot find the dynamic link library on Windows. What should I do?
See the <a href='blog/2019/12/03/jdbcdriver找不到动态链接库/'>technical blog post</a> written for this issue

#### 3. Creating a table reports "more dnodes are needed"
## 3. Creating a table reports "more dnodes are needed"
See the <a href='blog/2019/12/03/创建数据表时提示more-dnodes-are-needed/'>technical blog post</a> written for this issue

#### 4. How do I make TDengine generate a core file when it crashes?
## 4. How do I make TDengine generate a core file when it crashes?
See the <a href='blog/2019/12/06/tdengine-crash时生成core文件的方法/'>technical blog post</a> written for this issue

#### 5. I get the error "Unable to establish connection". What should I do?
## 5. I get the error "Unable to establish connection". What should I do?

When the client fails to connect, check the following step by step:

1. Make sure the client and server versions are exactly the same; the open-source community edition and the enterprise edition must not be mixed either
2. On the server, run `systemctl status taosd` to check whether *taosd* is running. If it is not, start *taosd*
3. Confirm that the client connects with the correct server IP address
4. Ping the server IP; if there is no response, check your network
3. Confirm that the client connects with the correct server FQDN (Fully Qualified Domain Name; it can be obtained by running the Linux command hostname -f on the server)
4. Ping the server FQDN; if there is no response, check your network, the DNS settings, or the hosts file on the client machine
5. Check the firewall settings and confirm that the TCP/UDP ports 6030-6039 are open
6. For JDBC on Linux (ODBC, Python, Go, etc. are similar), make sure *libtaos.so* is in */usr/local/lib/taos* and that */usr/local/lib/taos* is in the system library search path *LD_LIBRARY_PATH*
7. For JDBC, ODBC, Python, Go, etc. on Windows, make sure *driver/c/taos.dll* is in your system search path (it is recommended to put *taos.dll* in *C:\Windows\System32*)
@@ -36,40 +36,61 @@
Check whether the TCP port connection works from the client side: `nc {hostIP} {port}`


#### 6. The syntax is correct, so why do I still get an "Invalid SQL" error?
## 6. I get the error "Unexpected generic error in RPC". What should I do?
This error occurs because the client or a data node cannot resolve the FQDN (Fully Qualified Domain Name). For the TAOS shell or a client application, check the following:

1. Check that the FQDN of the server you connect to is correct
2. If the network has a DNS server configured, check that it works properly
3. If the network has no DNS server, check the hosts file on the client machine to see whether the FQDN is configured there with the correct IP address.
4. If the network configuration is fine, you must be able to ping the FQDN from the client machine, otherwise the client cannot connect to the server


## 7. The syntax is correct, so why do I still get an "Invalid SQL" error?

If you are sure the syntax is correct: in versions before 2.0, check whether the SQL statement is longer than 64K; exceeding that limit also returns this error.

#### 7. Are validation queries supported?
## 8. Are validation queries supported?

TDengine does not yet have a dedicated set of validation queries. It is suggested to use the system-monitoring database "log" for this purpose.

#### 8. Can I delete or update a record?
## 9. Can I delete or update a record?

No. TDengine is designed for data collected from connected devices and does not allow modification. TDengine does provide a retention policy, though: records older than the retention period are deleted automatically.

#### 10. How do I create a table with more than 250 columns?
## 10. How do I create a table with more than 1024 columns?

Version 2.0 and later supports 1024 columns by default; versions before 2.0 allow at most 250 columns per table. If you really exceed the limit, it is recommended to split the wide table logically into several smaller tables according to the characteristics of the data.

#### 10. What is the most efficient way to write data?
## 10. What is the most efficient way to write data?

Batch inserts. One write statement can insert several records into a table at once, or insert records into several tables at once.
Batch inserts. One write statement can insert several records into a table at once, or insert several records into several tables at once.

#### 11. What is the most efficient way to write data? How do I fix Chinese characters in nchar data being garbled when inserted on Windows?
## 11. What is the most efficient way to write data? How do I fix Chinese characters in nchar data being garbled when inserted on Windows?

If nchar data inserted on Windows contains Chinese characters, first make sure the system region is set to China (this can be set in the Control Panel); the `taos` client in cmd should then work correctly. If you develop a Java application in an IDE such as Eclipse or IntelliJ, make sure the file encoding in the IDE is GBK (Java's default encoding type), and initialize the client configuration when creating the Connection, with the following statements:

Class.forName("com.taosdata.jdbc.TSDBDriver");

Properties properties = new Properties();

properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");

Connection = DriverManager.getConnection(url, properties);

#### 12. How do I build the TDengine Go driver on Windows?
```JAVA
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
Connection = DriverManager.getConnection(url, properties);
```
## 12. How do I build the TDengine Go driver on Windows?
See the <a href='blog/2020/01/06/tdengine-go-windows驱动的编译/'>technical blog post</a> written for this issue

## 13. JDBC reports the error: the excuted SQL is not a DML or a DDL?
Update to the latest JDBC driver
```JAVA
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.4</version>
</dependency>
```
## 14. How do I report a problem?
If the information in this FAQ does not help and you need support from the TDengine technical team, please package the contents of the following two directories:
1. /var/log/taos
2. /etc/taos

Attach a description of the problem, the operations that triggered it, the symptoms, and roughly when it occurred, and file an issue on <a href='https://github.com/taosdata/TDengine'>GitHub</a>.

To make sure there is enough debugging information, if the problem can be reproduced, edit /etc/taos/taos.cfg, append the line "debugFlag 135" (without the quotes), restart taosd, reproduce the problem, and then submit the issue. When the system is running normally, be sure to set debugFlag back to 131, otherwise a large volume of log output is produced and system performance drops.
@@ -24,6 +24,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,

- To improve write efficiency, write in batches: the more records per batch, the higher the insert efficiency (a sketch follows this list). A single record must not exceed 16K, however, and the total length of one SQL statement must not exceed 64K (configurable via the parameter maxSQLLength, up to 8M).
- TDengine supports concurrent writes from multiple threads; to push the write rate further, a client should write from more than 20 threads in parallel. Beyond a certain number of threads, though, throughput stops improving and may even drop, because frequent thread switching adds overhead.
- For a given table, if the timestamp of a newly inserted record already exists, the new record is simply discarded; in other words, timestamps must be unique within a table. If the application generates records automatically, it may well produce identical timestamps, in which case fewer records are inserted successfully than the application submitted.
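For illustration, a hedged sketch of the batch write described in the first bullet, issued through the taos CLI; table d1002 and its values are hypothetical and only show that one statement may target more than one sub-table:

```sh
taos -s "INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) \
         d1002 VALUES (1538548696000, 11.8, 221, 0.28);"
```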
## Direct Writes from Prometheus
[Prometheus](https://www.prometheus.io/), a graduated project of the Cloud Native Computing Foundation, is very widely used for performance monitoring, including Kubernetes monitoring. TDengine provides a small tool, [Bailongma](https://github.com/taosdata/Bailongma); with a simple Prometheus configuration and no code at all, data collected by Prometheus can be written straight into TDengine, with the database and related tables created automatically according to rules. The blog post [用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) ("Quickly Build a DevOps Monitoring Demo with Docker Containers") shows Bailongma being used to write Prometheus and Telegraf data into TDengine and can serve as a reference.
@@ -24,7 +24,7 @@
# dataDir /var/lib/taos

# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
# arbitrator arbitrator_hostname:6030
# arbitrator arbitrator_hostname:6042

# number of threads per CPU core
# numOfThreadsPerCore 1.0
@@ -7,19 +7,19 @@
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides: TDEngine
# Provides: taoscluster
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts TDEngine tarbitrator
# Description: Starts TDEngine tarbitrator, a arbitrator
# Short-Description: Starts taoscluster tarbitrator
# Description: Starts taoscluster tarbitrator, a arbitrator
### END INIT INFO

set -e

PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="tarbitrator"
NAME="taoscluster"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/tarbitrator"
@@ -12,6 +12,6 @@ ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib"
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US:en
ENV LC_ALL=en_US.UTF-8
EXPOSE 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041
EXPOSE 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042
CMD ["taosd"]
VOLUME [ "/var/lib/taos", "/var/log/taos","/etc/taos/" ]
@@ -7,20 +7,24 @@ set -e

# releash.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...]
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
# -V [stable | beta]
# -l [full | lite]
# -s [static | dynamic]
# -n [2.0.0.3]
# -m [2.0.0.0]

# set parameters by default value
verMode=edge # [cluster, edge]
verType=stable # [stable, beta]
cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
verNumber=""
verNumberComp="2.0.0.0"

while getopts "hv:V:c:o:l:n:" arg
while getopts "hv:V:c:o:l:s:n:m:" arg
do
case $arg in
v)
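A sketch of invoking the script with the extended option set; the values are illustrative and correspond to the flags documented in the comment block and getopts string above:

```sh
./release.sh -v edge -c x64 -o Linux -V stable -l full -s static -n 2.0.2.0 -m 2.0.0.0
```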
@@ -39,10 +43,18 @@ do
#echo "pagMode=$OPTARG"
pagMode=$(echo $OPTARG)
;;
s)
#echo "soMode=$OPTARG"
soMode=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
m)
#echo "verNumberComp=$OPTARG"
verNumberComp=$(echo $OPTARG)
;;
o)
#echo "osType=$OPTARG"
osType=$(echo $OPTARG)
@@ -50,10 +62,12 @@ do
h)
echo "Usage: `basename $0` -v [cluster | edge] "
echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] "
echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] "
echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
echo " -V [stable | beta] "
echo " -l [full | lite] "
echo " -s [static | dynamic] "
echo " -n [version number] "
echo " -m [compatible version number] "
exit 0
;;
?) #unknow option
@@ -63,215 +77,142 @@ do
esac
done

echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} verNumber=${verNumber}"
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} verNumber=${verNumber} verNumberComp=${verNumberComp}"

curr_dir=$(pwd)

if [ "$osType" != "Darwin" ]; then
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/..)"
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/..)"
else
script_dir=`dirname $0`
cd ${script_dir}
script_dir="$(pwd)"
top_dir=${script_dir}/..
script_dir=`dirname $0`
cd ${script_dir}
script_dir="$(pwd)"
top_dir=${script_dir}/..
fi

versioninfo="${top_dir}/src/util/src/version.c"

csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
#if command -v sudo > /dev/null; then
# csudo="sudo"
#fi

function is_valid_version() {
[ -z $1 ] && return 1 || :
[ -z $1 ] && return 1 || :

rx='^([0-9]+\.){3}(\*|[0-9]+)$'
if [[ $1 =~ $rx ]]; then
return 0
fi
return 1
rx='^([0-9]+\.){3}(\*|[0-9]+)$'
if [[ $1 =~ $rx ]]; then
return 0
fi
return 1
}

function vercomp () {
if [[ $1 == $2 ]]; then
echo 0
exit 0
fi

local IFS=.
local i ver1=($1) ver2=($2)

# fill empty fields in ver1 with zeros
for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
ver1[i]=0
done

for ((i=0; i<${#ver1[@]}; i++)); do
if [[ -z ${ver2[i]} ]]; then
# fill empty fields in ver2 with zeros
ver2[i]=0
fi
if ((10#${ver1[i]} > 10#${ver2[i]})); then
echo 1
exit 0
fi
if ((10#${ver1[i]} < 10#${ver2[i]})); then
echo 2
exit 0
fi
done
if [[ $1 == $2 ]]; then
echo 0
}
exit 0
fi

local IFS=.
local i ver1=($1) ver2=($2)

# 1. Read version information
version=$(cat ${versioninfo} | grep " version" | cut -d '"' -f2)
compatible_version=$(cat ${versioninfo} | grep " compatible_version" | cut -d '"' -f2)
# fill empty fields in ver1 with zeros
for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
ver1[i]=0
done

if [ -z ${verNumber} ]; then
while true; do
read -p "Do you want to release a new version? [y/N]: " is_version_change

if [[ ( "${is_version_change}" == "y") || ( "${is_version_change}" == "Y") ]]; then
read -p "Please enter the new version: " tversion
while true; do
if (! is_valid_version $tversion) || [ "$(vercomp $tversion $version)" = '2' ]; then
read -p "Please enter a correct version: " tversion
continue
fi
version=${tversion}
break
done

echo

read -p "Enter the oldest compatible version: " tversion
while true; do

if [ -z $tversion ]; then
break
fi

if (! is_valid_version $tversion) || [ "$(vercomp $version $tversion)" = '2' ]; then
read -p "enter correct compatible version: " tversion
else
compatible_version=$tversion
break
fi
done

break
elif [[ ( "${is_version_change}" == "n") || ( "${is_version_change}" == "N") ]]; then
echo "Use old version: ${version} compatible version: ${compatible_version}."
break
else
continue
for ((i=0; i<${#ver1[@]}; i++)); do
if [[ -z ${ver2[i]} ]]; then
# fill empty fields in ver2 with zeros
ver2[i]=0
fi
if ((10#${ver1[i]} > 10#${ver2[i]})); then
echo 1
exit 0
fi
if ((10#${ver1[i]} < 10#${ver2[i]})); then
echo 2
exit 0
fi
done
else
echo "old version: $version, new version: $verNumber"
#if ( ! is_valid_version $verNumber ) || [[ "$(vercomp $version $verNumber)" == '2' ]]; then
# echo "please enter correct version"
# exit 0
#else
version=${verNumber}
#fi
fi
echo 0
}

echo "=======================new version number: ${version}======================================"
# 1. check version information
if (( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then
echo "please enter correct version"
exit 0
fi

echo "=======================new version number: ${verNumber}, compatible version: ${verNumberComp}======================================"

# output the version info to the buildinfo file.
build_time=$(date +"%F %R")
echo "char version[12] = \"${version}\";" > ${versioninfo}
echo "char compatible_version[12] = \"${compatible_version}\";" >> ${versioninfo}
echo "char gitinfo[48] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo}
if [ "$verMode" != "cluster" ]; then
echo "char gitinfoOfInternal[48] = \"\";" >> ${versioninfo}
else
enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir}
echo "char gitinfoOfInternal[48] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo}
cd ${curr_dir}
fi
echo "char buildinfo[64] = \"Built by ${USER} at ${build_time}\";" >> ${versioninfo}
echo "" >> ${versioninfo}
tmp_version=$(echo $version | tr -s "." "_")
if [ "$verMode" == "cluster" ]; then
libtaos_info=${tmp_version}_${osType}_${cpuType}
else
libtaos_info=edge_${tmp_version}_${osType}_${cpuType}
fi
if [ "$verType" == "beta" ]; then
libtaos_info=${libtaos_info}_${verType}
fi
echo "void libtaos_${libtaos_info}() {};" >> ${versioninfo}

# get commint id from git
gitinfo=$(git rev-parse --verify HEAD)
enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir}
gitinfoOfInternal=$(git rev-parse --verify HEAD)
cd ${curr_dir}

# 2. cmake executable file
compile_dir="${top_dir}/debug"
if [ -d ${compile_dir} ]; then
${csudo} rm -rf ${compile_dir}
${csudo} rm -rf ${compile_dir}
fi

if [ "$osType" != "Darwin" ]; then
${csudo} mkdir -p ${compile_dir}
${csudo} mkdir -p ${compile_dir}
else
mkdir -p ${compile_dir}
mkdir -p ${compile_dir}
fi
cd ${compile_dir}

# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
if [ "$verMode" != "cluster" ]; then
cmake ../ -DCPUTYPE=${cpuType} -DPAGMODE=${pagMode}
else
cmake ../../ -DCPUTYPE=${cpuType}
fi
if [ "$verMode" != "cluster" ]; then
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode}
else
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp}
fi
else
echo "input cpuType=${cpuType} error!!!"
exit 1
echo "input cpuType=${cpuType} error!!!"
exit 1
fi

make

cd ${curr_dir}

# 3. judge the operating system type, then Call the corresponding script for packaging
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
#osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
#echo "osinfo: ${osinfo}"

# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType}

echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType}
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}

echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
fi

echo "====do tar.gz package for all systems===="
cd ${script_dir}/tools

${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
echo "====do tar.gz package for all systems===="
cd ${script_dir}/tools

${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
${csudo} ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
${csudo} ./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
else
cd ${script_dir}/tools
./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
cd ${script_dir}/tools
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
fi

# 4. Clean up temporary compile directories
#${csudo} rm -rf ${compile_dir}
@@ -7,10 +7,10 @@
#
#
### BEGIN INIT INFO
# Provides: TDEngine
# Provides: taoscluster
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Short-Description: start and stop taosd
# Short-Description: start and stop tarbitrator
# Description: tarbitrator is a arbitrator used in TDengine cluster.
### END INIT INFO
@@ -76,7 +76,11 @@ fi
# get the operating system type for using the corresponding init file
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
if [[ -e /etc/os-release ]]; then
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
else
osinfo=""
fi
#echo "osinfo: ${osinfo}"
os_type=0
if echo $osinfo | grep -qwi "ubuntu" ; then
@@ -95,8 +99,10 @@ elif echo $osinfo | grep -qwi "fedora" ; then
# echo "This is fedora system"
os_type=2
else
echo "${osinfo}: This is an officially unverified linux system, If there are any problems with the installation and operation, "
echo "please feel free to contact taosdata.com for support."
echo " osinfo: ${osinfo}"
echo " This is an officially unverified linux system,"
echo " if there are any problems with the installation and operation, "
echo " please feel free to contact taosdata.com for support."
os_type=1
fi

@@ -192,13 +198,12 @@ function install_lib() {
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo} rm -rf ${v15_java_app_dir} || :

${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*

${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so

if [ -d ${lib64_link_dir} ]; then
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
@@ -306,14 +311,27 @@ function clean_service_on_sysvinit() {
fi

if ((${initd_mod}==1)); then
if [ -e ${service_config_dir}/taosd ]; then
${csudo} chkconfig --del taosd || :
fi

if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
if [ -e ${service_config_dir}/taosd ]; then
${csudo} insserv -r taosd || :
fi
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} insserv -r tarbitratord || :
fi
elif ((${initd_mod}==3)); then
if [ -e ${service_config_dir}/taosd ]; then
${csudo} update-rc.d -f taosd remove || :
fi
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} update-rc.d -f tarbitratord remove || :
fi
fi

${csudo} rm -f ${service_config_dir}/taosd || :
@@ -326,7 +344,6 @@ function clean_service_on_sysvinit() {

function install_service_on_sysvinit() {
clean_service_on_sysvinit

sleep 1

# Install taosd service
@@ -364,34 +381,29 @@ function install_service_on_sysvinit() {

function clean_service_on_systemd() {
taosd_service_config="${service_config_dir}/taosd.service"

if systemctl is-active --quiet taosd; then
echo "TDengine is running, stopping it..."
${csudo} systemctl stop taosd &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null

${csudo} rm -f ${taosd_service_config}


tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
if systemctl is-active --quiet tarbitratord; then
echo "tarbitrator is running, stopping it..."
${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
${csudo} rm -f ${tarbitratord_service_config}

if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/nginxd.service"

if systemctl is-active --quiet nginxd; then
echo "Nginx for TDengine is running, stopping it..."
${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null

${csudo} rm -f ${nginx_service_config}

tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
if systemctl is-active --quiet tarbitratord; then
echo "tarbitrator is running, stopping it..."
${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null

${csudo} rm -f ${tarbitratord_service_config}
fi
}

@@ -401,7 +413,6 @@ function install_service_on_systemd() {
clean_service_on_systemd

taosd_service_config="${service_config_dir}/taosd.service"

${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}"
${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}"
@@ -422,32 +433,30 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}"
${csudo} systemctl enable taosd


tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
#${csudo} systemctl enable tarbitratord

if [ "$verMode" == "cluster" ]; then

tarbitratord_service_config="${service_config_dir}/tarbitratord.service"

${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
# ${csudo} systemctl enable tarbitratord

nginx_service_config="${service_config_dir}/nginxd.service"
${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}"
@@ -696,6 +705,7 @@ function install_TDengine() {
echo
echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}"
fi
touch ~/.taos_history

rm -rf $(tar -tf taos.tar.gz)
}
@ -0,0 +1,297 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install database on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
|
||||
bin_link_dir="/usr/bin"
|
||||
#inc_link_dir="/usr/include"
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/tarbitrator"
|
||||
|
||||
# old bin dir
|
||||
bin_dir="/usr/local/tarbitrator/bin"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo"
|
||||
fi
|
||||
|
||||
update_flag=0
|
||||
|
||||
initd_mod=0
|
||||
service_mod=2
|
||||
if pidof systemd &> /dev/null; then
|
||||
service_mod=0
|
||||
elif $(which service &> /dev/null); then
|
||||
service_mod=1
|
||||
service_config_dir="/etc/init.d"
|
||||
if $(which chkconfig &> /dev/null); then
|
||||
initd_mod=1
|
||||
elif $(which insserv &> /dev/null); then
|
||||
initd_mod=2
|
||||
elif $(which update-rc.d &> /dev/null); then
|
||||
initd_mod=3
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
|
||||
|
||||
# get the operating system type for using the corresponding init file
|
||||
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
|
||||
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
|
||||
if [[ -e /etc/os-release ]]; then
|
||||
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
|
||||
else
|
||||
osinfo=""
|
||||
fi
|
||||
#echo "osinfo: ${osinfo}"
|
||||
os_type=0
|
||||
if echo $osinfo | grep -qwi "ubuntu" ; then
|
||||
# echo "This is ubuntu system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "debian" ; then
|
||||
# echo "This is debian system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "Kylin" ; then
|
||||
# echo "This is Kylin system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "centos" ; then
|
||||
# echo "This is centos system"
|
||||
os_type=2
|
||||
elif echo $osinfo | grep -qwi "fedora" ; then
|
||||
# echo "This is fedora system"
|
||||
os_type=2
|
||||
else
|
||||
echo " osinfo: ${osinfo}"
|
||||
echo " This is an officially unverified linux system,"
|
||||
echo " if there are any problems with the installation and operation, "
|
||||
echo " please feel free to contact taosdata.com for support."
|
||||
os_type=1
|
||||
fi
|
||||
|
||||
function kill_tarbitrator() {
|
||||
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo} kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo} rm -rf ${install_main_dir} || :
|
||||
${csudo} mkdir -p ${install_main_dir}
|
||||
${csudo} mkdir -p ${install_main_dir}/bin
|
||||
#${csudo} mkdir -p ${install_main_dir}/include
|
||||
${csudo} mkdir -p ${install_main_dir}/init.d
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo} rm -f ${bin_link_dir}/rmtarbitrator || :
|
||||
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
|
||||
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
|
||||
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/remove_arbi.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi.sh ${bin_link_dir}/rmtarbitrator || :
|
||||
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
|
||||
}
|
||||
|
||||
function install_header() {
|
||||
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
|
||||
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
|
||||
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
}
|
||||
|
||||
function clean_service_on_sysvinit() {
|
||||
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
|
||||
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
|
||||
|
||||
if pidof tarbitrator &> /dev/null; then
|
||||
${csudo} service tarbitratord stop || :
|
||||
fi
|
||||
|
||||
if ((${initd_mod}==1)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} chkconfig --del tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod}==2)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} insserv -r tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod}==3)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} update-rc.d -f tarbitratord remove || :
|
||||
fi
|
||||
fi
|
||||
|
||||
${csudo} rm -f ${service_config_dir}/tarbitratord || :
|
||||
|
||||
if $(which init &> /dev/null); then
|
||||
${csudo} init q || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_service_on_sysvinit() {
  clean_service_on_sysvinit
  sleep 1

  # Install taosd service

  if ((${os_type}==1)); then
    ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
    ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
  elif ((${os_type}==2)); then
    ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
    ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
  fi

  #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
  #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"

  if ((${initd_mod}==1)); then
    ${csudo} chkconfig --add tarbitratord || :
    ${csudo} chkconfig --level 2345 tarbitratord on || :
  elif ((${initd_mod}==2)); then
    ${csudo} insserv tarbitratord || :
    ${csudo} insserv -d tarbitratord || :
  elif ((${initd_mod}==3)); then
    ${csudo} update-rc.d tarbitratord defaults || :
  fi
}

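Whether the sysvinit registration took effect can be confirmed with the tool matching ${initd_mod}, for example (illustrative only):

chkconfig --list tarbitratord    # initd_mod == 1, chkconfig-based distros
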
function clean_service_on_systemd() {
  tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
  if systemctl is-active --quiet tarbitratord; then
    echo "tarbitrator is running, stopping it..."
    ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
  fi
  ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null

  ${csudo} rm -f ${tarbitratord_service_config}
}

# taos:2345:respawn:/etc/init.d/tarbitratord start

function install_service_on_systemd() {
  clean_service_on_systemd

  tarbitratord_service_config="${service_config_dir}/tarbitratord.service"

  ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
  ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
  ${csudo} systemctl enable tarbitratord
}

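Taken together, those echo calls leave a unit file at ${service_config_dir}/tarbitratord.service that should look roughly like the following (reconstructed from the lines above, not copied from a built package):

[Unit]
Description=TDengine arbitrator service
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=/usr/bin/tarbitrator
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
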
function install_service() {
  if ((${service_mod}==0)); then
    install_service_on_systemd
  elif ((${service_mod}==1)); then
    install_service_on_sysvinit
  else
    # must manual stop taosd
    kill_tarbitrator
  fi
}

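${service_mod} itself is resolved earlier in the script (outside this hunk); the companion remove script further down in this change uses the same probe, roughly:

if pidof systemd &> /dev/null; then
  service_mod=0        # systemd manages the unit
elif $(which service &> /dev/null); then
  service_mod=1        # classic sysvinit
else
  service_mod=2        # no service manager; the process is killed by hand
fi
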
function update_TDengine() {
  # Start to update
  echo -e "${GREEN}Start to update TDengine's arbitrator ...${NC}"
  # Stop the service if running
  if pidof tarbitrator &> /dev/null; then
    if ((${service_mod}==0)); then
      ${csudo} systemctl stop tarbitratord || :
    elif ((${service_mod}==1)); then
      ${csudo} service tarbitratord stop || :
    else
      kill_tarbitrator
    fi
    sleep 1
  fi

  install_main_path
  #install_header
  install_bin
  install_service

  echo
  #echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg"
  if ((${service_mod}==0)); then
    echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
  elif ((${service_mod}==1)); then
    echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
  else
    echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
  fi
  echo
  echo -e "\033[44;32;1mTDengine's arbitrator is updated successfully!${NC}"
}

function install_TDengine() {
  # Start to install
  echo -e "${GREEN}Start to install TDengine's arbitrator ...${NC}"

  install_main_path
  #install_header
  install_bin
  install_service
  echo
  #echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg"
  if ((${service_mod}==0)); then
    echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
  elif ((${service_mod}==1)); then
    echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
  else
    echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
  fi

  echo -e "\033[44;32;1mTDengine's arbitrator is installed successfully!${NC}"
  echo
}

## ==============================Main program starts from here============================
# Install server and client
if [ -x ${bin_dir}/tarbitrator ]; then
  update_flag=1
  update_TDengine
else
  install_TDengine
fi
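Because the entry point above branches on whether ${bin_dir}/tarbitrator already exists, the same package serves both first installs and upgrades; a typical run (illustrative, using the install_arbi.sh name referenced by the packaging script below) is simply:

sudo ./install_arbi.sh
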
@@ -0,0 +1,75 @@
#!/bin/bash
#
# Generate arbitrator's tar.gz setup package for all os system

set -e
#set -x

curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8

script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"

# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"

#package_name='linux'
if [ "$verMode" == "cluster" ]; then
  install_dir="${release_dir}/TDengine-enterprise-arbitrator"
else
  install_dir="${release_dir}/TDengine-arbitrator"
fi

# Directories and files.
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh"
install_files="${script_dir}/install_arbi.sh"

#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord

# make directories.
mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi.sh || :
#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :

cd ${release_dir}

if [ "$verMode" == "cluster" ]; then
  pkg_name=${install_dir}-${version}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
  pkg_name=${install_dir}-${version}-${osType}-${cpuType}
else
  echo "unknown verMode, neither cluster nor edge"
  exit 1
fi

if [ "$verType" == "beta" ]; then
  pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
  pkg_name=${pkg_name}
else
  echo "unknown verType, neither stable nor beta"
  exit 1
fi

tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
  echo "tar ${pkg_name}.tar.gz error !!!"
  exit $exitcode
fi

cd ${curr_dir}
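The packaging script takes its eight inputs positionally; a hypothetical invocation (the script name, paths and values here are illustrative only) could look like:

./makearbi.sh /path/to/debug 2.0.0.0 "$(date +%F)" x64 Linux edge stable full

With those values the tarball written under ${release_dir} would be named TDengine-arbitrator-2.0.0.0-Linux-x64.tar.gz.
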
@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then
|
|||
strip ${build_dir}/bin/taos
|
||||
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
|
||||
else
|
||||
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
|
||||
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
|
||||
fi
|
||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||
else
|
||||
|
|
|
@ -69,11 +69,12 @@ function kill_tarbitrator() {
|
|||
}
|
||||
function clean_bin() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${bin_link_dir}/taos || :
|
||||
${csudo} rm -f ${bin_link_dir}/taosd || :
|
||||
${csudo} rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo} rm -f ${bin_link_dir}/taosdump || :
|
||||
${csudo} rm -f ${bin_link_dir}/rmtaos || :
|
||||
${csudo} rm -f ${bin_link_dir}/taos || :
|
||||
${csudo} rm -f ${bin_link_dir}/taosd || :
|
||||
${csudo} rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo} rm -f ${bin_link_dir}/rmtaos || :
|
||||
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
|
||||
${csudo} rm -f ${bin_link_dir}/set_core || :
|
||||
}
|
||||
|
||||
function clean_lib() {
|
||||
|
@ -86,7 +87,7 @@ function clean_lib() {
|
|||
function clean_header() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${inc_link_dir}/taos.h || :
|
||||
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
|
||||
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
|
||||
}
|
||||
|
||||
function clean_config() {
|
||||
|
@ -148,15 +149,27 @@ function clean_service_on_sysvinit() {
|
|||
${csudo} service tarbitratord stop || :
|
||||
fi
|
||||
|
||||
if ((${initd_mod}==1)); then
|
||||
if ((${initd_mod}==1)); then
|
||||
if [ -e ${service_config_dir}/taosd ]; then
|
||||
${csudo} chkconfig --del taosd || :
|
||||
fi
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} chkconfig --del tarbitratord || :
|
||||
elif ((${initd_mod}==2)); then
|
||||
fi
|
||||
elif ((${initd_mod}==2)); then
|
||||
if [ -e ${service_config_dir}/taosd ]; then
|
||||
${csudo} insserv -r taosd || :
|
||||
fi
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} insserv -r tarbitratord || :
|
||||
elif ((${initd_mod}==3)); then
|
||||
fi
|
||||
elif ((${initd_mod}==3)); then
|
||||
if [ -e ${service_config_dir}/taosd ]; then
|
||||
${csudo} update-rc.d -f taosd remove || :
|
||||
fi
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} update-rc.d -f tarbitratord remove || :
|
||||
fi
|
||||
fi
|
||||
|
||||
${csudo} rm -f ${service_config_dir}/taosd || :
|
||||
|
@ -196,13 +209,20 @@ ${csudo} rm -rf ${data_link_dir} || :
|
|||
|
||||
${csudo} rm -rf ${install_main_dir}
|
||||
${csudo} rm -rf ${install_nginxd_dir}
|
||||
if [[ -e /etc/os-release ]]; then
|
||||
osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
|
||||
else
|
||||
osinfo=""
|
||||
fi
|
||||
|
||||
osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
|
||||
if echo $osinfo | grep -qwi "ubuntu" ; then
|
||||
# echo "this is ubuntu system"
|
||||
${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
|
||||
elif echo $osinfo | grep -qwi "debian" ; then
|
||||
# echo "this is debian system"
|
||||
${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
|
||||
elif echo $osinfo | grep -qwi "centos" ; then
|
||||
echo "this is centos system"
|
||||
# echo "this is centos system"
|
||||
${csudo} rpm -e --noscripts tdengine || :
|
||||
fi
|
||||
|
||||
|
|
|
@@ -0,0 +1,129 @@
#!/bin/bash
#
# Script to stop the service and uninstall TDengine's arbitrator

set -e
#set -x

verMode=edge

RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'

#install main path
install_main_dir="/usr/local/tarbitrator"
bin_link_dir="/usr/bin"
#inc_link_dir="/usr/include"

service_config_dir="/etc/systemd/system"
tarbitrator_service_name="tarbitratord"
csudo=""
if command -v sudo > /dev/null; then
  csudo="sudo"
fi

initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
  service_mod=0
elif $(which service &> /dev/null); then
  service_mod=1
  service_config_dir="/etc/init.d"
  if $(which chkconfig &> /dev/null); then
    initd_mod=1
  elif $(which insserv &> /dev/null); then
    initd_mod=2
  elif $(which update-rc.d &> /dev/null); then
    initd_mod=3
  else
    service_mod=2
  fi
else
  service_mod=2
fi

function kill_tarbitrator() {
  pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
  if [ -n "$pid" ]; then
    ${csudo} kill -9 $pid || :
  fi
}
function clean_bin() {
  # Remove link
  ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
}

function clean_header() {
  # Remove link
  ${csudo} rm -f ${inc_link_dir}/taos.h || :
  ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}

function clean_log() {
  # Remove link
  ${csudo} rm -rf /arbitrator.log || :
}

function clean_service_on_systemd() {
  tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"

  if systemctl is-active --quiet ${tarbitrator_service_name}; then
    echo "TDengine tarbitrator is running, stopping it..."
    ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
  fi
  ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null

  ${csudo} rm -f ${tarbitratord_service_config}
}

function clean_service_on_sysvinit() {
  if pidof tarbitrator &> /dev/null; then
    echo "TDengine's tarbitrator is running, stopping it..."
    ${csudo} service tarbitratord stop || :
  fi

  if ((${initd_mod}==1)); then
    if [ -e ${service_config_dir}/tarbitratord ]; then
      ${csudo} chkconfig --del tarbitratord || :
    fi
  elif ((${initd_mod}==2)); then
    if [ -e ${service_config_dir}/tarbitratord ]; then
      ${csudo} insserv -r tarbitratord || :
    fi
  elif ((${initd_mod}==3)); then
    if [ -e ${service_config_dir}/tarbitratord ]; then
      ${csudo} update-rc.d -f tarbitratord remove || :
    fi
  fi

  ${csudo} rm -f ${service_config_dir}/tarbitratord || :

  if $(which init &> /dev/null); then
    ${csudo} init q || :
  fi
}

function clean_service() {
  if ((${service_mod}==0)); then
    clean_service_on_systemd
  elif ((${service_mod}==1)); then
    clean_service_on_sysvinit
  else
    # must manual stop
    kill_tarbitrator
  fi
}

# Stop service and disable booting start.
clean_service
# Remove binary file and links
clean_bin
# Remove header file.
##clean_header
# Remove log file
clean_log

${csudo} rm -rf ${install_main_dir}

echo -e "${GREEN}TDengine's arbitrator is removed successfully!${NC}"
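Mirroring the installer, removal is a single call (illustrative):

sudo ./remove_arbi.sh    # or the rmtarbitrator link created by install_bin
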
@ -37,7 +37,7 @@ function kill_client() {
|
|||
function clean_bin() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${bin_link_dir}/taos || :
|
||||
${csudo} rm -f ${bin_link_dir}/taosump || :
|
||||
${csudo} rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo} rm -f ${bin_link_dir}/rmtaos || :
|
||||
}
|
||||
|
||||
|
|
|
@ -35,12 +35,14 @@ IF (TD_LINUX)
|
|||
ELSEIF (TD_WINDOWS)
|
||||
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows)
|
||||
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows/win32)
|
||||
|
||||
|
||||
CONFIGURE_FILE("${TD_COMMUNITY_DIR}/src/client/src/taos.rc.in" "${TD_COMMUNITY_DIR}/src/client/src/taos.rc")
|
||||
|
||||
ADD_LIBRARY(taos_static STATIC ${SRC})
|
||||
TARGET_LINK_LIBRARIES(taos_static trpc tutil query)
|
||||
|
||||
# generate dynamic library (*.dll)
|
||||
ADD_LIBRARY(taos SHARED ${SRC})
|
||||
ADD_LIBRARY(taos SHARED ${SRC} ${TD_COMMUNITY_DIR}/src/client/src/taos.rc)
|
||||
IF (NOT TD_GODLL)
|
||||
SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def)
|
||||
ENDIF ()
|
||||
|
|
|
@ -87,7 +87,6 @@ typedef struct SRetrieveSupport {
|
|||
SSqlObj * pParentSql;
|
||||
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
|
||||
uint32_t numOfRetry; // record the number of retry times
|
||||
pthread_mutex_t queryMutex;
|
||||
} SRetrieveSupport;
|
||||
|
||||
int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pDesc,
|
||||
|
|
|
@ -108,7 +108,7 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
|
|||
void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
|
||||
void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
|
||||
|
||||
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes,
|
||||
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
|
||||
uint32_t offset);
|
||||
|
||||
void* tscDestroyBlockArrayList(SArray* pDataBlockList);
|
||||
|
@ -138,10 +138,10 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo);
|
|||
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
|
||||
bool tscQueryTags(SQueryInfo* pQueryInfo);
|
||||
|
||||
void tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex,
|
||||
SSchema* pColSchema, int16_t colType);
|
||||
SSqlExpr* tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
|
||||
SColumnIndex* pIndex, SSchema* pColSchema, int16_t colType);
|
||||
|
||||
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql);
|
||||
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql);
|
||||
void tscClearInterpInfo(SQueryInfo* pQueryInfo);
|
||||
|
||||
bool tscIsInsertData(char* sqlstr);
|
||||
|
@ -194,11 +194,11 @@ SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex);
|
|||
SArray* tscColumnListClone(const SArray* src, int16_t tableIndex);
|
||||
void tscColumnListDestroy(SArray* pColList);
|
||||
|
||||
int32_t tscValidateName(SSQLToken* pToken);
|
||||
int32_t tscValidateName(SStrToken* pToken);
|
||||
|
||||
void tscIncStreamExecutionCount(void* pStream);
|
||||
|
||||
bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId);
|
||||
bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t numOfParams);
|
||||
|
||||
// get starter position of metric query condition (query on tags) in SSqlCmd.payload
|
||||
SCond* tsGetSTableQueryCond(STagCond* pCond, uint64_t uid);
|
||||
|
@ -217,7 +217,7 @@ STableMetaInfo* tscGetTableMetaInfoFromCmd(SSqlCmd *pCmd, int32_t subClauseIndex
|
|||
STableMetaInfo* tscGetMetaInfo(SQueryInfo *pQueryInfo, int32_t tableIndex);
|
||||
|
||||
SQueryInfo *tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex);
|
||||
int32_t tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex, SQueryInfo** pQueryInfo);
|
||||
SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex);
|
||||
|
||||
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache);
|
||||
|
||||
|
|
|
@ -35,6 +35,40 @@ extern "C" {
|
|||
#include "qTsbuf.h"
|
||||
#include "tcmdtype.h"
|
||||
|
||||
#if 0
|
||||
static UNUSED_FUNC void *u_malloc (size_t __size) {
|
||||
uint32_t v = rand();
|
||||
|
||||
if (v % 5000 <= 0) {
|
||||
return NULL;
|
||||
} else {
|
||||
return malloc(__size);
|
||||
}
|
||||
}
|
||||
|
||||
static UNUSED_FUNC void* u_calloc(size_t num, size_t __size) {
|
||||
uint32_t v = rand();
|
||||
if (v % 5000 <= 0) {
|
||||
return NULL;
|
||||
} else {
|
||||
return calloc(num, __size);
|
||||
}
|
||||
}
|
||||
|
||||
static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
|
||||
uint32_t v = rand();
|
||||
if (v % 5000 <= 0) {
|
||||
return NULL;
|
||||
} else {
|
||||
return realloc(p, __size);
|
||||
}
|
||||
}
|
||||
|
||||
#define calloc u_calloc
|
||||
#define malloc u_malloc
|
||||
#define realloc u_realloc
|
||||
#endif
|
||||
|
||||
// forward declaration
|
||||
struct SSqlInfo;
|
||||
struct SLocalReducer;
|
||||
|
@ -195,9 +229,9 @@ typedef struct STableDataBlocks {
|
|||
|
||||
typedef struct SQueryInfo {
|
||||
int16_t command; // the command may be different for each subclause, so keep it seperately.
|
||||
uint32_t type; // query/insert/import type
|
||||
uint32_t type; // query/insert type
|
||||
char slidingTimeUnit;
|
||||
STimeWindow window;
|
||||
STimeWindow window; // query time window
|
||||
int64_t intervalTime; // aggregation time interval
|
||||
int64_t slidingTime; // sliding window in mseconds
|
||||
SSqlGroupbyExpr groupbyExpr; // group by tags info
|
||||
|
@ -216,6 +250,7 @@ typedef struct SQueryInfo {
|
|||
char * msg; // pointer to the pCmd->payload to keep error message temporarily
|
||||
int64_t clauseLimit; // limit for current sub clause
|
||||
int64_t prjOffset; // offset value in the original sql expression, only applied at client side
|
||||
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
|
||||
} SQueryInfo;
|
||||
|
||||
typedef struct {
|
||||
|
@ -234,7 +269,7 @@ typedef struct {
|
|||
char * curSql; // current sql, resume position of sql after parsing paused
|
||||
int8_t parseFinished;
|
||||
|
||||
short numOfCols;
|
||||
int16_t numOfCols;
|
||||
uint32_t allocSize;
|
||||
char * payload;
|
||||
int32_t payloadLen;
|
||||
|
@ -431,31 +466,36 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
|
|||
int32_t bytes = pInfo->pSqlExpr->resBytes;
|
||||
|
||||
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
|
||||
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
|
||||
int32_t realLen = varDataLen(pData);
|
||||
assert(realLen <= bytes - VARSTR_HEADER_SIZE);
|
||||
|
||||
if (isNull(pData, type)) {
|
||||
pRes->tsrow[columnIndex] = NULL;
|
||||
// user defined constant value output columns
|
||||
if (pInfo->pSqlExpr->colInfo.flag == TSDB_COL_UDC) {
|
||||
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
|
||||
pData = pInfo->pSqlExpr->param[1].pz;
|
||||
pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
|
||||
pRes->tsrow[columnIndex] = (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) ? NULL : pData;
|
||||
} else {
|
||||
pRes->tsrow[columnIndex] = ((tstr*)pData)->data;
|
||||
}
|
||||
assert(bytes == tDataTypeDesc[type].nSize);
|
||||
|
||||
if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor
|
||||
*(pData + realLen + VARSTR_HEADER_SIZE) = 0;
|
||||
pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : &pInfo->pSqlExpr->param[1].i64Key;
|
||||
pRes->length[columnIndex] = bytes;
|
||||
}
|
||||
|
||||
pRes->length[columnIndex] = realLen;
|
||||
} else {
|
||||
assert(bytes == tDataTypeDesc[type].nSize);
|
||||
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
|
||||
int32_t realLen = varDataLen(pData);
|
||||
assert(realLen <= bytes - VARSTR_HEADER_SIZE);
|
||||
|
||||
if (isNull(pData, type)) {
|
||||
pRes->tsrow[columnIndex] = NULL;
|
||||
pRes->tsrow[columnIndex] = (isNull(pData, type)) ? NULL : ((tstr *)pData)->data;
|
||||
if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor
|
||||
*(pData + realLen + VARSTR_HEADER_SIZE) = 0;
|
||||
}
|
||||
|
||||
pRes->length[columnIndex] = realLen;
|
||||
} else {
|
||||
pRes->tsrow[columnIndex] = pData;
|
||||
}
|
||||
assert(bytes == tDataTypeDesc[type].nSize);
|
||||
|
||||
pRes->length[columnIndex] = bytes;
|
||||
pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : pData;
|
||||
pRes->length[columnIndex] = bytes;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -89,6 +89,14 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp
|
|||
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp
|
||||
(JNIEnv *env, jobject jobj, jlong con, jlong tres);
|
||||
|
||||
/*
|
||||
* Class: com_taosdata_jdbc_TSDBJNIConnector
|
||||
* Method: isUpdateQueryImp
|
||||
* Signature: (J)J
|
||||
*/
|
||||
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp
|
||||
(JNIEnv *env, jobject jobj, jlong con, jlong tres);
|
||||
|
||||
/*
|
||||
* Class: com_taosdata_jdbc_TSDBJNIConnector
|
||||
* Method: freeResultSetImp
|
||||
|
|
|
@ -0,0 +1,31 @@
|
|||
1 VERSIONINFO
|
||||
FILEVERSION ${TD_VER_NUMBER}
|
||||
PRODUCTVERSION ${TD_VER_NUMBER}
|
||||
FILEFLAGSMASK 0x17L
|
||||
#ifdef _DEBUG
|
||||
FILEFLAGS 0x1L
|
||||
#else
|
||||
FILEFLAGS 0x0L
|
||||
#endif
|
||||
FILEOS 0x4L
|
||||
FILETYPE 0x0L
|
||||
FILESUBTYPE 0x0L
|
||||
BEGIN
|
||||
BLOCK "StringFileInfo"
|
||||
BEGIN
|
||||
BLOCK "040904b0"
|
||||
BEGIN
|
||||
VALUE "FileDescription", "Native C Driver for TDengine"
|
||||
VALUE "FileVersion", "${TD_VER_NUMBER}"
|
||||
VALUE "InternalName", "taos.dll(${TD_VER_CPUTYPE})"
|
||||
VALUE "LegalCopyright", "Copyright (C) 2020 TAOS Data"
|
||||
VALUE "OriginalFilename", ""
|
||||
VALUE "ProductName", "taos.dll(${TD_VER_CPUTYPE})"
|
||||
VALUE "ProductVersion", "${TD_VER_NUMBER}"
|
||||
END
|
||||
END
|
||||
BLOCK "VarFileInfo"
|
||||
BEGIN
|
||||
VALUE "Translation", 0x409, 1200
|
||||
END
|
||||
END
|
|
@ -433,7 +433,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
|
|||
tscError("%p get tableMeta failed, code:%s", pSql, tstrerror(code));
|
||||
goto _error;
|
||||
} else {
|
||||
tscDebug("%p get tableMeta successfully", pSql);
|
||||
const char* msg = (pCmd->command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
|
||||
tscDebug("%p get %s successfully", pSql, msg);
|
||||
}
|
||||
|
||||
if (pSql->pStream == NULL) {
|
||||
|
|
|
@ -56,7 +56,8 @@
|
|||
for (int32_t i = 0; i < (ctx)->tagInfo.numOfTagCols; ++i) { \
|
||||
SQLFunctionCtx *__ctx = (ctx)->tagInfo.pTagCtxList[i]; \
|
||||
if (__ctx->functionId == TSDB_FUNC_TS_DUMMY) { \
|
||||
__ctx->tag = (tVariant){.i64Key = (ts), .nType = TSDB_DATA_TYPE_BIGINT}; \
|
||||
__ctx->tag.i64Key = (ts); \
|
||||
__ctx->tag.nType = TSDB_DATA_TYPE_BIGINT; \
|
||||
} \
|
||||
aAggs[TSDB_FUNC_TAG].xFunction(__ctx); \
|
||||
} \
|
||||
|
@ -963,7 +964,8 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
|
|||
for (int32_t i = 0; i < (pCtx)->tagInfo.numOfTagCols; ++i) {
|
||||
SQLFunctionCtx *__ctx = pCtx->tagInfo.pTagCtxList[i];
|
||||
if (__ctx->functionId == TSDB_FUNC_TS_DUMMY) {
|
||||
__ctx->tag = (tVariant){.i64Key = key, .nType = TSDB_DATA_TYPE_BIGINT};
|
||||
__ctx->tag.i64Key = key;
|
||||
__ctx->tag.nType = TSDB_DATA_TYPE_BIGINT;
|
||||
}
|
||||
|
||||
aAggs[TSDB_FUNC_TAG].xFunction(__ctx);
|
||||
|
@ -1811,23 +1813,19 @@ static void last_dist_func_second_merge(SQLFunctionCtx *pCtx) {
|
|||
* NOTE: last_row does not use the interResultBuf to keep the result
|
||||
*/
|
||||
static void last_row_function(SQLFunctionCtx *pCtx) {
|
||||
assert(pCtx->size == 1);
|
||||
|
||||
assert(pCtx->size >= 1);
|
||||
char *pData = GET_INPUT_CHAR(pCtx);
|
||||
assignVal(pCtx->aOutputBuf, pData, pCtx->inputBytes, pCtx->inputType);
|
||||
|
||||
// assign the last element in current data block
|
||||
assignVal(pCtx->aOutputBuf, pData + (pCtx->size - 1) * pCtx->inputBytes, pCtx->inputBytes, pCtx->inputType);
|
||||
|
||||
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
pResInfo->hasResult = DATA_SET_FLAG;
|
||||
|
||||
SLastrowInfo *pInfo = (SLastrowInfo *)pResInfo->interResultBuf;
|
||||
pInfo->ts = pCtx->ptsList[0];
|
||||
|
||||
pInfo->hasResult = DATA_SET_FLAG;
|
||||
|
||||
// set the result to final result buffer
|
||||
// set the result to final result buffer in case of super table query
|
||||
if (pResInfo->superTableQ) {
|
||||
SLastrowInfo *pInfo1 = (SLastrowInfo *)(pCtx->aOutputBuf + pCtx->inputBytes);
|
||||
pInfo1->ts = pCtx->ptsList[0];
|
||||
pInfo1->ts = pCtx->ptsList[pCtx->size - 1];
|
||||
pInfo1->hasResult = DATA_SET_FLAG;
|
||||
|
||||
DO_UPDATE_TAG_COLUMNS(pCtx, pInfo1->ts);
|
||||
|
@ -1867,7 +1865,8 @@ static void valuePairAssign(tValuePair *dst, int16_t type, const char *val, int6
|
|||
for (int32_t i = 0; i < pTagInfo->numOfTagCols; ++i) {
|
||||
SQLFunctionCtx* ctx = pTagInfo->pTagCtxList[i];
|
||||
if (ctx->functionId == TSDB_FUNC_TS_DUMMY) {
|
||||
ctx->tag = (tVariant) {.nType = TSDB_DATA_TYPE_BIGINT, .i64Key = tsKey};
|
||||
ctx->tag.nType = TSDB_DATA_TYPE_BIGINT;
|
||||
ctx->tag.i64Key = tsKey;
|
||||
}
|
||||
|
||||
tVariantDump(&ctx->tag, dst->pTags + size, ctx->tag.nType, true);
|
||||
|
@ -2035,7 +2034,7 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) {
|
|||
tValuePair **tvp = pRes->res;
|
||||
|
||||
int32_t step = QUERY_ASC_FORWARD_STEP;
|
||||
int32_t len = GET_RES_INFO(pCtx)->numOfRes;
|
||||
int32_t len = (int32_t)(GET_RES_INFO(pCtx)->numOfRes);
|
||||
|
||||
switch (type) {
|
||||
case TSDB_DATA_TYPE_INT: {
|
||||
|
@ -2409,10 +2408,10 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) {
|
|||
// user specify the order of output by sort the result according to timestamp
|
||||
if (pCtx->param[1].i64Key == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
|
||||
__compar_fn_t comparator = (pCtx->param[2].i64Key == TSDB_ORDER_ASC) ? resAscComparFn : resDescComparFn;
|
||||
qsort(tvp, pResInfo->numOfRes, POINTER_BYTES, comparator);
|
||||
qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator);
|
||||
} else if (pCtx->param[1].i64Key > PRIMARYKEY_TIMESTAMP_COL_INDEX) {
|
||||
__compar_fn_t comparator = (pCtx->param[2].i64Key == TSDB_ORDER_ASC) ? resDataAscComparFn : resDataDescComparFn;
|
||||
qsort(tvp, pResInfo->numOfRes, POINTER_BYTES, comparator);
|
||||
qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator);
|
||||
}
|
||||
|
||||
GET_TRUE_DATA_TYPE();
|
||||
|
@ -2906,33 +2905,41 @@ static FORCE_INLINE void date_col_output_function_f(SQLFunctionCtx *pCtx, int32_
|
|||
}
|
||||
|
||||
static void col_project_function(SQLFunctionCtx *pCtx) {
|
||||
// the number of output rows should not affect the final number of rows, so set it to be 0
|
||||
if (pCtx->numOfParams == 2) {
|
||||
return;
|
||||
}
|
||||
|
||||
INC_INIT_VAL(pCtx, pCtx->size);
|
||||
|
||||
|
||||
char *pData = GET_INPUT_CHAR(pCtx);
|
||||
if (pCtx->order == TSDB_ORDER_ASC) {
|
||||
memcpy(pCtx->aOutputBuf, pData, (size_t)pCtx->size * pCtx->inputBytes);
|
||||
memcpy(pCtx->aOutputBuf, pData, (size_t) pCtx->size * pCtx->inputBytes);
|
||||
} else {
|
||||
for(int32_t i = 0; i < pCtx->size; ++i) {
|
||||
memcpy(pCtx->aOutputBuf + (pCtx->size - 1 - i) * pCtx->inputBytes, pData + i * pCtx->inputBytes,
|
||||
pCtx->inputBytes);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pCtx->aOutputBuf += pCtx->size * pCtx->outputBytes;
|
||||
}
|
||||
|
||||
static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
||||
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
|
||||
if (pCtx->numOfParams == 2) { // the number of output rows should not affect the final number of rows, so set it to be 0
|
||||
return;
|
||||
}
|
||||
|
||||
// only one output
|
||||
if (pCtx->param[0].i64Key == 1 && pResInfo->numOfRes >= 1) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
INC_INIT_VAL(pCtx, 1);
|
||||
char *pData = GET_INPUT_CHAR_INDEX(pCtx, index);
|
||||
memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes);
|
||||
|
||||
|
||||
pCtx->aOutputBuf += pCtx->inputBytes;
|
||||
}
|
||||
|
||||
|
@ -3900,11 +3907,11 @@ static void ts_comp_function(SQLFunctionCtx *pCtx) {
|
|||
|
||||
// primary ts must be existed, so no need to check its existance
|
||||
if (pCtx->order == TSDB_ORDER_ASC) {
|
||||
tsBufAppend(pTSbuf, 0, pCtx->tag.i64Key, input, pCtx->size * TSDB_KEYSIZE);
|
||||
tsBufAppend(pTSbuf, 0, &pCtx->tag, input, pCtx->size * TSDB_KEYSIZE);
|
||||
} else {
|
||||
for (int32_t i = pCtx->size - 1; i >= 0; --i) {
|
||||
char *d = GET_INPUT_CHAR_INDEX(pCtx, i);
|
||||
tsBufAppend(pTSbuf, 0, pCtx->tag.i64Key, d, TSDB_KEYSIZE);
|
||||
tsBufAppend(pTSbuf, 0, &pCtx->tag, d, TSDB_KEYSIZE);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3923,7 +3930,7 @@ static void ts_comp_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
|||
|
||||
STSBuf *pTSbuf = pInfo->pTSBuf;
|
||||
|
||||
tsBufAppend(pTSbuf, 0, pCtx->tag.i64Key, pData, TSDB_KEYSIZE);
|
||||
tsBufAppend(pTSbuf, 0, &pCtx->tag, pData, TSDB_KEYSIZE);
|
||||
SET_VAL(pCtx, pCtx->size, 1);
|
||||
|
||||
pResInfo->hasResult = DATA_SET_FLAG;
|
||||
|
|
|
@ -370,7 +370,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
|
|||
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
|
||||
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
|
||||
|
||||
TSKEY stime = MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
|
||||
TSKEY stime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey : pQueryInfo->window.ekey;
|
||||
int64_t revisedSTime =
|
||||
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
|
||||
|
||||
|
@ -843,28 +843,6 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
|
|||
tColModelAppend(pModel, pLocalReducer->discardData, pLocalReducer->prevRowOfInput, 0, 1, 1);
|
||||
}
|
||||
|
||||
static void reversedCopyFromInterpolationToDstBuf(SQueryInfo *pQueryInfo, SSqlRes *pRes, tFilePage **pResPages,
|
||||
SLocalReducer *pLocalReducer) {
|
||||
assert(0);
|
||||
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
|
||||
|
||||
for (int32_t i = 0; i < size; ++i) {
|
||||
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
|
||||
int32_t offset = tscFieldInfoGetOffset(pQueryInfo, i);
|
||||
assert(offset == getColumnModelOffset(pLocalReducer->resColModel, i));
|
||||
|
||||
char *src = pResPages[i]->data + (pRes->numOfRows - 1) * pField->bytes;
|
||||
char *dst = pRes->data + pRes->numOfRows * offset;
|
||||
|
||||
for (int32_t j = 0; j < pRes->numOfRows; ++j) {
|
||||
memcpy(dst, src, (size_t)pField->bytes);
|
||||
dst += pField->bytes;
|
||||
src -= pField->bytes;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) {
|
||||
assert(pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
|
||||
|
||||
|
@ -907,7 +885,7 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
|
|||
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo);
|
||||
}
|
||||
|
||||
memcpy(pRes->data, pBeforeFillData->data, pRes->numOfRows * pLocalReducer->finalRowSize);
|
||||
memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalReducer->finalRowSize));
|
||||
|
||||
pRes->numOfClauseTotal += pRes->numOfRows;
|
||||
pBeforeFillData->num = 0;
|
||||
|
@ -925,7 +903,8 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
|
|||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
|
||||
|
||||
int64_t actualETime = MAX(pQueryInfo->window.skey, pQueryInfo->window.ekey);
|
||||
// todo extract function
|
||||
int64_t actualETime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
|
||||
|
||||
tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput);
|
||||
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
|
@ -943,7 +922,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
|
|||
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
memmove(pResPages[i]->data, pResPages[i]->data + pField->bytes * pQueryInfo->limit.offset,
|
||||
newRows * pField->bytes);
|
||||
(size_t)(newRows * pField->bytes));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -984,14 +963,10 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
|
|||
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo);
|
||||
}
|
||||
|
||||
if (pQueryInfo->order.order == TSDB_ORDER_ASC) {
|
||||
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
int16_t offset = getColumnModelOffset(pLocalReducer->resColModel, i);
|
||||
memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, pField->bytes * pRes->numOfRows);
|
||||
}
|
||||
} else { // todo bug??
|
||||
reversedCopyFromInterpolationToDstBuf(pQueryInfo, pRes, pResPages, pLocalReducer);
|
||||
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
int16_t offset = getColumnModelOffset(pLocalReducer->resColModel, i);
|
||||
memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, (size_t)(pField->bytes * pRes->numOfRows));
|
||||
}
|
||||
|
||||
pRes->numOfRowsGroup += pRes->numOfRows;
|
||||
|
@ -1248,8 +1223,6 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
|
|||
printf("final result before interpo:\n");
|
||||
// tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num);
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
// no interval query, no fill operation
|
||||
if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
|
||||
|
@ -1257,7 +1230,9 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
|
|||
} else {
|
||||
SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
|
||||
if (pFillInfo != NULL) {
|
||||
taosFillSetStartInfo(pFillInfo, (int32_t)pResBuf->num, pQueryInfo->window.ekey);
|
||||
TSKEY ekey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
|
||||
|
||||
taosFillSetStartInfo(pFillInfo, (int32_t)pResBuf->num, ekey);
|
||||
taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf);
|
||||
}
|
||||
|
||||
|
@ -1292,7 +1267,7 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
|
|||
|
||||
// for group result interpolation, do not return if not data is generated
|
||||
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
|
||||
TSKEY skey = MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
|
||||
TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
|
||||
int64_t newTime =
|
||||
taosGetIntervalStartTimestamp(skey, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
|
||||
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
|
||||
|
@ -1345,7 +1320,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
|
|||
// if fillType == TSDB_FILL_NONE, return directly
|
||||
if (pQueryInfo->fillType != TSDB_FILL_NONE &&
|
||||
((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) {
|
||||
int64_t etime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.ekey : pQueryInfo->window.skey;
|
||||
int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey;
|
||||
|
||||
int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity);
|
||||
if (rows > 0) {
|
||||
|
@ -1402,13 +1377,12 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
|
|||
tscResetForNextRetrieve(pRes);
|
||||
|
||||
if (pSql->signature != pSql || pRes == NULL || pRes->pLocalReducer == NULL) { // all data has been processed
|
||||
tscDebug("%p %s call the drop local reducer", pSql, __FUNCTION__);
|
||||
tscDestroyLocalReducer(pSql);
|
||||
return 0;
|
||||
tscError("%p local merge abort due to error occurs, code:%s", pSql, tstrerror(pRes->code));
|
||||
return pRes->code;
|
||||
}
|
||||
|
||||
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
|
||||
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
|
||||
// set the data merge in progress
|
||||
int32_t prevStatus =
|
||||
|
@ -1503,8 +1477,8 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
|
|||
* so the processing of previous group is completed.
|
||||
*/
|
||||
int32_t numOfRes = finalizeRes(pQueryInfo, pLocalReducer);
|
||||
bool sameGroup = isSameGroup(pCmd, pLocalReducer, pLocalReducer->prevRowOfInput, tmpBuffer);
|
||||
|
||||
bool sameGroup = isSameGroup(pCmd, pLocalReducer, pLocalReducer->prevRowOfInput, tmpBuffer);
|
||||
tFilePage *pResBuf = pLocalReducer->pResultBuf;
|
||||
|
||||
/*
|
||||
|
|
|
@ -40,7 +40,7 @@ enum {
|
|||
|
||||
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
|
||||
|
||||
static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) {
|
||||
static int32_t tscToInteger(SStrToken *pToken, int64_t *value, char **endPtr) {
|
||||
if (pToken->n == 0) {
|
||||
return TK_ILLEGAL;
|
||||
}
|
||||
|
@ -73,7 +73,7 @@ static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) {
|
|||
return pToken->type;
|
||||
}
|
||||
|
||||
static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) {
|
||||
static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
|
||||
if (pToken->n == 0) {
|
||||
return TK_ILLEGAL;
|
||||
}
|
||||
|
@ -89,9 +89,9 @@ static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) {
|
|||
return pToken->type;
|
||||
}
|
||||
|
||||
int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
|
||||
int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
|
||||
int32_t index = 0;
|
||||
SSQLToken sToken;
|
||||
SStrToken sToken;
|
||||
int64_t interval;
|
||||
int64_t useconds = 0;
|
||||
char * pTokenEnd = *next;
|
||||
|
@ -128,7 +128,7 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1
|
|||
* time expression:
|
||||
* e.g., now+12a, now-5h
|
||||
*/
|
||||
SSQLToken valueToken;
|
||||
SStrToken valueToken;
|
||||
index = 0;
|
||||
sToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL);
|
||||
pTokenEnd += index;
|
||||
|
@ -163,7 +163,7 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
|
||||
int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
|
||||
int16_t timePrec) {
|
||||
int64_t iv;
|
||||
int32_t numType;
|
||||
|
@ -409,7 +409,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
|
|||
int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, char *error,
|
||||
int16_t timePrec, int32_t *code, char *tmpTokenBuf) {
|
||||
int32_t index = 0;
|
||||
SSQLToken sToken = {0};
|
||||
SStrToken sToken = {0};
|
||||
char * payload = pDataBlocks->pData + pDataBlocks->size;
|
||||
|
||||
// 1. set the parsed value from sql string
|
||||
|
@ -524,7 +524,7 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
|
|||
int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMeta, int maxRows,
|
||||
SParsedDataColInfo *spd, char *error, int32_t *code, char *tmpTokenBuf) {
|
||||
int32_t index = 0;
|
||||
SSQLToken sToken;
|
||||
SStrToken sToken;
|
||||
|
||||
int16_t numOfRows = 0;
|
||||
|
||||
|
@ -734,8 +734,8 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
|
|||
|
||||
static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
|
||||
int32_t index = 0;
|
||||
SSQLToken sToken = {0};
|
||||
SSQLToken tableToken = {0};
|
||||
SStrToken sToken = {0};
|
||||
SStrToken tableToken = {0};
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
const int32_t TABLE_INDEX = 0;
|
||||
|
@ -993,7 +993,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
|
|||
return code;
|
||||
}
|
||||
|
||||
int validateTableName(char *tblName, int len, SSQLToken* psTblToken) {
|
||||
int validateTableName(char *tblName, int len, SStrToken* psTblToken) {
|
||||
tstrncpy(psTblToken->z, tblName, TSDB_TABLE_FNAME_LEN);
|
||||
|
||||
psTblToken->n = len;
|
||||
|
@ -1031,11 +1031,11 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
|
||||
assert(pQueryInfo != NULL);
|
||||
|
||||
STableMetaInfo *pTableMetaInfo = NULL;
|
||||
if (pQueryInfo->numOfTables == 0) {
|
||||
pTableMetaInfo = tscAddEmptyMetaInfo(pQueryInfo);
|
||||
} else {
|
||||
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
STableMetaInfo *pTableMetaInfo = (pQueryInfo->numOfTables == 0)? tscAddEmptyMetaInfo(pQueryInfo):tscGetMetaInfo(pQueryInfo, 0);
|
||||
if (pTableMetaInfo == NULL) {
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
code = terrno;
|
||||
return code;
|
||||
}
|
||||
|
||||
if ((code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) {
|
||||
|
@ -1057,7 +1057,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
|
||||
while (1) {
|
||||
int32_t index = 0;
|
||||
SSQLToken sToken = tStrGetToken(str, &index, false, 0, NULL);
|
||||
SStrToken sToken = tStrGetToken(str, &index, false, 0, NULL);
|
||||
|
||||
// no data in the sql string anymore.
|
||||
if (sToken.n == 0) {
|
||||
|
@ -1083,7 +1083,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
|
||||
pCmd->curSql = sToken.z;
|
||||
char buf[TSDB_TABLE_FNAME_LEN];
|
||||
SSQLToken sTblToken;
|
||||
SStrToken sTblToken;
|
||||
sTblToken.z = buf;
|
||||
// Check if the table name available or not
|
||||
if (validateTableName(sToken.z, sToken.n, &sTblToken) != TSDB_CODE_SUCCESS) {
|
||||
|
@ -1285,15 +1285,14 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
|
|||
int32_t index = 0;
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
|
||||
SSQLToken sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
|
||||
SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
|
||||
assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT);
|
||||
|
||||
pCmd->count = 0;
|
||||
pCmd->command = TSDB_SQL_INSERT;
|
||||
pSql->res.numOfRows = 0;
|
||||
|
||||
SQueryInfo *pQueryInfo = NULL;
|
||||
tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
|
||||
|
||||
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT | pCmd->insertType);
|
||||
|
||||
|
|
|
@ -155,7 +155,7 @@ static int normalStmtPrepare(STscStmt* stmt) {
|
|||
uint32_t i = 0, start = 0;
|
||||
|
||||
while (sql[i] != 0) {
|
||||
SSQLToken token = {0};
|
||||
SStrToken token = {0};
|
||||
token.n = tSQLGetToken(sql + i, &token.type);
|
||||
|
||||
if (token.type == TK_QUESTION) {
|
||||
|
|
|
@ -285,9 +285,9 @@ void tscKillConnection(STscObj *pObj) {
|
|||
|
||||
SSqlObj *pSql = pObj->sqlList;
|
||||
while (pSql) {
|
||||
//taosStopRpcConn(pSql->thandle);
|
||||
pSql = pSql->next;
|
||||
}
|
||||
|
||||
|
||||
SSqlStream *pStream = pObj->streamList;
|
||||
while (pStream) {
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -43,6 +43,14 @@ void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts);
|
|||
void tscSaveSubscriptionProgress(void* sub);
|
||||
|
||||
static int32_t minMsgSize() { return tsRpcHeadSize + 100; }
|
||||
static int32_t getWaitingTimeInterval(int32_t count) {
|
||||
int32_t initial = 100; // 100 ms by default
|
||||
if (count <= 1) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return initial * (2<<(count - 2));
|
||||
}
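getWaitingTimeInterval returns the number of milliseconds to sleep before the next retry (see the taosMsleep call further down) and doubles after the first attempt; a small shell sketch, for illustration only and not part of the patch, shows the progression:

for count in 1 2 3 4 5; do
  if [ "$count" -le 1 ]; then
    echo "retry $count: wait 0 ms"
  else
    echo "retry $count: wait $((100 * (2 << (count - 2)))) ms"
  fi
done
# prints 0, 200, 400, 800 and 1600 ms
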
|
||||
|
||||
static void tscSetDnodeEpSet(SSqlObj* pSql, SCMVgroupInfo* pVgroupInfo) {
|
||||
assert(pSql != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0);
|
||||
|
@ -117,7 +125,7 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) {
|
|||
pVgroupInfo->inUse = pEpSet->inUse;
|
||||
pVgroupInfo->numOfEps = pEpSet->numOfEps;
|
||||
for (int32_t i = 0; i < pVgroupInfo->numOfEps; i++) {
|
||||
tstrncpy(pVgroupInfo->epAddr[i].fqdn, pEpSet->fqdn[i], sizeof(pEpSet->fqdn[i]));
|
||||
tstrncpy(pVgroupInfo->epAddr[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN);
|
||||
pVgroupInfo->epAddr[i].port = pEpSet->port[i];
|
||||
}
|
||||
tscDebug("after: EndPoint in use: %d", pVgroupInfo->inUse);
|
||||
|
@ -218,13 +226,17 @@ int tscSendMsgToServer(SSqlObj *pSql) {
|
|||
.handle = &pSql->pRpcCtx,
|
||||
.code = 0
|
||||
};
|
||||
|
||||
// NOTE: the rpc context should be acquired before sending data to server.
|
||||
// Otherwise, the pSql object may have been released already during the response function, which is
|
||||
// processMsgFromServer function. In the meanwhile, the assignment of the rpc context to sql object will absolutely
|
||||
// cause crash.
|
||||
rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
if (pObj != NULL && pObj->signature == pObj) {
|
||||
rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
//pObj->signature has been reset by other thread, ignore concurrency problem
|
||||
return TSDB_CODE_TSC_CONN_KILLED;
|
||||
}
|
||||
}
|
||||
|
||||
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
|
||||
|
@ -275,6 +287,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
|
|||
(rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID ||
|
||||
rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID ||
|
||||
rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL ||
|
||||
rpcMsg->code == TSDB_CODE_APP_NOT_READY ||
|
||||
rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE)) {
|
||||
tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), ++pSql->retry);
|
||||
|
||||
|
@ -287,6 +300,12 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
|
|||
if (pSql->retry > pSql->maxRetry) {
|
||||
tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
|
||||
} else {
|
||||
// wait for a little bit moment and then retry
|
||||
if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
|
||||
int32_t duration = getWaitingTimeInterval(pSql->retry);
|
||||
taosMsleep(duration);
|
||||
}
|
||||
|
||||
rpcMsg->code = tscRenewTableMeta(pSql, pTableMetaInfo->name);
|
||||
|
||||
// if there is an error occurring, proceed to the following error handling procedure.
|
||||
|
@ -299,10 +318,10 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
|
|||
|
||||
pRes->rspLen = 0;
|
||||
|
||||
if (pRes->code != TSDB_CODE_TSC_QUERY_CANCELLED) {
|
||||
pRes->code = (rpcMsg->code != TSDB_CODE_SUCCESS) ? rpcMsg->code : TSDB_CODE_RPC_NETWORK_UNAVAIL;
|
||||
} else {
|
||||
if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
|
||||
tscDebug("%p query is cancelled, code:%s", pSql, tstrerror(pRes->code));
|
||||
} else {
|
||||
pRes->code = rpcMsg->code;
|
||||
}
|
||||
|
||||
if (pRes->code == TSDB_CODE_SUCCESS) {
|
||||
|
@ -439,35 +458,21 @@ void tscKillSTableQuery(SSqlObj *pSql) {
|
|||
return;
|
||||
}
|
||||
|
||||
pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
|
||||
|
||||
for (int i = 0; i < pSql->numOfSubs; ++i) {
|
||||
// NOTE: pSub may have been released already here
|
||||
SSqlObj *pSub = pSql->pSubs[i];
|
||||
if (pSub == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* here, we cannot set the command = TSDB_SQL_KILL_QUERY. Otherwise, it may cause
|
||||
* sub-queries not correctly released and master sql object of super table query reaches an abnormal state.
|
||||
*/
|
||||
rpcCancelRequest(pSub->pRpcCtx);
|
||||
pSub->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
|
||||
tscQueueAsyncRes(pSub);
|
||||
}
|
||||
|
||||
/*
|
||||
* 1. if the subqueries are not launched or partially launched, we need to waiting the launched
|
||||
* query return to successfully free allocated resources.
|
||||
* 2. if no any subqueries are launched yet, which means the super table query only in parse sql stage,
|
||||
* set the res.code, and return.
|
||||
*/
|
||||
const int64_t MAX_WAITING_TIME = 10000; // 10 Sec.
|
||||
int64_t stime = taosGetTimestampMs();
|
||||
|
||||
while (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command != TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
|
||||
taosMsleep(100);
|
||||
if (taosGetTimestampMs() - stime > MAX_WAITING_TIME) {
|
||||
break;
|
||||
if (pSub->pRpcCtx != NULL) {
|
||||
rpcCancelRequest(pSub->pRpcCtx);
|
||||
}
|
||||
|
||||
tscQueueAsyncRes(pSub); // async res? not other functions?
|
||||
}
|
||||
|
||||
tscDebug("%p super table query cancelled", pSql);
|
||||
|
@ -623,26 +628,29 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
|
||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
|
||||
tscError("%p failed to malloc for query msg", pSql);
|
||||
return -1; // todo add test for this
|
||||
return TSDB_CODE_TSC_INVALID_SQL; // todo add test for this
|
||||
}
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
|
||||
if (taosArrayGetSize(pQueryInfo->colList) <= 0 && !tscQueryTags(pQueryInfo)) {
|
||||
tscError("%p illegal value of numOfCols in query msg: %d", pSql, tscGetNumOfColumns(pTableMeta));
|
||||
return -1;
|
||||
|
||||
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
|
||||
if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo)) {
|
||||
tscError("%p illegal value of numOfCols in query msg: %"PRIu64", table cols:%d", pSql, numOfSrcCols,
|
||||
tscGetNumOfColumns(pTableMeta));
|
||||
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
if (pQueryInfo->intervalTime < 0) {
|
||||
tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->intervalTime);
|
||||
return -1;
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
if (pQueryInfo->groupbyExpr.numOfGroupCols < 0) {
|
||||
tscError("%p illegal value of numOfGroupCols in query msg: %d", pSql, pQueryInfo->groupbyExpr.numOfGroupCols);
|
||||
return -1;
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload;
|
||||
|
@ -708,7 +716,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
|
||||
if (pColFilter->filterstr) {
|
||||
pFilterMsg->len = htobe64(pColFilter->len);
|
||||
memcpy(pMsg, (void *)pColFilter->pz, pColFilter->len + 1);
|
||||
memcpy(pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1));
|
||||
pMsg += (pColFilter->len + 1); // append the additional filter binary info
|
||||
} else {
|
||||
pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi);
|
||||
|
@ -720,7 +728,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
|
||||
if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) {
|
||||
tscError("invalid filter info");
|
||||
return -1;
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -729,10 +737,10 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) {
|
||||
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
|
||||
|
||||
if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId)) {
|
||||
if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) {
|
||||
/* column id is not valid according to the cached table meta, the table meta is expired */
|
||||
tscError("%p table schema is not matched with parsed sql", pSql);
|
||||
return -1;
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
pSqlFuncExpr->colInfo.colId = htons(pExpr->colInfo.colId);
|
||||
|
@ -929,8 +937,8 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
|
||||
SCMCreateAcctMsg *pAlterMsg = (SCMCreateAcctMsg *)pCmd->payload;
|
||||
|
||||
SSQLToken *pName = &pInfo->pDCLInfo->user.user;
|
||||
SSQLToken *pPwd = &pInfo->pDCLInfo->user.passwd;
|
||||
SStrToken *pName = &pInfo->pDCLInfo->user.user;
|
||||
SStrToken *pPwd = &pInfo->pDCLInfo->user.passwd;
|
||||
|
||||
strncpy(pAlterMsg->user, pName->z, pName->n);
|
||||
strncpy(pAlterMsg->pass, pPwd->z, pPwd->n);
|
||||
|
@ -1132,13 +1140,13 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
pShowMsg->type = pShowInfo->showType;
|
||||
|
||||
if (pShowInfo->showType != TSDB_MGMT_TABLE_VNODES) {
|
||||
SSQLToken *pPattern = &pShowInfo->pattern;
|
||||
SStrToken *pPattern = &pShowInfo->pattern;
|
||||
if (pPattern->type > 0) { // only show tables support wildcard query
|
||||
strncpy(pShowMsg->payload, pPattern->z, pPattern->n);
|
||||
pShowMsg->payloadLen = htons(pPattern->n);
|
||||
}
|
||||
} else {
|
||||
SSQLToken *pEpAddr = &pShowInfo->prefix;
|
||||
SStrToken *pEpAddr = &pShowInfo->prefix;
|
||||
assert(pEpAddr->n > 0 && pEpAddr->type > 0);
|
||||
|
||||
strncpy(pShowMsg->payload, pEpAddr->z, pEpAddr->n);
|
||||
|
@ -1280,7 +1288,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
int size = tscEstimateAlterTableMsgLength(pCmd);
|
||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
|
||||
tscError("%p failed to malloc for alter table msg", pSql);
|
||||
return -1;
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
SCMAlterTableMsg *pAlterTableMsg = (SCMAlterTableMsg *)pCmd->payload;
|
||||
|
@ -1428,6 +1436,12 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
|
|||
SSqlRes *pRes = &pSql->res;
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
|
||||
int32_t code = pRes->code;
|
||||
if (pRes->code != TSDB_CODE_SUCCESS) {
|
||||
tscQueueAsyncRes(pSql);
|
||||
return code;
|
||||
}
|
||||
|
||||
pRes->code = tscDoLocalMerge(pSql);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
|
||||
|
@ -1438,7 +1452,7 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
|
|||
pRes->row = 0;
|
||||
pRes->completed = (pRes->numOfRows == 0);
|
||||
|
||||
int32_t code = pRes->code;
|
||||
code = pRes->code;
|
||||
if (pRes->code == TSDB_CODE_SUCCESS) {
|
||||
(*pSql->fp)(pSql->param, pSql, pRes->numOfRows);
|
||||
} else {
|
||||
|
@ -1632,7 +1646,7 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
|
||||
pthread_mutex_unlock(&pObj->mutex);
|
||||
tscError("%p failed to malloc for heartbeat msg", pSql);
|
||||
return -1;
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
SCMHeartBeatMsg *pHeartbeat = (SCMHeartBeatMsg *)pCmd->payload;
|
||||
|
@ -1702,7 +1716,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
|
|||
assert(pTableMetaInfo->pTableMeta == NULL);
|
||||
|
||||
pTableMetaInfo->pTableMeta = (STableMeta *) taosCachePut(tscCacheHandle, pTableMetaInfo->name,
|
||||
strlen(pTableMetaInfo->name), pTableMeta, size, tsTableMetaKeepTimer);
|
||||
strlen(pTableMetaInfo->name), pTableMeta, size, tsTableMetaKeepTimer * 1000);
|
||||
|
||||
// todo handle out of memory case
|
||||
if (pTableMetaInfo->pTableMeta == NULL) {
|
||||
|
@ -1906,7 +1920,7 @@ int tscProcessShowRsp(SSqlObj *pSql) {
|
|||
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg, &size);
|
||||
|
||||
pTableMetaInfo->pTableMeta = taosCachePut(tscCacheHandle, key, strlen(key), (char *)pTableMeta, size,
|
||||
tsTableMetaKeepTimer);
|
||||
tsTableMetaKeepTimer * 1000);
|
||||
SSchema *pTableSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
|
||||
|
||||
if (pQueryInfo->colList == NULL) {
|
||||
|
@ -1946,8 +1960,12 @@ static void createHBObj(STscObj* pObj) {
|
|||
|
||||
pSql->fp = tscProcessHeartBeatRsp;
|
||||
|
||||
SQueryInfo *pQueryInfo = NULL;
|
||||
tscGetQueryInfoDetailSafely(&pSql->cmd, 0, &pQueryInfo);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0);
|
||||
if (pQueryInfo == NULL) {
|
||||
pSql->res.code = terrno;
|
||||
return;
|
||||
}
|
||||
|
||||
pQueryInfo->command = TSDB_SQL_HB;
|
||||
|
||||
pSql->cmd.command = pQueryInfo->command;
|
||||
|
@ -2132,8 +2150,7 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf
|
|||
|
||||
tscAddSubqueryInfo(&pNew->cmd);
|
||||
|
||||
SQueryInfo *pNewQueryInfo = NULL;
|
||||
tscGetQueryInfoDetailSafely(&pNew->cmd, 0, &pNewQueryInfo);
|
||||
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0);
|
||||
|
||||
pNew->cmd.autoCreated = pSql->cmd.autoCreated; // create table if not exists
|
||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) {
|
||||
|
@ -2236,8 +2253,8 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
|
|||
|
||||
pNew->cmd.command = TSDB_SQL_STABLEVGROUP;
|
||||
|
||||
SQueryInfo *pNewQueryInfo = NULL;
|
||||
if ((code = tscGetQueryInfoDetailSafely(&pNew->cmd, 0, &pNewQueryInfo)) != TSDB_CODE_SUCCESS) {
|
||||
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0);
|
||||
if (pNewQueryInfo == NULL) {
|
||||
tscFreeSqlObj(pNew);
|
||||
return code;
|
||||
}
@@ -201,7 +201,7 @@ TAOS *taos_connect_internal(const char *ip, const char *user, const char *pass,
}

TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) {
  tscDebug("try to create a connection to %s:%u, user:%s db:%s", ip, port, user, db);
  tscDebug("try to create a connection to %s:%u, user:%s db:%s", ip, port != 0 ? port : tsServerPort , user, db);
  if (user == NULL) user = TSDB_DEFAULT_USER;
  if (pass == NULL) pass = TSDB_DEFAULT_PASS;

@@ -655,27 +655,30 @@ int* taos_fetch_lengths(TAOS_RES *res) {
char *taos_get_client_info() { return version; }

void taos_stop_query(TAOS_RES *res) {
  if (res == NULL) {
  SSqlObj *pSql = (SSqlObj *)res;
  if (pSql == NULL || pSql->signature != pSql) {
    return;
  }

  SSqlObj *pSql = (SSqlObj *)res;
  tscDebug("%p start to cancel query", res);
  SSqlCmd *pCmd = &pSql->cmd;

  if (pSql->signature != pSql) return;
  tscDebug("%p start to cancel query", res);


  // TODO there are multi-thread problem.
  // It may have been released by the other thread already.
  // The ref count may fix this problem.
  SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
  if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
    tscKillSTableQuery(pSql);
  }

  if (pSql->cmd.command < TSDB_SQL_LOCAL) {
    rpcCancelRequest(pSql->pRpcCtx);
  }
  // set the error code for master pSqlObj firstly
  pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
  tscQueueAsyncRes(pSql);

  if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
    assert(pSql->pRpcCtx == NULL);
    tscKillSTableQuery(pSql);
  } else {
    if (pSql->cmd.command < TSDB_SQL_LOCAL) {
      rpcCancelRequest(pSql->pRpcCtx);
    }
  }

  tscDebug("%p query is cancelled", res);
}

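For orientation on the rewritten cancellation path above, a minimal client-side sketch of the call shape, assuming the 2.x client API used in this tree (taos_query returning a TAOS_RES*); the host, credentials, and table name are placeholders, and in practice cancellation targets a query still in flight rather than one that has already returned:

#include "taos.h"

TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "db", 0);
if (conn != NULL) {
  TAOS_RES *res = taos_query(conn, "select count(*) from super_tb");  // hypothetical table

  // The new taos_stop_query() validates the SSqlObj signature first, marks the
  // master object with TSDB_CODE_TSC_QUERY_CANCELLED, then either kills the
  // super-table subqueries or cancels the pending RPC request.
  taos_stop_query(res);

  taos_free_result(res);
  taos_close(conn);
}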
@ -824,8 +827,11 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
|
|||
int code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
|
||||
char *str = (char *)tblNameList;
|
||||
|
||||
SQueryInfo *pQueryInfo = NULL;
|
||||
tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
|
||||
if (pQueryInfo == NULL) {
|
||||
pSql->res.code = terrno;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
STableMetaInfo *pTableMetaInfo = tscAddEmptyMetaInfo(pQueryInfo);
|
||||
|
||||
|
@ -850,7 +856,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
|
|||
str = nextStr + 1;
|
||||
len = (int32_t)strtrim(tblName);
|
||||
|
||||
SSQLToken sToken = {.n = len, .type = TK_ID, .z = tblName};
|
||||
SStrToken sToken = {.n = len, .type = TK_ID, .z = tblName};
|
||||
tSQLGetToken(tblName, &sToken.type);
|
||||
|
||||
// Check if the table name available or not
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#define _GNU_SOURCE
|
||||
|
||||
#include "os.h"
|
||||
|
||||
#include "qAst.h"
|
||||
|
@ -93,13 +95,14 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
|
|||
tscInfo("%" PRId64 ", tags:%d \t %" PRId64 ", tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag);
|
||||
#endif
|
||||
|
||||
if (elem1.tag < elem2.tag || (elem1.tag == elem2.tag && tsCompare(order, elem1.ts, elem2.ts))) {
|
||||
int32_t res = tVariantCompare(&elem1.tag, &elem2.tag);
|
||||
if (res == -1 || (res == 0 && tsCompare(order, elem1.ts, elem2.ts))) {
|
||||
if (!tsBufNextPos(pSupporter1->pTSBuf)) {
|
||||
break;
|
||||
}
|
||||
|
||||
numOfInput1++;
|
||||
} else if (elem1.tag > elem2.tag || (elem1.tag == elem2.tag && tsCompare(order, elem2.ts, elem1.ts))) {
|
||||
} else if ((res > 0) || (res == 0 && tsCompare(order, elem2.ts, elem1.ts))) {
|
||||
if (!tsBufNextPos(pSupporter2->pTSBuf)) {
|
||||
break;
|
||||
}
|
||||
|
@ -119,8 +122,8 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
|
|||
win->ekey = elem1.ts;
|
||||
}
|
||||
|
||||
tsBufAppend(output1, elem1.vnode, elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts));
|
||||
tsBufAppend(output2, elem2.vnode, elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts));
|
||||
tsBufAppend(output1, elem1.vnode, &elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts));
|
||||
tsBufAppend(output2, elem2.vnode, &elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts));
|
||||
} else {
|
||||
pLimit->offset -= 1;
|
||||
}
|
||||
|
@ -352,11 +355,12 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
|
|||
pExpr = tscSqlExprGet(pQueryInfo, 0);
|
||||
}
|
||||
|
||||
// set the join condition tag column info, to do extract method
|
||||
// set the join condition tag column info, todo extract method
|
||||
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
|
||||
assert(pQueryInfo->tagCond.joinInfo.hasJoin);
|
||||
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
|
||||
|
||||
// set the tag column id for executor to extract correct tag value
|
||||
pExpr->param[0].i64Key = colId;
|
||||
pExpr->numOfParams = 1;
|
||||
}
|
||||
|
@ -433,6 +437,7 @@ int32_t tscCompareTidTags(const void* p1, const void* p2) {
|
|||
if (t1->vgId != t2->vgId) {
|
||||
return (t1->vgId > t2->vgId) ? 1 : -1;
|
||||
}
|
||||
|
||||
if (t1->tid != t2->tid) {
|
||||
return (t1->tid > t2->tid) ? 1 : -1;
|
||||
}
|
||||
|
@ -539,6 +544,7 @@ static bool checkForDuplicateTagVal(SQueryInfo* pQueryInfo, SJoinSupporter* p1,
|
|||
for(int32_t i = 1; i < p1->num; ++i) {
|
||||
STidTags* prev = (STidTags*) varDataVal(p1->pIdTagList + (i - 1) * p1->tagSize);
|
||||
STidTags* p = (STidTags*) varDataVal(p1->pIdTagList + i * p1->tagSize);
|
||||
assert(prev->vgId >= 1 && p->vgId >= 1);
|
||||
|
||||
if (doCompare(prev->tag, p->tag, pColSchema->type, pColSchema->bytes) == 0) {
|
||||
tscError("%p join tags have same value for different table, free all sub SqlObj and quit", pPSqlObj);
|
||||
|
@ -575,6 +581,7 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
|
|||
while(i < p1->num && j < p2->num) {
|
||||
STidTags* pp1 = (STidTags*) varDataVal(p1->pIdTagList + i * p1->tagSize);
|
||||
STidTags* pp2 = (STidTags*) varDataVal(p2->pIdTagList + j * p2->tagSize);
|
||||
assert(pp1->tid != 0 && pp2->tid != 0);
|
||||
|
||||
int32_t ret = doCompare(pp1->tag, pp2->tag, pColSchema->type, pColSchema->bytes);
|
||||
if (ret == 0) {
|
||||
|
@ -623,7 +630,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
|
||||
// keep the results in memory
|
||||
if (numOfRows > 0) {
|
||||
size_t validLen = pSupporter->tagSize * pRes->numOfRows;
|
||||
size_t validLen = (size_t)(pSupporter->tagSize * pRes->numOfRows);
|
||||
size_t length = pSupporter->totalLen + validLen;
|
||||
|
||||
// todo handle memory error
|
||||
|
@ -684,6 +691,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
freeJoinSubqueryObj(pParentSql);
|
||||
pParentSql->res.code = code;
|
||||
tscQueueAsyncRes(pParentSql);
|
||||
|
||||
taosArrayDestroy(s1);
|
||||
taosArrayDestroy(s2);
|
||||
return;
|
||||
|
@ -748,7 +756,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
}
|
||||
|
||||
if (numOfRows > 0) { // write the compressed timestamp to disk file
|
||||
fwrite(pRes->data, pRes->numOfRows, 1, pSupporter->f);
|
||||
fwrite(pRes->data, (size_t)pRes->numOfRows, 1, pSupporter->f);
|
||||
fclose(pSupporter->f);
|
||||
pSupporter->f = NULL;
|
||||
|
||||
|
@ -1143,7 +1151,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
|
|||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
static void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code);
|
||||
|
||||
static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj);
|
||||
static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj);
|
||||
|
||||
int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter *pSupporter) {
|
||||
SSqlCmd * pCmd = &pSql->cmd;
|
||||
|
@ -1216,6 +1224,16 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
|
|||
int32_t tagColId = tscGetJoinTagColIdByUid(pTagCond, pTableMetaInfo->pTableMeta->id.uid);
|
||||
SSchema* s = tscGetTableColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId);
|
||||
|
||||
// get the tag colId column index
|
||||
int32_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta);
|
||||
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
|
||||
for(int32_t i = 0; i < numOfTags; ++i) {
|
||||
if (pSchema[i].colId == tagColId) {
|
||||
index.columnIndex = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int16_t bytes = 0;
|
||||
int16_t type = 0;
|
||||
int32_t inter = 0;
|
||||
|
@ -1285,8 +1303,14 @@ int32_t tscHandleMasterJoinQuery(SSqlObj* pSql) {
|
|||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
assert((pQueryInfo->type & TSDB_QUERY_TYPE_SUBQUERY) == 0);
|
||||
|
||||
|
||||
// todo add test
|
||||
SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
|
||||
if (pState == NULL) {
|
||||
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return pSql->res.code;
|
||||
}
|
||||
|
||||
pState->numOfTotal = pQueryInfo->numOfTables;
|
||||
pState->numOfRemain = pState->numOfTotal;
|
||||
|
||||
|
@ -1300,7 +1324,7 @@ int32_t tscHandleMasterJoinQuery(SSqlObj* pSql) {
|
|||
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
if (0 == i) {
|
||||
taosTFree(pState);
|
||||
}
|
||||
}
|
||||
return pSql->res.code;
|
||||
}
|
||||
|
||||
|
@ -1330,10 +1354,6 @@ static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs, SSubqueryState
|
|||
SRetrieveSupport* pSupport = pSub->param;
|
||||
|
||||
taosTFree(pSupport->localBuffer);
|
||||
|
||||
pthread_mutex_unlock(&pSupport->queryMutex);
|
||||
pthread_mutex_destroy(&pSupport->queryMutex);
|
||||
|
||||
taosTFree(pSupport);
|
||||
|
||||
tscFreeSqlObj(pSub);
|
||||
|
@ -1406,14 +1426,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
|
|||
trs->pParentSql = pSql;
|
||||
trs->pFinalColModel = pModel;
|
||||
|
||||
pthread_mutexattr_t mutexattr;
|
||||
memset(&mutexattr, 0, sizeof(pthread_mutexattr_t));
|
||||
|
||||
pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_RECURSIVE_NP);
|
||||
pthread_mutex_init(&trs->queryMutex, &mutexattr);
|
||||
pthread_mutexattr_destroy(&mutexattr);
|
||||
|
||||
SSqlObj *pNew = tscCreateSqlObjForSubquery(pSql, trs, NULL);
|
||||
SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL);
|
||||
if (pNew == NULL) {
|
||||
tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
|
||||
taosTFree(trs->localBuffer);
|
||||
|
@ -1458,15 +1471,16 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
|
|||
}
|
||||
|
||||
static void tscFreeSubSqlObj(SRetrieveSupport *trsupport, SSqlObj *pSql) {
|
||||
tscDebug("%p start to free subquery result", pSql);
|
||||
|
||||
tscDebug("%p start to free subquery obj", pSql);
|
||||
|
||||
int32_t index = trsupport->subqueryIndex;
|
||||
SSqlObj *pParentSql = trsupport->pParentSql;
|
||||
|
||||
assert(pSql == pParentSql->pSubs[index]);
|
||||
pParentSql->pSubs[index] = NULL;
|
||||
|
||||
taos_free_result(pSql);
|
||||
|
||||
taosTFree(trsupport->localBuffer);
|
||||
|
||||
pthread_mutex_unlock(&trsupport->queryMutex);
|
||||
pthread_mutex_destroy(&trsupport->queryMutex);
|
||||
|
||||
taosTFree(trsupport);
|
||||
}
|
||||
|
||||
|
@ -1475,23 +1489,11 @@ static void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, i
|
|||
|
||||
static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES *tres, int32_t code) {
|
||||
// set no disk space error info
|
||||
#ifdef WINDOWS
|
||||
LPVOID lpMsgBuf;
|
||||
FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL,
|
||||
GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
|
||||
(LPTSTR)&lpMsgBuf, 0, NULL);
|
||||
tscError("sub:%p failed to flush data to disk:reason:%s", tres, lpMsgBuf);
|
||||
LocalFree(lpMsgBuf);
|
||||
#else
|
||||
tscError("sub:%p failed to flush data to disk, reason:%s", tres, tstrerror(code));
|
||||
#endif
|
||||
|
||||
SSqlObj* pParentSql = trsupport->pParentSql;
|
||||
|
||||
pParentSql->res.code = code;
|
||||
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
|
||||
|
||||
pthread_mutex_unlock(&trsupport->queryMutex);
|
||||
tscHandleSubqueryError(trsupport, tres, pParentSql->res.code);
|
||||
}
|
||||
|
||||
|
@ -1510,13 +1512,10 @@ static int32_t tscReissueSubquery(SRetrieveSupport *trsupport, SSqlObj *pSql, in
|
|||
|
||||
// clear local saved number of results
|
||||
trsupport->localBuffer->num = 0;
|
||||
pthread_mutex_unlock(&trsupport->queryMutex);
|
||||
|
||||
tscTrace("%p sub:%p retrieve failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql, pSql,
|
||||
tscError("%p sub:%p retrieve/query failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql, pSql,
|
||||
tstrerror(code), subqueryIndex, trsupport->numOfRetry);
|
||||
|
||||
SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSql, trsupport, pSql);
|
||||
|
||||
SSqlObj *pNew = tscCreateSTableSubquery(trsupport->pParentSql, trsupport, pSql);
|
||||
if (pNew == NULL) {
|
||||
tscError("%p sub:%p failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d",
|
||||
trsupport->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, trsupport->subqueryIndex);
|
||||
|
@ -1527,8 +1526,15 @@ static int32_t tscReissueSubquery(SRetrieveSupport *trsupport, SSqlObj *pSql, in
|
|||
return pParentSql->res.code;
|
||||
}
|
||||
|
||||
taos_free_result(pSql);
|
||||
return tscProcessSql(pNew);
|
||||
int32_t ret = tscProcessSql(pNew);
|
||||
|
||||
// if failed to process sql, let following code handle the pSql
|
||||
if (ret == TSDB_CODE_SUCCESS) {
|
||||
taos_free_result(pSql);
|
||||
return ret;
|
||||
} else {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numOfRows) {
|
||||
|
@ -1548,14 +1554,14 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
|
|||
*/
|
||||
pSql->res.numOfRows = 0;
|
||||
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; // disable retry efforts
|
||||
tscDebug("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%d", pParentSql, pSql,
|
||||
subqueryIndex, pParentSql->res.code);
|
||||
tscDebug("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%s", pParentSql, pSql,
|
||||
subqueryIndex, tstrerror(pParentSql->res.code));
|
||||
}
|
||||
|
||||
if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query.
|
||||
tscDebug("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pParentSql, pSql, numOfRows, subqueryIndex);
|
||||
tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pParentSql, pSql,
|
||||
subqueryIndex, pParentSql->res.code);
|
||||
tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%s", pParentSql, pSql,
|
||||
subqueryIndex, tstrerror(pParentSql->res.code));
|
||||
} else {
|
||||
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pParentSql->res.code == TSDB_CODE_SUCCESS) {
|
||||
if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) {
|
||||
|
@ -1587,10 +1593,10 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
|
|||
|
||||
taosTFree(trsupport->pState);
|
||||
tscFreeSubSqlObj(trsupport, pSql);
|
||||
|
||||
|
||||
// in case of second stage join subquery, invoke its callback function instead of regular QueueAsyncRes
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0);
|
||||
|
||||
|
||||
if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
|
||||
(*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code);
|
||||
} else { // regular super table query
|
||||
|
@ -1669,7 +1675,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
|
|||
// only free once
|
||||
taosTFree(trsupport->pState);
|
||||
tscFreeSubSqlObj(trsupport, pSql);
|
||||
|
||||
|
||||
// set the command flag must be after the semaphore been correctly set.
|
||||
pParentSql->cmd.command = TSDB_SQL_RETRIEVE_LOCALMERGE;
|
||||
if (pParentSql->res.code == TSDB_CODE_SUCCESS) {
|
||||
|
@ -1685,24 +1691,22 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
|
|||
int32_t idx = trsupport->subqueryIndex;
|
||||
SSqlObj * pParentSql = trsupport->pParentSql;
|
||||
|
||||
assert(tres != NULL);
|
||||
SSqlObj *pSql = (SSqlObj *)tres;
|
||||
if (pSql == NULL) { // sql object has been released in error process, return immediately
|
||||
tscDebug("%p subquery has been released, idx:%d, abort", pParentSql, idx);
|
||||
return;
|
||||
}
|
||||
|
||||
// if (pSql == NULL) { // sql object has been released in error process, return immediately
|
||||
// tscDebug("%p subquery has been released, idx:%d, abort", pParentSql, idx);
|
||||
// return;
|
||||
// }
|
||||
|
||||
SSubqueryState* pState = trsupport->pState;
|
||||
assert(pState->numOfRemain <= pState->numOfTotal && pState->numOfRemain >= 0 && pParentSql->numOfSubs == pState->numOfTotal);
|
||||
|
||||
// query process and cancel query process may execute at the same time
|
||||
pthread_mutex_lock(&trsupport->queryMutex);
|
||||
|
||||
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
|
||||
SCMVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
|
||||
SCMVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
|
||||
|
||||
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
|
||||
tscTrace("%p query cancelled or failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s",
|
||||
tscDebug("%p query cancelled/failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s",
|
||||
pParentSql, pSql, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(numOfRows), tstrerror(pParentSql->res.code));
|
||||
|
||||
tscHandleSubqueryError(param, tres, numOfRows);
|
||||
|
@ -1713,13 +1717,13 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
|
|||
assert(numOfRows == taos_errno(pSql));
|
||||
|
||||
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
|
||||
tscTrace("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry);
|
||||
tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry);
|
||||
|
||||
if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) {
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
tscTrace("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(numOfRows));
|
||||
tscDebug("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(numOfRows));
|
||||
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, numOfRows); // set global code and abort
|
||||
}
|
||||
|
||||
|
@ -1764,13 +1768,9 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
|
|||
(int32_t)pRes->numOfRows, pQueryInfo->groupbyExpr.orderType);
|
||||
if (ret != 0) { // set no disk space error info, and abort retry
|
||||
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_NO_DISKSPACE);
|
||||
|
||||
} else if (pRes->completed) {
|
||||
tscAllDataRetrievedFromDnode(trsupport, pSql);
|
||||
return;
|
||||
|
||||
} else { // continue fetch data from dnode
|
||||
pthread_mutex_unlock(&trsupport->queryMutex);
|
||||
taos_fetch_rows_a(tres, tscRetrieveFromDnodeCallBack, param);
|
||||
}
|
||||
|
||||
|
@ -1779,15 +1779,15 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
|
|||
}
|
||||
}
|
||||
|
||||
static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj) {
|
||||
static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj) {
|
||||
const int32_t table_index = 0;
|
||||
|
||||
SSqlObj *pNew = createSubqueryObj(pSql, table_index, tscRetrieveDataRes, trsupport, TSDB_SQL_SELECT, prevSqlObj);
|
||||
if (pNew != NULL) { // the sub query of two-stage super table query
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
|
||||
|
||||
pQueryInfo->type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY;
|
||||
|
||||
assert(pQueryInfo->numOfTables == 1 && pNew->cmd.numOfClause == 1);
|
||||
assert(pQueryInfo->numOfTables == 1 && pNew->cmd.numOfClause == 1 && trsupport->subqueryIndex < pSql->numOfSubs);
|
||||
|
||||
// launch subquery for each vnode, so the subquery index equals to the vgroupIndex.
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, table_index);
|
||||
|
@ -1804,7 +1804,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
|
|||
|
||||
SSqlObj* pParentSql = trsupport->pParentSql;
|
||||
SSqlObj* pSql = (SSqlObj *) tres;
|
||||
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
assert(pSql->cmd.numOfClause == 1 && pQueryInfo->numOfTables == 1);
|
||||
|
||||
|
@ -1814,7 +1814,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
|
|||
// stable query killed or other subquery failed, all query stopped
|
||||
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
|
||||
tscTrace("%p query cancelled or failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s",
|
||||
tscError("%p query cancelled or failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s",
|
||||
pParentSql, pSql, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(code), tstrerror(pParentSql->res.code));
|
||||
|
||||
tscHandleSubqueryError(param, tres, code);
|
||||
|
@ -1832,12 +1832,12 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
|
|||
assert(code == taos_errno(pSql));
|
||||
|
||||
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
|
||||
tscTrace("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry);
|
||||
tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry);
|
||||
if (tscReissueSubquery(trsupport, pSql, code) == TSDB_CODE_SUCCESS) {
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
tscTrace("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(code));
|
||||
tscError("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(code));
|
||||
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort
|
||||
}
|
||||
|
||||
|
@ -1845,7 +1845,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
|
|||
return;
|
||||
}
|
||||
|
||||
tscTrace("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql,
|
||||
tscDebug("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql,
|
||||
pVgroup->epAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex);
|
||||
|
||||
if (pSql->res.qhandle == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode
|
||||
|
@ -1924,8 +1924,14 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
|
|||
size_t size = taosArrayGetSize(pCmd->pDataBlocks);
|
||||
assert(size > 0);
|
||||
|
||||
pSql->pSubs = calloc(size, POINTER_BYTES);
|
||||
// the number of already initialized subqueries
|
||||
int32_t numOfSub = 0;
|
||||
|
||||
pSql->numOfSubs = (uint16_t)size;
|
||||
pSql->pSubs = calloc(size, POINTER_BYTES);
|
||||
if (pSql->pSubs == NULL) {
|
||||
goto _error;
|
||||
}
|
||||
|
||||
tscDebug("%p submit data to %" PRIzu " vnode(s)", pSql, size);
|
||||
|
||||
|
@ -1934,10 +1940,13 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
|
|||
pState->numOfRemain = pSql->numOfSubs;
|
||||
|
||||
pRes->code = TSDB_CODE_SUCCESS;
|
||||
int32_t numOfSub = 0;
|
||||
|
||||
while(numOfSub < pSql->numOfSubs) {
|
||||
SInsertSupporter* pSupporter = calloc(1, sizeof(SInsertSupporter));
|
||||
if (pSupporter == NULL) {
|
||||
goto _error;
|
||||
}
|
||||
|
||||
pSupporter->pSql = pSql;
|
||||
pSupporter->pState = pState;
|
||||
pSupporter->index = numOfSub;
|
||||
|
@ -1970,7 +1979,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
|
|||
if (numOfSub < pSql->numOfSubs) {
|
||||
tscError("%p failed to prepare subObj structure and launch sub-insertion", pSql);
|
||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return pRes->code; // free all allocated resource
|
||||
goto _error;
|
||||
}
|
||||
|
||||
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
|
||||
|
@ -2068,46 +2077,8 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
|
|||
|
||||
doBuildResFromSubqueries(pSql);
|
||||
tsem_post(&pSql->rspSem);
|
||||
|
||||
return;
|
||||
|
||||
// continue retrieve data from vnode
|
||||
// if (!tscHasRemainDataInSubqueryResultSet(pSql)) {
|
||||
// tscDebug("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1);
|
||||
// SSubqueryState* pState = NULL;
|
||||
//
|
||||
// // free all sub sqlobj
|
||||
// for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
|
||||
// SSqlObj* pChildObj = pSql->pSubs[i];
|
||||
// if (pChildObj == NULL) {
|
||||
// continue;
|
||||
// }
|
||||
//
|
||||
// SJoinSupporter* pSupporter = (SJoinSupporter*)pChildObj->param;
|
||||
// pState = pSupporter->pState;
|
||||
//
|
||||
// tscDestroyJoinSupporter(pChildObj->param);
|
||||
// taos_free_result(pChildObj);
|
||||
// }
|
||||
//
|
||||
// free(pState);
|
||||
//
|
||||
// pRes->completed = true; // set query completed
|
||||
// tsem_post(&pSql->rspSem);
|
||||
// return;
|
||||
// }
|
||||
|
||||
// tscFetchDatablockFromSubquery(pSql);
|
||||
// if (pRes->code != TSDB_CODE_SUCCESS) {
|
||||
// return;
|
||||
// }
|
||||
}
|
||||
|
||||
// if (pSql->res.code == TSDB_CODE_SUCCESS) {
|
||||
// (*pSql->fp)(pSql->param, pSql, pRes->numOfRows);
|
||||
// } else {
|
||||
// tscQueueAsyncRes(pSql);
|
||||
// }
|
||||
}
|
||||
|
||||
static void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pField) {
|
||||
|
@ -2157,7 +2128,6 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
|
|||
SSqlRes *pRes = &pSql->res;
|
||||
|
||||
assert(pRes->row >= 0 && pRes->row <= pRes->numOfRows);
|
||||
|
||||
if (pRes->row >= pRes->numOfRows) { // all the results has returned to invoker
|
||||
taosTFree(pRes->tsrow);
|
||||
return pRes->tsrow;
|
||||
|
|
|
@ -373,7 +373,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
|
|||
if (pSql == NULL || pSql->signature != pSql) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
tscDebug("%p start to free sql object", pSql);
|
||||
tscPartiallyFreeSqlObj(pSql);
|
||||
|
||||
|
@ -388,6 +388,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
|
|||
|
||||
taosTFree(pSql->sqlstr);
|
||||
tsem_destroy(&pSql->rspSem);
|
||||
|
||||
free(pSql);
|
||||
}
|
||||
|
||||
|
@ -404,7 +405,7 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
|
|||
taosTFree(pDataBlock);
|
||||
}
|
||||
|
||||
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes,
|
||||
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
|
||||
uint32_t offset) {
|
||||
uint32_t needed = pDataBlock->numOfParams + 1;
|
||||
if (needed > pDataBlock->numOfAllocedParams) {
|
||||
|
@ -485,15 +486,6 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
//void tscFreeUnusedDataBlocks(SDataBlockList* pList) {
|
||||
// /* release additional memory consumption */
|
||||
// for (int32_t i = 0; i < pList->nSize; ++i) {
|
||||
// STableDataBlocks* pDataBlock = pList->pData[i];
|
||||
// pDataBlock->pData = realloc(pDataBlock->pData, pDataBlock->size);
|
||||
// pDataBlock->nAllocSize = (uint32_t)pDataBlock->size;
|
||||
// }
|
||||
//}
|
||||
|
||||
/**
|
||||
* create the in-memory buffer for each table to keep the submitted data block
|
||||
* @param initialSize
|
||||
|
@ -518,6 +510,11 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
|
|||
}
|
||||
|
||||
dataBuf->pData = calloc(1, dataBuf->nAllocSize);
|
||||
if (dataBuf->pData == NULL) {
|
||||
tscError("failed to allocated memory, reason:%s", strerror(errno));
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
dataBuf->ordered = true;
|
||||
dataBuf->prevTS = INT64_MIN;
|
||||
|
||||
|
@ -742,7 +739,7 @@ bool tscIsInsertData(char* sqlstr) {
|
|||
int32_t index = 0;
|
||||
|
||||
do {
|
||||
SSQLToken t0 = tStrGetToken(sqlstr, &index, false, 0, NULL);
|
||||
SStrToken t0 = tStrGetToken(sqlstr, &index, false, 0, NULL);
|
||||
if (t0.type != TK_LP) {
|
||||
return t0.type == TK_INSERT || t0.type == TK_IMPORT;
|
||||
}
|
||||
|
@ -926,17 +923,23 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
|
|||
}
|
||||
|
||||
static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
|
||||
int16_t size, int16_t interSize, bool isTagCol) {
|
||||
int16_t size, int16_t interSize, int32_t colType) {
|
||||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);
|
||||
|
||||
SSqlExpr* pExpr = calloc(1, sizeof(SSqlExpr));
|
||||
if (pExpr == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pExpr->functionId = functionId;
|
||||
|
||||
|
||||
// set the correct columnIndex index
|
||||
if (pColIndex->columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
|
||||
pExpr->colInfo.colId = TSDB_TBNAME_COLUMN_INDEX;
|
||||
} else if (pColIndex->columnIndex <= TSDB_UD_COLUMN_INDEX) {
|
||||
pExpr->colInfo.colId = pColIndex->columnIndex;
|
||||
} else {
|
||||
if (isTagCol) {
|
||||
if (TSDB_COL_IS_TAG(colType)) {
|
||||
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
|
||||
pExpr->colInfo.colId = pSchema[pColIndex->columnIndex].colId;
|
||||
tstrncpy(pExpr->colInfo.name, pSchema[pColIndex->columnIndex].name, sizeof(pExpr->colInfo.name));
|
||||
|
@ -948,9 +951,9 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol
|
|||
}
|
||||
}
|
||||
|
||||
pExpr->colInfo.flag = isTagCol? TSDB_COL_TAG:TSDB_COL_NORMAL;
|
||||
|
||||
pExpr->colInfo.flag = colType;
|
||||
pExpr->colInfo.colIndex = pColIndex->columnIndex;
|
||||
|
||||
pExpr->resType = type;
|
||||
pExpr->resBytes = size;
|
||||
pExpr->interBytes = interSize;
|
||||
|
@ -1060,8 +1063,11 @@ void tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy)
|
|||
|
||||
if (deepcopy) {
|
||||
SSqlExpr* p1 = calloc(1, sizeof(SSqlExpr));
|
||||
if (p1 == NULL) {
|
||||
assert(0);
|
||||
}
|
||||
|
||||
*p1 = *pExpr;
|
||||
|
||||
for (int32_t j = 0; j < pExpr->numOfParams; ++j) {
|
||||
tVariantAssign(&p1->param[j], &pExpr->param[j]);
|
||||
}
|
||||
|
@ -1097,16 +1103,22 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
|
|||
|
||||
if (i >= numOfCols || numOfCols == 0) {
|
||||
SColumn* b = calloc(1, sizeof(SColumn));
|
||||
if (b == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
b->colIndex = *pColIndex;
|
||||
|
||||
taosArrayInsert(pColumnList, i, &b);
|
||||
} else {
|
||||
SColumn* pCol = taosArrayGetP(pColumnList, i);
|
||||
|
||||
if (i < numOfCols && (pCol->colIndex.columnIndex > col || pCol->colIndex.tableIndex != pColIndex->tableIndex)) {
|
||||
SColumn* b = calloc(1, sizeof(SColumn));
|
||||
if (b == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
b->colIndex = *pColIndex;
|
||||
|
||||
taosArrayInsert(pColumnList, i, &b);
|
||||
}
|
||||
}
|
||||
|
@ -1128,7 +1140,10 @@ SColumn* tscColumnClone(const SColumn* src) {
|
|||
assert(src != NULL);
|
||||
|
||||
SColumn* dst = calloc(1, sizeof(SColumn));
|
||||
|
||||
if (dst == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
dst->colIndex = src->colIndex;
|
||||
dst->numOfFilters = src->numOfFilters;
|
||||
dst->filterInfo = tscFilterInfoClone(src->filterInfo, src->numOfFilters);
|
||||
|
@ -1183,7 +1198,7 @@ void tscColumnListDestroy(SArray* pColumnList) {
|
|||
* 'first_part.second_part'
|
||||
*
|
||||
*/
|
||||
static int32_t validateQuoteToken(SSQLToken* pToken) {
|
||||
static int32_t validateQuoteToken(SStrToken* pToken) {
|
||||
strdequote(pToken->z);
|
||||
pToken->n = (uint32_t)strtrim(pToken->z);
|
||||
|
||||
|
@ -1199,7 +1214,7 @@ static int32_t validateQuoteToken(SSQLToken* pToken) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t tscValidateName(SSQLToken* pToken) {
|
||||
int32_t tscValidateName(SStrToken* pToken) {
|
||||
if (pToken->type != TK_STRING && pToken->type != TK_ID) {
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
@ -1286,12 +1301,12 @@ void tscIncStreamExecutionCount(void* pStream) {
|
|||
ps->num += 1;
|
||||
}
|
||||
|
||||
bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId) {
|
||||
bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t numOfParams) {
|
||||
if (pTableMetaInfo->pTableMeta == NULL) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (colId == TSDB_TBNAME_COLUMN_INDEX) {
|
||||
if (colId == TSDB_TBNAME_COLUMN_INDEX || (colId <= TSDB_UD_COLUMN_INDEX && numOfParams == 2)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -1338,6 +1353,10 @@ void tscTagCondCopy(STagCond* dest, const STagCond* src) {
|
|||
if (pCond->len > 0) {
|
||||
assert(pCond->cond != NULL);
|
||||
c.cond = malloc(c.len);
|
||||
if (c.cond == NULL) {
|
||||
assert(0);
|
||||
}
|
||||
|
||||
memcpy(c.cond, pCond->cond, c.len);
|
||||
}
|
||||
|
||||
|
@@ -1463,20 +1482,20 @@ STableMetaInfo* tscGetMetaInfo(SQueryInfo* pQueryInfo, int32_t tableIndex) {
  return pQueryInfo->pTableMetaInfo[tableIndex];
}

int32_t tscGetQueryInfoDetailSafely(SSqlCmd* pCmd, int32_t subClauseIndex, SQueryInfo** pQueryInfo) {
SQueryInfo* tscGetQueryInfoDetailSafely(SSqlCmd* pCmd, int32_t subClauseIndex) {
  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);
  int32_t ret = TSDB_CODE_SUCCESS;

  *pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);

  while ((*pQueryInfo) == NULL) {
  while ((pQueryInfo) == NULL) {
    if ((ret = tscAddSubqueryInfo(pCmd)) != TSDB_CODE_SUCCESS) {
      return ret;
      terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
      return NULL;
    }

    (*pQueryInfo) = tscGetQueryInfoDetail(pCmd, subClauseIndex);
    pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);
  }

  return TSDB_CODE_SUCCESS;
  return pQueryInfo;
}

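For readers following the signature change above, a minimal sketch of the new call pattern: the caller now checks for NULL and reads terrno instead of receiving an error code through an out-parameter. Function and type names are taken from this diff; the surrounding error handling is illustrative only and mirrors the call sites updated elsewhere in this commit (createHBObj, tscParseTblNameList, tscGetSTableVgroupInfo).

SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0);
if (pQueryInfo == NULL) {
  pSql->res.code = terrno;   // allocation failed inside the helper
  return pSql->res.code;     // propagate instead of the old int32_t return value
}
pQueryInfo->command = TSDB_SQL_HB;  // use the query info as before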
STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* index) {
|
||||
|
@ -1507,6 +1526,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
|
|||
assert(pQueryInfo->exprList == NULL);
|
||||
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
|
||||
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
|
||||
pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX;
|
||||
}
|
||||
|
||||
int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
|
||||
|
@ -1522,8 +1542,11 @@ int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
|
|||
pCmd->pQueryInfo = (SQueryInfo**)tmp;
|
||||
|
||||
SQueryInfo* pQueryInfo = calloc(1, sizeof(SQueryInfo));
|
||||
if (pQueryInfo == NULL) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
tscInitQueryInfo(pQueryInfo);
|
||||
|
||||
pQueryInfo->msg = pCmd->payload; // pointer to the parent error message buffer
|
||||
|
||||
pCmd->pQueryInfo[pCmd->numOfClause++] = pQueryInfo;
|
||||
|
@ -1584,14 +1607,18 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
|
|||
SVgroupsInfo* vgroupList, SArray* pTagCols) {
|
||||
void* pAlloc = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES);
|
||||
if (pAlloc == NULL) {
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pQueryInfo->pTableMetaInfo = pAlloc;
|
||||
pQueryInfo->pTableMetaInfo[pQueryInfo->numOfTables] = calloc(1, sizeof(STableMetaInfo));
|
||||
STableMetaInfo* pTableMetaInfo = calloc(1, sizeof(STableMetaInfo));
|
||||
if (pTableMetaInfo == NULL) {
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[pQueryInfo->numOfTables];
|
||||
assert(pTableMetaInfo != NULL);
|
||||
pQueryInfo->pTableMetaInfo[pQueryInfo->numOfTables] = pTableMetaInfo;
|
||||
|
||||
if (name != NULL) {
|
||||
tstrncpy(pTableMetaInfo->name, name, sizeof(pTableMetaInfo->name));
|
||||
|
@ -1602,10 +1629,18 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
|
|||
if (vgroupList != NULL) {
|
||||
size_t size = sizeof(SVgroupsInfo) + sizeof(SCMVgroupInfo) * vgroupList->numOfVgroups;
|
||||
pTableMetaInfo->vgroupList = malloc(size);
|
||||
if (pTableMetaInfo->vgroupList == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memcpy(pTableMetaInfo->vgroupList, vgroupList, size);
|
||||
}
|
||||
|
||||
pTableMetaInfo->tagColList = taosArrayInit(4, POINTER_BYTES);
|
||||
if (pTableMetaInfo->tagColList == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (pTagCols != NULL) {
|
||||
tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1);
|
||||
}
|
||||
|
@ -1671,8 +1706,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
|
|||
return NULL;
|
||||
}
|
||||
|
||||
SQueryInfo* pQueryInfo = NULL;
|
||||
tscGetQueryInfoDetailSafely(pCmd, 0, &pQueryInfo);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, 0);
|
||||
|
||||
assert(pSql->cmd.clauseIndex == 0);
|
||||
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
|
||||
|
@ -1754,6 +1788,7 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* p
|
|||
|
||||
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, int32_t cmd, SSqlObj* pPrevSql) {
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
||||
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
|
||||
if (pNew == NULL) {
|
||||
tscError("%p new subquery failed, tableIndex:%d", pSql, tableIndex);
|
||||
|
@ -1769,10 +1804,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
pNew->sqlstr = strdup(pSql->sqlstr);
|
||||
if (pNew->sqlstr == NULL) {
|
||||
tscError("%p new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql, tableIndex, pTableMetaInfo->vgroupIndex);
|
||||
|
||||
free(pNew);
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
SSqlCmd* pnCmd = &pNew->cmd;
|
||||
|
@ -1789,9 +1822,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
pnCmd->parseFinished = 1;
|
||||
|
||||
if (tscAddSubqueryInfo(pnCmd) != TSDB_CODE_SUCCESS) {
|
||||
tscFreeSqlObj(pNew);
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
SQueryInfo* pNewQueryInfo = tscGetQueryInfoDetail(pnCmd, 0);
|
||||
|
@ -1816,20 +1848,28 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr;
|
||||
if (pQueryInfo->groupbyExpr.columnInfo != NULL) {
|
||||
pNewQueryInfo->groupbyExpr.columnInfo = taosArrayClone(pQueryInfo->groupbyExpr.columnInfo);
|
||||
if (pNewQueryInfo->groupbyExpr.columnInfo == NULL) {
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
}
|
||||
|
||||
tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond);
|
||||
|
||||
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
|
||||
pNewQueryInfo->fillVal = malloc(pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
|
||||
if (pNewQueryInfo->fillVal == NULL) {
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
memcpy(pNewQueryInfo->fillVal, pQueryInfo->fillVal, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
|
||||
}
|
||||
|
||||
if (tscAllocPayload(pnCmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) {
|
||||
tscError("%p new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql, tableIndex, pTableMetaInfo->vgroupIndex);
|
||||
tscFreeSqlObj(pNew);
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
tscColumnListCopy(pNewQueryInfo->colList, pQueryInfo->colList, (int16_t)tableIndex);
|
||||
|
@ -1872,16 +1912,15 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
|
||||
if (pFinalInfo->pTableMeta == NULL) {
|
||||
tscError("%p new subquery failed since no tableMeta in cache, name:%s", pSql, name);
|
||||
tscFreeSqlObj(pNew);
|
||||
|
||||
if (pPrevSql != NULL) {
|
||||
if (pPrevSql != NULL) { // pass the previous error to client
|
||||
assert(pPrevSql->res.code != TSDB_CODE_SUCCESS);
|
||||
terrno = pPrevSql->res.code;
|
||||
} else {
|
||||
terrno = TSDB_CODE_TSC_APP_ERROR;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
assert(pNewQueryInfo->numOfTables == 1);
|
||||
|
@ -1906,6 +1945,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
}
|
||||
|
||||
return pNew;
|
||||
|
||||
_error:
|
||||
tscFreeSqlObj(pNew);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -29,6 +29,7 @@ extern uint16_t tsServerPort;
|
|||
extern uint16_t tsDnodeShellPort;
|
||||
extern uint16_t tsDnodeDnodePort;
|
||||
extern uint16_t tsSyncPort;
|
||||
extern uint16_t tsArbitratorPort;
|
||||
extern int32_t tsStatusInterval;
|
||||
extern int32_t tsNumOfMnodes;
|
||||
extern int32_t tsEnableVnodeBak;
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
#include "os.h"
|
||||
#include "taosmsg.h"
|
||||
#include "tstoken.h"
|
||||
#include "tvariant.h"
|
||||
|
||||
typedef struct SDataStatis {
|
||||
int16_t colId;
|
||||
|
@ -24,10 +25,12 @@ void extractTableName(const char *tableId, char *name);
|
|||
|
||||
char* extractDBName(const char *tableId, char *name);
|
||||
|
||||
void extractTableNameFromToken(SSQLToken *pToken, SSQLToken* pTable);
|
||||
void extractTableNameFromToken(SStrToken *pToken, SStrToken* pTable);
|
||||
|
||||
SSchema tGetTableNameColumnSchema();
|
||||
|
||||
SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const char* name);
|
||||
|
||||
bool tscValidateTableNameLength(size_t len);
|
||||
|
||||
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
|
||||
|
|
|
@ -36,7 +36,7 @@ typedef struct tVariant {
|
|||
};
|
||||
} tVariant;
|
||||
|
||||
void tVariantCreate(tVariant *pVar, SSQLToken *token);
|
||||
void tVariantCreate(tVariant *pVar, SStrToken *token);
|
||||
|
||||
void tVariantCreateFromString(tVariant *pVar, char *pz, uint32_t len, uint32_t type);
|
||||
|
||||
|
@ -46,6 +46,8 @@ void tVariantDestroy(tVariant *pV);
|
|||
|
||||
void tVariantAssign(tVariant *pDst, const tVariant *pSrc);
|
||||
|
||||
int32_t tVariantCompare(const tVariant* p1, const tVariant* p2);
|
||||
|
||||
int32_t tVariantToString(tVariant *pVar, char *dst);
|
||||
|
||||
int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool includeLengthPrefix);
|
||||
|
|
|
@ -37,6 +37,7 @@ uint16_t tsServerPort = 6030;
|
|||
uint16_t tsDnodeShellPort = 6030; // udp[6035-6039] tcp[6035]
|
||||
uint16_t tsDnodeDnodePort = 6035; // udp/tcp
|
||||
uint16_t tsSyncPort = 6040;
|
||||
uint16_t tsArbitratorPort = 6042;
|
||||
int32_t tsStatusInterval = 1; // second
|
||||
int32_t tsNumOfMnodes = 3;
|
||||
int32_t tsEnableVnodeBak = 1;
|
||||
|
@ -54,7 +55,7 @@ int8_t tsDaylight = 0;
|
|||
char tsTimezone[TSDB_TIMEZONE_LEN] = {0};
|
||||
char tsLocale[TSDB_LOCALE_LEN] = {0};
|
||||
char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string
|
||||
int32_t tsEnableCoreFile = 1;
|
||||
int32_t tsEnableCoreFile = 0;
|
||||
int32_t tsMaxBinaryDisplayWidth = 30;
|
||||
|
||||
/*
|
||||
|
@@ -1331,7 +1332,10 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {
    *port = atoi(temp+1);
  }

  if (*port == 0) *port = tsServerPort;
  if (*port == 0) {
    *port = tsServerPort;
    return -1;
  }

  return 0;
}

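A small illustrative call, assuming the semantics introduced in the hunk above: an endpoint without an explicit port still falls back to tsServerPort, but the fallback is now reported with a non-zero return. The host name and buffer size are hypothetical.

char fqdn[128] = {0};
uint16_t port = 0;

// "node1:7100" carries an explicit port, so 0 is expected here.
// "node1" alone would fill in tsServerPort and return -1 under the new code.
if (taosGetFqdnPortFromEp("node1:7100", fqdn, &port) != 0) {
  // endpoint had no explicit port; port now holds the default tsServerPort
}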
@ -4,6 +4,7 @@
|
|||
#include "tname.h"
|
||||
#include "tstoken.h"
|
||||
#include "ttokendef.h"
|
||||
#include "tvariant.h"
|
||||
|
||||
// todo refactor
|
||||
UNUSED_FUNC static FORCE_INLINE const char* skipSegments(const char* input, char delim, int32_t num) {
|
||||
|
@@ -43,7 +44,30 @@ SSchema tGetTableNameColumnSchema() {
  s.bytes = TSDB_TABLE_NAME_LEN - 1 + VARSTR_HEADER_SIZE;
  s.type = TSDB_DATA_TYPE_BINARY;
  s.colId = TSDB_TBNAME_COLUMN_INDEX;
  strncpy(s.name, TSQL_TBNAME_L, TSDB_COL_NAME_LEN);
  tstrncpy(s.name, TSQL_TBNAME_L, TSDB_COL_NAME_LEN);
  return s;
}

SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const char* name) {
  SSchema s = {0};

  s.type = pVal->nType;
  if (s.type == TSDB_DATA_TYPE_BINARY || s.type == TSDB_DATA_TYPE_NCHAR) {
    s.bytes = (int16_t)(pVal->nLen + VARSTR_HEADER_SIZE);
  } else {
    s.bytes = tDataTypeDesc[pVal->nType].nSize;
  }

  s.colId = TSDB_UD_COLUMN_INDEX;
  if (name != NULL) {
    tstrncpy(s.name, name, sizeof(s.name));
  } else {
    size_t len = strdequote(exprStr->z);
    size_t tlen = MIN(sizeof(s.name), len + 1);

    tstrncpy(s.name, exprStr->z, tlen);
  }

  return s;
}

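To make the intent of the new helper concrete, a hedged usage sketch: the literal value and column alias are invented for illustration, and the exact resulting nType depends on tVariantCreate; only the function names and constants come from the hunk above.

// A user-specified constant column such as `SELECT 1.5 AS ratio FROM tb`
// gets a synthetic schema whose colId is TSDB_UD_COLUMN_INDEX.
char literal[] = "1.5";
SStrToken expr = {.z = literal, .n = 3, .type = TK_FLOAT};

tVariant v = {0};
tVariantCreate(&v, &expr);  // declared above: tVariantCreate(tVariant*, SStrToken*)

SSchema s = tGetUserSpecifiedColumnSchema(&v, &expr, "ratio");
// s.name == "ratio"; s.bytes is taken from tDataTypeDesc for numeric types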
@ -110,7 +134,7 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in
|
|||
* tablePrefix.columnName
|
||||
* extract table name and save it in pTable, with only column name in pToken
|
||||
*/
|
||||
void extractTableNameFromToken(SSQLToken* pToken, SSQLToken* pTable) {
|
||||
void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) {
|
||||
const char sep = TS_PATH_DELIMITER[0];
|
||||
|
||||
if (pToken == pTable || pToken == NULL || pTable == NULL) {
|
||||
|
|
|
@ -12,12 +12,10 @@
|
|||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#include "os.h"
|
||||
|
||||
#include "tvariant.h"
|
||||
#include "hash.h"
|
||||
#include "hashfunc.h"
|
||||
#include "os.h"
|
||||
#include "hash.h"
|
||||
#include "taos.h"
|
||||
#include "taosdef.h"
|
||||
#include "tstoken.h"
|
||||
|
@ -25,7 +23,7 @@
|
|||
#include "tutil.h"
|
||||
|
||||
// todo support scientific expression number and oct number
|
||||
void tVariantCreate(tVariant *pVar, SSQLToken *token) { tVariantCreateFromString(pVar, token->z, token->n, token->type); }
|
||||
void tVariantCreate(tVariant *pVar, SStrToken *token) { tVariantCreateFromString(pVar, token->z, token->n, token->type); }
|
||||
|
||||
void tVariantCreateFromString(tVariant *pVar, char *pz, uint32_t len, uint32_t type) {
|
||||
memset(pVar, 0, sizeof(tVariant));
|
||||
|
@ -102,10 +100,9 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32
|
|||
}
|
||||
case TSDB_DATA_TYPE_NCHAR: { // here we get the nchar length from raw binary bits length
|
||||
size_t lenInwchar = len / TSDB_NCHAR_SIZE;
|
||||
|
||||
pVar->wpz = calloc(1, (lenInwchar + 1) * TSDB_NCHAR_SIZE);
|
||||
|
||||
wcsncpy(pVar->wpz, (wchar_t *)pz, lenInwchar);
|
||||
pVar->wpz[lenInwchar] = 0;
|
||||
memcpy(pVar->wpz, pz, lenInwchar * TSDB_NCHAR_SIZE);
|
||||
pVar->nLen = (int32_t)len;
|
||||
|
||||
break;
|
||||
|
@@ -169,6 +166,50 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
      char* n = strdup(p);
      taosArrayPush(pDst->arr, &n);
    }

    return;
  }

  pDst->nLen = tDataTypeDesc[pDst->nType].nSize;
}

int32_t tVariantCompare(const tVariant* p1, const tVariant* p2) {
  if (p1->nType == TSDB_DATA_TYPE_NULL && p2->nType == TSDB_DATA_TYPE_NULL) {
    return 0;
  }

  if (p1->nType == TSDB_DATA_TYPE_NULL) {
    return -1;
  }

  if (p2->nType == TSDB_DATA_TYPE_NULL) {
    return 1;
  }

  switch (p1->nType) {
    case TSDB_DATA_TYPE_BINARY:
    case TSDB_DATA_TYPE_NCHAR: {
      if (p1->nLen == p2->nLen) {
        return memcmp(p1->pz, p2->pz, p1->nLen);
      } else {
        return p1->nLen > p2->nLen? 1:-1;
      }
    };

    case TSDB_DATA_TYPE_FLOAT:
    case TSDB_DATA_TYPE_DOUBLE:
      if (p1->dKey == p2->dKey) {
        return 0;
      } else {
        return p1->dKey > p2->dKey? 1:-1;
      }

    default:
      if (p1->i64Key == p2->i64Key) {
        return 0;
      } else {
        return p1->i64Key > p2->i64Key? 1:-1;
      }
  }
}

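A short sketch of the ordering the join code above now relies on: NULL sorts before everything, and the function returns the usual negative/zero/positive result. The values are illustrative; the field names come from the tVariant struct shown earlier in this diff.

#include <assert.h>

tVariant a = {.nType = TSDB_DATA_TYPE_BIGINT, .i64Key = 5};
tVariant b = {.nType = TSDB_DATA_TYPE_BIGINT, .i64Key = 9};
tVariant n = {.nType = TSDB_DATA_TYPE_NULL};

assert(tVariantCompare(&a, &b) < 0);   // 5 < 9
assert(tVariantCompare(&b, &a) > 0);
assert(tVariantCompare(&n, &a) < 0);   // NULL compares lower than any value
assert(tVariantCompare(&n, &n) == 0);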
@ -228,7 +269,7 @@ static int32_t doConvertToInteger(tVariant *pVariant, char *pDest, int32_t type,
    errno = 0;
    char *endPtr = NULL;

    SSQLToken token = {0};
    SStrToken token = {0};
    token.n = tSQLGetToken(pVariant->pz, &token.type);

    if (token.type == TK_MINUS || token.type == TK_PLUS) {

@ -277,7 +318,7 @@ static int32_t doConvertToInteger(tVariant *pVariant, char *pDest, int32_t type,
    errno = 0;
    wchar_t *endPtr = NULL;

    SSQLToken token = {0};
    SStrToken token = {0};
    token.n = tSQLGetToken(pVariant->pz, &token.type);

    if (token.type == TK_MINUS || token.type == TK_PLUS) {

@ -436,7 +477,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
}

static FORCE_INLINE int32_t convertToDouble(char *pStr, int32_t len, double *value) {
  SSQLToken stoken = {.z = pStr, .n = len};
  SStrToken stoken = {.z = pStr, .n = len};

  if (TK_ILLEGAL == isValidNumber(&stoken)) {
    return -1;

@ -462,7 +503,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
    errno = 0;
    char *endPtr = NULL;

    SSQLToken token = {0};
    SStrToken token = {0};
    token.n = tSQLGetToken(pVariant->pz, &token.type);

    if (token.type == TK_MINUS || token.type == TK_PLUS) {

@ -479,7 +520,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
    return 0;
  }

  SSQLToken sToken = {.z = pVariant->pz, .n = pVariant->nLen};
  SStrToken sToken = {.z = pVariant->pz, .n = pVariant->nLen};
  if (TK_ILLEGAL == isValidNumber(&sToken)) {
    return -1;
  }

@ -515,7 +556,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
    errno = 0;
    wchar_t *endPtr = NULL;

    SSQLToken token = {0};
    SStrToken token = {0};
    token.n = tSQLGetToken(pVariant->pz, &token.type);

    if (token.type == TK_MINUS || token.type == TK_PLUS) {
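These hunks rename the tokenizer struct from SSQLToken to SStrToken; the conversion helpers all follow the same pattern of wrapping the raw text in a token and rejecting anything isValidNumber does not accept before parsing. A hedged sketch of that wrap-and-validate pattern is below; parseDoubleStrict is hypothetical, and the isValidNumber/TK_ILLEGAL usage is assumed from how they appear in this diff.

/* Hypothetical sketch of the wrap-and-validate pattern used by convertToDouble
 * above. SStrToken, isValidNumber and TK_ILLEGAL are assumed to come from the
 * project's tokenizer header; parseDoubleStrict itself is illustrative only. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int32_t parseDoubleStrict(const char *str, double *value) {
  SStrToken tok = {.z = (char *)str, .n = (uint32_t)strlen(str)};

  if (TK_ILLEGAL == isValidNumber(&tok)) {
    return -1;  // not a well-formed numeric literal, mirroring convertToDouble
  }

  *value = strtod(str, NULL);
  return 0;
}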
@ -28,22 +28,5 @@
|
|||
<attribute name="maven.pomderived" value="true"/>
|
||||
</attributes>
|
||||
</classpathentry>
|
||||
<classpathentry kind="src" path="target/generated-sources/annotations">
|
||||
<attributes>
|
||||
<attribute name="optional" value="true"/>
|
||||
<attribute name="maven.pomderived" value="true"/>
|
||||
<attribute name="ignore_optional_problems" value="true"/>
|
||||
<attribute name="m2e-apt" value="true"/>
|
||||
</attributes>
|
||||
</classpathentry>
|
||||
<classpathentry kind="src" output="target/test-classes" path="target/generated-test-sources/test-annotations">
|
||||
<attributes>
|
||||
<attribute name="optional" value="true"/>
|
||||
<attribute name="maven.pomderived" value="true"/>
|
||||
<attribute name="ignore_optional_problems" value="true"/>
|
||||
<attribute name="m2e-apt" value="true"/>
|
||||
<attribute name="test" value="true"/>
|
||||
</attributes>
|
||||
</classpathentry>
|
||||
<classpathentry kind="output" path="target/classes"/>
|
||||
</classpath>
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
*****************************************************************************/
|
||||
package com.taosdata.jdbc;
|
||||
|
||||
import java.io.*;
|
||||
import java.sql.Array;
|
||||
import java.sql.Blob;
|
||||
import java.sql.CallableStatement;
|
||||
|
@ -30,336 +31,392 @@ import java.sql.SQLXML;
|
|||
import java.sql.Savepoint;
|
||||
import java.sql.Statement;
|
||||
import java.sql.Struct;
|
||||
import java.util.Enumeration;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.Executor;
|
||||
|
||||
public class TSDBConnection implements Connection {
|
||||
|
||||
private TSDBJNIConnector connector = null;
|
||||
|
||||
protected Properties props = null;
|
||||
|
||||
private String catalog = null;
|
||||
|
||||
private TSDBDatabaseMetaData dbMetaData = null;
|
||||
|
||||
private Properties clientInfoProps = new Properties();
|
||||
|
||||
private int timeoutMilliseconds = 0;
|
||||
|
||||
private String tsCharSet = "";
|
||||
|
||||
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
|
||||
this.dbMetaData = meta;
|
||||
connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
|
||||
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
|
||||
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
|
||||
info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
|
||||
}
|
||||
|
||||
private void connect(String host, int port, String dbName, String user, String password) throws SQLException {
|
||||
this.connector = new TSDBJNIConnector();
|
||||
this.connector.connect(host, port, dbName, user, password);
|
||||
|
||||
try {
|
||||
this.setCatalog(dbName);
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
|
||||
this.dbMetaData.setConnection(this);
|
||||
}
|
||||
|
||||
public TSDBJNIConnector getConnection() {
|
||||
return this.connector;
|
||||
}
|
||||
|
||||
public Statement createStatement() throws SQLException {
|
||||
if (!this.connector.isClosed()) {
|
||||
return new TSDBStatement(this.connector);
|
||||
} else {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
}
|
||||
|
||||
public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException {
|
||||
if (this.connector.isClosed()) {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
|
||||
long id = this.connector.subscribe(topic, sql, restart, 0);
|
||||
if (id == 0) {
|
||||
throw new SQLException(TSDBConstants.WrapErrMsg("failed to create subscription"));
|
||||
}
|
||||
|
||||
return new TSDBSubscribe(this.connector, id);
|
||||
}
|
||||
|
||||
public PreparedStatement prepareStatement(String sql) throws SQLException {
|
||||
if (!this.connector.isClosed()) {
|
||||
return new TSDBPreparedStatement(this.connector, sql);
|
||||
} else {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
}
|
||||
|
||||
public CallableStatement prepareCall(String sql) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public String nativeSQL(String sql) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void setAutoCommit(boolean autoCommit) throws SQLException {
|
||||
}
|
||||
|
||||
public boolean getAutoCommit() throws SQLException {
|
||||
return true;
|
||||
}
|
||||
|
||||
public void commit() throws SQLException {
|
||||
}
|
||||
|
||||
public void rollback() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void close() throws SQLException {
|
||||
if (this.connector != null && !this.connector.isClosed()) {
|
||||
this.connector.closeConnection();
|
||||
} else {
|
||||
throw new SQLException(TSDBConstants.WrapErrMsg("connection is already closed!"));
|
||||
}
|
||||
}
|
||||
|
||||
public boolean isClosed() throws SQLException {
|
||||
return this.connector.isClosed();
|
||||
}
|
||||
|
||||
/**
|
||||
* A connection's database is able to provide information describing its tables,
|
||||
* its supported SQL grammar, its stored procedures, the capabilities of this
|
||||
* connection, etc. This information is made available through a
|
||||
* DatabaseMetaData object.
|
||||
*
|
||||
* @return a DatabaseMetaData object for this connection
|
||||
* @exception SQLException
|
||||
* if a database access error occurs
|
||||
*/
|
||||
public DatabaseMetaData getMetaData() throws SQLException {
|
||||
return this.dbMetaData;
|
||||
}
|
||||
|
||||
/**
|
||||
* This readOnly option is not supported by TDengine. However, the method is intentionally left blank here to
|
||||
* support HikariCP connection.
|
||||
* @param readOnly
|
||||
* @throws SQLException
|
||||
*/
|
||||
public void setReadOnly(boolean readOnly) throws SQLException {
|
||||
}
|
||||
|
||||
public boolean isReadOnly() throws SQLException {
|
||||
return true;
|
||||
}
|
||||
|
||||
public void setCatalog(String catalog) throws SQLException {
|
||||
this.catalog = catalog;
|
||||
}
|
||||
|
||||
public String getCatalog() throws SQLException {
|
||||
return this.catalog;
|
||||
}
|
||||
|
||||
/**
|
||||
* The transaction isolation level option is not supported by TDengine.
|
||||
* This method is intentionally left empty to support HikariCP connection.
|
||||
* @param level
|
||||
* @throws SQLException
|
||||
*/
|
||||
public void setTransactionIsolation(int level) throws SQLException {
|
||||
}
|
||||
|
||||
/**
|
||||
* The transaction isolation level option is not supported by TDengine.
|
||||
* @return
|
||||
* @throws SQLException
|
||||
*/
|
||||
public int getTransactionIsolation() throws SQLException {
|
||||
return Connection.TRANSACTION_NONE;
|
||||
}
|
||||
|
||||
public SQLWarning getWarnings() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void clearWarnings() throws SQLException {
|
||||
// left blank to support HikariCP connection
|
||||
//todo: implement getWarnings according to the warning messages returned from TDengine
|
||||
}
|
||||
|
||||
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
|
||||
throws SQLException {
|
||||
// This method is implemented in the current way to support Spark
|
||||
if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) {
|
||||
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
|
||||
}
|
||||
|
||||
if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) {
|
||||
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
|
||||
}
|
||||
|
||||
return this.prepareStatement(sql);
|
||||
}
|
||||
|
||||
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public Map<String, Class<?>> getTypeMap() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void setHoldability(int holdability) throws SQLException {
|
||||
// intentionally left empty to support druid connection pool.
|
||||
}
|
||||
|
||||
/**
|
||||
* the transaction is not supported by TDengine, so the opened ResultSet Objects will remain open
|
||||
* @return
|
||||
* @throws SQLException
|
||||
*/
|
||||
public int getHoldability() throws SQLException {
|
||||
//intentionally left empty to support HikariCP connection.
|
||||
return ResultSet.HOLD_CURSORS_OVER_COMMIT;
|
||||
}
|
||||
|
||||
public Savepoint setSavepoint() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public Savepoint setSavepoint(String name) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void rollback(Savepoint savepoint) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void releaseSavepoint(Savepoint savepoint) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability)
|
||||
throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency,
|
||||
int resultSetHoldability) throws SQLException {
|
||||
return this.prepareStatement(sql, resultSetType, resultSetConcurrency);
|
||||
}
|
||||
|
||||
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency,
|
||||
int resultSetHoldability) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public Clob createClob() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public Blob createBlob() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public NClob createNClob() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public SQLXML createSQLXML() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public boolean isValid(int timeout) throws SQLException {
|
||||
return !this.isClosed();
|
||||
}
|
||||
|
||||
public void setClientInfo(String name, String value) throws SQLClientInfoException {
|
||||
clientInfoProps.setProperty(name, value);
|
||||
}
|
||||
|
||||
public void setClientInfo(Properties properties) throws SQLClientInfoException {
|
||||
for (Enumeration<Object> enumer = properties.keys(); enumer.hasMoreElements();) {
|
||||
String name = (String) enumer.nextElement();
|
||||
clientInfoProps.put(name, properties.getProperty(name));
|
||||
}
|
||||
}
|
||||
|
||||
public String getClientInfo(String name) throws SQLException {
|
||||
return clientInfoProps.getProperty(name);
|
||||
}
|
||||
|
||||
public Properties getClientInfo() throws SQLException {
|
||||
return clientInfoProps;
|
||||
}
|
||||
|
||||
public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void setSchema(String schema) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public String getSchema() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void abort(Executor executor) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
|
||||
this.timeoutMilliseconds = milliseconds;
|
||||
}
|
||||
|
||||
public int getNetworkTimeout() throws SQLException {
|
||||
return this.timeoutMilliseconds;
|
||||
}
|
||||
|
||||
public <T> T unwrap(Class<T> iface) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public boolean isWrapperFor(Class<?> iface) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
private TSDBJNIConnector connector = null;
|
||||
|
||||
protected Properties props = null;
|
||||
|
||||
private String catalog = null;
|
||||
|
||||
private TSDBDatabaseMetaData dbMetaData = null;
|
||||
|
||||
private Properties clientInfoProps = new Properties();
|
||||
|
||||
private int timeoutMilliseconds = 0;
|
||||
|
||||
private String tsCharSet = "";
|
||||
|
||||
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
|
||||
this.dbMetaData = meta;
|
||||
|
||||
//load taos.cfg start
|
||||
File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
|
||||
File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0];
|
||||
List<String> endpoints = loadConfigEndpoints(cfgFile);
|
||||
if (!endpoints.isEmpty()){
|
||||
info.setProperty(TSDBDriver.PROPERTY_KEY_HOST,endpoints.get(0).split(":")[0]);
|
||||
info.setProperty(TSDBDriver.PROPERTY_KEY_PORT,endpoints.get(0).split(":")[1]);
|
||||
}
|
||||
//load taos.cfg end
|
||||
|
||||
connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
|
||||
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
|
||||
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
|
||||
info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
|
||||
}
|
||||
|
||||
private List<String> loadConfigEndpoints(File cfgFile){
|
||||
List<String> endpoints = new ArrayList<>();
|
||||
try(BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
|
||||
String line = null;
|
||||
while ((line = reader.readLine())!=null){
|
||||
if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")){
|
||||
endpoints.add(line.substring(line.indexOf('p')+1).trim());
|
||||
}
|
||||
if (endpoints.size()>1)
|
||||
break;
|
||||
}
|
||||
} catch (FileNotFoundException e) {
|
||||
e.printStackTrace();
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
return endpoints;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param cfgDirPath
|
||||
* @return return the config dir
|
||||
* **/
|
||||
private File loadConfigDir(String cfgDirPath) {
|
||||
if (cfgDirPath == null)
|
||||
return loadDefaultConfigDir();
|
||||
File cfgDir = new File(cfgDirPath);
|
||||
if (!cfgDir.exists())
|
||||
return loadDefaultConfigDir();
|
||||
return cfgDir;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return search the default config dir, if the config dir is not exist will return null
|
||||
* */
|
||||
private File loadDefaultConfigDir(){
|
||||
File cfgDir;
|
||||
File cfgDir_linux = new File("/etc/taos");
|
||||
cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
|
||||
File cfgDir_windows = new File("C:\\TDengine\\cfg");
|
||||
cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
|
||||
return cfgDir;
|
||||
}
|
||||
|
||||
private void connect(String host, int port, String dbName, String user, String password) throws SQLException {
|
||||
this.connector = new TSDBJNIConnector();
|
||||
this.connector.connect(host, port, dbName, user, password);
|
||||
|
||||
try {
|
||||
this.setCatalog(dbName);
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
|
||||
this.dbMetaData.setConnection(this);
|
||||
}
|
||||
|
||||
public TSDBJNIConnector getConnection() {
|
||||
return this.connector;
|
||||
}
|
||||
|
||||
public Statement createStatement() throws SQLException {
|
||||
if (!this.connector.isClosed()) {
|
||||
return new TSDBStatement(this.connector);
|
||||
} else {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
}
|
||||
|
||||
public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException {
|
||||
if (this.connector.isClosed()) {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
|
||||
long id = this.connector.subscribe(topic, sql, restart, 0);
|
||||
if (id == 0) {
|
||||
throw new SQLException(TSDBConstants.WrapErrMsg("failed to create subscription"));
|
||||
}
|
||||
|
||||
return new TSDBSubscribe(this.connector, id);
|
||||
}
|
||||
|
||||
public PreparedStatement prepareStatement(String sql) throws SQLException {
|
||||
if (!this.connector.isClosed()) {
|
||||
return new TSDBPreparedStatement(this.connector, sql);
|
||||
} else {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
}
|
||||
|
||||
public CallableStatement prepareCall(String sql) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public String nativeSQL(String sql) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void setAutoCommit(boolean autoCommit) throws SQLException {
|
||||
}
|
||||
|
||||
public boolean getAutoCommit() throws SQLException {
|
||||
return true;
|
||||
}
|
||||
|
||||
public void commit() throws SQLException {
|
||||
}
|
||||
|
||||
public void rollback() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void close() throws SQLException {
|
||||
if (this.connector != null && !this.connector.isClosed()) {
|
||||
this.connector.closeConnection();
|
||||
} else {
|
||||
throw new SQLException(TSDBConstants.WrapErrMsg("connection is already closed!"));
|
||||
}
|
||||
}
|
||||
|
||||
public boolean isClosed() throws SQLException {
|
||||
return this.connector.isClosed();
|
||||
}
|
||||
|
||||
/**
|
||||
* A connection's database is able to provide information describing its tables,
|
||||
* its supported SQL grammar, its stored procedures, the capabilities of this
|
||||
* connection, etc. This information is made available through a
|
||||
* DatabaseMetaData object.
|
||||
*
|
||||
* @return a DatabaseMetaData object for this connection
|
||||
* @throws SQLException if a database access error occurs
|
||||
*/
|
||||
public DatabaseMetaData getMetaData() throws SQLException {
|
||||
return this.dbMetaData;
|
||||
}
|
||||
|
||||
/**
|
||||
* This readOnly option is not supported by TDengine. However, the method is intentionally left blank here to
|
||||
* support HikariCP connection.
|
||||
*
|
||||
* @param readOnly
|
||||
* @throws SQLException
|
||||
*/
|
||||
public void setReadOnly(boolean readOnly) throws SQLException {
|
||||
}
|
||||
|
||||
public boolean isReadOnly() throws SQLException {
|
||||
return true;
|
||||
}
|
||||
|
||||
public void setCatalog(String catalog) throws SQLException {
|
||||
this.catalog = catalog;
|
||||
}
|
||||
|
||||
public String getCatalog() throws SQLException {
|
||||
return this.catalog;
|
||||
}
|
||||
|
||||
/**
|
||||
* The transaction isolation level option is not supported by TDengine.
|
||||
* This method is intentionally left empty to support HikariCP connection.
|
||||
*
|
||||
* @param level
|
||||
* @throws SQLException
|
||||
*/
|
||||
public void setTransactionIsolation(int level) throws SQLException {
|
||||
}
|
||||
|
||||
/**
|
||||
* The transaction isolation level option is not supported by TDengine.
|
||||
*
|
||||
* @return
|
||||
* @throws SQLException
|
||||
*/
|
||||
public int getTransactionIsolation() throws SQLException {
|
||||
return Connection.TRANSACTION_NONE;
|
||||
}
|
||||
|
||||
public SQLWarning getWarnings() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void clearWarnings() throws SQLException {
|
||||
// left blank to support HikariCP connection
|
||||
//todo: implement getWarnings according to the warning messages returned from TDengine
|
||||
}
|
||||
|
||||
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
|
||||
throws SQLException {
|
||||
// This method is implemented in the current way to support Spark
|
||||
if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) {
|
||||
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
|
||||
}
|
||||
|
||||
if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) {
|
||||
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
|
||||
}
|
||||
|
||||
return this.prepareStatement(sql);
|
||||
}
|
||||
|
||||
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public Map<String, Class<?>> getTypeMap() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void setHoldability(int holdability) throws SQLException {
|
||||
// intentionally left empty to support druid connection pool.
|
||||
}
|
||||
|
||||
/**
|
||||
* the transaction is not supported by TDengine, so the opened ResultSet Objects will remain open
|
||||
*
|
||||
* @return
|
||||
* @throws SQLException
|
||||
*/
|
||||
public int getHoldability() throws SQLException {
|
||||
//intentionally left empty to support HikariCP connection.
|
||||
return ResultSet.HOLD_CURSORS_OVER_COMMIT;
|
||||
}
|
||||
|
||||
public Savepoint setSavepoint() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public Savepoint setSavepoint(String name) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void rollback(Savepoint savepoint) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void releaseSavepoint(Savepoint savepoint) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability)
|
||||
throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency,
|
||||
int resultSetHoldability) throws SQLException {
|
||||
return this.prepareStatement(sql, resultSetType, resultSetConcurrency);
|
||||
}
|
||||
|
||||
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency,
|
||||
int resultSetHoldability) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public Clob createClob() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public Blob createBlob() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public NClob createNClob() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public SQLXML createSQLXML() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public boolean isValid(int timeout) throws SQLException {
|
||||
return !this.isClosed();
|
||||
}
|
||||
|
||||
public void setClientInfo(String name, String value) throws SQLClientInfoException {
|
||||
clientInfoProps.setProperty(name, value);
|
||||
}
|
||||
|
||||
public void setClientInfo(Properties properties) throws SQLClientInfoException {
|
||||
for (Enumeration<Object> enumer = properties.keys(); enumer.hasMoreElements(); ) {
|
||||
String name = (String) enumer.nextElement();
|
||||
clientInfoProps.put(name, properties.getProperty(name));
|
||||
}
|
||||
}
|
||||
|
||||
public String getClientInfo(String name) throws SQLException {
|
||||
return clientInfoProps.getProperty(name);
|
||||
}
|
||||
|
||||
public Properties getClientInfo() throws SQLException {
|
||||
return clientInfoProps;
|
||||
}
|
||||
|
||||
public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void setSchema(String schema) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public String getSchema() throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void abort(Executor executor) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
|
||||
this.timeoutMilliseconds = milliseconds;
|
||||
}
|
||||
|
||||
public int getNetworkTimeout() throws SQLException {
|
||||
return this.timeoutMilliseconds;
|
||||
}
|
||||
|
||||
public <T> T unwrap(Class<T> iface) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
|
||||
public boolean isWrapperFor(Class<?> iface) throws SQLException {
|
||||
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -52,7 +52,7 @@ public class TSDBStatement implements Statement {
|
|||
if (isClosed) {
|
||||
throw new SQLException("Invalid method call on a closed statement.");
|
||||
}
|
||||
|
||||
|
||||
// TODO make sure it is not a update query
|
||||
pSql = this.connecter.executeQuery(sql);
|
||||
|
||||
|
@ -68,21 +68,21 @@ public class TSDBStatement implements Statement {
|
|||
this.connecter.freeResultSet(pSql);
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
if (!this.connecter.isUpdateQuery(pSql)) {
|
||||
return new TSDBResultSet(this.connecter, resultSetPointer);
|
||||
} else {
|
||||
this.connecter.freeResultSet(pSql);
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
public int executeUpdate(String sql) throws SQLException {
|
||||
if (isClosed) {
|
||||
throw new SQLException("Invalid method call on a closed statement.");
|
||||
}
|
||||
|
||||
|
||||
// TODO check if current query is update query
|
||||
pSql = this.connecter.executeQuery(sql);
|
||||
long resultSetPointer = this.connecter.getResultSet();
|
||||
|
@ -94,7 +94,7 @@ public class TSDBStatement implements Statement {
|
|||
|
||||
this.affectedRows = this.connecter.getAffectedRows(pSql);
|
||||
this.connecter.freeResultSet(pSql);
|
||||
|
||||
|
||||
return this.affectedRows;
|
||||
}
|
||||
|
||||
|
@ -192,7 +192,7 @@ public class TSDBStatement implements Statement {
|
|||
if (isClosed) {
|
||||
throw new SQLException("Invalid method call on a closed statement.");
|
||||
}
|
||||
|
||||
|
||||
return this.affectedRows;
|
||||
}
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ public class TestPreparedStatement {
|
|||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
Properties properties = new Properties();
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/?user=root&password=taosdata", properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/", properties);
|
||||
String rawSql = "select * from test.log0601";
|
||||
// String[] params = new String[]{"ts", "c1"};
|
||||
PreparedStatement pstmt = (TSDBPreparedStatement) connection.prepareStatement(rawSql);
|
||||
|
|
|
@ -13,7 +13,7 @@ public class TestTSDBDatabaseMetaData {
|
|||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
Properties properties = new Properties();
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/?user=root&password=taosdata", properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/", properties);
|
||||
dbMetaData = connection.getMetaData();
|
||||
resSet = dbMetaData.getCatalogs();
|
||||
while(resSet.next()) {
|
||||
|
|
|
@ -3,7 +3,6 @@ import com.taosdata.jdbc.TSDBDriver;
|
|||
import com.taosdata.jdbc.TSDBResultSet;
|
||||
import com.taosdata.jdbc.TSDBSubscribe;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.util.Properties;
|
||||
|
||||
|
@ -17,12 +16,10 @@ public class TestTSDBSubscribe {
|
|||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
|
||||
String cs = String.format("jdbc:TAOS://%s:0/%s?user=root&password=taosdata", host, database);
|
||||
String cs = String.format("jdbc:TAOS://%s:0/%s", host, database);
|
||||
return (TSDBConnection)DriverManager.getConnection(cs, properties);
|
||||
}
|
||||
|
||||
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
String usage = "java -Djava.ext.dirs=../ TestTSDBSubscribe [-host host] <-db database> <-topic topic> <-sql sql>";
|
||||
if (args.length < 2) {
|
||||
|
|
|
@ -40,8 +40,7 @@ public class BatchInsertTest extends BaseTest {
|
|||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
|
||||
, properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
|
||||
|
||||
statement = connection.createStatement();
|
||||
statement.executeUpdate("drop database if exists " + dbName);
|
||||
|
|
|
@ -29,16 +29,14 @@ public class ConnectionTest extends BaseTest {
|
|||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
|
||||
, properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
|
||||
|
||||
assertTrue(null != connection);
|
||||
statement = connection.createStatement();
|
||||
assertTrue(null != statement);
|
||||
|
||||
// try reconnect
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
|
||||
, properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
|
||||
|
||||
try {
|
||||
statement.execute("create database if not exists " + dbName);
|
||||
|
|
|
@ -26,8 +26,7 @@ public class DatabaseMetaDataTest extends BaseTest {
|
|||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata",
|
||||
properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
|
||||
|
||||
String sql = "drop database if exists " + dbName;
|
||||
statement = (TSDBPreparedStatement) connection.prepareStatement(sql);
|
||||
|
|
|
@ -28,8 +28,7 @@ public class ImportTest extends BaseTest {
|
|||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
|
||||
, properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
|
||||
|
||||
statement = connection.createStatement();
|
||||
statement.executeUpdate("drop database if exists " + dbName);
|
||||
|
|
|
@ -33,8 +33,7 @@ public class PreparedStatementTest extends BaseTest {
|
|||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata",
|
||||
properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/",properties);
|
||||
|
||||
String sql = "drop database if exists " + dbName;
|
||||
statement = (TSDBPreparedStatement) connection.prepareStatement(sql);
|
||||
|
|
|
@ -35,8 +35,7 @@ public class ResultSetTest extends BaseTest {
|
|||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
|
||||
, properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
|
||||
|
||||
statement = connection.createStatement();
|
||||
statement.executeUpdate("drop database if exists " + dbName);
|
||||
|
|
|
@ -28,8 +28,7 @@ public class SelectTest extends BaseTest {
|
|||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
|
||||
, properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
|
||||
|
||||
statement = connection.createStatement();
|
||||
statement.executeUpdate("drop database if exists " + dbName);
|
||||
|
|
|
@ -31,8 +31,7 @@ public class StableTest extends BaseTest {
|
|||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
|
||||
, properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
|
||||
|
||||
statement = connection.createStatement();
|
||||
statement.executeUpdate("create database if not exists " + dbName);
|
||||
|
|
|
@ -30,8 +30,7 @@ public class StatementTest extends BaseTest {
|
|||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
|
||||
, properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
|
||||
|
||||
statement = connection.createStatement();
|
||||
statement.executeUpdate("drop database if exists " + dbName);
|
||||
|
|
|
@ -32,8 +32,7 @@ public class SubscribeTest extends BaseTest {
|
|||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
|
||||
, properties);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
|
||||
|
||||
statement = connection.createStatement();
|
||||
statement.executeUpdate("create database if not exists " + dbName);
|
||||
|
|
|
@ -12,7 +12,7 @@ public class TSDBDriverTest {
|
|||
@Test
|
||||
public void urlParserTest() throws SQLException {
|
||||
TSDBDriver driver = new TSDBDriver();
|
||||
String url = "jdbc:TSDB://127.0.0.1:0/db?user=root&password=your_password";
|
||||
String url = "jdbc:TSDB://127.0.0.1:0/db";
|
||||
|
||||
Properties properties = new Properties();
|
||||
driver.parseURL(url, properties);
|
||||
|
|
|
@ -6,6 +6,10 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/src SRC)
|
|||
|
||||
IF (TD_LINUX)
|
||||
ADD_LIBRARY(tcq ${SRC})
|
||||
TARGET_LINK_LIBRARIES(tcq tutil common taos)
|
||||
IF (TD_SOMODE_STATIC)
|
||||
TARGET_LINK_LIBRARIES(tcq tutil common taos_static)
|
||||
ELSE ()
|
||||
TARGET_LINK_LIBRARIES(tcq tutil common taos)
|
||||
ENDIF ()
|
||||
ADD_SUBDIRECTORY(test)
|
||||
ENDIF ()
|
||||
|
|
|
@ -11,7 +11,11 @@ AUX_SOURCE_DIRECTORY(src SRC)
|
|||
|
||||
IF (TD_LINUX)
|
||||
ADD_EXECUTABLE(taosd ${SRC})
|
||||
TARGET_LINK_LIBRARIES(taosd mnode taos monitor http mqtt tsdb twal vnode cJson lz4 balance sync)
|
||||
IF (TD_SOMODE_STATIC)
|
||||
TARGET_LINK_LIBRARIES(taosd mnode taos_static monitor http mqtt tsdb twal vnode cJson lz4 balance sync)
|
||||
ELSE ()
|
||||
TARGET_LINK_LIBRARIES(taosd mnode taos monitor http mqtt tsdb twal vnode cJson lz4 balance sync)
|
||||
ENDIF ()
|
||||
|
||||
IF (TD_ACCOUNT)
|
||||
TARGET_LINK_LIBRARIES(taosd account)
|
||||
|
@ -35,4 +39,4 @@ IF (TD_LINUX)
|
|||
COMMAND ${CMAKE_COMMAND} -E echo charset UTF-8 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||
COMMENT "prepare taosd environment")
|
||||
ADD_CUSTOM_TARGET(${PREPARE_ENV_TARGET} ALL WORKING_DIRECTORY ${TD_EXECUTABLE_OUTPUT_PATH} DEPENDS ${PREPARE_ENV_CMD})
|
||||
ENDIF ()
|
||||
ENDIF ()
|
||||
|
|
|
@ -165,6 +165,13 @@ int32_t dnodeInitMgmtTimer() {
  return TSDB_CODE_SUCCESS;
}

void dnodeSendStatusMsgToMnode() {
  if (tsDnodeTmr != NULL && tsStatusTimer != NULL) {
    dInfo("force send status msg to mnode");
    taosTmrReset(dnodeSendStatusMsg, 3, NULL, tsDnodeTmr, &tsStatusTimer);
  }
}
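The new dnodeSendStatusMsgToMnode() does not send anything itself; it resets the status timer to fire in 3 ms so the next regular status report happens almost immediately. A minimal sketch of a caller is below; only dnodeSendStatusMsgToMnode() comes from this commit (its prototype is added to the dnode header later in the diff), the wrapper function is hypothetical.

/* Hypothetical caller: after a local change the mnode should learn about
 * promptly, force the next status message instead of waiting for the regular
 * status timer interval. Only dnodeSendStatusMsgToMnode() is from the diff. */
void onLocalDnodeStateChanged(void) {
  // ... apply the local change first ...

  // then ask the dnode to report to the mnode almost immediately
  // (the status timer is reset to a 3 ms delay inside the call)
  dnodeSendStatusMsgToMnode();
}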
|
||||
|
||||
void dnodeCleanupMgmtTimer() {
|
||||
if (tsStatusTimer != NULL) {
|
||||
taosTmrStopA(&tsStatusTimer);
|
||||
|
@ -717,6 +724,9 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
|
|||
pStatus->clusterCfg.maxVgroupsPerDb = htonl(tsMaxVgroupsPerDb);
|
||||
tstrncpy(pStatus->clusterCfg.arbitrator, tsArbitrator, TSDB_EP_LEN);
|
||||
tstrncpy(pStatus->clusterCfg.timezone, tsTimezone, 64);
|
||||
pStatus->clusterCfg.checkTime = 0;
|
||||
char timestr[32] = "1970-01-01 00:00:00.00";
|
||||
(void)taosParseTime(timestr, &pStatus->clusterCfg.checkTime, strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0);
|
||||
tstrncpy(pStatus->clusterCfg.locale, tsLocale, TSDB_LOCALE_LEN);
|
||||
tstrncpy(pStatus->clusterCfg.charset, tsCharset, TSDB_LOCALE_LEN);
|
||||
|
||||
|
|
|
@ -93,7 +93,7 @@ static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
|
|||
if (pMsg->pCont == NULL) return;
|
||||
|
||||
if (dnodeGetRunStatus() != TSDB_DNODE_RUN_STATUS_RUNING) {
|
||||
rspMsg.code = TSDB_CODE_RPC_NOT_READY;
|
||||
rspMsg.code = TSDB_CODE_APP_NOT_READY;
|
||||
rpcSendResponse(&rspMsg);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
dDebug("RPC %p, msg:%s is ignored since dnode not running", pMsg->handle, taosMsg[pMsg->msgType]);
|
||||
|
|
|
@ -119,7 +119,7 @@ void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
|
|||
|
||||
if (dnodeGetRunStatus() != TSDB_DNODE_RUN_STATUS_RUNING) {
|
||||
dError("RPC %p, shell msg:%s is ignored since dnode not running", pMsg->handle, taosMsg[pMsg->msgType]);
|
||||
rpcMsg.code = TSDB_CODE_RPC_NOT_READY;
|
||||
rpcMsg.code = TSDB_CODE_APP_NOT_READY;
|
||||
rpcSendResponse(&rpcMsg);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
return;
|
||||
|
@ -144,7 +144,7 @@ void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
|
|||
|
||||
static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char *secret, char *ckey) {
|
||||
int code = mnodeRetriveAuth(user, spi, encrypt, secret, ckey);
|
||||
if (code != TSDB_CODE_RPC_NOT_READY) return code;
|
||||
if (code != TSDB_CODE_APP_NOT_READY) return code;
|
||||
|
||||
SDMAuthMsg *pMsg = rpcMallocCont(sizeof(SDMAuthMsg));
|
||||
tstrncpy(pMsg->user, user, sizeof(pMsg->user));
|
||||
|
|
|
@ -119,11 +119,8 @@ int32_t main(int32_t argc, char *argv[]) {
  syslog(LOG_INFO, "Started TDengine service successfully.");

  for (int res = tsem_wait(&exitSem); res != 0; res = tsem_wait(&exitSem)) {
    if (res != EINTR) {
      syslog(LOG_ERR, "failed to wait exit semphore: %d", res);
      break;
    }
  if (tsem_wait(&exitSem) != 0) {
    syslog(LOG_ERR, "failed to wait exit semphore: %s", strerror(errno));
  }

  dnodeCleanUpSystem();
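The old loop retried tsem_wait() whenever it was interrupted by a signal (EINTR) and only logged a real failure; the replacement waits once and logs errno on any non-zero return. For reference, a standalone sketch of the EINTR-tolerant pattern on plain POSIX semaphores is below; it uses sem_wait() and errno directly rather than the project's tsem_* wrappers.

/* Sketch of an EINTR-tolerant wait on a POSIX semaphore, the pattern the old
 * loop above implemented with the project's tsem_wait() wrapper. Uses
 * sem_wait()/errno directly and is illustrative only. */
#include <errno.h>
#include <semaphore.h>
#include <stdio.h>

static int waitForExit(sem_t *exitSem) {
  for (;;) {
    if (sem_wait(exitSem) == 0) {
      return 0;                 // semaphore acquired, proceed to shutdown
    }
    if (errno == EINTR) {
      continue;                 // interrupted by a signal, retry the wait
    }
    perror("failed to wait exit semaphore");
    return -1;                  // real error, give up
  }
}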
|
||||
|
|
|
@ -65,6 +65,8 @@ void dnodeSendRpcMnodeWriteRsp(void *pMsg, int32_t code);
|
|||
void dnodeReprocessMnodeWriteMsg(void *pMsg);
|
||||
void dnodeDelayReprocessMnodeWriteMsg(void *pMsg);
|
||||
|
||||
void dnodeSendStatusMsgToMnode();
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -93,7 +93,7 @@ DLL_EXPORT int taos_result_precision(TAOS_RES *res); // get the time precision
|
|||
DLL_EXPORT void taos_free_result(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_field_count(TAOS_RES *tres);
|
||||
DLL_EXPORT int taos_num_fields(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_affected_rows(TAOS_RES *taos);
|
||||
DLL_EXPORT int taos_affected_rows(TAOS_RES *res);
|
||||
DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_select_db(TAOS *taos, const char *db);
|
||||
DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
|
||||
|
|
|
@ -63,7 +63,7 @@ typedef struct tstr {
|
|||
extern const int32_t TYPE_BYTES[11];
|
||||
// TODO: replace and remove code below
|
||||
#define CHAR_BYTES sizeof(char)
|
||||
#define SHORT_BYTES sizeof(short)
|
||||
#define SHORT_BYTES sizeof(int16_t)
|
||||
#define INT_BYTES sizeof(int)
|
||||
#define LONG_BYTES sizeof(int64_t)
|
||||
#define FLOAT_BYTES sizeof(float)
|
||||
|
@ -286,7 +286,8 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
|
|||
|
||||
#define TSDB_MAX_REPLICA 5
|
||||
|
||||
#define TSDB_TBNAME_COLUMN_INDEX (-1)
|
||||
#define TSDB_TBNAME_COLUMN_INDEX (-1)
|
||||
#define TSDB_UD_COLUMN_INDEX (-100)
|
||||
#define TSDB_MULTI_METERMETA_MAX_NUM 100000 // maximum batch size allowed to load metermeta
|
||||
|
||||
#define TSDB_MIN_CACHE_BLOCK_SIZE 1
|
||||
|
@ -395,6 +396,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
|
|||
#define TSDB_PORT_DNODEDNODE 5
|
||||
#define TSDB_PORT_SYNC 10
|
||||
#define TSDB_PORT_HTTP 11
|
||||
#define TSDB_PORT_ARBITRATOR 12
|
||||
|
||||
#define TAOS_QTYPE_RPC 0
|
||||
#define TAOS_QTYPE_FWD 1
|
||||
|
|
|
@ -65,6 +65,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_SESSION_ID, 0, 0x0010, "Invalid se
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_MSG_TYPE, 0, 0x0011, "Invalid message type")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_RESPONSE_TYPE, 0, 0x0012, "Invalid response type")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_TIME_STAMP, 0, 0x0013, "Invalid timestamp")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_APP_NOT_READY, 0, 0x0014, "Database not ready")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_FQDN_ERROR, 0, 0x0015, "Unable to resolve FQDN")
|
||||
|
||||
//common & util
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_COM_OPS_NOT_SUPPORT, 0, 0x0100, "Operation not supported")
|
||||
|
@ -95,6 +97,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_APP_ERROR, 0, 0x0211, "Applicatio
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_ACTION_IN_PROGRESS, 0, 0x0212, "Action in progress")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DISCONNECTED, 0, 0x0213, "Disconnected from service")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_WRITE_AUTH, 0, 0x0214, "No write permission")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, 0, 0x0215, "Connection killed")
|
||||
|
||||
// mnode
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, 0, 0x0300, "Message not processed")
|
||||
|
@ -137,6 +140,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_READY, 0, 0x033C, "Cluster no
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_ALREADY_EXIST, 0, 0x0340, "Account already exists")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT, 0, 0x0341, "Invalid account")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT_OPTION, 0, 0x0342, "Invalid account options")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_EXPIRED, 0, 0x0343, "Account authorization has expired")
|
||||
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_USER_ALREADY_EXIST, 0, 0x0350, "User already exists")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_USER, 0, 0x0351, "Invalid user")
|
||||
|
@ -184,7 +188,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_DISK_PERMISSIONS, 0, 0x0506, "No write p
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, 0, 0x0507, "Missing data file")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, 0, 0x0508, "Out of memory")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "Unexpected generic error in vnode")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_STATUS, 0, 0x0510, "Database not ready")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "Database suspended")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "Write operation denied")
|
||||
|
||||
|
@ -220,6 +223,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_EXCEED_TAGS_LIMIT, 0, 0x0706, "Tag condit
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_READY, 0, 0x0707, "Query not ready")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_HAS_RSP, 0, 0x0708, "Query should response")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_IN_EXEC, 0, 0x0709, "Multiple retrieval of this query")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW, 0, 0x070A, "Too many time window in query")
|
||||
|
||||
// grant
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, 0, 0x0800, "License expired")
|
||||
|
|
|
@ -133,7 +133,7 @@ enum _mgmt_table {
|
|||
TSDB_MGMT_TABLE_MODULE,
|
||||
TSDB_MGMT_TABLE_QUERIES,
|
||||
TSDB_MGMT_TABLE_STREAMS,
|
||||
TSDB_MGMT_TABLE_CONFIGS,
|
||||
TSDB_MGMT_TABLE_VARIABLES,
|
||||
TSDB_MGMT_TABLE_CONNS,
|
||||
TSDB_MGMT_TABLE_SCORES,
|
||||
TSDB_MGMT_TABLE_GRANTS,
|
||||
|
@ -167,9 +167,9 @@ enum _mgmt_table {
|
|||
#define TSDB_VN_WRITE_ACCCESS ((char)0x2)
|
||||
#define TSDB_VN_ALL_ACCCESS (TSDB_VN_READ_ACCCESS | TSDB_VN_WRITE_ACCCESS)
|
||||
|
||||
#define TSDB_COL_NORMAL 0x0u
|
||||
#define TSDB_COL_TAG 0x1u
|
||||
#define TSDB_COL_JOIN 0x2u
|
||||
#define TSDB_COL_NORMAL 0x0u // the normal column of the table
|
||||
#define TSDB_COL_TAG 0x1u // the tag column type
|
||||
#define TSDB_COL_UDC 0x2u // the user specified normal string column, it is a dummy column
|
||||
|
||||
extern char *taosMsg[];
|
||||
|
||||
|
@ -424,7 +424,10 @@ typedef struct SColumnInfo {
  int16_t type;
  int16_t bytes;
  int16_t numOfFilters;
  SColumnFilterInfo *filters;
  union{
    int64_t placeholder;
    SColumnFilterInfo *filters;
  };
} SColumnInfo;
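The filters pointer is wrapped in an anonymous union with an int64_t placeholder, which keeps that member 8 bytes wide whether the build is 32-bit or 64-bit, so SColumnInfo keeps one layout when it is copied into messages. That reasoning is inferred from the change, not stated in the diff; the sketch below uses stand-in types to show the size guarantee.

/* Sketch: the union pins the filters slot to 8 bytes on every platform, so a
 * struct that embeds it has one layout on 32-bit and 64-bit builds. The intent
 * is inferred from the change above; the *Demo types are stand-ins. */
#include <assert.h>
#include <stdint.h>

typedef struct SColumnFilterInfoDemo { int16_t lowerRelOptr; } SColumnFilterInfoDemo;

typedef union SColFilterSlotDemo {
  int64_t                placeholder;  // 8 bytes everywhere
  SColumnFilterInfoDemo *filters;      // 4 bytes on 32-bit, 8 bytes on 64-bit
} SColFilterSlotDemo;

// the slot is 8 bytes regardless of sizeof(void *)
static_assert(sizeof(SColFilterSlotDemo) == 8, "filters slot must be 8 bytes");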
typedef struct STableIdInfo {
|
||||
|
@ -575,6 +578,7 @@ typedef struct {
|
|||
int32_t maxVgroupsPerDb;
|
||||
char arbitrator[TSDB_EP_LEN]; // tsArbitrator
|
||||
char timezone[64]; // tsTimezone
|
||||
int64_t checkTime; // 1970-01-01 00:00:00.000
|
||||
char locale[TSDB_LOCALE_LEN]; // tsLocale
|
||||
char charset[TSDB_LOCALE_LEN]; // tsCharset
|
||||
} SClusterCfg;
|
||||
|
|
|
@ -115,7 +115,7 @@ int tsdbDropTable(TSDB_REPO_T *pRepo, STableId tableId);
|
|||
int tsdbUpdateTableTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg);
|
||||
TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid);
|
||||
|
||||
uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_t eindex, int32_t *size);
|
||||
uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size);
|
||||
|
||||
// the TSDB repository info
|
||||
typedef struct STsdbRepoInfo {
|
||||
|
|
|
@ -64,7 +64,7 @@ typedef struct {
|
|||
if name is provided(name[0] is not zero), get the named file at the specified index. If not there, return
|
||||
zero. If it is there, set the size to file size, and return file magic number. Index shall not be updated.
|
||||
*/
|
||||
typedef uint32_t (*FGetFileInfo)(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int32_t *size, uint64_t *fversion);
|
||||
typedef uint32_t (*FGetFileInfo)(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion);
|
||||
|
||||
// get the wal file from index or after
|
||||
// return value, -1: error, 1:more wal files, 0:last WAL. if name[0]==0, no WAL file
|
||||
|
|
|
@ -69,7 +69,7 @@
|
|||
#define TK_QUERIES 51
|
||||
#define TK_CONNECTIONS 52
|
||||
#define TK_STREAMS 53
|
||||
#define TK_CONFIGS 54
|
||||
#define TK_VARIABLES 54
|
||||
#define TK_SCORES 55
|
||||
#define TK_GRANTS 56
|
||||
#define TK_VNODES 57
|
||||
|
|
|
@ -11,11 +11,11 @@ IF (TD_LINUX)
|
|||
LIST(REMOVE_ITEM SRC ./src/shellDarwin.c)
|
||||
ADD_EXECUTABLE(shell ${SRC})
|
||||
|
||||
# IF (TD_PAGMODE_LITE)
|
||||
IF (TD_SOMODE_STATIC)
|
||||
TARGET_LINK_LIBRARIES(shell taos_static)
|
||||
ELSE ()
|
||||
TARGET_LINK_LIBRARIES(shell taos)
|
||||
# ELSE ()
|
||||
# TARGET_LINK_LIBRARIES(shell taos_static)
|
||||
# ENDIF ()
|
||||
ENDIF ()
|
||||
|
||||
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
|
||||
ELSEIF (TD_WINDOWS)
|
||||
|
|
|
@ -25,6 +25,8 @@
|
|||
#include "taosdef.h"
|
||||
#include "taoserror.h"
|
||||
#include "tglobal.h"
|
||||
#include "tsclient.h"
|
||||
|
||||
#include <regex.h>
|
||||
|
||||
/**************** Global variables ****************/
|
||||
|
@ -64,11 +66,6 @@ TAOS *shellInit(SShellArguments *args) {
|
|||
}
|
||||
|
||||
taos_init();
|
||||
/*
|
||||
* set tsTableMetaKeepTimer = 3000ms
|
||||
* means not save cache in shell
|
||||
*/
|
||||
tsTableMetaKeepTimer = 3000;
|
||||
|
||||
// Connect to the database.
|
||||
TAOS *con = NULL;
|
||||
|
@ -287,7 +284,7 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
|
|||
st = taosGetTimestampUs();
|
||||
|
||||
TAOS_RES* pSql = taos_query(con, command);
|
||||
result = pSql; // set it into the global variable
|
||||
atomic_store_ptr(&result, pSql); // set the global TAOS_RES pointer
|
||||
|
||||
if (taos_errno(pSql)) {
|
||||
taos_error(pSql);
|
||||
|
@ -298,17 +295,16 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
|
|||
fprintf(stdout, "Database changed.\n\n");
|
||||
fflush(stdout);
|
||||
|
||||
result = NULL;
|
||||
atomic_store_ptr(&result, 0);
|
||||
taos_free_result(pSql);
|
||||
return;
|
||||
}
|
||||
|
||||
int num_fields = taos_field_count(pSql);
|
||||
if (num_fields != 0) { // select and show kinds of commands
|
||||
if (!tscIsUpdateQuery(pSql)) { // select and show kinds of commands
|
||||
int error_no = 0;
|
||||
int numOfRows = shellDumpResult(pSql, fname, &error_no, printMode);
|
||||
if (numOfRows < 0) {
|
||||
result = NULL;
|
||||
atomic_store_ptr(&result, 0);
|
||||
taos_free_result(pSql);
|
||||
return;
|
||||
}
|
||||
|
@ -331,7 +327,7 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
|
|||
wordfree(&full_path);
|
||||
}
|
||||
|
||||
result = NULL;
|
||||
atomic_store_ptr(&result, 0);
|
||||
taos_free_result(pSql);
|
||||
}
|
||||
|
||||
|
@ -497,7 +493,6 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) {
|
|||
} while( row != NULL);
|
||||
|
||||
result = NULL;
|
||||
//taos_free_result(tres);
|
||||
fclose(fp);
|
||||
|
||||
return numOfRows;
|
||||
|
@ -802,8 +797,8 @@ void write_history() {
|
|||
}
|
||||
|
||||
void taos_error(TAOS_RES *tres) {
|
||||
atomic_store_ptr(&result, 0);
|
||||
fprintf(stderr, "\nDB error: %s\n", taos_errstr(tres));
|
||||
result = NULL;
|
||||
taos_free_result(tres);
|
||||
}
|
||||
|
||||
|
|
|
@ -18,11 +18,10 @@

pthread_t pid;

// TODO: IMPLEMENT INTERRUPT HANDLER.
void interruptHandler(int signum) {
void shellQueryInterruptHandler(int signum) {
#ifdef LINUX
  taos_stop_query(result);
  result = NULL;
  void* pResHandle = atomic_val_compare_exchange_64(&result, result, 0);
  taos_stop_query(pResHandle);
#else
  printf("\nReceive ctrl+c or other signal, quit shell.\n");
  exit(0);

@ -86,7 +85,7 @@ int main(int argc, char* argv[]) {
  struct sigaction act;
  memset(&act, 0, sizeof(struct sigaction));

  act.sa_handler = interruptHandler;
  act.sa_handler = shellQueryInterruptHandler;
  sigaction(SIGTERM, &act, NULL);
  sigaction(SIGINT, &act, NULL);
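The renamed handler no longer reads the global result pointer and then clears it in two separate steps; it atomically claims the pointer (swapping in 0) and stops whatever query it took ownership of, so it cannot race with the shell thread that publishes and clears the same pointer through atomic_store_ptr. Below is a standalone sketch of that claim-then-act pattern using C11 atomics in place of the project's atomic_* macros; the types and function names are stand-ins.

/* Standalone sketch of the claim-then-act pattern used by the new handler:
 * the signal handler atomically takes ownership of the in-flight query pointer
 * before acting on it. C11 atomics replace atomic_store_ptr /
 * atomic_val_compare_exchange_64 here; Query and stop_query are stand-ins. */
#include <stdatomic.h>
#include <stddef.h>

typedef struct Query { int id; } Query;                  // stand-in for TAOS_RES
static void stop_query(Query *q) { (void)q; }            // stand-in for taos_stop_query()

static _Atomic(Query *) g_result = NULL;                 // published by the worker thread

static void on_query_started(Query *q)  { atomic_store(&g_result, q); }
static void on_query_finished(void)     { atomic_store(&g_result, NULL); }

static void interrupt_handler(int signum) {
  (void)signum;
  // take the pointer and clear it in one step; only one side ever stops the query
  Query *q = atomic_exchange(&g_result, NULL);
  if (q != NULL) {
    stop_query(q);   // a real handler should restrict itself to async-signal-safe work
  }
}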
|
|
@ -8,11 +8,11 @@ IF (TD_LINUX)
|
|||
AUX_SOURCE_DIRECTORY(. SRC)
|
||||
ADD_EXECUTABLE(taosdemo ${SRC})
|
||||
|
||||
# IF (TD_PAGMODE_LITE)
|
||||
IF (TD_SOMODE_STATIC)
|
||||
TARGET_LINK_LIBRARIES(taosdemo taos_static)
|
||||
ELSE ()
|
||||
TARGET_LINK_LIBRARIES(taosdemo taos)
|
||||
# ELSE ()
|
||||
# TARGET_LINK_LIBRARIES(taosdemo taos_static)
|
||||
# ENDIF ()
|
||||
ENDIF ()
|
||||
ELSEIF (TD_WINDOWS)
|
||||
AUX_SOURCE_DIRECTORY(. SRC)
|
||||
ADD_EXECUTABLE(taosdemo ${SRC})
|
||||
|
|
|
@ -57,29 +57,30 @@ extern char configDir[];
|
|||
|
||||
/* Used by main to communicate with parse_opt. */
|
||||
typedef struct DemoArguments {
|
||||
char *host;
|
||||
uint16_t port;
|
||||
char *user;
|
||||
char *password;
|
||||
char *database;
|
||||
char *tb_prefix;
|
||||
char *sqlFile;
|
||||
bool use_metric;
|
||||
bool insert_only;
|
||||
char *output_file;
|
||||
int mode;
|
||||
char *datatype[MAX_NUM_DATATYPE+1];
|
||||
int len_of_binary;
|
||||
int num_of_CPR;
|
||||
int num_of_threads;
|
||||
int num_of_RPR;
|
||||
int num_of_tables;
|
||||
int num_of_DPT;
|
||||
int abort;
|
||||
int order;
|
||||
int rate;
|
||||
int method_of_delete;
|
||||
char **arg_list;
|
||||
char * host;
|
||||
uint16_t port;
|
||||
char * user;
|
||||
char * password;
|
||||
char * database;
|
||||
int replica;
|
||||
char * tb_prefix;
|
||||
char * sqlFile;
|
||||
bool use_metric;
|
||||
bool insert_only;
|
||||
char * output_file;
|
||||
int mode;
|
||||
char * datatype[MAX_NUM_DATATYPE + 1];
|
||||
int len_of_binary;
|
||||
int num_of_CPR;
|
||||
int num_of_threads;
|
||||
int num_of_RPR;
|
||||
int num_of_tables;
|
||||
int num_of_DPT;
|
||||
int abort;
|
||||
int order;
|
||||
int rate;
|
||||
int method_of_delete;
|
||||
char ** arg_list;
|
||||
} SDemoArguments;
|
||||
|
||||
#ifdef LINUX
|
||||
|
@ -90,6 +91,7 @@ typedef struct DemoArguments {
|
|||
{0, 'u', "user", 0, "The TDengine user name to use when connecting to the server. Default is 'root'.", 2},
|
||||
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
|
||||
{0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
|
||||
{0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 3},
|
||||
{0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3},
|
||||
{0, 's', "sql file", 0, "The select sql file.", 3},
|
||||
{0, 'M', 0, 0, "Use metric flag.", 13},
|
||||
|
@ -225,6 +227,13 @@ typedef struct DemoArguments {
|
|||
arguments->rate = 10;
|
||||
}
|
||||
break;
|
||||
case 'a':
|
||||
arguments->replica = atoi(arg);
|
||||
if (arguments->replica > 3 || arguments->replica < 1)
|
||||
{
|
||||
arguments->replica = 1;
|
||||
}
|
||||
break;
|
||||
case 'D':
|
||||
arguments->method_of_delete = atoi(arg);
|
||||
if (arguments->method_of_delete < 0 || arguments->method_of_delete > 3)
|
||||
|
@ -273,6 +282,8 @@ typedef struct DemoArguments {
|
|||
printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'taosdata'.");
|
||||
printf("%s%s\n", indent, "-d");
|
||||
printf("%s%s%s\n", indent, indent, "database, Destination database. Default is 'test'.");
|
||||
printf("%s%s\n", indent, "-a");
|
||||
printf("%s%s%s\n", indent, indent, "replica, Set the replica parameters of the database, Default 1, min: 1, max: 3.");
|
||||
printf("%s%s\n", indent, "-m");
|
||||
printf("%s%s%s\n", indent, indent, "table_prefix, Table prefix name. Default is 't'.");
|
||||
printf("%s%s\n", indent, "-s");
|
||||
|
@ -396,6 +407,11 @@ typedef struct DemoArguments {
|
|||
if (arguments->order == 1 && (arguments->rate > 50 || arguments->rate <= 0)) {
|
||||
arguments->rate = 10;
|
||||
}
|
||||
} else if (strcmp(argv[i], "-a") == 0) {
|
||||
arguments->replica = atoi(argv[++i]);
|
||||
if (arguments->replica > 3 || arguments->replica < 1) {
|
||||
arguments->replica = 1;
|
||||
}
|
||||
} else if (strcmp(argv[i], "-D") == 0) {
|
||||
arguments->method_of_delete = atoi(argv[++i]);
|
||||
if (arguments->method_of_delete < 0 || arguments->method_of_delete > 3) {
|
||||
|
@ -499,6 +515,7 @@ int main(int argc, char *argv[]) {
|
|||
"root", // user
|
||||
"taosdata", // password
|
||||
"test", // database
|
||||
1, // replica
|
||||
"t", // tb_prefix
|
||||
NULL,
|
||||
false, // use_metric
|
||||
|
@ -564,6 +581,7 @@ int main(int argc, char *argv[]) {
|
|||
int count_data_type = 0;
|
||||
char dataString[STRING_LEN];
|
||||
bool do_aggreFunc = true;
|
||||
int replica = arguments.replica;
|
||||
|
||||
if (NULL != arguments.sqlFile) {
|
||||
TAOS* qtaos = taos_connect(ip_addr, user, pass, db_name, port);
|
||||
|
@ -661,7 +679,7 @@ int main(int argc, char *argv[]) {
|
|||
TAOS_RES* res = taos_query(taos, command);
|
||||
taos_free_result(res);
|
||||
|
||||
sprintf(command, "create database %s;", db_name);
|
||||
sprintf(command, "create database %s replica %d;", db_name, replica);
|
||||
res = taos_query(taos, command);
|
||||
taos_free_result(res);
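The new -a option threads a replica count from the command line into the create-database statement built here. A small standalone sketch of that flow, with the 1..3 clamp taken from the option parser above and snprintf used for bounds safety; the database name "test" is just the taosdemo default:

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[]) {
    int replica = (argc > 1) ? atoi(argv[1]) : 1;
    if (replica < 1 || replica > 3) replica = 1;    /* same clamp as the -a handler above */

    char command[256];
    snprintf(command, sizeof(command), "create database %s replica %d;", "test", replica);
    printf("%s\n", command);                        /* taosdemo hands this to taos_query() */
    return 0;
}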
|
||||
|
||||
|
|
|
@ -27,35 +27,46 @@
|
|||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include <wordexp.h>
|
||||
|
||||
#define MAX_PKG_LEN (64*1000)
|
||||
#define BUFFER_SIZE (MAX_PKG_LEN + 1024)
|
||||
#define MAX_PKG_LEN (64*1000)
|
||||
#define BUFFER_SIZE (MAX_PKG_LEN + 1024)
|
||||
#define TEST_FQDN_LEN 128
|
||||
#define TEST_IPv4ADDR_LEN 16
|
||||
|
||||
typedef struct {
|
||||
int port;
|
||||
char *host;
|
||||
uint16_t port;
|
||||
uint32_t hostIp;
|
||||
char fqdn[TEST_FQDN_LEN];
|
||||
uint16_t pktLen;
|
||||
} info_s;
|
||||
|
||||
typedef struct Arguments {
|
||||
char * host;
|
||||
char host[TEST_IPv4ADDR_LEN];
|
||||
char fqdn[TEST_FQDN_LEN];
|
||||
uint16_t port;
|
||||
uint16_t max_port;
|
||||
uint16_t pktLen;
|
||||
} SArguments;
|
||||
|
||||
static struct argp_option options[] = {
|
||||
{0, 'h', "host", 0, "The host to connect to TDEngine. Default is localhost.", 0},
|
||||
{0, 'h', "host ip", 0, "The host ip to connect to TDEngine. Default is localhost.", 0},
|
||||
{0, 'p', "port", 0, "The TCP or UDP port number to use for the connection. Default is 6030.", 1},
|
||||
{0, 'm', "max port", 0, "The max TCP or UDP port number to use for the connection. Default is 6060.", 2},
|
||||
{0, 'm', "max port", 0, "The max TCP or UDP port number to use for the connection. Default is 6042.", 2},
|
||||
{0, 'f', "host fqdn", 0, "The host fqdn to connect to TDEngine.", 3},
|
||||
{0, 'l', "test pkg len", 0, "The len of pkg for test. Default is 1000 Bytes, max not greater than 64k Bytes.\nNotes: This parameter must be consistent between the client and the server.", 3}};
|
||||
|
||||
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
||||
|
||||
wordexp_t full_path;
|
||||
SArguments *arguments = state->input;
|
||||
switch (key) {
|
||||
case 'h':
|
||||
arguments->host = arg;
|
||||
if (wordexp(arg, &full_path, 0) != 0) {
|
||||
fprintf(stderr, "Invalid host ip %s\n", arg);
|
||||
return -1;
|
||||
}
|
||||
strcpy(arguments->host, full_path.we_wordv[0]);
|
||||
wordfree(&full_path);
|
||||
break;
|
||||
case 'p':
|
||||
arguments->port = atoi(arg);
|
||||
|
@ -66,6 +77,14 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
|||
case 'l':
|
||||
arguments->pktLen = atoi(arg);
|
||||
break;
|
||||
case 'f':
|
||||
if (wordexp(arg, &full_path, 0) != 0) {
|
||||
fprintf(stderr, "Invalid host fqdn %s\n", arg);
|
||||
return -1;
|
||||
}
|
||||
strcpy(arguments->fqdn, full_path.we_wordv[0]);
|
||||
wordfree(&full_path);
|
||||
break;
|
||||
|
||||
default:
|
||||
return ARGP_ERR_UNKNOWN;
|
||||
|
@ -76,8 +95,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
|||
static struct argp argp = {options, parse_opt, 0, 0};
|
||||
|
||||
int checkTcpPort(info_s *info) {
|
||||
int port = info->port;
|
||||
char *host = info->host;
|
||||
int clientSocket;
|
||||
|
||||
struct sockaddr_in serverAddr;
|
||||
|
@ -88,21 +105,35 @@ int checkTcpPort(info_s *info) {
|
|||
printf("socket() fail: %s\n", strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
serverAddr.sin_family = AF_INET;
|
||||
serverAddr.sin_port = htons(port);
|
||||
|
||||
serverAddr.sin_addr.s_addr = inet_addr(host);
|
||||
// set send and recv overtime
|
||||
struct timeval timeout;
|
||||
timeout.tv_sec = 2; //s
|
||||
timeout.tv_usec = 0; //us
|
||||
if (setsockopt(clientSocket, SOL_SOCKET,SO_SNDTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
|
||||
perror("setsockopt send timer failed:");
|
||||
}
|
||||
if (setsockopt(clientSocket, SOL_SOCKET,SO_RCVTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
|
||||
perror("setsockopt recv timer failed:");
|
||||
}
|
||||
|
||||
serverAddr.sin_family = AF_INET;
|
||||
serverAddr.sin_port = htons(info->port);
|
||||
|
||||
serverAddr.sin_addr.s_addr = info->hostIp;
|
||||
|
||||
//printf("=================================\n");
|
||||
if (connect(clientSocket, (struct sockaddr *)&serverAddr, sizeof(serverAddr)) < 0) {
|
||||
printf("connect() fail: %s\n", strerror(errno));
|
||||
printf("connect() fail: %s\t", strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
//printf("Connect to: %s:%d...success\n", host, port);
|
||||
memset(sendbuf, 0, BUFFER_SIZE);
|
||||
memset(recvbuf, 0, BUFFER_SIZE);
|
||||
|
||||
sprintf(sendbuf, "client send tcp pkg to %s:%d, content: 1122334455", host, port);
|
||||
struct in_addr ipStr;
|
||||
memcpy(&ipStr, &info->hostIp, 4);
|
||||
sprintf(sendbuf, "client send tcp pkg to %s:%d, content: 1122334455", inet_ntoa(ipStr), info->port);
|
||||
sprintf(sendbuf + info->pktLen - 16, "1122334455667788");
|
||||
|
||||
send(clientSocket, sendbuf, info->pktLen, 0);
|
||||
|
@ -120,7 +151,7 @@ int checkTcpPort(info_s *info) {
|
|||
if (errno == EINTR) {
|
||||
continue;
|
||||
} else {
|
||||
printf("recv ack pkg from TCP port: %d fail:%s.\n", port, strerror(errno));
|
||||
printf("recv ack pkg from TCP port: %d fail:%s.\n", info->port, strerror(errno));
|
||||
close(clientSocket);
|
||||
return -1;
|
||||
}
|
||||
|
@ -132,7 +163,7 @@ int checkTcpPort(info_s *info) {
|
|||
}
|
||||
|
||||
if (iDataNum < info->pktLen) {
|
||||
printf("recv ack pkg len: %d, less than req pkg len: %d from tcp port: %d\n", iDataNum, info->pktLen, port);
|
||||
printf("recv ack pkg len: %d, less than req pkg len: %d from tcp port: %d\n", iDataNum, info->pktLen, info->port);
|
||||
return -1;
|
||||
}
|
||||
//printf("Read ack pkg len:%d from tcp port: %d, buffer: %s %s\n", info->pktLen, port, recvbuf, recvbuf+iDataNum-8);
|
||||
|
@ -142,8 +173,6 @@ int checkTcpPort(info_s *info) {
|
|||
}
|
||||
|
||||
int checkUdpPort(info_s *info) {
|
||||
int port = info->port;
|
||||
char *host = info->host;
|
||||
int clientSocket;
|
||||
|
||||
struct sockaddr_in serverAddr;
|
||||
|
@ -154,15 +183,28 @@ int checkUdpPort(info_s *info) {
|
|||
perror("socket");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// set overtime
|
||||
struct timeval timeout;
|
||||
timeout.tv_sec = 2; //s
|
||||
timeout.tv_usec = 0; //us
|
||||
if (setsockopt(clientSocket, SOL_SOCKET,SO_SNDTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
|
||||
perror("setsockopt send timer failed:");
|
||||
}
|
||||
if (setsockopt(clientSocket, SOL_SOCKET,SO_RCVTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
|
||||
perror("setsockopt recv timer failed:");
|
||||
}
|
||||
|
||||
serverAddr.sin_family = AF_INET;
|
||||
serverAddr.sin_port = htons(port);
|
||||
serverAddr.sin_addr.s_addr = inet_addr(host);
|
||||
serverAddr.sin_port = htons(info->port);
|
||||
serverAddr.sin_addr.s_addr = info->hostIp;
|
||||
|
||||
memset(sendbuf, 0, BUFFER_SIZE);
|
||||
memset(recvbuf, 0, BUFFER_SIZE);
|
||||
|
||||
sprintf(sendbuf, "client send udp pkg to %s:%d, content: 1122334455", host, port);
|
||||
struct in_addr ipStr;
|
||||
memcpy(&ipStr, &info->hostIp, 4);
|
||||
sprintf(sendbuf, "client send udp pkg to %s:%d, content: 1122334455", inet_ntoa(ipStr), info->port);
|
||||
sprintf(sendbuf + info->pktLen - 16, "1122334455667788");
|
||||
|
||||
socklen_t sin_size = sizeof(*(struct sockaddr *)&serverAddr);
|
||||
|
@ -176,7 +218,7 @@ int checkUdpPort(info_s *info) {
|
|||
iDataNum = recvfrom(clientSocket, recvbuf, BUFFER_SIZE, 0, (struct sockaddr *)&serverAddr, &sin_size);
|
||||
|
||||
if (iDataNum < info->pktLen) {
|
||||
printf("Read ack pkg len: %d, less than req pkg len: %d from udp port: %d\n", iDataNum, info->pktLen, port);
|
||||
printf("Read ack pkg len: %d, less than req pkg len: %d from udp port: %d\t\t", iDataNum, info->pktLen, info->port);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -185,10 +227,61 @@ int checkUdpPort(info_s *info) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
SArguments arguments = {"127.0.0.1", 6030, 6060, 1000};
|
||||
info_s info;
|
||||
int32_t getIpFromFqdn(const char *fqdn, uint32_t* ip) {
|
||||
struct addrinfo hints = {0};
|
||||
hints.ai_family = AF_UNSPEC;
|
||||
hints.ai_socktype = SOCK_STREAM;
|
||||
|
||||
struct addrinfo *result = NULL;
|
||||
|
||||
int32_t ret = getaddrinfo(fqdn, NULL, &hints, &result);
|
||||
if (result) {
|
||||
struct sockaddr *sa = result->ai_addr;
|
||||
struct sockaddr_in *si = (struct sockaddr_in*)sa;
|
||||
struct in_addr ia = si->sin_addr;
|
||||
*ip = ia.s_addr;
|
||||
freeaddrinfo(result);
|
||||
return 0;
|
||||
} else {
|
||||
printf("Failed get the ip address from fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
|
||||
return -1;
|
||||
}
|
||||
}
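getIpFromFqdn() above resolves the -f argument once and hands a raw IPv4 address to the port checks, which later format it with inet_ntoa(). The following standalone snippet exercises the same resolution path; the lookup is narrowed to AF_INET here since the out-parameter is a single uint32_t:

#include <arpa/inet.h>
#include <netdb.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char *argv[]) {
    const char *fqdn = (argc > 1) ? argv[1] : "localhost";
    struct addrinfo hints = {0}, *res = NULL;
    hints.ai_family   = AF_INET;                    /* IPv4 only, matching the uint32_t ip */
    hints.ai_socktype = SOCK_STREAM;

    int ret = getaddrinfo(fqdn, NULL, &hints, &res);
    if (ret != 0 || res == NULL) {
        fprintf(stderr, "resolve %s failed: %s\n", fqdn, gai_strerror(ret));
        return 1;
    }
    struct in_addr ia = ((struct sockaddr_in *)res->ai_addr)->sin_addr;
    printf("%s -> %s\n", fqdn, inet_ntoa(ia));      /* same dotted-quad form as the port checks */
    freeaddrinfo(res);
    return 0;
}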
|
||||
|
||||
void checkPort(uint32_t hostIp, uint16_t startPort, uint16_t maxPort, uint16_t pktLen) {
|
||||
int ret;
|
||||
info_s info;
|
||||
memset(&info, 0, sizeof(info_s));
|
||||
info.hostIp = hostIp;
|
||||
info.pktLen = pktLen;
|
||||
|
||||
for (uint16_t port = startPort; port <= maxPort; port++) {
|
||||
//printf("test: %s:%d\n", info.host, port);
|
||||
printf("\n");
|
||||
|
||||
info.port = port;
|
||||
ret = checkTcpPort(&info);
|
||||
if (ret != 0) {
|
||||
printf("tcp port:%d test fail.\t\n", port);
|
||||
} else {
|
||||
printf("tcp port:%d test ok.\t\t", port);
|
||||
}
|
||||
|
||||
ret = checkUdpPort(&info);
|
||||
if (ret != 0) {
|
||||
printf("udp port:%d test fail.\t\n", port);
|
||||
} else {
|
||||
printf("udp port:%d test ok.\t\t", port);
|
||||
}
|
||||
}
|
||||
|
||||
printf("\n");
|
||||
return ;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
SArguments arguments = {"127.0.0.1", "", 6030, 6042, 1000};
|
||||
int ret;
|
||||
|
||||
argp_parse(&argp, argc, argv, 0, 0, &arguments);
|
||||
if (arguments.pktLen > MAX_PKG_LEN) {
|
||||
|
@ -196,32 +289,25 @@ int main(int argc, char *argv[]) {
|
|||
exit(0);
|
||||
}
|
||||
|
||||
printf("host: %s\tport: %d\tmax_port: %d\tpkgLen: %d\n", arguments.host, arguments.port, arguments.max_port, arguments.pktLen);
|
||||
printf("host ip: %s\thost fqdn: %s\tport: %d\tmax_port: %d\tpkgLen: %d\n", arguments.host, arguments.fqdn, arguments.port, arguments.max_port, arguments.pktLen);
|
||||
|
||||
int port = arguments.port;
|
||||
|
||||
info.host = arguments.host;
|
||||
info.pktLen = arguments.pktLen;
|
||||
|
||||
for (; port <= arguments.max_port; port++) {
|
||||
//printf("test: %s:%d\n", info.host, port);
|
||||
if (arguments.host[0] != 0) {
|
||||
printf("\nstart connect to %s test:\n", arguments.host);
|
||||
checkPort(inet_addr(arguments.host), arguments.port, arguments.max_port, arguments.pktLen);
|
||||
printf("\n");
|
||||
|
||||
info.port = port;
|
||||
ret = checkTcpPort(&info);
|
||||
if (ret != 0) {
|
||||
printf("tcp port:%d test fail.\t\t", port);
|
||||
} else {
|
||||
printf("tcp port:%d test ok.\t\t", port);
|
||||
}
|
||||
|
||||
ret = checkUdpPort(&info);
|
||||
if (ret != 0) {
|
||||
printf("udp port:%d test fail.\t\t", port);
|
||||
} else {
|
||||
printf("udp port:%d test ok.\t\t", port);
|
||||
}
|
||||
}
|
||||
printf("\n");
|
||||
|
||||
if (arguments.fqdn[0] != 0) {
|
||||
uint32_t hostIp = 0;
|
||||
ret = getIpFromFqdn(arguments.fqdn, &hostIp);
|
||||
if (ret) {
|
||||
printf("\n");
|
||||
return 0;
|
||||
}
|
||||
printf("\nstart connetc to %s test:\n", arguments.fqdn);
|
||||
checkPort(hostIp, arguments.port, arguments.max_port, arguments.pktLen);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -142,9 +142,9 @@ static void *bindTcpPort(void *sarg) {
|
|||
printf("recv Client: %s pkg from TCP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum);
|
||||
if (iDataNum > 0) {
|
||||
send(client, buffer, iDataNum, 0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
close(serverSocket);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -201,7 +201,7 @@ static void *bindUdpPort(void *sarg) {
|
|||
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
SArguments arguments = {"127.0.0.1", 6030, 6060, 1000};
|
||||
SArguments arguments = {"127.0.0.1", 6030, 6042, 1000};
|
||||
argp_parse(&argp, argc, argv, 0, 0, &arguments);
|
||||
if (arguments.pktLen > MAX_PKG_LEN) {
|
||||
printf("test pkg len overflow: %d, max len not greater than %d bytes\n", arguments.pktLen, MAX_PKG_LEN);
|
||||
|
|
|
@ -41,7 +41,7 @@
|
|||
static void * tsDbSdb = NULL;
|
||||
static int32_t tsDbUpdateSize;
|
||||
|
||||
static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, void *pMsg);
|
||||
static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, SMnodeMsg *pMsg);
|
||||
static int32_t mnodeDropDb(SMnodeMsg *newMsg);
|
||||
static int32_t mnodeSetDbDropping(SDbObj *pDb);
|
||||
static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
|
||||
|
@ -343,7 +343,7 @@ static int32_t mnodeCreateDbCb(SMnodeMsg *pMsg, int32_t code) {
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, void *pMsg) {
|
||||
static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, SMnodeMsg *pMsg) {
|
||||
int32_t code = acctCheck(pAcct, ACCT_GRANT_DB);
|
||||
if (code != 0) return code;
|
||||
|
||||
|
@ -354,7 +354,7 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, void *pMs
|
|||
mDebug("db:%s, already exist, ignore exist is set", pCreate->db);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
mError("db:%s, is already exist, ignore exist not set", pCreate->db);
|
||||
mError("db:%s, already exist, ignore exist not set", pCreate->db);
|
||||
return TSDB_CODE_MND_DB_ALREADY_EXIST;
|
||||
}
|
||||
}
|
||||
|
@ -393,6 +393,9 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, void *pMs
|
|||
return code;
|
||||
}
|
||||
|
||||
pMsg->pDb = pDb;
|
||||
mnodeIncDbRef(pDb);
|
||||
|
||||
SSdbOper oper = {
|
||||
.type = SDB_OPER_GLOBAL,
|
||||
.table = tsDbSdb,
|
||||
|
@ -405,6 +408,7 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, void *pMs
|
|||
code = sdbInsertRow(&oper);
|
||||
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
|
||||
mError("db:%s, failed to create, reason:%s", pDb->name, tstrerror(code));
|
||||
pMsg->pDb = NULL;
|
||||
mnodeDestroyDb(pDb);
|
||||
}
|
||||
|
||||
|
@ -1055,10 +1059,12 @@ static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg) {
|
|||
}
|
||||
}
|
||||
|
||||
#if 0
|
||||
if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) {
|
||||
mError("db:%s, can't drop monitor database", pDrop->db);
|
||||
return TSDB_CODE_MND_MONITOR_DB_FORBIDDEN;
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t code = mnodeSetDbDropping(pMsg->pDb);
|
||||
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
|
||||
|
|
|
@ -49,7 +49,7 @@ static int32_t mnodeProcessCreateDnodeMsg(SMnodeMsg *pMsg);
|
|||
static int32_t mnodeProcessDropDnodeMsg(SMnodeMsg *pMsg);
|
||||
static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg);
|
||||
static void mnodeProcessCfgDnodeMsgRsp(SRpcMsg *rpcMsg) ;
|
||||
static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *rpcMsg);
|
||||
static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg);
|
||||
static int32_t mnodeGetModuleMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
|
||||
static int32_t mnodeRetrieveModules(SShowObj *pShow, char *data, int32_t rows, void *pConn);
|
||||
static int32_t mnodeGetConfigMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
|
||||
|
@ -161,8 +161,8 @@ int32_t mnodeInitDnodes() {
|
|||
mnodeAddPeerMsgHandle(TSDB_MSG_TYPE_DM_STATUS, mnodeProcessDnodeStatusMsg);
|
||||
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_MODULE, mnodeGetModuleMeta);
|
||||
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_MODULE, mnodeRetrieveModules);
|
||||
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_CONFIGS, mnodeGetConfigMeta);
|
||||
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_CONFIGS, mnodeRetrieveConfigs);
|
||||
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_VARIABLES, mnodeGetConfigMeta);
|
||||
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_VARIABLES, mnodeRetrieveConfigs);
|
||||
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_VNODES, mnodeGetVnodeMeta);
|
||||
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_VNODES, mnodeRetrieveVnodes);
|
||||
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_DNODE, mnodeGetDnodeMeta);
|
||||
|
@ -363,10 +363,15 @@ static bool mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) {
|
|||
mError("\"arbitrator\"[%s - %s] cfg parameters inconsistent", clusterCfg->arbitrator, tsArbitrator);
|
||||
return false;
|
||||
}
|
||||
if (0 != strncasecmp(clusterCfg->timezone, tsTimezone, strlen(tsTimezone))) {
|
||||
mError("\"timezone\"[%s - %s] cfg parameters inconsistent", clusterCfg->timezone, tsTimezone);
|
||||
|
||||
int64_t checkTime = 0;
|
||||
char timestr[32] = "1970-01-01 00:00:00.00";
|
||||
(void)taosParseTime(timestr, &checkTime, strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0);
|
||||
if ((0 != strncasecmp(clusterCfg->timezone, tsTimezone, strlen(tsTimezone))) && (checkTime != clusterCfg->checkTime)) {
|
||||
mError("\"timezone\"[%s - %s] [%" PRId64 " - %" PRId64"] cfg parameters inconsistent", clusterCfg->timezone, tsTimezone, clusterCfg->checkTime, checkTime);
|
||||
return false;
|
||||
}
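The reworked check only reports a timezone inconsistency when both the timezone strings and the derived checkTime values disagree, so two dnodes whose timezone strings are written differently but denote the same offset no longer trip the comparison. The probe works because every dnode parses the same literal local timestamp, and the resulting epoch value shifts by the node's UTC offset: 0 ms in UTC, -28800000 ms in UTC+8, and so on. A rough standalone illustration, with mktime() standing in for taosParseTime():

#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void) {
    struct tm tm;
    memset(&tm, 0, sizeof(tm));
    tm.tm_year = 70; tm.tm_mon = 0; tm.tm_mday = 1;   /* 1970-01-01 00:00:00 in local time */
    tm.tm_isdst = -1;

    long long ms = (long long)mktime(&tm) * 1000;     /* differs between nodes with different offsets */
    printf("checkTime for this node: %lld ms\n", ms);
    return 0;
}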
|
||||
|
||||
if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) {
|
||||
mError("\"locale\"[%s - %s] cfg parameters inconsistent", clusterCfg->locale, tsLocale);
|
||||
return false;
|
||||
|
@ -513,7 +518,7 @@ static int32_t mnodeCreateDnode(char *ep, SMnodeMsg *pMsg) {
|
|||
SDnodeObj *pDnode = mnodeGetDnodeByEp(ep);
|
||||
if (pDnode != NULL) {
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
mError("dnode:%d is already exist, %s:%d", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodePort);
|
||||
mError("dnode:%d, already exist, %s:%d", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodePort);
|
||||
return TSDB_CODE_MND_DNODE_ALREADY_EXIST;
|
||||
}
|
||||
|
||||
|
@ -728,7 +733,7 @@ static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, vo
|
|||
}
|
||||
|
||||
static bool mnodeCheckModuleInDnode(SDnodeObj *pDnode, int32_t moduleType) {
|
||||
uint32_t status = pDnode->moduleStatus & (1 << moduleType);
|
||||
uint32_t status = pDnode->moduleStatus & (1u << moduleType);
|
||||
return status > 0;
|
||||
}
|
||||
|
||||
|
@ -753,7 +758,7 @@ static int32_t mnodeGetModuleMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
|
|||
|
||||
pShow->bytes[cols] = 40 + VARSTR_HEADER_SIZE;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
|
||||
strcpy(pSchema[cols].name, "end point");
|
||||
strcpy(pSchema[cols].name, "end_point");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
|
@ -787,7 +792,9 @@ static int32_t mnodeGetModuleMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
|
|||
|
||||
int32_t mnodeRetrieveModules(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
|
||||
int32_t numOfRows = 0;
|
||||
char * pWrite;
|
||||
|
||||
char* pWrite;
|
||||
char* moduleName[5] = { "MNODE", "HTTP", "MONITOR", "MQTT", "UNKNOWN" };
|
||||
|
||||
while (numOfRows < rows) {
|
||||
SDnodeObj *pDnode = NULL;
|
||||
|
@ -802,28 +809,18 @@ int32_t mnodeRetrieveModules(SShowObj *pShow, char *data, int32_t rows, void *pC
|
|||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
strncpy(pWrite, pDnode->dnodeEp, pShow->bytes[cols]-1);
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDnode->dnodeEp, pShow->bytes[cols] - 1);
|
||||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
switch (moduleType) {
|
||||
case TSDB_MOD_MNODE:
|
||||
strcpy(pWrite, "mnode");
|
||||
break;
|
||||
case TSDB_MOD_HTTP:
|
||||
strcpy(pWrite, "http");
|
||||
break;
|
||||
case TSDB_MOD_MONITOR:
|
||||
strcpy(pWrite, "monitor");
|
||||
break;
|
||||
default:
|
||||
strcpy(pWrite, "unknown");
|
||||
}
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, moduleName[moduleType], pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
bool enable = mnodeCheckModuleInDnode(pDnode, moduleType);
|
||||
strcpy(pWrite, enable ? "enable" : "disable");
|
||||
|
||||
char* v = enable? "enable":"disable";
|
||||
STR_TO_VARSTR(pWrite, v);
|
||||
cols++;
|
||||
|
||||
numOfRows++;
|
||||
|
@ -857,13 +854,13 @@ static int32_t mnodeGetConfigMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
|
|||
|
||||
pShow->bytes[cols] = TSDB_CFG_OPTION_LEN + VARSTR_HEADER_SIZE;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
|
||||
strcpy(pSchema[cols].name, "config name");
|
||||
tstrncpy(pSchema[cols].name, "name", sizeof(pSchema[cols].name));
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = TSDB_CFG_VALUE_LEN + VARSTR_HEADER_SIZE;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
|
||||
strcpy(pSchema[cols].name, "config value");
|
||||
tstrncpy(pSchema[cols].name, "value", sizeof(pSchema[cols].name));
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
|
@ -898,27 +895,32 @@ static int32_t mnodeRetrieveConfigs(SShowObj *pShow, char *data, int32_t rows, v
|
|||
int32_t cols = 0;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
snprintf(pWrite, TSDB_CFG_OPTION_LEN, "%s", cfg->option);
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, cfg->option, TSDB_CFG_OPTION_LEN);
|
||||
|
||||
cols++;
|
||||
int32_t t = 0;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
switch (cfg->valType) {
|
||||
case TAOS_CFG_VTYPE_INT16:
|
||||
snprintf(pWrite, TSDB_CFG_VALUE_LEN, "%d", *((int16_t *)cfg->ptr));
|
||||
t = snprintf(varDataVal(pWrite), TSDB_CFG_VALUE_LEN, "%d", *((int16_t *)cfg->ptr));
|
||||
varDataSetLen(pWrite, t);
|
||||
numOfRows++;
|
||||
break;
|
||||
case TAOS_CFG_VTYPE_INT32:
|
||||
snprintf(pWrite, TSDB_CFG_VALUE_LEN, "%d", *((int32_t *)cfg->ptr));
|
||||
t = snprintf(varDataVal(pWrite), TSDB_CFG_VALUE_LEN, "%d", *((int32_t *)cfg->ptr));
|
||||
varDataSetLen(pWrite, t);
|
||||
numOfRows++;
|
||||
break;
|
||||
case TAOS_CFG_VTYPE_FLOAT:
|
||||
snprintf(pWrite, TSDB_CFG_VALUE_LEN, "%f", *((float *)cfg->ptr));
|
||||
t = snprintf(varDataVal(pWrite), TSDB_CFG_VALUE_LEN, "%f", *((float *)cfg->ptr));
|
||||
varDataSetLen(pWrite, t);
|
||||
numOfRows++;
|
||||
break;
|
||||
case TAOS_CFG_VTYPE_STRING:
|
||||
case TAOS_CFG_VTYPE_IPSTR:
|
||||
case TAOS_CFG_VTYPE_DIRECTORY:
|
||||
snprintf(pWrite, TSDB_CFG_VALUE_LEN, "%s", (char *)cfg->ptr);
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, cfg->ptr, TSDB_CFG_VALUE_LEN);
|
||||
numOfRows++;
|
||||
break;
|
||||
default:
|
||||
|
|
|
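The module and config retrieval hunks above stop copying raw C strings into the row buffer and instead go through STR_WITH_MAXSIZE_TO_VARSTR/STR_TO_VARSTR and varDataSetLen(), because BINARY cells are read back as length-prefixed var-strings. A simplified model of that cell layout, assuming a two-byte length header (the real macros and VARSTR_HEADER_SIZE come from the TDengine headers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VARSTR_HEADER_SIZE  sizeof(uint16_t)
#define varDataLen(v)       (*(uint16_t *)(v))
#define varDataVal(v)       ((char *)(v) + VARSTR_HEADER_SIZE)
#define varDataSetLen(v, l) (varDataLen(v) = (uint16_t)(l))

static void str_to_varstr(char *cell, const char *s, size_t max) {
    size_t n = strlen(s);
    if (n > max) n = max;                 /* truncate like STR_WITH_MAXSIZE_TO_VARSTR */
    memcpy(varDataVal(cell), s, n);
    varDataSetLen(cell, n);               /* length header written last */
}

int main(void) {
    char cell[64];
    str_to_varstr(cell, "enable", sizeof(cell) - VARSTR_HEADER_SIZE);
    printf("len=%u val=%.*s\n", varDataLen(cell), varDataLen(cell), varDataVal(cell));
    return 0;
}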
@ -21,6 +21,7 @@
|
|||
#include "tgrant.h"
|
||||
#include "ttimer.h"
|
||||
#include "tglobal.h"
|
||||
#include "mnode.h"
|
||||
#include "dnode.h"
|
||||
#include "mnodeDef.h"
|
||||
#include "mnodeInt.h"
|
||||
|
@ -107,13 +108,18 @@ int32_t mnodeStartSystem() {
|
|||
tsMgmtIsRunning = true;
|
||||
|
||||
mInfo("mnode is initialized successfully");
|
||||
|
||||
sdbUpdateSync();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t mnodeInitSystem() {
|
||||
mnodeInitTimer();
|
||||
if (!mnodeNeedStart()) return 0;
|
||||
return mnodeStartSystem();
|
||||
if (mnodeNeedStart()) {
|
||||
return mnodeStartSystem();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mnodeCleanupSystem() {
|
||||
|
@ -159,14 +165,19 @@ static void mnodeCleanupTimer() {
|
|||
|
||||
static bool mnodeNeedStart() {
|
||||
struct stat dirstat;
|
||||
bool fileExist = (stat(tsMnodeDir, &dirstat) == 0);
|
||||
char mnodeFileName[TSDB_FILENAME_LEN * 2] = {0};
|
||||
sprintf(mnodeFileName, "%s/wal/wal0", tsMnodeDir);
|
||||
|
||||
bool fileExist = (stat(mnodeFileName, &dirstat) == 0);
|
||||
bool asMaster = (strcmp(tsFirst, tsLocalEp) == 0);
|
||||
|
||||
if (asMaster || fileExist) {
|
||||
mDebug("mnode module start, asMaster:%d fileExist:%d", asMaster, fileExist);
|
||||
return true;
|
||||
} else {
|
||||
mDebug("mnode module won't start, asMaster:%d fileExist:%d", asMaster, fileExist);
|
||||
return false;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool mnodeIsRunning() {
|
||||
|
|
|
@ -35,8 +35,8 @@
|
|||
#include "mnodeVgroup.h"
|
||||
#include "mnodeWrite.h"
|
||||
|
||||
#define CONN_KEEP_TIME (tsShellActivityTimer * 3000)
|
||||
#define CONN_CHECK_TIME (tsShellActivityTimer * 2000)
|
||||
#define CONN_KEEP_TIME (tsShellActivityTimer * 3)
|
||||
#define CONN_CHECK_TIME (tsShellActivityTimer * 2)
|
||||
#define QUERY_ID_SIZE 20
|
||||
#define QUERY_STREAM_SAVE_SIZE 20
|
||||
|
||||
|
@ -100,7 +100,7 @@ SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) {
|
|||
};
|
||||
tstrncpy(connObj.user, user, sizeof(connObj.user));
|
||||
|
||||
SConnObj *pConn = taosCachePut(tsMnodeConnCache, &connId, sizeof(int32_t), &connObj, sizeof(connObj), CONN_KEEP_TIME);
|
||||
SConnObj *pConn = taosCachePut(tsMnodeConnCache, &connId, sizeof(int32_t), &connObj, sizeof(connObj), CONN_KEEP_TIME * 1000);
|
||||
|
||||
mDebug("connId:%d, is created, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port);
|
||||
return pConn;
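CONN_KEEP_TIME and CONN_CHECK_TIME now carry seconds rather than milliseconds, and the taosCachePut() call converts back with * 1000, so the effective connection TTL is unchanged. A two-line check of that arithmetic, assuming tsShellActivityTimer is configured in seconds:

#include <stdio.h>

int main(void) {
    int tsShellActivityTimer = 3;               /* example value                      */
    int keepSec = tsShellActivityTimer * 3;     /* new CONN_KEEP_TIME (seconds)       */
    int oldMs   = tsShellActivityTimer * 3000;  /* old CONN_KEEP_TIME (milliseconds)  */
    printf("%d ms == %d ms\n", keepSec * 1000, oldMs);  /* what taosCachePut receives */
    return 0;
}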
|
||||
|
|
|
@ -224,7 +224,7 @@ void sdbUpdateMnodeRoles() {
|
|||
mnodeUpdateMnodeEpSet();
|
||||
}
|
||||
|
||||
static uint32_t sdbGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int32_t *size, uint64_t *fversion) {
|
||||
static uint32_t sdbGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion) {
|
||||
sdbUpdateMnodeRoles();
|
||||
return 0;
|
||||
}
|
||||
|
@ -291,6 +291,11 @@ static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
|
|||
}
|
||||
|
||||
void sdbUpdateSync() {
|
||||
if (!mnodeIsRunning()) {
|
||||
mDebug("mnode not start yet, update sync info later");
|
||||
return;
|
||||
}
|
||||
|
||||
SSyncCfg syncCfg = {0};
|
||||
int32_t index = 0;
|
||||
|
||||
|
|
|
@ -98,7 +98,7 @@ static char *mnodeGetShowType(int32_t showType) {
|
|||
case TSDB_MGMT_TABLE_MODULE: return "show modules";
|
||||
case TSDB_MGMT_TABLE_QUERIES: return "show queries";
|
||||
case TSDB_MGMT_TABLE_STREAMS: return "show streams";
|
||||
case TSDB_MGMT_TABLE_CONFIGS: return "show configs";
|
||||
case TSDB_MGMT_TABLE_VARIABLES: return "show configs";
|
||||
case TSDB_MGMT_TABLE_CONNS: return "show connections";
|
||||
case TSDB_MGMT_TABLE_SCORES: return "show scores";
|
||||
case TSDB_MGMT_TABLE_GRANTS: return "show grants";
|
||||
|
@ -313,6 +313,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
|
|||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
code = TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
mnodeDecDbRef(pDb);
|
||||
goto connect_over;
|
||||
}
|
||||
mnodeDecDbRef(pDb);
|
||||
|
|
|
@ -294,6 +294,7 @@ static int32_t mnodeChildTableActionRestored() {
|
|||
SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb};
|
||||
sdbDeleteRow(&desc);
|
||||
mnodeDecTableRef(pTable);
|
||||
mnodeDecDbRef(pDb);
|
||||
continue;
|
||||
}
|
||||
mnodeDecDbRef(pDb);
|
||||
|
@ -1259,6 +1260,7 @@ static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow,
|
|||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
|
@ -1323,6 +1325,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows,
|
|||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1714,7 +1717,8 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
|
|||
(sdbGetVersion() & ((1ul << 16) - 1ul));
|
||||
pTable->superTable = pMsg->pSTable;
|
||||
} else {
|
||||
pTable->uid = (((uint64_t) pTable->createdTime) << 16) + (sdbGetVersion() & ((1ul << 16) - 1ul));
|
||||
pTable->uid = (((uint64_t)pTable->vgId) << 40) + ((((uint64_t)pTable->sid) & ((1ul << 24) - 1ul)) << 16) +
|
||||
(sdbGetVersion() & ((1ul << 16) - 1ul));
|
||||
pTable->sversion = 0;
|
||||
pTable->numOfColumns = htons(pCreate->numOfColumns);
|
||||
pTable->sqlLen = htons(pCreate->sqlLen);
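For ordinary child tables the uid is no longer derived from createdTime; it now packs the vgroup id, the table sid and the low 16 bits of the sdb version into one 64-bit value, which keeps uids unique across vgroups. A sketch of the packing and unpacking, with the field widths (24/24/16 bits) read off the shifts and masks in the hunk above:

#include <inttypes.h>
#include <stdio.h>

static uint64_t make_uid(uint32_t vgId, uint32_t sid, uint64_t sdbVersion) {
    return ((uint64_t)vgId << 40)                          /* bits 40..63: vgroup id   */
         | ((((uint64_t)sid) & ((1ull << 24) - 1)) << 16)  /* bits 16..39: table sid   */
         | (sdbVersion & ((1ull << 16) - 1));              /* bits  0..15: sdb version */
}

int main(void) {
    uint64_t uid = make_uid(5, 1234, 42);
    printf("uid     = 0x%016" PRIx64 "\n", uid);
    printf("vgId    = %" PRIu64 "\n", uid >> 40);
    printf("sid     = %" PRIu64 "\n", (uid >> 16) & ((1ull << 24) - 1));
    printf("sdb ver = %" PRIu64 "\n", uid & ((1ull << 16) - 1));
    return 0;
}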
|
||||
|
@ -2494,6 +2498,7 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
|
|||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
|
@ -2547,6 +2552,7 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
|
|||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2715,6 +2721,7 @@ static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, vo
|
|||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
|
@ -2767,6 +2774,7 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro
|
|||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -581,8 +581,8 @@ void mnodeDropAllUsers(SAcctObj *pAcct) {
|
|||
int32_t mnodeRetriveAuth(char *user, char *spi, char *encrypt, char *secret, char *ckey) {
|
||||
if (!sdbIsMaster()) {
|
||||
*secret = 0;
|
||||
mDebug("user:%s, failed to auth user, reason:%s", user, tstrerror(TSDB_CODE_RPC_NOT_READY));
|
||||
return TSDB_CODE_RPC_NOT_READY;
|
||||
mDebug("user:%s, failed to auth user, reason:%s", user, tstrerror(TSDB_CODE_APP_NOT_READY));
|
||||
return TSDB_CODE_APP_NOT_READY;
|
||||
}
|
||||
|
||||
SUserObj *pUser = mnodeGetUser(user);
|
||||
|
|
|
@ -89,6 +89,7 @@ static int32_t mnodeVgroupActionInsert(SSdbOper *pOper) {
|
|||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("vgId:%d, db:%s status:%d, in dropping", pVgroup->vgId, pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
|
@ -617,6 +618,7 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
|
|||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
|
@ -708,6 +710,7 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
|
|||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
mnodeDecDbRef(pDb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -784,7 +787,10 @@ void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable) {
|
|||
if (pTable->sid >= 1) {
|
||||
taosIdPoolMarkStatus(pVgroup->idPool, pTable->sid);
|
||||
pVgroup->numOfTables++;
|
||||
mnodeIncVgroupRef(pVgroup);
|
||||
// The create vgroup message may be received later than the create table message
|
||||
// and the writing order in sdb is therefore uncertain
|
||||
// which will cause the reference count of the vgroup to be incorrect when restarting
|
||||
// mnodeIncVgroupRef(pVgroup);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -792,7 +798,10 @@ void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable) {
|
|||
if (pTable->sid >= 1) {
|
||||
taosFreeId(pVgroup->idPool, pTable->sid);
|
||||
pVgroup->numOfTables--;
|
||||
mnodeDecVgroupRef(pVgroup);
|
||||
// The create vgroup message may be received later than the create table message
|
||||
// and the writing order in sdb is therefore uncertain
|
||||
// which will cause the reference count of the vgroup to be incorrect when restarting
|
||||
// mnodeDecVgroupRef(pVgroup);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -40,6 +40,10 @@ extern "C" {
|
|||
#include "osAlpine.h"
|
||||
#endif
|
||||
|
||||
#ifdef _TD_NINGSI_60_
|
||||
#include "osNingsi.h"
|
||||
#endif
|
||||
|
||||
#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
|
||||
#include "osWindows.h"
|
||||
#endif
|
||||
|
|
|
@ -23,7 +23,7 @@ extern "C" {
|
|||
// TAOS_OS_FUNC_DIR
|
||||
void taosRemoveDir(char *rootDir);
|
||||
int taosMkDir(const char *pathname, mode_t mode);
|
||||
void taosMvDir(char* destDir, char *srcDir);
|
||||
void taosRename(char* oldName, char *newName);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -0,0 +1,138 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef TDENGINE_OS_NINGSI_H
|
||||
#define TDENGINE_OS_NINGSI_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define TAOS_OS_FUNC_ATOMIC
|
||||
/*
|
||||
* type __sync_fetch_and_add (type *ptr, type value);
|
||||
* type __sync_fetch_and_sub (type *ptr, type value);
|
||||
* type __sync_fetch_and_or (type *ptr, type value);
|
||||
* type __sync_fetch_and_and (type *ptr, type value);
|
||||
* type __sync_fetch_and_xor (type *ptr, type value);
|
||||
* type __sync_fetch_and_nand (type *ptr, type value);
|
||||
* type __sync_add_and_fetch (type *ptr, type value);
|
||||
* type __sync_sub_and_fetch (type *ptr, type value);
|
||||
* type __sync_or_and_fetch (type *ptr, type value);
|
||||
* type __sync_and_and_fetch (type *ptr, type value);
|
||||
* type __sync_xor_and_fetch (type *ptr, type value);
|
||||
* type __sync_nand_and_fetch (type *ptr, type value);
|
||||
*
|
||||
* bool __sync_bool_compare_and_swap (type*ptr, type oldval, type newval, ...)
|
||||
* type __sync_val_compare_and_swap (type *ptr, type oldval, ?type newval, ...)
|
||||
* */
|
||||
|
||||
#define atomic_load_8(ptr) __sync_fetch_and_add((ptr), 0)
|
||||
#define atomic_load_16(ptr) __sync_fetch_and_add((ptr), 0)
|
||||
#define atomic_load_32(ptr) __sync_fetch_and_add((ptr), 0)
|
||||
#define atomic_load_64(ptr) __sync_fetch_and_add((ptr), 0)
|
||||
#define atomic_load_ptr(ptr) __sync_fetch_and_add((ptr), 0)
|
||||
|
||||
#define atomic_store_8(ptr, val) (*(ptr)=(val))
|
||||
#define atomic_store_16(ptr, val) (*(ptr)=(val))
|
||||
#define atomic_store_32(ptr, val) (*(ptr)=(val))
|
||||
#define atomic_store_64(ptr, val) (*(ptr)=(val))
|
||||
#define atomic_store_ptr(ptr, val) (*(ptr)=(val))
|
||||
|
||||
int8_t atomic_exchange_8_impl(int8_t* ptr, int8_t val );
|
||||
int16_t atomic_exchange_16_impl(int16_t* ptr, int16_t val );
|
||||
int32_t atomic_exchange_32_impl(int32_t* ptr, int32_t val );
|
||||
int64_t atomic_exchange_64_impl(int64_t* ptr, int64_t val );
|
||||
void* atomic_exchange_ptr_impl( void **ptr, void *val );
|
||||
|
||||
#define atomic_exchange_8(ptr, val) atomic_exchange_8_impl((int8_t*)ptr, (int8_t)val)
|
||||
#define atomic_exchange_16(ptr, val) atomic_exchange_16_impl((int16_t*)ptr, (int16_t)val)
|
||||
#define atomic_exchange_32(ptr, val) atomic_exchange_32_impl((int32_t*)ptr, (int32_t)val)
|
||||
#define atomic_exchange_64(ptr, val) atomic_exchange_64_impl((int64_t*)ptr, (int64_t)val)
|
||||
#define atomic_exchange_ptr(ptr, val) atomic_exchange_ptr_impl((void **)ptr, (void*)val)
|
||||
|
||||
#define atomic_val_compare_exchange_8 __sync_val_compare_and_swap
|
||||
#define atomic_val_compare_exchange_16 __sync_val_compare_and_swap
|
||||
#define atomic_val_compare_exchange_32 __sync_val_compare_and_swap
|
||||
#define atomic_val_compare_exchange_64 __sync_val_compare_and_swap
|
||||
#define atomic_val_compare_exchange_ptr __sync_val_compare_and_swap
|
||||
|
||||
#define atomic_add_fetch_8(ptr, val) __sync_add_and_fetch((ptr), (val))
|
||||
#define atomic_add_fetch_16(ptr, val) __sync_add_and_fetch((ptr), (val))
|
||||
#define atomic_add_fetch_32(ptr, val) __sync_add_and_fetch((ptr), (val))
|
||||
#define atomic_add_fetch_64(ptr, val) __sync_add_and_fetch((ptr), (val))
|
||||
#define atomic_add_fetch_ptr(ptr, val) __sync_add_and_fetch((ptr), (val))
|
||||
|
||||
#define atomic_fetch_add_8(ptr, val) __sync_fetch_and_add((ptr), (val))
|
||||
#define atomic_fetch_add_16(ptr, val) __sync_fetch_and_add((ptr), (val))
|
||||
#define atomic_fetch_add_32(ptr, val) __sync_fetch_and_add((ptr), (val))
|
||||
#define atomic_fetch_add_64(ptr, val) __sync_fetch_and_add((ptr), (val))
|
||||
#define atomic_fetch_add_ptr(ptr, val) __sync_fetch_and_add((ptr), (val))
|
||||
|
||||
#define atomic_sub_fetch_8(ptr, val) __sync_sub_and_fetch((ptr), (val))
|
||||
#define atomic_sub_fetch_16(ptr, val) __sync_sub_and_fetch((ptr), (val))
|
||||
#define atomic_sub_fetch_32(ptr, val) __sync_sub_and_fetch((ptr), (val))
|
||||
#define atomic_sub_fetch_64(ptr, val) __sync_sub_and_fetch((ptr), (val))
|
||||
#define atomic_sub_fetch_ptr(ptr, val) __sync_sub_and_fetch((ptr), (val))
|
||||
|
||||
#define atomic_fetch_sub_8(ptr, val) __sync_fetch_and_sub((ptr), (val))
|
||||
#define atomic_fetch_sub_16(ptr, val) __sync_fetch_and_sub((ptr), (val))
|
||||
#define atomic_fetch_sub_32(ptr, val) __sync_fetch_and_sub((ptr), (val))
|
||||
#define atomic_fetch_sub_64(ptr, val) __sync_fetch_and_sub((ptr), (val))
|
||||
#define atomic_fetch_sub_ptr(ptr, val) __sync_fetch_and_sub((ptr), (val))
|
||||
|
||||
#define atomic_and_fetch_8(ptr, val) __sync_and_and_fetch((ptr), (val))
|
||||
#define atomic_and_fetch_16(ptr, val) __sync_and_and_fetch((ptr), (val))
|
||||
#define atomic_and_fetch_32(ptr, val) __sync_and_and_fetch((ptr), (val))
|
||||
#define atomic_and_fetch_64(ptr, val) __sync_and_and_fetch((ptr), (val))
|
||||
#define atomic_and_fetch_ptr(ptr, val) __sync_and_and_fetch((ptr), (val))
|
||||
|
||||
#define atomic_fetch_and_8(ptr, val) __sync_fetch_and_and((ptr), (val))
|
||||
#define atomic_fetch_and_16(ptr, val) __sync_fetch_and_and((ptr), (val))
|
||||
#define atomic_fetch_and_32(ptr, val) __sync_fetch_and_and((ptr), (val))
|
||||
#define atomic_fetch_and_64(ptr, val) __sync_fetch_and_and((ptr), (val))
|
||||
#define atomic_fetch_and_ptr(ptr, val) __sync_fetch_and_and((ptr), (val))
|
||||
|
||||
#define atomic_or_fetch_8(ptr, val) __sync_or_and_fetch((ptr), (val))
|
||||
#define atomic_or_fetch_16(ptr, val) __sync_or_and_fetch((ptr), (val))
|
||||
#define atomic_or_fetch_32(ptr, val) __sync_or_and_fetch((ptr), (val))
|
||||
#define atomic_or_fetch_64(ptr, val) __sync_or_and_fetch((ptr), (val))
|
||||
#define atomic_or_fetch_ptr(ptr, val) __sync_or_and_fetch((ptr), (val))
|
||||
|
||||
#define atomic_fetch_or_8(ptr, val) __sync_fetch_and_or((ptr), (val))
|
||||
#define atomic_fetch_or_16(ptr, val) __sync_fetch_and_or((ptr), (val))
|
||||
#define atomic_fetch_or_32(ptr, val) __sync_fetch_and_or((ptr), (val))
|
||||
#define atomic_fetch_or_64(ptr, val) __sync_fetch_and_or((ptr), (val))
|
||||
#define atomic_fetch_or_ptr(ptr, val) __sync_fetch_and_or((ptr), (val))
|
||||
|
||||
#define atomic_xor_fetch_8(ptr, val) __sync_xor_and_fetch((ptr), (val))
|
||||
#define atomic_xor_fetch_16(ptr, val) __sync_xor_and_fetch((ptr), (val))
|
||||
#define atomic_xor_fetch_32(ptr, val) __sync_xor_and_fetch((ptr), (val))
|
||||
#define atomic_xor_fetch_64(ptr, val) __sync_xor_and_fetch((ptr), (val))
|
||||
#define atomic_xor_fetch_ptr(ptr, val) __sync_xor_and_fetch((ptr), (val))
|
||||
|
||||
#define atomic_fetch_xor_8(ptr, val) __sync_fetch_and_xor((ptr), (val))
|
||||
#define atomic_fetch_xor_16(ptr, val) __sync_fetch_and_xor((ptr), (val))
|
||||
#define atomic_fetch_xor_32(ptr, val) __sync_fetch_and_xor((ptr), (val))
|
||||
#define atomic_fetch_xor_64(ptr, val) __sync_fetch_and_xor((ptr), (val))
|
||||
#define atomic_fetch_xor_ptr(ptr, val) __sync_fetch_and_xor((ptr), (val))
|
||||
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
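The header maps most operations directly onto the __sync builtins but leaves the exchange helpers as out-of-line *_impl functions, which suggests the Ningsi toolchain needs them built from the compare-and-swap primitive. One way such a helper could be written with only __sync_bool_compare_and_swap, plus a usage example of the macro; this is an illustration, not the implementation shipped in the (not shown) source file:

#include <stdint.h>
#include <stdio.h>

int32_t atomic_exchange_32_impl(int32_t *ptr, int32_t val) {
    int32_t old;
    do {
        old = *ptr;                                          /* snapshot the current value       */
    } while (!__sync_bool_compare_and_swap(ptr, old, val));  /* retry until the swap goes through */
    return old;
}

int main(void) {
    int32_t counter = 7;
    int32_t previous = atomic_exchange_32_impl(&counter, 11);  /* as atomic_exchange_32(&counter, 11) */
    printf("previous=%d current=%d\n", previous, counter);
    return 0;
}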
|