diff --git a/.gitignore b/.gitignore
index c5d90eea50..e6e327327c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -66,6 +66,8 @@ CMakeError.log
/test/cfg
/src/.vs
*.o
+version.c
+taos.rc
src/connector/jdbc/.settings/
tests/comparisonTest/cassandra/cassandratest/.classpath
tests/comparisonTest/cassandra/cassandratest/.project
diff --git a/CMakeLists.txt b/CMakeLists.txt
index bc6a888f9d..565ab32f00 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -18,6 +18,7 @@ SET(TD_COVER FALSE)
SET(TD_MEM_CHECK FALSE)
SET(TD_PAGMODE_LITE FALSE)
+SET(TD_SOMODE_STATIC FALSE)
SET(TD_GODLL FALSE)
SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
@@ -27,6 +28,7 @@ INCLUDE(cmake/input.inc)
INCLUDE(cmake/platform.inc)
INCLUDE(cmake/define.inc)
INCLUDE(cmake/env.inc)
+INCLUDE(cmake/version.inc)
INCLUDE(cmake/install.inc)
ADD_SUBDIRECTORY(deps)
diff --git a/README.md b/README.md
index 2d84389f78..8f29adf89b 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,7 @@
[](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[](https://bestpractices.coreinfrastructure.org/projects/4201)
+[](https://hub.docker.com/repository/docker/tdengine/tdengine)
[](https://www.taosdata.com)
diff --git a/cmake/define.inc b/cmake/define.inc
index c72995159f..a63b9f1732 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -77,6 +77,11 @@ IF (TD_LINUX)
ADD_DEFINITIONS(-D_LINUX)
ADD_DEFINITIONS(-D_TD_LINUX)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
+
+ IF (TD_NINGSI_60)
+ ADD_DEFINITIONS(-D_TD_NINGSI_60_)
+ MESSAGE(STATUS "set ningsi macro to true")
+ ENDIF ()
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
@@ -110,7 +115,7 @@ IF (TD_WINDOWS)
ADD_DEFINITIONS(-D_MBCS -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE)
SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE)
IF (NOT TD_GODLL)
- SET(COMMON_FLAGS "/nologo /WX /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
+ SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd2220 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
SET(DEBUG_FLAGS "/Zi /W3 /GL")
SET(RELEASE_FLAGS "/W0 /GL")
ENDIF ()
diff --git a/cmake/input.inc b/cmake/input.inc
index e963e20240..0235ba42d3 100755
--- a/cmake/input.inc
+++ b/cmake/input.inc
@@ -22,6 +22,11 @@ IF (${PAGMODE} MATCHES "lite")
MESSAGE(STATUS "Build with pagmode lite")
ENDIF ()
+IF (${SOMODE} MATCHES "static")
+ SET(TD_SOMODE_STATIC TRUE)
+ MESSAGE(STATUS "Link so using static mode")
+ENDIF ()
+
IF (${DLLTYPE} MATCHES "go")
SET(TD_GODLL TRUE)
MESSAGE(STATUS "input dll type: " ${DLLTYPE})
diff --git a/cmake/platform.inc b/cmake/platform.inc
index 71eee9d507..7834a35411 100755
--- a/cmake/platform.inc
+++ b/cmake/platform.inc
@@ -22,6 +22,9 @@ SET(TD_LINUX FALSE)
SET(TD_MIPS_64 FALSE)
SET(TD_MIPS_32 FALSE)
SET(TD_APLHINE FALSE)
+ SET(TD_NINGSI FALSE)
+ SET(TD_NINGSI_60 FALSE)
+ SET(TD_NINGSI_80 FALSE)
SET(TD_WINDOWS FALSE)
SET(TD_WINDOWS_64 FALSE)
SET(TD_WINDOWS_32 FALSE)
@@ -99,3 +102,18 @@ ELSEIF (${CPUTYPE} MATCHES "x86")
ELSE ()
MESSAGE(STATUS "input cpuType unknown " ${CPUTYPE})
ENDIF ()
+
+# cmake -DOSTYPE=Ningsi
+IF (${OSTYPE} MATCHES "Ningsi60")
+ SET(TD_NINGSI TRUE)
+ SET(TD_NINGSI_60 TRUE)
+ MESSAGE(STATUS "input osType: Ningsi60")
+ELSEIF (${OSTYPE} MATCHES "Ningsi80")
+ SET(TD_NINGSI TRUE)
+ SET(TD_NINGSI_80 TRUE)
+ MESSAGE(STATUS "input osType: Ningsi80")
+ELSEIF (${OSTYPE} MATCHES "Linux")
+ MESSAGE(STATUS "input osType: Linux")
+ELSE ()
+ MESSAGE(STATUS "input osType unknown: " ${OSTYPE})
+ENDIF ()
\ No newline at end of file
diff --git a/cmake/version.inc b/cmake/version.inc
new file mode 100644
index 0000000000..c620d753a6
--- /dev/null
+++ b/cmake/version.inc
@@ -0,0 +1,69 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+PROJECT(TDengine)
+
+IF (DEFINED VERNUMBER)
+ SET(TD_VER_NUMBER ${VERNUMBER})
+ELSE ()
+ SET(TD_VER_NUMBER "2.0.2.0")
+ENDIF ()
+
+IF (DEFINED VERCOMPATIBLE)
+ SET(TD_VER_COMPATIBLE ${VERCOMPATIBLE})
+ELSE ()
+ SET(TD_VER_COMPATIBLE "2.0.0.0")
+ENDIF ()
+
+IF (DEFINED GITINFO)
+ SET(TD_VER_GIT ${GITINFO})
+ELSE ()
+ SET(TD_VER_GIT "community")
+ENDIF ()
+
+IF (DEFINED GITINFOI)
+ SET(TD_VER_GIT_INTERNAL ${GITINFOI})
+ELSE ()
+ SET(TD_VER_GIT_INTERNAL "internal")
+ENDIF ()
+
+IF (DEFINED VERDATE)
+ SET(TD_VER_DATE ${VERDATE})
+ELSE ()
+ STRING(TIMESTAMP TD_VER_DATE "%Y-%m-%d %H:%M:%S")
+ENDIF ()
+
+IF (DEFINED VERTYPE)
+ SET(TD_VER_VERTYPE ${VERTYPE})
+ELSE ()
+ SET(TD_VER_VERTYPE "stable")
+ENDIF ()
+
+IF (DEFINED CPUTYPE)
+ SET(TD_VER_CPUTYPE ${CPUTYPE})
+ELSE ()
+ IF (TD_WINDOWS_32)
+ SET(TD_VER_CPUTYPE "x86")
+ ELSE ()
+ SET(TD_VER_CPUTYPE "x64")
+ ENDIF ()
+ENDIF ()
+
+IF (DEFINED OSTYPE)
+ SET(TD_VER_OSTYPE ${OSTYPE})
+ELSE ()
+ SET(TD_VER_OSTYPE "Linux")
+ENDIF ()
+
+MESSAGE(STATUS "============= compile version parameter information start ============= ")
+MESSAGE(STATUS "ver number:" ${TD_VER_NUMBER})
+MESSAGE(STATUS "compatible ver number:" ${TD_VER_COMPATIBLE})
+MESSAGE(STATUS "communit commit id:" ${TD_VER_GIT})
+MESSAGE(STATUS "internal commit id:" ${TD_VER_GIT_INTERNAL})
+MESSAGE(STATUS "build date:" ${TD_VER_DATE})
+MESSAGE(STATUS "ver type:" ${TD_VER_VERTYPE})
+MESSAGE(STATUS "ver cpu:" ${TD_VER_CPUTYPE})
+MESSAGE(STATUS "os type:" ${TD_VER_OSTYPE})
+MESSAGE(STATUS "============= compile version parameter information end ============= ")
+
+STRING(REPLACE "." "_" TD_LIB_VER_NUMBER ${TD_VER_NUMBER})
+
+CONFIGURE_FILE("${TD_COMMUNITY_DIR}/src/util/src/version.c.in" "${TD_COMMUNITY_DIR}/src/util/src/version.c")
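All of the values consumed above are ordinary CMake cache variables, so they can be supplied with `-D` flags at configure time. The sketch below is illustrative only: the flag names mirror the `IF (DEFINED ...)` checks in this file and in cmake/input.inc / cmake/platform.inc, while the concrete values are placeholders.

```bash
# Configure a build with explicit version metadata (values are examples only);
# any flag that is omitted falls back to the defaults in cmake/version.inc.
mkdir -p debug && cd debug
cmake .. \
  -DVERNUMBER=2.0.2.0 \
  -DVERCOMPATIBLE=2.0.0.0 \
  -DVERTYPE=stable \
  -DVERDATE="$(date +'%F %R')" \
  -DOSTYPE=Linux \
  -DCPUTYPE=x64 \
  -DSOMODE=static
make
```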
diff --git a/deps/MsvcLibX/CMakeLists.txt b/deps/MsvcLibX/CMakeLists.txt
index fc77a3b447..4428579e1c 100644
--- a/deps/MsvcLibX/CMakeLists.txt
+++ b/deps/MsvcLibX/CMakeLists.txt
@@ -4,5 +4,5 @@ PROJECT(TDengine)
IF (TD_WINDOWS)
INCLUDE_DIRECTORIES(include)
AUX_SOURCE_DIRECTORY(src SRC)
- ADD_LIBRARY(MsvcLibXw64 ${SRC})
+ ADD_LIBRARY(MsvcLibXw ${SRC})
ENDIF ()
diff --git a/deps/MsvcLibX/include/msvclibx.h b/deps/MsvcLibX/include/msvclibx.h
index 4f5fc2733b..00953f13d0 100644
--- a/deps/MsvcLibX/include/msvclibx.h
+++ b/deps/MsvcLibX/include/msvclibx.h
@@ -59,7 +59,7 @@
/* Generate the OS-and-debug-mode-specific library name */
#define _MSVCLIBX_LIB "MsvcLibX" _MSVCLIBX_LIB_OS_SUFFIX _MSVCLIBX_LIB_DBG_SUFFIX ".lib"
//#pragma message("Adding pragma comment(lib, \"" _MSVCLIBX_LIB "\")")
-#pragma comment(lib, _MSVCLIBX_LIB)
+//#pragma comment(lib, _MSVCLIBX_LIB)
/* Library-specific routine used internally by many standard routines */
#if defined(_WIN32)
diff --git a/documentation20/webdocs/markdowndocs/Model-ch.md b/documentation20/webdocs/markdowndocs/Model-ch.md
index f5776ce6ce..31eb6b3744 100644
--- a/documentation20/webdocs/markdowndocs/Model-ch.md
+++ b/documentation20/webdocs/markdowndocs/Model-ch.md
@@ -9,9 +9,9 @@ TDengine采用关系型数据模型,需要建库、建表。因此对于一个
不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小等等。为让各种场景下TDengine都能最大效率的工作,TDengine建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除SQL标准的选项外,应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如:
```cmd
-CREATE DATABASE power KEEP 365 DAYS 10 REPLICA 3 BLOCKS 4;
+CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4;
```
-上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,副本数为3, 内存块数为4。详细的语法及参数请见TAOS SQL
+上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为4。详细的语法及参数请见TAOS SQL
创建库之后,需要使用SQL命令USE将当前库切换过来,例如:
@@ -27,13 +27,15 @@ USE power;
- 处于两个不同库的表是不能进行JOIN操作的。
## 创建超级表
-一个物联网系统,往往存在多种类型的设备,比如对于电网,存在智能电表、变压器、母线、开关等等。为便于多表之间的聚合,使用TDengine, 需要对每个类型的设备创建一超级表。以表一中的智能电表为例,可以使用如下的SQL命令创建超级表:
+一个物联网系统,往往存在多种类型的设备,比如对于电网,存在智能电表、变压器、母线、开关等等。为便于多表之间的聚合,使用TDengine, 需要对每个类型的数据采集点创建一超级表。以表一中的智能电表为例,可以使用如下的SQL命令创建超级表:
```cmd
CREATE TABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);
```
-与创建普通表一样,创建表时,需要提供表名(示例中为meters),表结构Schema,即数据列的定义,为采集的物理量(示例中为ts, current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的schema (示例中为location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组ID、管理员ID等等。标签的schema可以事后增加、删除、修改。具体定义以及细节请见 TAOS SQL 一节。
+与创建普通表一样,创建表时,需要提供表名(示例中为meters),表结构Schema,即数据列的定义。第一列必须为时间戳(示例中为ts),其他列为采集的物理量(示例中为current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的schema (示例中为location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组ID、管理员ID等等。标签的schema可以事后增加、删除、修改。具体定义以及细节请见 TAOS SQL 一节。
-每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。一个系统可以有多个DB,一个DB里可以有一到多个超级表。
+每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。一张超级表里包含的采集物理量必须是同时采集的(时间戳是一致的)。
+
+一张超级表最多容许1024列,如果一个采集点采集的物理量个数超过1024,需要建多张超级表来处理。一个系统可以有多个DB,一个DB里可以有一到多个超级表。
## 创建表
TDengine对每个数据采集点需要独立建表。与标准的关系型数据一样,一张表有表名,Schema,但除此之外,还可以带有一到多个标签。创建时,需要使用超级表做模板,同时指定标签的具体值。以表一中的智能电表为例,可以使用如下的SQL命令建表:
@@ -51,5 +53,7 @@ INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 21
```
上述SQL语句将记录(now, 10.2, 219, 0.32) 插入进表d1001。如果表d1001还未创建,则使用超级表meters做模板自动创建,同时打上标签值“Beijing.Chaoyang", 2。
-**多列模型**:TDengine支持多列模型,只要这些物理量是同时采集的,这些量就可以作为不同列放在同一张表里。有的数据采集点有多组采集量,每一组的数据采集时间是不一样的,这时需要对同一个采集点建多张表。但还有一种极限的设计,单列模型,无论是否同时采集,每个采集的物理量单独建表。TDengine建议,只要采集时间一致,就采用多列模型,因为插入效率以及存储效率更高。TDengine支持最大的列数为1024列。
+## 多列模型 vs 单列模型
+TDengine支持多列模型,只要物理量是一个数据采集点同时采集的(时间戳一致),这些量就可以作为不同列放在一张超级表里。但还有一种极限的设计,单列模型,每个采集的物理量都单独建表,因此每种类型的物理量都单独建立一超级表。比如电流、电压、相位,就建三张超级表。
+TDengine建议尽可能采用多列模型,因为插入效率以及存储效率更高。但对于有些场景,一个采集点的采集量的种类经常变化,如果采用多列模型,就需要频繁修改超级表的结构定义,让应用变得复杂,这时采用单列模型就会显得更简单。
diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/webdocs/markdowndocs/administrator-ch.md
index 813d06a660..64cadf69cd 100644
--- a/documentation20/webdocs/markdowndocs/administrator-ch.md
+++ b/documentation20/webdocs/markdowndocs/administrator-ch.md
@@ -82,7 +82,7 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修
- firstEp: taosd启动时,主动连接的集群中第一个dnode的end point, 默认值为localhost:6030。
- secondEp: taosd启动时,如果first连接不上,尝试连接集群中第二个dnode的end point, 默认值为空。
-- fqdn:数据节点的FQDN。如果为空,将自动获取操作系统配置的第一个, 默认值为空。
+- fqdn:数据节点的FQDN,缺省为操作系统配置的第一个hostname。如果习惯IP地址访问,可设置为该节点的IP地址。
- serverPort:taosd启动后,对外服务的端口号,默认值为6030。
- httpPort: RESTful服务使用的端口号,所有的HTTP请求(TCP)都需要向该接口发起查询/写入请求, 默认值为6041。
- dataDir: 数据文件目录,所有的数据文件都将写入该目录。默认值:/var/lib/taos。
@@ -94,7 +94,7 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修
- maxSQLLength:单条SQL语句允许最长限制。默认值:65380字节。
- telemetryReporting: 是否允许 TDengine 采集和上报基本使用信息,0表示不允许,1表示允许。 默认值:1。
-**注意:**对于端口,TDengine会使用从serverPort起12个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030都6041共12个端口,而且必须TCP和UDP都打开。
+**注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。
不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine提供如下存储相关的系统配置参数:
@@ -153,10 +153,10 @@ TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同
系统管理员可以在CLI界面里添加、删除用户,也可以修改密码。CLI里SQL语法如下:
```
-CREATE USER <user_name> PASS <‘password’>;
+CREATE USER <user_name> PASS <'password'>;
```
-创建用户,并指定用户名和密码,密码需要用单引号引起来
+创建用户,并指定用户名和密码,密码需要用单引号引起来,单引号为英文半角
```
DROP USER <user_name>;
@@ -165,10 +165,10 @@ DROP USER ;
删除用户,限root用户使用
```
-ALTER USER <user_name> PASS <‘password’>;
+ALTER USER <user_name> PASS <'password'>;
```
-修改用户密码, 为避免被转换为小写,密码需要用单引号引用
+修改用户密码, 为避免被转换为小写,密码需要用单引号引用,单引号为英文半角
```
SHOW USERS;
diff --git a/documentation20/webdocs/markdowndocs/architecture-ch.md b/documentation20/webdocs/markdowndocs/architecture-ch.md
index bfe3b55bd2..7ab4b5d096 100644
--- a/documentation20/webdocs/markdowndocs/architecture-ch.md
+++ b/documentation20/webdocs/markdowndocs/architecture-ch.md
@@ -82,7 +82,7 @@ TDengine 分布式架构的逻辑结构图如下:
### 节点之间的通讯
**通讯方式:**TDengine系统的各个节点之间的通讯是通过TCP/UDP进行的。因为考虑到物联网场景,数据写入的包一般不大,因此TDengine 除采用TCP做传输之外,还采用UDP方式,因为UDP 更加高效,而且不受连接数的限制。TDengine实现了自己的超时、重传、确认等机制,以确保UDP的可靠传输。对于数据量不到15K的数据包,采取UDP的方式进行传输,超过15K的,或者是查询类的操作,自动采取TCP的方式进行传输。同时,TDengine根据配置和数据包,会自动对数据进行压缩/解压缩,数字签名/认证等处理。对于数据节点之间的数据复制,只采用TCP方式进行数据传输。
-**FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过选项“fqdn"进行指定,如果没有指定,系统将自动获取FQDN。如果节点没有配置FQDN,可以直接使用IP地址作为FQDN,但不建议使用,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。
+**FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。
**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 为支持多线程高效的处理UDP数据,每个对内和对外的UDP链接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。使用时,需要确保防火墙将这些端口打开。每个数据节点可以配置不同的serverPort。
diff --git a/documentation20/webdocs/markdowndocs/cluster-ch.md b/documentation20/webdocs/markdowndocs/cluster-ch.md
index afe0272387..097433a18a 100644
--- a/documentation20/webdocs/markdowndocs/cluster-ch.md
+++ b/documentation20/webdocs/markdowndocs/cluster-ch.md
@@ -1,16 +1,46 @@
-#TDengine 集群安装、管理
+# TDengine 集群安装、管理
-多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看TDengine 2.0整体架构一章。
+多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看TDengine 2.0整体架构一章。而且在安装集群之前,请按照[《立即开始》](https://www.taosdata.com/cn/getting-started20/)一章安装并体验过单节点功能。
-集群的每个节点是由End Point来唯一标识的,End Point是由FQDN(Fully Qualified Domain Name)外加Port组成,比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname,可通过Linux命令“hostname"获取。端口是这个节点对外服务的端口号,缺省是6030,但可以通过taos.cfg里配置参数serverPort进行修改。
+集群的每个节点是由End Point来唯一标识的,End Point是由FQDN(Fully Qualified Domain Name)外加Port组成,比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname,可通过Linux命令`hostname -f`获取。端口是这个节点对外服务的端口号,缺省是6030,但可以通过taos.cfg里配置参数serverPort进行修改。一个节点可能配置了多个hostname, TDengine会自动获取第一个,但也可以通过taos.cfg里配置参数fqdn进行指定。如果习惯IP地址直接访问,可以将参数fqdn设置为本节点的IP地址。
TDengine的集群管理极其简单,除添加和删除节点需要人工干预之外,其他全部是自动完成,最大程度的降低了运维的工作量。本章对集群管理的操作做详细的描述。
-##安装、创建第一个节点
+## 准备工作
-集群是由一个一个dnode组成的,是从一个dnode的创建开始的。创建第一个节点很简单,就按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法进行安装、启动即可。
+**第一步**:如果搭建集群的节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据,具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)
-启动后,请执行taos, 启动taos shell,从shell里执行命令"show dnodes;",如下所示:
+**第二步**:建议关闭防火墙,至少保证端口:6030 - 6042的TCP和UDP端口都是开放的。**强烈建议**先关闭防火墙,集群搭建完毕之后,再来配置端口;
+
+**第三步**:在所有节点安装TDengine,且版本必须是一致的,**但不要启动taosd**;
+
+**第四步**:检查、配置所有节点的FQDN:
+
+1. 每个节点上执行命令`hostname -f`,查看和确认所有节点的hostname是不相同的;
+2. 每个节点上执行`ping host`, 其中host是其他节点的hostname, 看能否ping通其它节点; 如果不能ping通,需要检查网络设置, 或/etc/hosts文件,或DNS的配置。如果无法ping通,是无法组成集群的。
+3. 每个节点的FQDN就是输出的hostname外加端口号,比如h1.taosdata.com:6030
+
+**第五步**:修改TDengine的配置文件(所有节点的文件/etc/taos/taos.cfg都需要修改)。假设准备启动的第一个节点End Point为 h1.taosdata.com:6030, 那么以下几个参数与集群相关:
+
+```
+// firstEp 是每个节点启动后连接的第一个节点
+firstEp h1.taosdata.com:6030
+
+// 配置本节点的FQDN,如果本机只有一个hostname, 无需配置
+fqdn h1.taosdata.com
+
+// 配置本节点的端口号,缺省是6030
+serverPort 6030
+
+// 副本数为偶数的时候,需要配置,请参考《Arbitrator的使用》的部分
+arbitrator ha.taosdata.com:6042
+```
+
+一定要修改的参数是firstEp, 其他参数可不做任何修改,除非你很清楚为什么要修改。
+
+## 启动第一个节点
+
+按照[《立即开始》](https://www.taosdata.com/cn/getting-started20/)里的指示,启动第一个节点h1.taosdata.com,然后执行taos, 启动taos shell,从shell里执行命令"show dnodes;",如下所示:
```
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
@@ -25,71 +55,64 @@ taos>
```
上述命令里,可以看到这个刚启动的这个节点的End Point是:h1.taos.com:6030
-## 安装、创建后续节点
+## 启动后续节点
-将新的节点添加到现有集群,具体有以下几步:
+将后续的节点添加到现有集群,具体有以下几步:
-1. 按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法进行安装,**但不要启动taosd**
+1. 按照[《立即开始》](https://www.taosdata.com/cn/getting-started/)一章的方法在每个节点启动taosd。
-2. 如果是使用涛思数据的官方安装包进行安装,在安装结束时,会询问集群的End Port, 输入第一个节点的End Point即可。如果是源码安装,请编辑配置文件taos.cfg(缺省是在/etc/taos/目录),增加一行:
-
- ```
- firstEp h1.taos.com:6030
- ```
-
- 请注意将示例的“h1.taos.com:6030" 替换为你自己第一个节点的End Point
-
-3. 按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法启动taosd
-
-4. 在Linux shell里执行命令"hostname"找出本机的FQDN, 假设为h2.taos.com。如果无法找到,可以查看taosd日志文件taosdlog.0里前面几行日志(一般在/var/log/taos目录),fqdn以及port都会打印出来。
-
-5. 在第一个节点,使用CLI程序taos, 登录进TDengine系统, 使用命令:
+2. 在第一个节点,使用CLI程序taos, 登录进TDengine系统, 执行命令:
```
CREATE DNODE "h2.taos.com:6030";
```
- 将新节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**,否则出错。请注意将示例的“h2.taos.com:6030" 替换为你自己第一个节点的End Point
+ 将新节点的End Point (准备工作中第四步获知的) 添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**,否则出错。请注意将示例的“h2.taos.com:6030" 替换为这个新节点的End Point。
-6. 使用命令
+3. 然后执行命令
```
SHOW DNODES;
```
- 查看新节点是否被成功加入。
+ 查看新节点是否被成功加入。如果该被加入的节点处于离线状态,请做两个检查
+
+ - 查看该节点的taosd是否正常工作,如果没有正常运行,需要先检查为什么
+ - 查看该节点taosd日志文件taosdlog.0里前面几行日志(一般在/var/log/taos目录),看日志里输出的该节点fqdn以及端口号是否为刚添加的End Point。如果不一致,需要将正确的End Point添加进去。
按照上述步骤可以源源不断的将新的节点加入到集群。
**提示:**
-- firstEp, secondEp这两个参数仅仅在该节点第一次加入集群时有作用,加入集群后,该节点会保存最新的mnode的End Point列表,不再依赖这两个参数。
-- 两个没有配置firstEp, secondEp参数的dnode启动后,会独立运行起来。这个时候,无法将其中一个节点加入到另外一个节点,形成集群。**无法将两个独立的集群合并成为新的集群**。
+- firstEp这个参数仅仅在该节点第一次加入集群时有作用,加入集群后,该节点会保存最新的mnode的End Point列表,不再依赖这个参数。
+- 两个没有配置firstEp参数的dnode启动后,会独立运行起来。这个时候,无法将其中一个节点加入到另外一个节点,形成集群。**无法将两个独立的集群合并成为新的集群**。
-##节点管理
+## 节点管理
-###添加节点
+### 添加节点
执行CLI程序taos, 使用root账号登录进系统, 执行:
```
CREATE DNODE "fqdn:port";
```
将新节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**,否则出错。一个节点对外服务的fqdn和port可以通过配置文件taos.cfg进行配置,缺省是自动获取。
-###删除节点
+### 删除节点
执行CLI程序taos, 使用root账号登录进TDengine系统,执行:
+
```
DROP DNODE "fqdn:port";
```
其中fqdn是被删除的节点的FQDN,port是其对外服务器的端口号
-###查看节点
+### 查看节点
执行CLI程序taos,使用root账号登录进TDengine系统,执行:
+
```
SHOW DNODES;
```
它将列出集群中所有的dnode,每个dnode的fqdn:port, 状态(ready, offline等),vnode数目,还未使用的vnode数目等信息。在添加或删除一个节点后,可以使用该命令查看。
-###查看虚拟节点组
+### 查看虚拟节点组
为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况,自动进行分配的,无需任何人工干预。
@@ -97,7 +120,7 @@ SHOW DNODES;
```
SHOW VGROUPS;
```
-##vnode的高可用性
+## vnode的高可用性
TDengine通过多副本的机制来提供系统的高可用性,包括vnode和mnode的高可用性。
vnode的副本数是与DB关联的,一个集群里可以有多个DB,根据运营的需求,每个DB可以配置不同的副本数。创建数据库时,通过参数replica 指定副本数(缺省为1)。如果副本数为1,系统的可靠性无法保证,只要数据所在的节点宕机,就将无法提供服务。集群的节点数必须大于等于副本数,否则创建表时将返回错误“more dnodes are needed"。比如下面的命令将创建副本数为3的数据库demo:
@@ -111,7 +134,7 @@ CREATE DATABASE demo replica 3;
因为vnode的引入,无法简单的给出结论:“集群中过半dnode工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个节点不工作,那整个集群就无法正常工作了。
-##Mnode的高可用性
+## Mnode的高可用性
TDengine集群是由mnode (taosd的一个模块,逻辑节点) 负责管理的,为保证mnode的高可用,可以配置多个mnode副本,副本数由系统配置参数numOfMnodes决定,有效范围为1-3。为保证元数据的强一致性,mnode副本之间是通过同步的方式进行数据复制的。
一个集群有多个dnode, 但一个dnode至多运行一个mnode实例。多个dnode情况下,哪个dnode可以作为mnode呢?这是完全由系统根据整个系统资源情况,自动指定的。用户可通过CLI程序taos,在TDengine的console里,执行如下命令:
@@ -125,7 +148,7 @@ SHOW MNODES;
**注意:**一个TDengine高可用系统,无论是vnode还是mnode, 都必须配置多个副本。
-##负载均衡
+## 负载均衡
有三种情况,将触发负载均衡,而且都无需人工干预。
@@ -142,8 +165,9 @@ SHOW MNODES;
**注意:**如果一个虚拟节点组(包括mnode组)里每个节点都处于离线或unsynced状态,必须等该虚拟节点组里的所有节点都上线、都能交换状态信息后,才能选出Master,该虚拟节点组才能对外提供服务。比如整个集群有3个节点,副本数为3,如果3个节点都宕机,然后2个节点重启,是无法工作的,只有等3个节点都重启成功,才能对外服务。
-##Arbitrator的使用
+## Arbitrator的使用
如果副本数为偶数,当一个vnode group里一半或超过一半的vnode不工作时,是无法从中选出master的。同理,一半或超过一半的mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator, 那么节点B就能正常工作。
-TDengine安装包里带有一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6030。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了,当副本数为偶数数,系统将自动连接配置的arbitrator。
+TDengine安装包里带有一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6042。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了,当副本数为偶数时,系统将自动连接配置的arbitrator。
+
diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md
index a4111e78fc..b760fe161a 100644
--- a/documentation20/webdocs/markdowndocs/faq-ch.md
+++ b/documentation20/webdocs/markdowndocs/faq-ch.md
@@ -1,6 +1,6 @@
-#常见问题
+# 常见问题
-#### 1. TDengine2.0之前的版本升级到2.0及以上的版本应该注意什么?☆☆☆
+## 1. TDengine2.0之前的版本升级到2.0及以上的版本应该注意什么?☆☆☆
2.0版本在之前版本的基础上,进行了完全的重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作:
@@ -10,23 +10,23 @@
4. 安装最新稳定版本的TDengine
5. 如果数据需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队,进行协助解决
-#### 2. Windows平台下JDBCDriver找不到动态链接库,怎么办?
+## 2. Windows平台下JDBCDriver找不到动态链接库,怎么办?
请看为此问题撰写的技术博客
-#### 3. 创建数据表时提示more dnodes are needed
+## 3. 创建数据表时提示more dnodes are needed
请看为此问题撰写的技术博客
-#### 4. 如何让TDengine crash时生成core文件?
+## 4. 如何让TDengine crash时生成core文件?
请看为此问题撰写的技术博客
-#### 5. 遇到错误"Unable to establish connection", 我怎么办?
+## 5. 遇到错误"Unable to establish connection", 我怎么办?
客户端遇到链接故障,请按照下面的步骤进行检查:
1. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用
2. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
-3. 确认客户端连接时指定了正确的服务器IP地址
-4. ping服务器IP,如果没有反应,请检查你的网络
+3. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name,可在服务器上执行Linux命令hostname -f获得)
+4. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件
5. 检查防火墙设置,确认TCP/UDP 端口6030-6039 是打开的
6. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/lib/taos*里, 并且*/usr/local/lib/taos*在系统库函数搜索路径*LD_LIBRARY_PATH*里
7. 对于windows上的JDBC, ODBC, Python, Go等连接,确保*driver/c/taos.dll*在你的系统搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
@@ -36,40 +36,61 @@
检查客户端侧TCP端口链接是否工作:`nc {hostIP} {port}`
-#### 6. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误
+## 6. 遇到错误“Unexpected generic error in RPC”, 我怎么办?
+产生这个错误,是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用,请做如下检查:
+
+1. 请检查连接的服务器的FQDN是否正确
+2. 如果网络配置有DNS server, 请检查是否正常工作
+3. 如果网络没有配置DNS server, 请检查客户端所在机器的hosts文件,查看该FQDN是否配置,并是否有正确的IP地址。
+4. 如果网络配置OK,从客户端所在机器,你需要能Ping该连接的FQDN,否则客户端是无法链接服务器的
+
+
+## 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误
如果你确认语法正确,2.0之前版本,请检查SQL语句长度是否超过64K。如果超过,也会返回这个错误。
-#### 7. 是否支持validation queries?
+## 8. 是否支持validation queries?
TDengine还没有一组专用的validation queries。然而建议你使用系统监测的数据库”log"来做。
-#### 8. 我可以删除或更新一条记录吗?
+## 9. 我可以删除或更新一条记录吗?
不能。因为TDengine是为联网设备采集的数据设计的,不容许修改。但TDengine提供数据保留策略,只要数据记录超过保留时长,就会被自动删除。
-#### 10. 我怎么创建超过250列的表?
+## 10. 我怎么创建超过1024列的表?
使用2.0及其以上版本,默认支持1024列;2.0之前的版本,TDengine最大允许创建250列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。
-#### 10. 最有效的写入数据的方法是什么?
+## 10. 最有效的写入数据的方法是什么?
-批量插入。每条写入语句可以一张表同时插入多条记录,也可以同时插入多张表的记录。
+批量插入。每条写入语句可以一张表同时插入多条记录,也可以同时插入多张表的多条记录。
-#### 11. 最有效的写入数据的方法是什么?windows系统下插入的nchar类数据中的汉字被解析成了乱码如何解决?
+## 11. 最有效的写入数据的方法是什么?windows系统下插入的nchar类数据中的汉字被解析成了乱码如何解决?
windows下插入nchar类的数据中如果有中文,请先确认系统的地区设置成了中国(在Control Panel里可以设置),这时cmd中的`taos`客户端应该已经可以正常工作了;如果是在IDE里开发Java应用,比如Eclipse, Intellij,请确认IDE里的文件编码为GBK(这是Java默认的编码类型),然后在生成Connection时,初始化客户端的配置,具体语句如下:
-
- Class.forName("com.taosdata.jdbc.TSDBDriver");
-
- Properties properties = new Properties();
-
- properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
-
- Connection = DriverManager.getConnection(url, properties);
-
-#### 12.TDengine GO windows驱动的如何编译?
+```JAVA
+Class.forName("com.taosdata.jdbc.TSDBDriver");
+Properties properties = new Properties();
+properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
+Connection connection = DriverManager.getConnection(url, properties);
+```
+## 12.TDengine GO windows驱动的如何编译?
请看为此问题撰写的技术博客
+## 13.JDBC报错: the excuted SQL is not a DML or a DDL?
+请更新至最新的JDBC驱动
+```xml
+<dependency>
+  <groupId>com.taosdata.jdbc</groupId>
+  <artifactId>taos-jdbcdriver</artifactId>
+  <version>2.0.4</version>
+</dependency>
+```
+## 14. 怎么报告问题?
+如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包:
+1. /var/log/taos
+2. /etc/taos
+附上必要的问题描述,以及发生该问题的执行操作,出现问题的表征及大概的时间,在 GitHub提交Issue。
+为了保证有足够的debug信息,如果问题能够重复,请修改/etc/taos/taos.cfg文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启taosd, 重复问题,然后再递交。但系统正常运行时,请一定将debugFlag设置为131,否则会产生大量的日志信息,降低系统效率。
diff --git a/documentation20/webdocs/markdowndocs/insert-ch.md b/documentation20/webdocs/markdowndocs/insert-ch.md
index 5eef99e891..5e4532dfd0 100644
--- a/documentation20/webdocs/markdowndocs/insert-ch.md
+++ b/documentation20/webdocs/markdowndocs/insert-ch.md
@@ -24,6 +24,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过16K,一条SQL语句总长度不能超过64K(可通过参数maxSQLLength配置,最大可配置为8M)。
- TDengine支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开20个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程切频繁切换,带来额外开销。
+- 对同一张表,如果新插入记录的时间戳已经存在,新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。
## Prometheus直接写入
[Prometheus](https://www.prometheus.io/)作为Cloud Native Computing Fundation毕业的项目,在性能监控以及K8S性能监控领域有着非常广泛的应用。TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Prometheus做简单配置,无需任何代码,就可将Prometheus采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。
diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index 93a6936da3..a91dde8796 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -24,7 +24,7 @@
# dataDir /var/lib/taos
# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
-# arbitrator arbitrator_hostname:6030
+# arbitrator arbitrator_hostname:6042
# number of threads per CPU core
# numOfThreadsPerCore 1.0
diff --git a/packaging/deb/tarbitratord b/packaging/deb/tarbitratord
index bf4dba7258..3f97c3c0c2 100644
--- a/packaging/deb/tarbitratord
+++ b/packaging/deb/tarbitratord
@@ -7,19 +7,19 @@
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
-# Provides: TDEngine
+# Provides: taoscluster
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
-# Short-Description: Starts TDEngine tarbitrator
-# Description: Starts TDEngine tarbitrator, a arbitrator
+# Short-Description: Starts taoscluster tarbitrator
+# Description:       Starts taoscluster tarbitrator, an arbitrator
### END INIT INFO
set -e
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
-NAME="tarbitrator"
+NAME="taoscluster"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/tarbitrator"
diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile
index 3fb34e8286..668d5a49eb 100644
--- a/packaging/docker/Dockerfile
+++ b/packaging/docker/Dockerfile
@@ -12,6 +12,6 @@ ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib"
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US:en
ENV LC_ALL=en_US.UTF-8
-EXPOSE 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041
+EXPOSE 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042
CMD ["taosd"]
VOLUME [ "/var/lib/taos", "/var/log/taos","/etc/taos/" ]
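Since the arbitrator port 6042 is now exposed as well, a container started from this image should publish the whole 6030-6042 range over both TCP and UDP. A minimal sketch, assuming the tdengine/tdengine image name from the Docker Hub badge added to the README:

```bash
# Run TDengine in Docker with the full 6030-6042 port range mapped for TCP and UDP.
docker run -d --name tdengine \
  -p 6030-6042:6030-6042 \
  -p 6030-6042:6030-6042/udp \
  tdengine/tdengine
```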
diff --git a/packaging/release.sh b/packaging/release.sh
index 2302b45875..3c0378042d 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -7,20 +7,24 @@ set -e
# releash.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
-# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...]
+# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
# -V [stable | beta]
# -l [full | lite]
+# -s [static | dynamic]
# -n [2.0.0.3]
+# -m [2.0.0.0]
# set parameters by default value
verMode=edge # [cluster, edge]
verType=stable # [stable, beta]
cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
-osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...]
+osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
+soMode=dynamic # [static | dynamic]
verNumber=""
+verNumberComp="2.0.0.0"
-while getopts "hv:V:c:o:l:n:" arg
+while getopts "hv:V:c:o:l:s:n:m:" arg
do
case $arg in
v)
@@ -39,10 +43,18 @@ do
#echo "pagMode=$OPTARG"
pagMode=$(echo $OPTARG)
;;
+ s)
+ #echo "soMode=$OPTARG"
+ soMode=$(echo $OPTARG)
+ ;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
+ m)
+ #echo "verNumberComp=$OPTARG"
+ verNumberComp=$(echo $OPTARG)
+ ;;
o)
#echo "osType=$OPTARG"
osType=$(echo $OPTARG)
@@ -50,10 +62,12 @@ do
h)
echo "Usage: `basename $0` -v [cluster | edge] "
echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] "
- echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] "
+ echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
echo " -V [stable | beta] "
echo " -l [full | lite] "
+ echo " -s [static | dynamic] "
echo " -n [version number] "
+ echo " -m [compatible version number] "
exit 0
;;
?) #unknow option
@@ -63,215 +77,142 @@ do
esac
done
-echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} verNumber=${verNumber}"
+echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} verNumber=${verNumber} verNumberComp=${verNumberComp}"
curr_dir=$(pwd)
if [ "$osType" != "Darwin" ]; then
- script_dir="$(dirname $(readlink -f $0))"
- top_dir="$(readlink -f ${script_dir}/..)"
+ script_dir="$(dirname $(readlink -f $0))"
+ top_dir="$(readlink -f ${script_dir}/..)"
else
- script_dir=`dirname $0`
- cd ${script_dir}
- script_dir="$(pwd)"
- top_dir=${script_dir}/..
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ top_dir=${script_dir}/..
fi
-versioninfo="${top_dir}/src/util/src/version.c"
-
csudo=""
-if command -v sudo > /dev/null; then
- csudo="sudo"
-fi
+#if command -v sudo > /dev/null; then
+# csudo="sudo"
+#fi
function is_valid_version() {
- [ -z $1 ] && return 1 || :
+ [ -z $1 ] && return 1 || :
- rx='^([0-9]+\.){3}(\*|[0-9]+)$'
- if [[ $1 =~ $rx ]]; then
- return 0
- fi
- return 1
+ rx='^([0-9]+\.){3}(\*|[0-9]+)$'
+ if [[ $1 =~ $rx ]]; then
+ return 0
+ fi
+ return 1
}
function vercomp () {
- if [[ $1 == $2 ]]; then
- echo 0
- exit 0
- fi
-
- local IFS=.
- local i ver1=($1) ver2=($2)
-
- # fill empty fields in ver1 with zeros
- for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
- ver1[i]=0
- done
-
- for ((i=0; i<${#ver1[@]}; i++)); do
- if [[ -z ${ver2[i]} ]]; then
- # fill empty fields in ver2 with zeros
- ver2[i]=0
- fi
- if ((10#${ver1[i]} > 10#${ver2[i]})); then
- echo 1
- exit 0
- fi
- if ((10#${ver1[i]} < 10#${ver2[i]})); then
- echo 2
- exit 0
- fi
- done
+ if [[ $1 == $2 ]]; then
echo 0
-}
+ exit 0
+ fi
+
+ local IFS=.
+ local i ver1=($1) ver2=($2)
-# 1. Read version information
-version=$(cat ${versioninfo} | grep " version" | cut -d '"' -f2)
-compatible_version=$(cat ${versioninfo} | grep " compatible_version" | cut -d '"' -f2)
+ # fill empty fields in ver1 with zeros
+ for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
+ ver1[i]=0
+ done
-if [ -z ${verNumber} ]; then
- while true; do
- read -p "Do you want to release a new version? [y/N]: " is_version_change
-
- if [[ ( "${is_version_change}" == "y") || ( "${is_version_change}" == "Y") ]]; then
- read -p "Please enter the new version: " tversion
- while true; do
- if (! is_valid_version $tversion) || [ "$(vercomp $tversion $version)" = '2' ]; then
- read -p "Please enter a correct version: " tversion
- continue
- fi
- version=${tversion}
- break
- done
-
- echo
-
- read -p "Enter the oldest compatible version: " tversion
- while true; do
-
- if [ -z $tversion ]; then
- break
- fi
-
- if (! is_valid_version $tversion) || [ "$(vercomp $version $tversion)" = '2' ]; then
- read -p "enter correct compatible version: " tversion
- else
- compatible_version=$tversion
- break
- fi
- done
-
- break
- elif [[ ( "${is_version_change}" == "n") || ( "${is_version_change}" == "N") ]]; then
- echo "Use old version: ${version} compatible version: ${compatible_version}."
- break
- else
- continue
+ for ((i=0; i<${#ver1[@]}; i++)); do
+ if [[ -z ${ver2[i]} ]]; then
+ # fill empty fields in ver2 with zeros
+ ver2[i]=0
+ fi
+ if ((10#${ver1[i]} > 10#${ver2[i]})); then
+ echo 1
+ exit 0
+ fi
+ if ((10#${ver1[i]} < 10#${ver2[i]})); then
+ echo 2
+ exit 0
fi
done
-else
- echo "old version: $version, new version: $verNumber"
- #if ( ! is_valid_version $verNumber ) || [[ "$(vercomp $version $verNumber)" == '2' ]]; then
- # echo "please enter correct version"
- # exit 0
- #else
- version=${verNumber}
- #fi
-fi
+ echo 0
+}
-echo "=======================new version number: ${version}======================================"
+# 1. check version information
+if ! is_valid_version $verNumber || ! is_valid_version $verNumberComp || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]; then
+ echo "please enter correct version"
+ exit 0
+fi
+
+echo "=======================new version number: ${verNumber}, compatible version: ${verNumberComp}======================================"
-# output the version info to the buildinfo file.
build_time=$(date +"%F %R")
-echo "char version[12] = \"${version}\";" > ${versioninfo}
-echo "char compatible_version[12] = \"${compatible_version}\";" >> ${versioninfo}
-echo "char gitinfo[48] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo}
-if [ "$verMode" != "cluster" ]; then
- echo "char gitinfoOfInternal[48] = \"\";" >> ${versioninfo}
-else
- enterprise_dir="${top_dir}/../enterprise"
- cd ${enterprise_dir}
- echo "char gitinfoOfInternal[48] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo}
- cd ${curr_dir}
-fi
-echo "char buildinfo[64] = \"Built by ${USER} at ${build_time}\";" >> ${versioninfo}
-echo "" >> ${versioninfo}
-tmp_version=$(echo $version | tr -s "." "_")
-if [ "$verMode" == "cluster" ]; then
- libtaos_info=${tmp_version}_${osType}_${cpuType}
-else
- libtaos_info=edge_${tmp_version}_${osType}_${cpuType}
-fi
-if [ "$verType" == "beta" ]; then
- libtaos_info=${libtaos_info}_${verType}
-fi
-echo "void libtaos_${libtaos_info}() {};" >> ${versioninfo}
+
+# get commit id from git
+gitinfo=$(git rev-parse --verify HEAD)
+enterprise_dir="${top_dir}/../enterprise"
+cd ${enterprise_dir}
+gitinfoOfInternal=$(git rev-parse --verify HEAD)
+cd ${curr_dir}
# 2. cmake executable file
compile_dir="${top_dir}/debug"
if [ -d ${compile_dir} ]; then
- ${csudo} rm -rf ${compile_dir}
+ ${csudo} rm -rf ${compile_dir}
fi
if [ "$osType" != "Darwin" ]; then
- ${csudo} mkdir -p ${compile_dir}
+ ${csudo} mkdir -p ${compile_dir}
else
- mkdir -p ${compile_dir}
+ mkdir -p ${compile_dir}
fi
cd ${compile_dir}
# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
- if [ "$verMode" != "cluster" ]; then
- cmake ../ -DCPUTYPE=${cpuType} -DPAGMODE=${pagMode}
- else
- cmake ../../ -DCPUTYPE=${cpuType}
- fi
+ if [ "$verMode" != "cluster" ]; then
+ cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode}
+ else
+ cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp}
+ fi
else
- echo "input cpuType=${cpuType} error!!!"
- exit 1
+ echo "input cpuType=${cpuType} error!!!"
+ exit 1
fi
make
cd ${curr_dir}
-# 3. judge the operating system type, then Call the corresponding script for packaging
-#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
-#osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
-#echo "osinfo: ${osinfo}"
-
+# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
- if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]]; then
- echo "====do deb package for the ubuntu system===="
- output_dir="${top_dir}/debs"
- if [ -d ${output_dir} ]; then
- ${csudo} rm -rf ${output_dir}
- fi
- ${csudo} mkdir -p ${output_dir}
- cd ${script_dir}/deb
- ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType}
-
- echo "====do rpm package for the centos system===="
- output_dir="${top_dir}/rpms"
- if [ -d ${output_dir} ]; then
- ${csudo} rm -rf ${output_dir}
- fi
- ${csudo} mkdir -p ${output_dir}
- cd ${script_dir}/rpm
- ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType}
+ if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]]; then
+ echo "====do deb package for the ubuntu system===="
+ output_dir="${top_dir}/debs"
+ if [ -d ${output_dir} ]; then
+ ${csudo} rm -rf ${output_dir}
fi
+ ${csudo} mkdir -p ${output_dir}
+ cd ${script_dir}/deb
+ ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
+
+ echo "====do rpm package for the centos system===="
+ output_dir="${top_dir}/rpms"
+ if [ -d ${output_dir} ]; then
+ ${csudo} rm -rf ${output_dir}
+ fi
+ ${csudo} mkdir -p ${output_dir}
+ cd ${script_dir}/rpm
+ ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
+ fi
- echo "====do tar.gz package for all systems===="
- cd ${script_dir}/tools
-
- ${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
- ${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ echo "====do tar.gz package for all systems===="
+ cd ${script_dir}/tools
+
+ ${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ ${csudo} ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ ${csudo} ./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
else
- cd ${script_dir}/tools
- ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
+ cd ${script_dir}/tools
+ ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
fi
-# 4. Clean up temporary compile directories
-#${csudo} rm -rf ${compile_dir}
-
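Putting the new options together, a non-interactive release build now pins both version numbers on the command line. The invocation below is only a sketch based on the usage text printed by `-h`; the version values are placeholders.

```bash
# Build an edge x64 Linux release with statically linked libraries,
# version 2.0.2.0, compatible back to 2.0.0.0.
cd packaging
./release.sh -v edge -c x64 -o Linux -V stable -l full \
             -s static -n 2.0.2.0 -m 2.0.0.0
```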
diff --git a/packaging/rpm/tarbitratord b/packaging/rpm/tarbitratord
index 5d3f2b9c79..68138f5c1d 100644
--- a/packaging/rpm/tarbitratord
+++ b/packaging/rpm/tarbitratord
@@ -7,10 +7,10 @@
#
#
### BEGIN INIT INFO
-# Provides: TDEngine
+# Provides: taoscluster
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
-# Short-Description: start and stop taosd
+# Short-Description: start and stop tarbitrator
# Description: tarbitrator is a arbitrator used in TDengine cluster.
### END INIT INFO
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
old mode 100644
new mode 100755
index 6f54993f91..1cbb3ead9c
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -76,7 +76,11 @@ fi
# get the operating system type for using the corresponding init file
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
-osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
#echo "osinfo: ${osinfo}"
os_type=0
if echo $osinfo | grep -qwi "ubuntu" ; then
@@ -95,8 +99,10 @@ elif echo $osinfo | grep -qwi "fedora" ; then
# echo "This is fedora system"
os_type=2
else
- echo "${osinfo}: This is an officially unverified linux system, If there are any problems with the installation and operation, "
- echo "please feel free to contact taosdata.com for support."
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified linux system,"
+ echo " if there are any problems with the installation and operation, "
+ echo " please feel free to contact taosdata.com for support."
os_type=1
fi
@@ -192,13 +198,12 @@ function install_lib() {
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo} rm -rf ${v15_java_app_dir} || :
-
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
- if [ -d ${lib64_link_dir} ]; then
+ if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
@@ -306,14 +311,27 @@ function clean_service_on_sysvinit() {
fi
if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/taosd ]; then
${csudo} chkconfig --del taosd || :
+ fi
+
+ if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
+ fi
elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/taosd ]; then
${csudo} insserv -r taosd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} insserv -r tarbitratord || :
+ fi
elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/taosd ]; then
${csudo} update-rc.d -f taosd remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} update-rc.d -f tarbitratord remove || :
+ fi
fi
${csudo} rm -f ${service_config_dir}/taosd || :
@@ -326,7 +344,6 @@ function clean_service_on_sysvinit() {
function install_service_on_sysvinit() {
clean_service_on_sysvinit
-
sleep 1
# Install taosd service
@@ -364,34 +381,29 @@ function install_service_on_sysvinit() {
function clean_service_on_systemd() {
taosd_service_config="${service_config_dir}/taosd.service"
-
if systemctl is-active --quiet taosd; then
echo "TDengine is running, stopping it..."
${csudo} systemctl stop taosd &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null
-
${csudo} rm -f ${taosd_service_config}
-
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/nginxd.service"
-
if systemctl is-active --quiet nginxd; then
echo "Nginx for TDengine is running, stopping it..."
${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
-
${csudo} rm -f ${nginx_service_config}
-
- tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
- if systemctl is-active --quiet tarbitratord; then
- echo "tarbitrator is running, stopping it..."
- ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
- fi
- ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
-
- ${csudo} rm -f ${tarbitratord_service_config}
fi
}
@@ -401,7 +413,6 @@ function install_service_on_systemd() {
clean_service_on_systemd
taosd_service_config="${service_config_dir}/taosd.service"
-
${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}"
${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}"
@@ -422,32 +433,30 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}"
${csudo} systemctl enable taosd
-
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ #${csudo} systemctl enable tarbitratord
+
if [ "$verMode" == "cluster" ]; then
-
- tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
-
- ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
- # ${csudo} systemctl enable tarbitratord
-
nginx_service_config="${service_config_dir}/nginxd.service"
${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}"
@@ -696,6 +705,7 @@ function install_TDengine() {
echo
echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}"
fi
+ touch ~/.taos_history
rm -rf $(tar -tf taos.tar.gz)
}
diff --git a/packaging/tools/install_arbi.sh b/packaging/tools/install_arbi.sh
new file mode 100755
index 0000000000..a89d2257dc
--- /dev/null
+++ b/packaging/tools/install_arbi.sh
@@ -0,0 +1,297 @@
+#!/bin/bash
+#
+# This file is used to install the TDengine arbitrator on linux systems. The operating system
+# is required to use systemd to manage services at boot
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+
+bin_link_dir="/usr/bin"
+#inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+
+# old bin dir
+bin_dir="/usr/local/tarbitrator/bin"
+
+service_config_dir="/etc/systemd/system"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified linux system,"
+ echo " if there are any problems with the installation and operation, "
+ echo " please feel free to contact taosdata.com for support."
+ os_type=1
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ #${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/rmtarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/remove_arbi.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi.sh ${bin_link_dir}/rmtarbitrator || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function clean_service_on_sysvinit() {
+ #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+    # Install tarbitratord service
+
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
+ #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+# taos:2345:respawn:/etc/init.d/tarbitratord start
+
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ ${csudo} systemctl enable tarbitratord
+}
+
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+    # must manually stop tarbitrator
+ kill_tarbitrator
+ fi
+}
+
+function update_TDengine() {
+ # Start to update
+ echo -e "${GREEN}Start to update TDengine's arbitrator ...${NC}"
+ # Stop the service if running
+ if pidof tarbitrator &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop tarbitratord || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service tarbitratord stop || :
+ else
+ kill_tarbitrator
+ fi
+ sleep 1
+ fi
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+
+ echo
+ #echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
+ fi
+ echo
+ echo -e "\033[44;32;1mTDengine's arbitrator is updated successfully!${NC}"
+}
+
+function install_TDengine() {
+ # Start to install
+ echo -e "${GREEN}Start to install TDengine's arbitrator ...${NC}"
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+ echo
+ #echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
+ fi
+
+ echo -e "\033[44;32;1mTDengine's arbitrator is installed successfully!${NC}"
+ echo
+}
+
+
+## ==============================Main program starts from here============================
+# Install or update the arbitrator
+if [ -x ${bin_dir}/tarbitrator ]; then
+ update_flag=1
+ update_TDengine
+else
+ install_TDengine
+fi
+
diff --git a/packaging/tools/makearbi.sh b/packaging/tools/makearbi.sh
new file mode 100755
index 0000000000..bc6179eff2
--- /dev/null
+++ b/packaging/tools/makearbi.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+#
+# Generate the arbitrator's tar.gz install package for all OS systems
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
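+# expected invocation (sketch): makearbi.sh <compile_dir> <version> <build_time> <cpuType> <osType> <verMode> <verType> <pagMode>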
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/TDengine-enterprise-arbitrator"
+else
+ install_dir="${release_dir}/TDengine-arbitrator"
+fi
+
+# Directories and files.
+bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh"
+install_files="${script_dir}/install_arbi.sh"
+
+#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
+init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
+
+# make directories.
+mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi.sh || :
+#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
+mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${version}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${version}-${osType}-${cpuType}
+else
+  echo "unknown verMode, neither cluster nor edge"
+ exit 1
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+  echo "unknown verType, neither stable nor beta"
+ exit 1
+fi
+
+# "|| :" would always reset $? to 0, so capture tar's real exit code explicitly
+exitcode=0
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || exitcode=$?
+if [ "$exitcode" != "0" ]; then
+  echo "tar ${pkg_name}.tar.gz error !!!"
+  exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh
index 5fe4cf50dd..8545a3e5e4 100755
--- a/packaging/tools/makeclient.sh
+++ b/packaging/tools/makeclient.sh
@@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
- bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
+ bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index 63e09dc568..dd6fe65eb7 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -69,11 +69,12 @@ function kill_tarbitrator() {
}
function clean_bin() {
# Remove link
- ${csudo} rm -f ${bin_link_dir}/taos || :
- ${csudo} rm -f ${bin_link_dir}/taosd || :
- ${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdump || :
- ${csudo} rm -f ${bin_link_dir}/rmtaos || :
+ ${csudo} rm -f ${bin_link_dir}/taos || :
+ ${csudo} rm -f ${bin_link_dir}/taosd || :
+ ${csudo} rm -f ${bin_link_dir}/taosdemo || :
+ ${csudo} rm -f ${bin_link_dir}/rmtaos || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
}
function clean_lib() {
@@ -86,7 +87,7 @@ function clean_lib() {
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
- ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_config() {
@@ -148,15 +149,27 @@ function clean_service_on_sysvinit() {
${csudo} service tarbitratord stop || :
fi
- if ((${initd_mod}==1)); then
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/taosd ]; then
${csudo} chkconfig --del taosd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
- elif ((${initd_mod}==2)); then
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/taosd ]; then
${csudo} insserv -r taosd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} insserv -r tarbitratord || :
- elif ((${initd_mod}==3)); then
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/taosd ]; then
${csudo} update-rc.d -f taosd remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} update-rc.d -f tarbitratord remove || :
+ fi
fi
${csudo} rm -f ${service_config_dir}/taosd || :
@@ -196,13 +209,20 @@ ${csudo} rm -rf ${data_link_dir} || :
${csudo} rm -rf ${install_main_dir}
${csudo} rm -rf ${install_nginxd_dir}
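+# /etc/os-release may be absent on minimal systems, so guard the read before detecting the distro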
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+else
+ osinfo=""
+fi
-osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
if echo $osinfo | grep -qwi "ubuntu" ; then
# echo "this is ubuntu system"
${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "this is debian system"
+ ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
elif echo $osinfo | grep -qwi "centos" ; then
- echo "this is centos system"
+# echo "this is centos system"
${csudo} rpm -e --noscripts tdengine || :
fi
diff --git a/packaging/tools/remove_arbi.sh b/packaging/tools/remove_arbi.sh
new file mode 100755
index 0000000000..68fd9275fb
--- /dev/null
+++ b/packaging/tools/remove_arbi.sh
@@ -0,0 +1,129 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall TDengine's arbitrator
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+bin_link_dir="/usr/bin"
+#inc_link_dir="/usr/include"
+
+service_config_dir="/etc/systemd/system"
+tarbitrator_service_name="tarbitratord"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
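+# detect how services are managed on this host:
+#   service_mod: 0=systemd, 1=sysvinit via "service", 2=none (processes are killed directly)
+#   initd_mod:   1=chkconfig, 2=insserv, 3=update-rc.d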
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_log() {
+  # Remove log file
+ ${csudo} rm -rf /arbitrator.log || :
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "TDengine tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+function clean_service_on_sysvinit() {
+ if pidof tarbitrator &> /dev/null; then
+ echo "TDengine's tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+    # no service manager available, so tarbitrator must be stopped manually
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+##clean_header
+# Remove log file
+clean_log
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}TDengine's arbitrator is removed successfully!${NC}"
diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh
index 4bc278fcf0..7cbf524d04 100755
--- a/packaging/tools/remove_client.sh
+++ b/packaging/tools/remove_client.sh
@@ -37,7 +37,7 @@ function kill_client() {
function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/taos || :
- ${csudo} rm -f ${bin_link_dir}/taosump || :
+ ${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
}
diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt
index d5cbf3cab6..5b5fb3435d 100644
--- a/src/client/CMakeLists.txt
+++ b/src/client/CMakeLists.txt
@@ -35,12 +35,14 @@ IF (TD_LINUX)
ELSEIF (TD_WINDOWS)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows/win32)
-
+
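+  # generate taos.rc from taos.rc.in so the resulting taos.dll carries Windows version metadata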
+ CONFIGURE_FILE("${TD_COMMUNITY_DIR}/src/client/src/taos.rc.in" "${TD_COMMUNITY_DIR}/src/client/src/taos.rc")
+
ADD_LIBRARY(taos_static STATIC ${SRC})
TARGET_LINK_LIBRARIES(taos_static trpc tutil query)
# generate dynamic library (*.dll)
- ADD_LIBRARY(taos SHARED ${SRC})
+ ADD_LIBRARY(taos SHARED ${SRC} ${TD_COMMUNITY_DIR}/src/client/src/taos.rc)
IF (NOT TD_GODLL)
SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def)
ENDIF ()
diff --git a/src/client/inc/tscLocalMerge.h b/src/client/inc/tscLocalMerge.h
index 397a60d140..4e579b0729 100644
--- a/src/client/inc/tscLocalMerge.h
+++ b/src/client/inc/tscLocalMerge.h
@@ -87,7 +87,6 @@ typedef struct SRetrieveSupport {
SSqlObj * pParentSql;
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
uint32_t numOfRetry; // record the number of retry times
- pthread_mutex_t queryMutex;
} SRetrieveSupport;
int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pDesc,
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index 2ca6ba6691..f77897a74b 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -108,7 +108,7 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
-SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes,
+SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
uint32_t offset);
void* tscDestroyBlockArrayList(SArray* pDataBlockList);
@@ -138,10 +138,10 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo);
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscQueryTags(SQueryInfo* pQueryInfo);
-void tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex,
- SSchema* pColSchema, int16_t colType);
+SSqlExpr* tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
+ SColumnIndex* pIndex, SSchema* pColSchema, int16_t colType);
-int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql);
+int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql);
void tscClearInterpInfo(SQueryInfo* pQueryInfo);
bool tscIsInsertData(char* sqlstr);
@@ -194,11 +194,11 @@ SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex);
SArray* tscColumnListClone(const SArray* src, int16_t tableIndex);
void tscColumnListDestroy(SArray* pColList);
-int32_t tscValidateName(SSQLToken* pToken);
+int32_t tscValidateName(SStrToken* pToken);
void tscIncStreamExecutionCount(void* pStream);
-bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId);
+bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t numOfParams);
// get starter position of metric query condition (query on tags) in SSqlCmd.payload
SCond* tsGetSTableQueryCond(STagCond* pCond, uint64_t uid);
@@ -217,7 +217,7 @@ STableMetaInfo* tscGetTableMetaInfoFromCmd(SSqlCmd *pCmd, int32_t subClauseIndex
STableMetaInfo* tscGetMetaInfo(SQueryInfo *pQueryInfo, int32_t tableIndex);
SQueryInfo *tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex);
-int32_t tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex, SQueryInfo** pQueryInfo);
+SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex);
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache);
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index 6d02bc7fbd..d2c52e972a 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -35,6 +35,40 @@ extern "C" {
#include "qTsbuf.h"
#include "tcmdtype.h"
+#if 0
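+// fault-injection allocators (kept disabled by "#if 0"): roughly 1 in 5000 calls returns NULL to exercise out-of-memory paths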
+static UNUSED_FUNC void *u_malloc (size_t __size) {
+ uint32_t v = rand();
+
+ if (v % 5000 <= 0) {
+ return NULL;
+ } else {
+ return malloc(__size);
+ }
+}
+
+static UNUSED_FUNC void* u_calloc(size_t num, size_t __size) {
+ uint32_t v = rand();
+ if (v % 5000 <= 0) {
+ return NULL;
+ } else {
+ return calloc(num, __size);
+ }
+}
+
+static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
+ uint32_t v = rand();
+ if (v % 5000 <= 0) {
+ return NULL;
+ } else {
+ return realloc(p, __size);
+ }
+}
+
+#define calloc u_calloc
+#define malloc u_malloc
+#define realloc u_realloc
+#endif
+
// forward declaration
struct SSqlInfo;
struct SLocalReducer;
@@ -195,9 +229,9 @@ typedef struct STableDataBlocks {
typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it seperately.
- uint32_t type; // query/insert/import type
+ uint32_t type; // query/insert type
char slidingTimeUnit;
- STimeWindow window;
+ STimeWindow window; // query time window
int64_t intervalTime; // aggregation time interval
int64_t slidingTime; // sliding window in mseconds
SSqlGroupbyExpr groupbyExpr; // group by tags info
@@ -216,6 +250,7 @@ typedef struct SQueryInfo {
char * msg; // pointer to the pCmd->payload to keep error message temporarily
int64_t clauseLimit; // limit for current sub clause
int64_t prjOffset; // offset value in the original sql expression, only applied at client side
+ int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
} SQueryInfo;
typedef struct {
@@ -234,7 +269,7 @@ typedef struct {
char * curSql; // current sql, resume position of sql after parsing paused
int8_t parseFinished;
- short numOfCols;
+ int16_t numOfCols;
uint32_t allocSize;
char * payload;
int32_t payloadLen;
@@ -431,31 +466,36 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
int32_t bytes = pInfo->pSqlExpr->resBytes;
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
- if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
- int32_t realLen = varDataLen(pData);
- assert(realLen <= bytes - VARSTR_HEADER_SIZE);
- if (isNull(pData, type)) {
- pRes->tsrow[columnIndex] = NULL;
+ // user defined constant value output columns
+ if (pInfo->pSqlExpr->colInfo.flag == TSDB_COL_UDC) {
+ if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
+ pData = pInfo->pSqlExpr->param[1].pz;
+ pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
+ pRes->tsrow[columnIndex] = (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) ? NULL : pData;
} else {
- pRes->tsrow[columnIndex] = ((tstr*)pData)->data;
- }
+ assert(bytes == tDataTypeDesc[type].nSize);
- if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor
- *(pData + realLen + VARSTR_HEADER_SIZE) = 0;
+ pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : &pInfo->pSqlExpr->param[1].i64Key;
+ pRes->length[columnIndex] = bytes;
}
-
- pRes->length[columnIndex] = realLen;
} else {
- assert(bytes == tDataTypeDesc[type].nSize);
+ if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
+ int32_t realLen = varDataLen(pData);
+ assert(realLen <= bytes - VARSTR_HEADER_SIZE);
- if (isNull(pData, type)) {
- pRes->tsrow[columnIndex] = NULL;
+ pRes->tsrow[columnIndex] = (isNull(pData, type)) ? NULL : ((tstr *)pData)->data;
+ if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor
+ *(pData + realLen + VARSTR_HEADER_SIZE) = 0;
+ }
+
+ pRes->length[columnIndex] = realLen;
} else {
- pRes->tsrow[columnIndex] = pData;
- }
+ assert(bytes == tDataTypeDesc[type].nSize);
- pRes->length[columnIndex] = bytes;
+ pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : pData;
+ pRes->length[columnIndex] = bytes;
+ }
}
}
diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
index d81bbd9b6d..eaea91d1bf 100644
--- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
+++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
@@ -89,6 +89,14 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp
(JNIEnv *env, jobject jobj, jlong con, jlong tres);
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: isUpdateQueryImp
+ * Signature: (J)J
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp
+ (JNIEnv *env, jobject jobj, jlong con, jlong tres);
+
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: freeResultSetImp
diff --git a/src/client/src/taos.rc.in b/src/client/src/taos.rc.in
new file mode 100644
index 0000000000..751be85fd0
--- /dev/null
+++ b/src/client/src/taos.rc.in
@@ -0,0 +1,31 @@
+1 VERSIONINFO
+ FILEVERSION ${TD_VER_NUMBER}
+ PRODUCTVERSION ${TD_VER_NUMBER}
+ FILEFLAGSMASK 0x17L
+#ifdef _DEBUG
+ FILEFLAGS 0x1L
+#else
+ FILEFLAGS 0x0L
+#endif
+ FILEOS 0x4L
+ FILETYPE 0x0L
+ FILESUBTYPE 0x0L
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904b0"
+ BEGIN
+ VALUE "FileDescription", "Native C Driver for TDengine"
+ VALUE "FileVersion", "${TD_VER_NUMBER}"
+ VALUE "InternalName", "taos.dll(${TD_VER_CPUTYPE})"
+ VALUE "LegalCopyright", "Copyright (C) 2020 TAOS Data"
+ VALUE "OriginalFilename", ""
+ VALUE "ProductName", "taos.dll(${TD_VER_CPUTYPE})"
+ VALUE "ProductVersion", "${TD_VER_NUMBER}"
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x409, 1200
+ END
+END
\ No newline at end of file
diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c
index b05aef76eb..4643d255dc 100644
--- a/src/client/src/tscAsync.c
+++ b/src/client/src/tscAsync.c
@@ -433,7 +433,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
tscError("%p get tableMeta failed, code:%s", pSql, tstrerror(code));
goto _error;
} else {
- tscDebug("%p get tableMeta successfully", pSql);
+ const char* msg = (pCmd->command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
+ tscDebug("%p get %s successfully", pSql, msg);
}
if (pSql->pStream == NULL) {
diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c
index 5c708dffee..e74fcba246 100644
--- a/src/client/src/tscFunctionImpl.c
+++ b/src/client/src/tscFunctionImpl.c
@@ -56,7 +56,8 @@
for (int32_t i = 0; i < (ctx)->tagInfo.numOfTagCols; ++i) { \
SQLFunctionCtx *__ctx = (ctx)->tagInfo.pTagCtxList[i]; \
if (__ctx->functionId == TSDB_FUNC_TS_DUMMY) { \
- __ctx->tag = (tVariant){.i64Key = (ts), .nType = TSDB_DATA_TYPE_BIGINT}; \
+ __ctx->tag.i64Key = (ts); \
+ __ctx->tag.nType = TSDB_DATA_TYPE_BIGINT; \
} \
aAggs[TSDB_FUNC_TAG].xFunction(__ctx); \
} \
@@ -963,7 +964,8 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
for (int32_t i = 0; i < (pCtx)->tagInfo.numOfTagCols; ++i) {
SQLFunctionCtx *__ctx = pCtx->tagInfo.pTagCtxList[i];
if (__ctx->functionId == TSDB_FUNC_TS_DUMMY) {
- __ctx->tag = (tVariant){.i64Key = key, .nType = TSDB_DATA_TYPE_BIGINT};
+ __ctx->tag.i64Key = key;
+ __ctx->tag.nType = TSDB_DATA_TYPE_BIGINT;
}
aAggs[TSDB_FUNC_TAG].xFunction(__ctx);
@@ -1811,23 +1813,19 @@ static void last_dist_func_second_merge(SQLFunctionCtx *pCtx) {
* NOTE: last_row does not use the interResultBuf to keep the result
*/
static void last_row_function(SQLFunctionCtx *pCtx) {
- assert(pCtx->size == 1);
-
+ assert(pCtx->size >= 1);
char *pData = GET_INPUT_CHAR(pCtx);
- assignVal(pCtx->aOutputBuf, pData, pCtx->inputBytes, pCtx->inputType);
+
+ // assign the last element in current data block
+ assignVal(pCtx->aOutputBuf, pData + (pCtx->size - 1) * pCtx->inputBytes, pCtx->inputBytes, pCtx->inputType);
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
pResInfo->hasResult = DATA_SET_FLAG;
- SLastrowInfo *pInfo = (SLastrowInfo *)pResInfo->interResultBuf;
- pInfo->ts = pCtx->ptsList[0];
-
- pInfo->hasResult = DATA_SET_FLAG;
-
- // set the result to final result buffer
+ // set the result to final result buffer in case of super table query
if (pResInfo->superTableQ) {
SLastrowInfo *pInfo1 = (SLastrowInfo *)(pCtx->aOutputBuf + pCtx->inputBytes);
- pInfo1->ts = pCtx->ptsList[0];
+ pInfo1->ts = pCtx->ptsList[pCtx->size - 1];
pInfo1->hasResult = DATA_SET_FLAG;
DO_UPDATE_TAG_COLUMNS(pCtx, pInfo1->ts);
@@ -1867,7 +1865,8 @@ static void valuePairAssign(tValuePair *dst, int16_t type, const char *val, int6
for (int32_t i = 0; i < pTagInfo->numOfTagCols; ++i) {
SQLFunctionCtx* ctx = pTagInfo->pTagCtxList[i];
if (ctx->functionId == TSDB_FUNC_TS_DUMMY) {
- ctx->tag = (tVariant) {.nType = TSDB_DATA_TYPE_BIGINT, .i64Key = tsKey};
+ ctx->tag.nType = TSDB_DATA_TYPE_BIGINT;
+ ctx->tag.i64Key = tsKey;
}
tVariantDump(&ctx->tag, dst->pTags + size, ctx->tag.nType, true);
@@ -2035,7 +2034,7 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) {
tValuePair **tvp = pRes->res;
int32_t step = QUERY_ASC_FORWARD_STEP;
- int32_t len = GET_RES_INFO(pCtx)->numOfRes;
+ int32_t len = (int32_t)(GET_RES_INFO(pCtx)->numOfRes);
switch (type) {
case TSDB_DATA_TYPE_INT: {
@@ -2409,10 +2408,10 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) {
// user specify the order of output by sort the result according to timestamp
if (pCtx->param[1].i64Key == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
__compar_fn_t comparator = (pCtx->param[2].i64Key == TSDB_ORDER_ASC) ? resAscComparFn : resDescComparFn;
- qsort(tvp, pResInfo->numOfRes, POINTER_BYTES, comparator);
+ qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator);
} else if (pCtx->param[1].i64Key > PRIMARYKEY_TIMESTAMP_COL_INDEX) {
__compar_fn_t comparator = (pCtx->param[2].i64Key == TSDB_ORDER_ASC) ? resDataAscComparFn : resDataDescComparFn;
- qsort(tvp, pResInfo->numOfRes, POINTER_BYTES, comparator);
+ qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator);
}
GET_TRUE_DATA_TYPE();
@@ -2906,33 +2905,41 @@ static FORCE_INLINE void date_col_output_function_f(SQLFunctionCtx *pCtx, int32_
}
static void col_project_function(SQLFunctionCtx *pCtx) {
+  // the number of output rows should not affect the final number of rows, so set it to be 0
+ if (pCtx->numOfParams == 2) {
+ return;
+ }
+
INC_INIT_VAL(pCtx, pCtx->size);
-
+
char *pData = GET_INPUT_CHAR(pCtx);
if (pCtx->order == TSDB_ORDER_ASC) {
- memcpy(pCtx->aOutputBuf, pData, (size_t)pCtx->size * pCtx->inputBytes);
+ memcpy(pCtx->aOutputBuf, pData, (size_t) pCtx->size * pCtx->inputBytes);
} else {
for(int32_t i = 0; i < pCtx->size; ++i) {
memcpy(pCtx->aOutputBuf + (pCtx->size - 1 - i) * pCtx->inputBytes, pData + i * pCtx->inputBytes,
pCtx->inputBytes);
}
}
-
+
pCtx->aOutputBuf += pCtx->size * pCtx->outputBytes;
}
static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
-
+ if (pCtx->numOfParams == 2) { // the number of output rows should not affect the final number of rows, so set it to be 0
+ return;
+ }
+
// only one output
if (pCtx->param[0].i64Key == 1 && pResInfo->numOfRes >= 1) {
return;
}
-
+
INC_INIT_VAL(pCtx, 1);
char *pData = GET_INPUT_CHAR_INDEX(pCtx, index);
memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes);
-
+
pCtx->aOutputBuf += pCtx->inputBytes;
}
@@ -3900,11 +3907,11 @@ static void ts_comp_function(SQLFunctionCtx *pCtx) {
// primary ts must be existed, so no need to check its existance
if (pCtx->order == TSDB_ORDER_ASC) {
- tsBufAppend(pTSbuf, 0, pCtx->tag.i64Key, input, pCtx->size * TSDB_KEYSIZE);
+ tsBufAppend(pTSbuf, 0, &pCtx->tag, input, pCtx->size * TSDB_KEYSIZE);
} else {
for (int32_t i = pCtx->size - 1; i >= 0; --i) {
char *d = GET_INPUT_CHAR_INDEX(pCtx, i);
- tsBufAppend(pTSbuf, 0, pCtx->tag.i64Key, d, TSDB_KEYSIZE);
+ tsBufAppend(pTSbuf, 0, &pCtx->tag, d, TSDB_KEYSIZE);
}
}
@@ -3923,7 +3930,7 @@ static void ts_comp_function_f(SQLFunctionCtx *pCtx, int32_t index) {
STSBuf *pTSbuf = pInfo->pTSBuf;
- tsBufAppend(pTSbuf, 0, pCtx->tag.i64Key, pData, TSDB_KEYSIZE);
+ tsBufAppend(pTSbuf, 0, &pCtx->tag, pData, TSDB_KEYSIZE);
SET_VAL(pCtx, pCtx->size, 1);
pResInfo->hasResult = DATA_SET_FLAG;
diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c
index 99c3bc4fb3..563c9fa84e 100644
--- a/src/client/src/tscLocalMerge.c
+++ b/src/client/src/tscLocalMerge.c
@@ -370,7 +370,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
- TSKEY stime = MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
+ TSKEY stime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey : pQueryInfo->window.ekey;
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
@@ -843,28 +843,6 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
tColModelAppend(pModel, pLocalReducer->discardData, pLocalReducer->prevRowOfInput, 0, 1, 1);
}
-static void reversedCopyFromInterpolationToDstBuf(SQueryInfo *pQueryInfo, SSqlRes *pRes, tFilePage **pResPages,
- SLocalReducer *pLocalReducer) {
- assert(0);
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
-
- for (int32_t i = 0; i < size; ++i) {
- TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
-
- int32_t offset = tscFieldInfoGetOffset(pQueryInfo, i);
- assert(offset == getColumnModelOffset(pLocalReducer->resColModel, i));
-
- char *src = pResPages[i]->data + (pRes->numOfRows - 1) * pField->bytes;
- char *dst = pRes->data + pRes->numOfRows * offset;
-
- for (int32_t j = 0; j < pRes->numOfRows; ++j) {
- memcpy(dst, src, (size_t)pField->bytes);
- dst += pField->bytes;
- src -= pField->bytes;
- }
- }
-}
-
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) {
assert(pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
@@ -907,7 +885,7 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo);
}
- memcpy(pRes->data, pBeforeFillData->data, pRes->numOfRows * pLocalReducer->finalRowSize);
+ memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalReducer->finalRowSize));
pRes->numOfClauseTotal += pRes->numOfRows;
pBeforeFillData->num = 0;
@@ -925,7 +903,8 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
- int64_t actualETime = MAX(pQueryInfo->window.skey, pQueryInfo->window.ekey);
+ // todo extract function
+ int64_t actualETime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput);
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
@@ -943,7 +922,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
memmove(pResPages[i]->data, pResPages[i]->data + pField->bytes * pQueryInfo->limit.offset,
- newRows * pField->bytes);
+ (size_t)(newRows * pField->bytes));
}
}
@@ -984,14 +963,10 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo);
}
- if (pQueryInfo->order.order == TSDB_ORDER_ASC) {
- for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
- TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
- int16_t offset = getColumnModelOffset(pLocalReducer->resColModel, i);
- memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, pField->bytes * pRes->numOfRows);
- }
- } else { // todo bug??
- reversedCopyFromInterpolationToDstBuf(pQueryInfo, pRes, pResPages, pLocalReducer);
+ for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
+ TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
+ int16_t offset = getColumnModelOffset(pLocalReducer->resColModel, i);
+ memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, (size_t)(pField->bytes * pRes->numOfRows));
}
pRes->numOfRowsGroup += pRes->numOfRows;
@@ -1248,8 +1223,6 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
printf("final result before interpo:\n");
// tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num);
#endif
-
-
// no interval query, no fill operation
if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
@@ -1257,7 +1230,9 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
} else {
SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
if (pFillInfo != NULL) {
- taosFillSetStartInfo(pFillInfo, (int32_t)pResBuf->num, pQueryInfo->window.ekey);
+ TSKEY ekey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
+
+ taosFillSetStartInfo(pFillInfo, (int32_t)pResBuf->num, ekey);
taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf);
}
@@ -1292,7 +1267,7 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
// for group result interpolation, do not return if not data is generated
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
- TSKEY skey = MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
+ TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t newTime =
taosGetIntervalStartTimestamp(skey, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
@@ -1345,7 +1320,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
// if fillType == TSDB_FILL_NONE, return directly
if (pQueryInfo->fillType != TSDB_FILL_NONE &&
((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) {
- int64_t etime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.ekey : pQueryInfo->window.skey;
+ int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey;
int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity);
if (rows > 0) {
@@ -1402,13 +1377,12 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
tscResetForNextRetrieve(pRes);
if (pSql->signature != pSql || pRes == NULL || pRes->pLocalReducer == NULL) { // all data has been processed
- tscDebug("%p %s call the drop local reducer", pSql, __FUNCTION__);
- tscDestroyLocalReducer(pSql);
- return 0;
+    tscError("%p local merge aborted due to an error, code:%s", pSql, tstrerror(pRes->code));
+ return pRes->code;
}
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
- SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// set the data merge in progress
int32_t prevStatus =
@@ -1503,8 +1477,8 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
* so the processing of previous group is completed.
*/
int32_t numOfRes = finalizeRes(pQueryInfo, pLocalReducer);
+ bool sameGroup = isSameGroup(pCmd, pLocalReducer, pLocalReducer->prevRowOfInput, tmpBuffer);
- bool sameGroup = isSameGroup(pCmd, pLocalReducer, pLocalReducer->prevRowOfInput, tmpBuffer);
tFilePage *pResBuf = pLocalReducer->pResultBuf;
/*
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index cb49bd80b7..47bfe0fcdc 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -40,7 +40,7 @@ enum {
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
-static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) {
+static int32_t tscToInteger(SStrToken *pToken, int64_t *value, char **endPtr) {
if (pToken->n == 0) {
return TK_ILLEGAL;
}
@@ -73,7 +73,7 @@ static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) {
return pToken->type;
}
-static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) {
+static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
if (pToken->n == 0) {
return TK_ILLEGAL;
}
@@ -89,9 +89,9 @@ static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) {
return pToken->type;
}
-int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
+int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
int32_t index = 0;
- SSQLToken sToken;
+ SStrToken sToken;
int64_t interval;
int64_t useconds = 0;
char * pTokenEnd = *next;
@@ -128,7 +128,7 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1
* time expression:
* e.g., now+12a, now-5h
*/
- SSQLToken valueToken;
+ SStrToken valueToken;
index = 0;
sToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL);
pTokenEnd += index;
@@ -163,7 +163,7 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1
return TSDB_CODE_SUCCESS;
}
-int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
+int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
int16_t timePrec) {
int64_t iv;
int32_t numType;
@@ -409,7 +409,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, char *error,
int16_t timePrec, int32_t *code, char *tmpTokenBuf) {
int32_t index = 0;
- SSQLToken sToken = {0};
+ SStrToken sToken = {0};
char * payload = pDataBlocks->pData + pDataBlocks->size;
// 1. set the parsed value from sql string
@@ -524,7 +524,7 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMeta, int maxRows,
SParsedDataColInfo *spd, char *error, int32_t *code, char *tmpTokenBuf) {
int32_t index = 0;
- SSQLToken sToken;
+ SStrToken sToken;
int16_t numOfRows = 0;
@@ -734,8 +734,8 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
int32_t index = 0;
- SSQLToken sToken = {0};
- SSQLToken tableToken = {0};
+ SStrToken sToken = {0};
+ SStrToken tableToken = {0};
int32_t code = TSDB_CODE_SUCCESS;
const int32_t TABLE_INDEX = 0;
@@ -993,7 +993,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return code;
}
-int validateTableName(char *tblName, int len, SSQLToken* psTblToken) {
+int validateTableName(char *tblName, int len, SStrToken* psTblToken) {
tstrncpy(psTblToken->z, tblName, TSDB_TABLE_FNAME_LEN);
psTblToken->n = len;
@@ -1031,11 +1031,11 @@ int tsParseInsertSql(SSqlObj *pSql) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
assert(pQueryInfo != NULL);
- STableMetaInfo *pTableMetaInfo = NULL;
- if (pQueryInfo->numOfTables == 0) {
- pTableMetaInfo = tscAddEmptyMetaInfo(pQueryInfo);
- } else {
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ STableMetaInfo *pTableMetaInfo = (pQueryInfo->numOfTables == 0)? tscAddEmptyMetaInfo(pQueryInfo):tscGetMetaInfo(pQueryInfo, 0);
+ if (pTableMetaInfo == NULL) {
+ terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ code = terrno;
+ return code;
}
if ((code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) {
@@ -1057,7 +1057,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
while (1) {
int32_t index = 0;
- SSQLToken sToken = tStrGetToken(str, &index, false, 0, NULL);
+ SStrToken sToken = tStrGetToken(str, &index, false, 0, NULL);
// no data in the sql string anymore.
if (sToken.n == 0) {
@@ -1083,7 +1083,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
pCmd->curSql = sToken.z;
char buf[TSDB_TABLE_FNAME_LEN];
- SSQLToken sTblToken;
+ SStrToken sTblToken;
sTblToken.z = buf;
// Check if the table name available or not
if (validateTableName(sToken.z, sToken.n, &sTblToken) != TSDB_CODE_SUCCESS) {
@@ -1285,15 +1285,14 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
int32_t index = 0;
SSqlCmd *pCmd = &pSql->cmd;
- SSQLToken sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
+ SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT);
pCmd->count = 0;
pCmd->command = TSDB_SQL_INSERT;
pSql->res.numOfRows = 0;
- SQueryInfo *pQueryInfo = NULL;
- tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo);
+ SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT | pCmd->insertType);
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index b996dd958a..620e8ea57a 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -155,7 +155,7 @@ static int normalStmtPrepare(STscStmt* stmt) {
uint32_t i = 0, start = 0;
while (sql[i] != 0) {
- SSQLToken token = {0};
+ SStrToken token = {0};
token.n = tSQLGetToken(sql + i, &token.type);
if (token.type == TK_QUESTION) {
diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c
index f48e7b7691..6ff97e9d00 100644
--- a/src/client/src/tscProfile.c
+++ b/src/client/src/tscProfile.c
@@ -285,9 +285,9 @@ void tscKillConnection(STscObj *pObj) {
SSqlObj *pSql = pObj->sqlList;
while (pSql) {
- //taosStopRpcConn(pSql->thandle);
pSql = pSql->next;
}
+
SSqlStream *pStream = pObj->streamList;
while (pStream) {
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index d108349085..5ce4c7125f 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -33,8 +33,8 @@
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
-// -1 is tbname column index, so here use the -2 as the initial value
-#define COLUMN_INDEX_INITIAL_VAL (-2)
+// -1 is the tbname column index, so use -3 as the initial value here
+#define COLUMN_INDEX_INITIAL_VAL (-3)
#define COLUMN_INDEX_INITIALIZER \
{ COLUMN_INDEX_INITIAL_VAL, COLUMN_INDEX_INITIAL_VAL }
#define COLUMN_INDEX_VALIDE(index) (((index).tableIndex >= 0) && ((index).columnIndex >= TSDB_TBNAME_COLUMN_INDEX))
@@ -51,12 +51,12 @@ static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
static char* getAccountId(SSqlObj* pSql);
static bool has(tFieldList* pFieldList, int32_t startIdx, const char* name);
-static void getCurrentDBName(SSqlObj* pSql, SSQLToken* pDBToken);
-static bool hasSpecifyDB(SSQLToken* pTableName);
+static void getCurrentDBName(SSqlObj* pSql, SStrToken* pDBToken);
+static bool hasSpecifyDB(SStrToken* pTableName);
static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd);
static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSqlCmd* pCmd);
-static int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* len);
+static int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* len);
static void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nameLength);
static void getRevisedName(char* resultFieldName, int32_t functionId, int32_t maxLen, char* columnName);
@@ -65,11 +65,11 @@ static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int3
static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes,
int8_t type, char* fieldName, SSqlExpr* pSqlExpr);
static int32_t changeFunctionID(int32_t optr, int16_t* functionId);
-static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable);
+static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery);
static bool validateIpAddress(const char* ip, size_t size);
static bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
-static bool functionCompatibleCheck(SQueryInfo* pQueryInfo);
+static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery);
static void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo);
static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd);
@@ -100,11 +100,11 @@ static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
static int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t index, SQuerySQL* pQuerySql, SSqlObj* pSql);
static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql);
-static int32_t getColumnIndexByName(SSqlCmd* pCmd, const SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
-static int32_t getTableIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
+static int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
+static int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t optrToString(tSQLExpr* pExpr, char** exprString);
-static int32_t getTableIndexImpl(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
+static int32_t getTableIndexImpl(SStrToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCMCreateDbMsg* pCreate);
@@ -151,7 +151,7 @@ static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
return TSDB_CODE_SUCCESS;
}
-static int32_t handlePassword(SSqlCmd* pCmd, SSQLToken* pPwd) {
+static int32_t handlePassword(SSqlCmd* pCmd, SStrToken* pPwd) {
const char* msg1 = "password can not be empty";
const char* msg2 = "name or password too long";
const char* msg3 = "password needs single quote marks enclosed";
@@ -179,20 +179,24 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return TSDB_CODE_TSC_APP_ERROR;
}
- SSqlCmd* pCmd = &(pSql->cmd);
- SQueryInfo* pQueryInfo = NULL;
+ SSqlCmd* pCmd = &pSql->cmd;
+ SSqlRes* pRes = &pSql->res;
+ int32_t code = TSDB_CODE_SUCCESS;
if (!pInfo->valid) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), pInfo->pzErrMsg);
}
- int32_t code = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo);
+ SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
+ if (pQueryInfo == NULL) {
+ pRes->code = terrno;
+ return pRes->code;
+ }
- STableMetaInfo* pTableMetaInfo = NULL;
- if (pQueryInfo->numOfTables == 0) {
- pTableMetaInfo = tscAddEmptyMetaInfo(pQueryInfo);
- } else {
- pTableMetaInfo = pQueryInfo->pTableMetaInfo[0];
+ STableMetaInfo* pTableMetaInfo = (pQueryInfo->numOfTables == 0)? tscAddEmptyMetaInfo(pQueryInfo) : pQueryInfo->pTableMetaInfo[0];
+ if (pTableMetaInfo == NULL) {
+ pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ return pRes->code;
}
pCmd->command = pInfo->type;
@@ -206,7 +210,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg2 = "invalid name";
const char* msg3 = "param name too long";
- SSQLToken* pzName = &pInfo->pDCLInfo->a[0];
+ SStrToken* pzName = &pInfo->pDCLInfo->a[0];
if ((pInfo->type != TSDB_SQL_DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -241,7 +245,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_USE_DB: {
const char* msg = "invalid db name";
- SSQLToken* pToken = &pInfo->pDCLInfo->a[0];
+ SStrToken* pToken = &pInfo->pDCLInfo->a[0];
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
@@ -296,7 +300,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
- SSQLToken* pIpAddr = &pInfo->pDCLInfo->a[0];
+ SStrToken* pIpAddr = &pInfo->pDCLInfo->a[0];
pIpAddr->n = strdequote(pIpAddr->z);
break;
}
@@ -307,8 +311,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg2 = "invalid user/account name";
const char* msg3 = "name too long";
- SSQLToken* pName = &pInfo->pDCLInfo->user.user;
- SSQLToken* pPwd = &pInfo->pDCLInfo->user.passwd;
+ SStrToken* pName = &pInfo->pDCLInfo->user.user;
+ SStrToken* pPwd = &pInfo->pDCLInfo->user.passwd;
if (handlePassword(pCmd, pPwd) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
@@ -337,7 +341,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_DESCRIBE_TABLE: {
- SSQLToken* pToken = &pInfo->pDCLInfo->a[0];
+ SStrToken* pToken = &pInfo->pDCLInfo->a[0];
const char* msg2 = "table name is too long";
const char* msg1 = "invalid table name";
@@ -400,8 +404,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// tDCLSQL* pDCL = pInfo->pDCLInfo;
SUserInfo* pUser = &pInfo->pDCLInfo->user;
- SSQLToken* pName = &pUser->user;
- SSQLToken* pPwd = &pUser->passwd;
+ SStrToken* pName = &pUser->user;
+ SStrToken* pPwd = &pUser->passwd;
if (pName->n >= TSDB_USER_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
@@ -423,7 +427,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} else if (pUser->type == TSDB_ALTER_USER_PRIVILEGES) {
assert(pPwd->type == TSDB_DATA_TYPE_NULL);
- SSQLToken* pPrivilege = &pUser->privilege;
+ SStrToken* pPrivilege = &pUser->privilege;
if (strncasecmp(pPrivilege->z, "super", 5) == 0 && pPrivilege->n == 5) {
pCmd->count = 1;
@@ -487,9 +491,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg1 = "columns in select clause not identical";
for (int32_t i = pCmd->numOfClause; i < pInfo->subclauseInfo.numOfClause; ++i) {
- SQueryInfo* pqi = NULL;
- if ((code = tscGetQueryInfoDetailSafely(pCmd, i, &pqi)) != TSDB_CODE_SUCCESS) {
- return code;
+ SQueryInfo* pqi = tscGetQueryInfoDetailSafely(pCmd, i);
+ if (pqi == NULL) {
+ pRes->code = terrno;
+ return pRes->code;
}
}
@@ -581,7 +586,7 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
}
// interval is not null
- SSQLToken* t = &pQuerySql->interval;
+ SStrToken* t = &pQuerySql->interval;
if (getTimestampInUsFromStr(t->z, t->n, &pQueryInfo->intervalTime) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -667,7 +672,7 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
- SSQLToken* pSliding = &pQuerySql->sliding;
+ SStrToken* pSliding = &pQuerySql->sliding;
if (pSliding->n != 0) {
getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime);
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
@@ -692,7 +697,7 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
return TSDB_CODE_SUCCESS;
}
-int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql) {
+int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) {
const char* msg = "name too long";
SSqlCmd* pCmd = &pSql->cmd;
@@ -709,7 +714,7 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SSQLToken* pzTableNa
// db has been specified in sql string so we ignore current db path
code = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), NULL, pzTableName, NULL);
} else { // get current DB name first, then set it into path
- SSQLToken t = {0};
+ SStrToken t = {0};
getCurrentDBName(pSql, &t);
code = setObjFullName(pTableMetaInfo->name, NULL, &t, pzTableName, NULL);
@@ -1027,13 +1032,13 @@ static bool has(tFieldList* pFieldList, int32_t startIdx, const char* name) {
static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; }
-static void getCurrentDBName(SSqlObj* pSql, SSQLToken* pDBToken) {
+static void getCurrentDBName(SSqlObj* pSql, SStrToken* pDBToken) {
pDBToken->z = pSql->pTscObj->db;
pDBToken->n = (uint32_t)strlen(pSql->pTscObj->db);
}
/* length limitation, strstr cannot be applied */
-static bool hasSpecifyDB(SSQLToken* pTableName) {
+static bool hasSpecifyDB(SStrToken* pTableName) {
for (uint32_t i = 0; i < pTableName->n; ++i) {
if (pTableName->z[i] == TS_PATH_DELIMITER[0]) {
return true;
@@ -1043,7 +1048,7 @@ static bool hasSpecifyDB(SSQLToken* pTableName) {
return false;
}
-int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* xlen) {
+int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* xlen) {
int32_t totalLen = 0;
if (account != NULL) {
@@ -1094,18 +1099,6 @@ int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQL
return (totalLen < TSDB_TABLE_FNAME_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL;
}
-static void extractColumnNameFromString(tSQLExprItem* pItem) {
- if (pItem->pNode->nSQLOptr == TK_STRING) {
- pItem->pNode->val.nLen = strdequote(pItem->pNode->val.pz);
- pItem->pNode->nSQLOptr = TK_ID;
-
- SSQLToken* pIdToken = &pItem->pNode->colInfo;
- pIdToken->type = TK_ID;
- pIdToken->z = pItem->pNode->val.pz;
- pIdToken->n = pItem->pNode->val.nLen;
- }
-}
-
static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t exprIndex, tSQLExprItem* pItem) {
const char* msg1 = "invalid column name, or illegal column type";
const char* msg2 = "invalid arithmetic expression in select clause";
@@ -1123,42 +1116,31 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
}
int32_t tableIndex = columnList.ids[0].tableIndex;
-
- // todo potential data overflow
- char* arithmeticExprStr = malloc(1024*1024);
- char* p = arithmeticExprStr;
-
if (arithmeticType == NORMAL_ARITHMETIC) {
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
// all columns in arithmetic expression must belong to the same table
for (int32_t f = 1; f < columnList.num; ++f) {
if (columnList.ids[f].tableIndex != tableIndex) {
- taosTFree(arithmeticExprStr);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
- if (arithmeticExprToString(pItem->pNode, &p) != TSDB_CODE_SUCCESS) {
- taosTFree(arithmeticExprStr);
- return TSDB_CODE_TSC_INVALID_SQL;
- }
-
// expr string is set as the parameter of function
SColumnIndex index = {.tableIndex = tableIndex};
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double),
sizeof(double), false);
- char* name = (pItem->aliasName != NULL)? pItem->aliasName:arithmeticExprStr;
- tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName));
+ char* name = (pItem->aliasName != NULL)? pItem->aliasName:pItem->pNode->token.z;
+ size_t len = MIN(sizeof(pExpr->aliasName), pItem->pNode->token.n + 1);
+ tstrncpy(pExpr->aliasName, name, len);
tExprNode* pNode = NULL;
SArray* colList = taosArrayInit(10, sizeof(SColIndex));
int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo->exprList, pQueryInfo, colList);
if (ret != TSDB_CODE_SUCCESS) {
- taosTFree(arithmeticExprStr);
taosArrayDestroy(colList);
tExprTreeDestroy(&pNode, NULL);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
@@ -1167,8 +1149,8 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
size_t numOfNode = taosArrayGetSize(colList);
for(int32_t k = 0; k < numOfNode; ++k) {
SColIndex* pIndex = taosArrayGet(colList, k);
- if (pIndex->flag == 1) {
- taosTFree(arithmeticExprStr);
+ if (TSDB_COL_IS_TAG(pIndex->flag)) {
+ tExprTreeDestroy(&pNode, NULL);
taosArrayDestroy(colList);
tExprTreeDestroy(&pNode, NULL);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
@@ -1185,7 +1167,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
// TODO: other error handling
} END_TRY
- size_t len = tbufTell(&bw);
+ len = tbufTell(&bw);
char* c = tbufGetData(&bw, true);
// set the serialized binary string as the parameter of arithmetic expression
@@ -1196,16 +1178,18 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
taosArrayDestroy(colList);
tExprTreeDestroy(&pNode, NULL);
} else {
- if (arithmeticExprToString(pItem->pNode, &p) != TSDB_CODE_SUCCESS) {
- taosTFree(arithmeticExprStr);
- return TSDB_CODE_TSC_INVALID_SQL;
- }
-
columnList.num = 0;
columnList.ids[0] = (SColumnIndex) {0, 0};
- char* name = (pItem->aliasName != NULL)? pItem->aliasName:arithmeticExprStr;
- insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, name, NULL);
+ char aliasName[TSDB_COL_NAME_LEN] = {0};
+ if (pItem->aliasName != NULL) {
+ tstrncpy(aliasName, pItem->aliasName, TSDB_COL_NAME_LEN);
+ } else {
+ int32_t nameLen = MIN(TSDB_COL_NAME_LEN, pItem->pNode->token.n + 1);
+ tstrncpy(aliasName, pItem->pNode->token.z, nameLen);
+ }
+
+ insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, aliasName, NULL);
int32_t slot = tscNumOfFields(pQueryInfo) - 1;
SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, slot);
@@ -1221,7 +1205,6 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL);
if (ret != TSDB_CODE_SUCCESS) {
tExprTreeDestroy(&pArithExprInfo->pExpr, NULL);
- taosTFree(arithmeticExprStr);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause");
}
@@ -1229,11 +1212,38 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
}
}
- taosTFree(arithmeticExprStr);
return TSDB_CODE_SUCCESS;
}
-int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable) {
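+// helper: register the primary timestamp column of the given table as a source column of the query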
+static void tscInsertPrimaryTSSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+ SColumnIndex tsCol = {.tableIndex = pIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
+ tscColumnListInsert(pQueryInfo->colList, &tsCol);
+}
+
+static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSQLExprItem* pItem) {
+ SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos, pIndex->columnIndex, pIndex->tableIndex);
+
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex);
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex);
+
+ char* colName = (pItem->aliasName == NULL) ? pSchema->name : pItem->aliasName;
+ tstrncpy(pExpr->aliasName, colName, sizeof(pExpr->aliasName));
+
+ SColumnList ids = {0};
+ ids.num = 1;
+ ids.ids[0] = *pIndex;
+
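+ // tbname, user-defined columns and tag columns do not reference a normal table column, so keep the id list empty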
+ if (pIndex->columnIndex == TSDB_TBNAME_COLUMN_INDEX || pIndex->columnIndex == TSDB_UD_COLUMN_INDEX ||
+ pIndex->columnIndex >= tscGetNumOfColumns(pTableMeta)) {
+ ids.num = 0;
+ }
+
+ insertResultField(pQueryInfo, startPos, &ids, pExpr->resBytes, (int8_t)pExpr->resType, pExpr->aliasName, pExpr);
+}
+
+int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery) {
assert(pSelection != NULL && pCmd != NULL);
const char* msg2 = "functions can not be mixed up";
@@ -1251,16 +1261,14 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
tSQLExprItem* pItem = &pSelection->a[i];
// project on all fields
- if (pItem->pNode->nSQLOptr == TK_ALL || pItem->pNode->nSQLOptr == TK_ID || pItem->pNode->nSQLOptr == TK_STRING) {
+ int32_t optr = pItem->pNode->nSQLOptr;
+
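+ // constants (string/integer/float) in the select list are now handled together with plain column projections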
+ if (optr == TK_ALL || optr == TK_ID || optr == TK_STRING || optr == TK_INTEGER || optr == TK_FLOAT) {
// it is actually a function, but the function name is invalid
if (pItem->pNode->nSQLOptr == TK_ID && (pItem->pNode->colInfo.z == NULL && pItem->pNode->colInfo.n == 0)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
- // if the name of column is quoted, remove it and set the right information for later process
- extractColumnNameFromString(pItem);
- TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY);
-
// select table_name1.field_name1, table_name2.field_name2 from table_name1, table_name2
if (addProjectionExprAndResultField(pCmd, pQueryInfo, pItem) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
@@ -1276,12 +1284,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
if (code != TSDB_CODE_SUCCESS) {
return code;
}
-
} else {
- /*
- * not support such expression
- * e.g., select 12+5 from table_name
- */
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -1290,7 +1293,26 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
}
}
- if (!functionCompatibleCheck(pQueryInfo)) {
+ // only user-defined columns appear in the final result fields; add the timestamp column as the source column.
+ size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
+ if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo)) {
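+ // e.g., "select 1 from tb": a hidden timestamp column is added so at least one real column is read from the table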
+ SColumnIndex index = {0};
+
+ // the constant column value is always attached to the first table.
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, clauseIndex, 0);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, PRIMARYKEY_TIMESTAMP_COL_INDEX);
+
+ // add the timestamp column into the output columns
+ int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
+ tscAddSpecialColumnForSelect(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL);
+
+ SFieldSupInfo* pSupInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, numOfCols);
+ pSupInfo->visible = false;
+
+ pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
+ }
+
+ if (!functionCompatibleCheck(pQueryInfo, joinQuery)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -1353,32 +1375,10 @@ SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t c
pSchema->bytes, functionId == TSDB_FUNC_TAGPRJ);
}
-static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSQLExprItem* pItem) {
- SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos, pIndex->columnIndex, pIndex->tableIndex);
-
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex);
- STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
-
- SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex);
-
- char* colName = (pItem->aliasName == NULL) ? pSchema->name : pItem->aliasName;
- tstrncpy(pExpr->aliasName, colName, sizeof(pExpr->aliasName));
-
- SColumnList ids = {0};
- ids.num = 1;
- ids.ids[0] = *pIndex;
-
- if (pIndex->columnIndex >= tscGetNumOfColumns(pTableMeta) || pIndex->columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- ids.num = 0;
- }
-
- insertResultField(pQueryInfo, startPos, &ids, pExpr->resBytes, (int8_t)pExpr->resType, pExpr->aliasName, pExpr);
-}
-
-void tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
+SSqlExpr* tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag) {
SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type,
- pColSchema->bytes, pColSchema->bytes, flag);
+ pColSchema->bytes, pColSchema->bytes, TSDB_COL_IS_TAG(flag));
tstrncpy(pExpr->aliasName, pColSchema->name, sizeof(pExpr->aliasName));
SColumnList ids = getColumnList(1, pIndex->tableIndex, pIndex->columnIndex);
@@ -1394,6 +1394,8 @@ void tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex
if (TSDB_COL_IS_TAG(flag)) {
tscColumnListInsert(pTableMetaInfo->tagColList, pIndex);
}
+
+ return pExpr;
}
static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColumnIndex* pIndex, int32_t startPos) {
@@ -1426,18 +1428,16 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum
return numOfTotalColumns;
}
-static void tscInsertPrimaryTSSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
- SColumnIndex tsCol = {.tableIndex = pIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
- tscColumnListInsert(pQueryInfo->colList, &tsCol);
-}
-
int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExprItem* pItem) {
const char* msg0 = "invalid column name";
const char* msg1 = "tag for normal table query is not allowed";
-
- int32_t startPos = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
- if (pItem->pNode->nSQLOptr == TK_ALL) { // project on all fields
+ int32_t startPos = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
+ int32_t optr = pItem->pNode->nSQLOptr;
+
+ if (optr == TK_ALL) { // project on all fields
+ TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY);
+
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getTableIndexByName(&pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
@@ -1453,7 +1453,24 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
} else {
doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos);
}
- } else if (pItem->pNode->nSQLOptr == TK_ID) { // simple column projection query
+
+ // add the primary timestamp column even though it is not required by the user
+ tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
+ } else if (optr == TK_STRING || optr == TK_INTEGER || optr == TK_FLOAT) { // simple column projection query
+ SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+
+ // user-specified constant value as a new result column
+ index.columnIndex = (pQueryInfo->udColumnId--);
+ index.tableIndex = 0;
+
+ SSchema colSchema = tGetUserSpecifiedColumnSchema(&pItem->pNode->val, &pItem->pNode->token, pItem->aliasName);
+ SSqlExpr* pExpr =
+ tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_UDC);
+
+ // NOTE: the first parameter is reserved for the tag column id during the join query process.
+ pExpr->numOfParams = 2;
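+ // param[1] carries the user-specified constant value to be returned for this output column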
+ tVariantAssign(&pExpr->param[1], &pItem->pNode->val);
+ } else if (optr == TK_ID) {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, &pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
@@ -1472,8 +1489,10 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
}
addProjectQueryCol(pQueryInfo, startPos, &index, pItem);
+ pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
}
+ // add the primary timestamp column even though it is not required by the user
tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
} else {
return TSDB_CODE_TSC_INVALID_SQL;
@@ -1561,7 +1580,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (pItem->pNode->pParam != NULL) {
- SSQLToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo;
+ SStrToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo;
if (pToken->z == NULL || pToken->n == 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -1570,7 +1589,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
if (pParamElem->pNode->nSQLOptr == TK_ALL) {
// select table.*
// check if the table name is valid or not
- SSQLToken tmpToken = pParamElem->pNode->colInfo;
+ SStrToken tmpToken = pParamElem->pNode->colInfo;
if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
@@ -1768,7 +1787,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
if (pParamElem->pNode->nSQLOptr == TK_ALL) {
// select table.*
- SSQLToken tmpToken = pParamElem->pNode->colInfo;
+ SStrToken tmpToken = pParamElem->pNode->colInfo;
if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
@@ -1834,10 +1853,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
for (int32_t i = 0; i < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++i) {
SColumnIndex index = {.tableIndex = j, .columnIndex = i};
- if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i + j, &index) !=
- 0) {
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
+
+ colIndex++;
}
numOfFields += tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
@@ -2045,7 +2065,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// todo refactor
static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex) {
- assert(num == 1 && columnIndex >= -1 && tableIndex >= 0);
+ assert(num == 1 && tableIndex >= 0);
SColumnList columnList = {0};
columnList.num = num;
@@ -2070,16 +2090,16 @@ void getRevisedName(char* resultFieldName, int32_t functionId, int32_t maxLen, c
snprintf(resultFieldName, maxLen, "%s(%s)", aAggs[functionId].aName, columnName);
}
-static bool isTablenameToken(SSQLToken* token) {
- SSQLToken tmpToken = *token;
- SSQLToken tableToken = {0};
+static bool isTablenameToken(SStrToken* token) {
+ SStrToken tmpToken = *token;
+ SStrToken tableToken = {0};
extractTableNameFromToken(&tmpToken, &tableToken);
return (strncasecmp(TSQL_TBNAME_L, tmpToken.z, tmpToken.n) == 0 && tmpToken.n == strlen(TSQL_TBNAME_L));
}
-static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SSQLToken* pToken) {
+static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SStrToken* pToken) {
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, index)->pTableMeta;
int32_t numOfCols = tscGetNumOfColumns(pTableMeta) + tscGetNumOfTags(pTableMeta);
@@ -2101,7 +2121,7 @@ static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SSQLToken
return columnIndex;
}
-int32_t doGetColumnIndexByName(SSqlCmd* pCmd, SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+int32_t doGetColumnIndexByName(SSqlCmd* pCmd, SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
const char* msg0 = "ambiguous column name";
const char* msg1 = "invalid column name";
@@ -2143,7 +2163,7 @@ int32_t doGetColumnIndexByName(SSqlCmd* pCmd, SSQLToken* pToken, SQueryInfo* pQu
}
}
-int32_t getTableIndexImpl(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+int32_t getTableIndexImpl(SStrToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
if (pTableToken->n == 0) { // only one table and no table name prefix in column name
if (pQueryInfo->numOfTables == 1) {
pIndex->tableIndex = 0;
@@ -2169,8 +2189,8 @@ int32_t getTableIndexImpl(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColum
return TSDB_CODE_SUCCESS;
}
-int32_t getTableIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
- SSQLToken tableToken = {0};
+int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+ SStrToken tableToken = {0};
extractTableNameFromToken(pToken, &tableToken);
if (getTableIndexImpl(&tableToken, pQueryInfo, pIndex) != TSDB_CODE_SUCCESS) {
@@ -2180,12 +2200,12 @@ int32_t getTableIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIn
return TSDB_CODE_SUCCESS;
}
-int32_t getColumnIndexByName(SSqlCmd* pCmd, const SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
if (pQueryInfo->pTableMetaInfo == NULL || pQueryInfo->numOfTables == 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- SSQLToken tmpToken = *pToken;
+ SStrToken tmpToken = *pToken;
if (getTableIndexByName(&tmpToken, pQueryInfo, pIndex) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
@@ -2297,7 +2317,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
int16_t showType = pShowInfo->showType;
if (showType == TSDB_MGMT_TABLE_TABLE || showType == TSDB_MGMT_TABLE_METRIC || showType == TSDB_MGMT_TABLE_VGROUP) {
// db prefix in tagCond, show table conds in payload
- SSQLToken* pDbPrefixToken = &pShowInfo->prefix;
+ SStrToken* pDbPrefixToken = &pShowInfo->prefix;
if (pDbPrefixToken->type != 0) {
if (pDbPrefixToken->n >= TSDB_DB_NAME_LEN) { // db name is too long
@@ -2319,7 +2339,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
// show table/stable like 'xxxx', set the like pattern for show tables
- SSQLToken* pPattern = &pShowInfo->pattern;
+ SStrToken* pPattern = &pShowInfo->pattern;
if (pPattern->type != 0) {
pPattern->n = strdequote(pPattern->z);
@@ -2337,7 +2357,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
// show vnodes may be ip addr of dnode in payload
- SSQLToken* pDnodeIp = &pShowInfo->prefix;
+ SStrToken* pDnodeIp = &pShowInfo->prefix;
if (pDnodeIp->n >= TSDB_IPv4ADDR_LEN) { // ip addr is too long
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -2358,7 +2378,7 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType) {
SSqlCmd* pCmd = &pSql->cmd;
pCmd->command = pInfo->type;
- SSQLToken* idStr = &(pInfo->pDCLInfo->ip);
+ SStrToken* idStr = &(pInfo->pDCLInfo->ip);
if (idStr->n > TSDB_KILL_MSG_LEN) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -2510,7 +2530,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
return false;
}
-static bool functionCompatibleCheck(SQueryInfo* pQueryInfo) {
+static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) {
int32_t startIdx = 0;
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, startIdx);
@@ -2542,6 +2562,10 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo) {
if (functionCompatList[functionId] != factor) {
return false;
}
+
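+ // last_row is not supported in join queries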
+ if (functionId == TSDB_FUNC_LAST_ROW && joinQuery) {
+ return false;
+ }
}
return true;
@@ -2579,7 +2603,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
for (int32_t i = 0; i < pList->nExpr; ++i) {
tVariant* pVar = &pList->a[i].pVar;
- SSQLToken token = {pVar->nLen, pVar->nType, pVar->pz};
+ SStrToken token = {pVar->nLen, pVar->nType, pVar->pz};
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, &token, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
@@ -2659,9 +2683,12 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) {
}
int32_t size = pColumn->numOfFilters + 1;
- char* tmp = (char*)realloc((void*)(pColumn->filterInfo), sizeof(SColumnFilterInfo) * (size));
+
+ char* tmp = (char*) realloc((void*)(pColumn->filterInfo), sizeof(SColumnFilterInfo) * (size));
if (tmp != NULL) {
pColumn->filterInfo = (SColumnFilterInfo*)tmp;
+ } else {
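+ // realloc failed; let the caller handle the out-of-memory case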
+ return NULL;
}
pColumn->numOfFilters++;
@@ -2945,9 +2972,16 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC
} else { // update the existed column filter information, find the filter info here
pColFilter = &pColumn->filterInfo[0];
}
+
+ if (pColFilter == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
} else if (sqlOptr == TK_OR) {
// TODO fixme: failed to invalid the filter expression: "col1 = 1 OR col2 = 2"
pColFilter = addColumnFilterInfo(pColumn);
+ if (pColFilter == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
} else { // error;
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -3072,7 +3106,6 @@ static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQ
static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
const char* msg1 = "invalid join query condition";
- const char* msg2 = "join on binary/nchar not supported";
const char* msg3 = "type of join columns must be identical";
const char* msg4 = "invalid column name in join condition";
@@ -3116,10 +3149,6 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr*
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- if (pTagSchema1->type == TSDB_DATA_TYPE_BINARY || pTagSchema1->type == TSDB_DATA_TYPE_NCHAR) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
- }
-
pTagCond->joinInfo.hasJoin = true;
return TSDB_CODE_SUCCESS;
}
@@ -3156,7 +3185,7 @@ int32_t doArithmeticExprToString(tSQLExpr* pExpr, char** exprString) {
return TSDB_CODE_SUCCESS;
}
-static int32_t arithmeticExprToString(tSQLExpr* pExpr, char** str) {
+static UNUSED_FUNC int32_t arithmeticExprToString(tSQLExpr* pExpr, char** str) {
char* start = *str;
int32_t code = doArithmeticExprToString(pExpr, str);
@@ -3610,7 +3639,7 @@ static void doExtractExprForSTable(SSqlCmd* pCmd, tSQLExpr** pExpr, SQueryInfo*
return;
}
- SSQLToken t = {0};
+ SStrToken t = {0};
extractTableNameFromToken(&pLeft->colInfo, &t);
*pOut = *pExpr;
@@ -3690,7 +3719,7 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
num = j;
char* name = extractDBName(pTableMetaInfo->name, db);
- SSQLToken dbToken = { .type = TK_STRING, .z = name, .n = (uint32_t)strlen(name) };
+ SStrToken dbToken = { .type = TK_STRING, .z = name, .n = (uint32_t)strlen(name) };
for (int32_t i = 0; i < num; ++i) {
if (i >= 1) {
@@ -3699,7 +3728,7 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
char idBuf[TSDB_TABLE_FNAME_LEN] = {0};
int32_t xlen = (int32_t)strlen(segments[i]);
- SSQLToken t = {.z = segments[i], .n = xlen, .type = TK_STRING};
+ SStrToken t = {.z = segments[i], .n = xlen, .type = TK_STRING};
int32_t ret = setObjFullName(idBuf, account, &dbToken, &t, &xlen);
if (ret != TSDB_CODE_SUCCESS) {
@@ -4020,7 +4049,7 @@ int32_t getTimeRange(STimeWindow* win, tSQLExpr* pRight, int32_t optr, int16_t t
return TSDB_CODE_TSC_INVALID_SQL;
}
} else {
- SSQLToken token = {.z = pRight->val.pz, .n = pRight->val.nLen, .type = TK_ID};
+ SStrToken token = {.z = pRight->val.pz, .n = pRight->val.nLen, .type = TK_ID};
int32_t len = tSQLGetToken(pRight->val.pz, &token.type);
if ((token.type != TK_INTEGER && token.type != TK_FLOAT) || len != pRight->val.nLen) {
@@ -4266,7 +4295,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
return TSDB_CODE_SUCCESS;
}
- SSQLToken columnName = {pVar->nLen, pVar->nType, pVar->pz};
+ SStrToken columnName = {pVar->nLen, pVar->nType, pVar->pz};
SColumnIndex index = {0};
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query
@@ -4335,7 +4364,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
}
tVariant* pVar2 = &pSortorder->a[1].pVar;
- SSQLToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz};
+ SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz};
if (getColumnIndexByName(pCmd, &cname, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -4464,7 +4493,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- SSQLToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen, .type = TK_STRING};
+ SStrToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen, .type = TK_STRING};
if (getColumnIndexByName(pCmd, &name, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
@@ -4502,12 +4531,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SColumnIndex srcIndex = COLUMN_INDEX_INITIALIZER;
SColumnIndex destIndex = COLUMN_INDEX_INITIALIZER;
- SSQLToken srcToken = {.z = pSrcItem->pVar.pz, .n = pSrcItem->pVar.nLen, .type = TK_STRING};
+ SStrToken srcToken = {.z = pSrcItem->pVar.pz, .n = pSrcItem->pVar.nLen, .type = TK_STRING};
if (getColumnIndexByName(pCmd, &srcToken, pQueryInfo, &srcIndex) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg17);
}
- SSQLToken destToken = {.z = pDstItem->pVar.pz, .n = pDstItem->pVar.nLen, .type = TK_STRING};
+ SStrToken destToken = {.z = pDstItem->pVar.pz, .n = pDstItem->pVar.nLen, .type = TK_STRING};
if (getColumnIndexByName(pCmd, &destToken, pQueryInfo, &destIndex) == TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg19);
}
@@ -4531,7 +4560,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
int16_t numOfTags = tscGetNumOfTags(pTableMeta);
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
- SSQLToken name = {.type = TK_STRING, .z = pTagName->pz, .n = pTagName->nLen};
+ SStrToken name = {.type = TK_STRING, .z = pTagName->pz, .n = pTagName->nLen};
if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -4624,7 +4653,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tVariantListItem* pItem = &pAlterSQL->varList->a[0];
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
- SSQLToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen};
+ SStrToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen};
if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg17);
}
@@ -4687,7 +4716,7 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu
}
}
- if (pExpr->functionId == TSDB_FUNC_PRJ || pExpr->functionId == TSDB_FUNC_DIFF ||
+ if ((pExpr->functionId == TSDB_FUNC_PRJ && pExpr->numOfParams == 0) || pExpr->functionId == TSDB_FUNC_DIFF ||
pExpr->functionId == TSDB_FUNC_ARITHM) {
isProjectionFunction = true;
}
@@ -4744,7 +4773,7 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
{"dDebugFlag", 10}, {"mqttDebugFlag", 13}, {"wDebugFlag", 10}, {"tmrDebugFlag", 12},
};
- SSQLToken* pOptionToken = &pOptions->a[1];
+ SStrToken* pOptionToken = &pOptions->a[1];
if (pOptions->nTokens == 2) {
// reset log and reset query cache does not need value
@@ -4756,7 +4785,7 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
}
} else if ((strncasecmp(cfgOptions[tokenBalance].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenBalance].len == pOptionToken->n)) {
- SSQLToken* pValToken = &pOptions->a[2];
+ SStrToken* pValToken = &pOptions->a[2];
int32_t vnodeId = 0;
int32_t dnodeId = 0;
strdequote(pValToken->z);
@@ -4767,14 +4796,14 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_SUCCESS;
} else if ((strncasecmp(cfgOptions[tokenMonitor].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenMonitor].len == pOptionToken->n)) {
- SSQLToken* pValToken = &pOptions->a[2];
+ SStrToken* pValToken = &pOptions->a[2];
int32_t val = strtol(pValToken->z, NULL, 10);
if (val != 0 && val != 1) {
return TSDB_CODE_TSC_INVALID_SQL; // options value is invalid
}
return TSDB_CODE_SUCCESS;
} else {
- SSQLToken* pValToken = &pOptions->a[2];
+ SStrToken* pValToken = &pOptions->a[2];
int32_t val = strtol(pValToken->z, NULL, 10);
if (val < 0 || val > 256) {
@@ -4803,7 +4832,7 @@ int32_t validateLocalConfig(tDCLSQL* pOptions) {
SDNodeDynConfOption LOCAL_DYNAMIC_CFG_OPTIONS[6] = {{"resetLog", 8}, {"rpcDebugFlag", 12}, {"tmrDebugFlag", 12},
{"cDebugFlag", 10}, {"uDebugFlag", 10}, {"debugFlag", 9}};
- SSQLToken* pOptionToken = &pOptions->a[0];
+ SStrToken* pOptionToken = &pOptions->a[0];
if (pOptions->nTokens == 1) {
// reset log does not need value
@@ -4814,7 +4843,7 @@ int32_t validateLocalConfig(tDCLSQL* pOptions) {
}
}
} else {
- SSQLToken* pValToken = &pOptions->a[1];
+ SStrToken* pValToken = &pOptions->a[1];
int32_t val = strtol(pValToken->z, NULL, 10);
if (val < 131 || val > 199) {
@@ -4839,7 +4868,7 @@ int32_t validateColumnName(char* name) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- SSQLToken token = {.z = name};
+ SStrToken token = {.z = name};
token.n = tSQLGetToken(name, &token.type);
if (token.type != TK_STRING && token.type != TK_ID) {
@@ -5018,7 +5047,7 @@ static int32_t setTimePrecision(SSqlCmd* pCmd, SCMCreateDbMsg* pMsg, SCreateDBIn
pMsg->precision = TSDB_TIME_PRECISION_MILLI; // millisecond by default
- SSQLToken* pToken = &pCreateDbInfo->precision;
+ SStrToken* pToken = &pCreateDbInfo->precision;
if (pToken->n > 0) {
pToken->n = strdequote(pToken->z);
@@ -5689,7 +5718,7 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p
assert(pFieldList != NULL);
// if sql specifies db, use it, otherwise use default db
- SSQLToken* pzTableName = &(pCreateTable->name);
+ SStrToken* pzTableName = &(pCreateTable->name);
if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
@@ -5744,7 +5773,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
STableMetaInfo* pStableMeterMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX);
// super table name, create table by using dst
- SSQLToken* pToken = &(pCreateTable->usingInfo.stableName);
+ SStrToken* pToken = &(pCreateTable->usingInfo.stableName);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
@@ -5837,7 +5866,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
// if sql specifies db, use it, otherwise use default db
- SSQLToken* pzTableName = &(pCreateTable->name);
+ SStrToken* pzTableName = &(pCreateTable->name);
SQuerySQL* pQuerySql = pCreateTable->pSelect;
if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) {
@@ -5850,7 +5879,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
}
tVariant* pVar = &pSrcMeterName->a[0].pVar;
- SSQLToken srcToken = {.z = pVar->pz, .n = pVar->nLen, .type = TK_STRING};
+ SStrToken srcToken = {.z = pVar->pz, .n = pVar->nLen, .type = TK_STRING};
if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -5865,7 +5894,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
}
bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
- if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable) != TSDB_CODE_SUCCESS) {
+ if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable, false) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -5991,7 +6020,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
pTableItem->nLen = strdequote(pTableItem->pz);
- SSQLToken tableName = {.z = pTableItem->pz, .n = pTableItem->nLen, .type = TK_STRING};
+ SStrToken tableName = {.z = pTableItem->pz, .n = pTableItem->nLen, .type = TK_STRING};
if (tscValidateName(&tableName) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
@@ -6002,13 +6031,13 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, i/2);
- SSQLToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz};
+ SStrToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz};
if (tscSetTableFullName(pTableMetaInfo1, &t, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
tVariant* pTableItem1 = &pQuerySql->from->a[i + 1].pVar;
- SSQLToken aliasName = {.z = pTableItem1->pz, .n = pTableItem1->nLen, .type = TK_STRING};
+ SStrToken aliasName = {.z = pTableItem1->pz, .n = pTableItem1->nLen, .type = TK_STRING};
if (tscValidateName(&aliasName) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11);
}
@@ -6048,7 +6077,8 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable) != TSDB_CODE_SUCCESS) {
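+ // the FROM list stores (table name, alias) pairs, so more than two entries means a join query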
+ int32_t joinQuery = (pQuerySql->from != NULL && pQuerySql->from->nExpr > 2);
+ if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -6082,6 +6112,9 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
}
} else { // set the time range
pQueryInfo->window = TSWINDOW_INITIALIZER;
+ if (pQuerySql->from->nExpr > 2) { // it is a join query; omitting the where clause is not allowed.
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "condition missing for join query");
+ }
}
// user does not specified the query time window, twa is not allowed in such case.
@@ -6131,7 +6164,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
if (pQueryInfo->intervalTime > 0) {
int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey);
// number of result is not greater than 10,000,000
- if ((timeRange == 0) || (timeRange / pQueryInfo->intervalTime) > MAX_RETRIEVE_ROWS_IN_INTERVAL_QUERY) {
+ if ((timeRange == 0) || (timeRange / pQueryInfo->intervalTime) > MAX_INTERVAL_TIME_WINDOW) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
}
@@ -6230,7 +6263,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS
(*pExpr)->_node.pLeft = pLeft;
(*pExpr)->_node.pRight = pRight;
- SSQLToken t = {.type = pSqlExpr->nSQLOptr};
+ SStrToken t = {.type = pSqlExpr->nSQLOptr};
(*pExpr)->_node.optr = getBinaryExprOptr(&t);
assert((*pExpr)->_node.optr != 0);
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 0785dd8b53..ecb85472fc 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -43,6 +43,14 @@ void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts);
void tscSaveSubscriptionProgress(void* sub);
static int32_t minMsgSize() { return tsRpcHeadSize + 100; }
+static int32_t getWaitingTimeInterval(int32_t count) {
+ int32_t initial = 100; // 100 ms by default
+ if (count <= 1) {
+ return 0;
+ }
+
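+ // exponential backoff: 0 for the first attempt, then 200ms, 400ms, 800ms, ...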
+ return initial * (2<<(count - 2));
+}
static void tscSetDnodeEpSet(SSqlObj* pSql, SCMVgroupInfo* pVgroupInfo) {
assert(pSql != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0);
@@ -117,7 +125,7 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) {
pVgroupInfo->inUse = pEpSet->inUse;
pVgroupInfo->numOfEps = pEpSet->numOfEps;
for (int32_t i = 0; i < pVgroupInfo->numOfEps; i++) {
- tstrncpy(pVgroupInfo->epAddr[i].fqdn, pEpSet->fqdn[i], sizeof(pEpSet->fqdn[i]));
+ tstrncpy(pVgroupInfo->epAddr[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN);
pVgroupInfo->epAddr[i].port = pEpSet->port[i];
}
tscDebug("after: EndPoint in use: %d", pVgroupInfo->inUse);
@@ -218,13 +226,17 @@ int tscSendMsgToServer(SSqlObj *pSql) {
.handle = &pSql->pRpcCtx,
.code = 0
};
-
// NOTE: the rpc context should be acquired before sending data to server.
// Otherwise, the pSql object may have been released already during the response function, which is
// processMsgFromServer function. In the meanwhile, the assignment of the rpc context to sql object will absolutely
// cause crash.
- rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg);
- return TSDB_CODE_SUCCESS;
+ if (pObj != NULL && pObj->signature == pObj) {
+ rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg);
+ return TSDB_CODE_SUCCESS;
+ } else {
+ //pObj->signature has been reset by another thread; ignore the concurrency problem
+ return TSDB_CODE_TSC_CONN_KILLED;
+ }
}
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
@@ -275,6 +287,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
(rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID ||
rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID ||
rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL ||
+ rpcMsg->code == TSDB_CODE_APP_NOT_READY ||
rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE)) {
tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), ++pSql->retry);
@@ -287,6 +300,12 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
if (pSql->retry > pSql->maxRetry) {
tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
} else {
+ // wait for a short moment and then retry
+ if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
+ int32_t duration = getWaitingTimeInterval(pSql->retry);
+ taosMsleep(duration);
+ }
+
rpcMsg->code = tscRenewTableMeta(pSql, pTableMetaInfo->name);
// if there is an error occurring, proceed to the following error handling procedure.
@@ -299,10 +318,10 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
pRes->rspLen = 0;
- if (pRes->code != TSDB_CODE_TSC_QUERY_CANCELLED) {
- pRes->code = (rpcMsg->code != TSDB_CODE_SUCCESS) ? rpcMsg->code : TSDB_CODE_RPC_NETWORK_UNAVAIL;
- } else {
+ if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
tscDebug("%p query is cancelled, code:%s", pSql, tstrerror(pRes->code));
+ } else {
+ pRes->code = rpcMsg->code;
}
if (pRes->code == TSDB_CODE_SUCCESS) {
@@ -439,35 +458,21 @@ void tscKillSTableQuery(SSqlObj *pSql) {
return;
}
+ pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
+
for (int i = 0; i < pSql->numOfSubs; ++i) {
+ // NOTE: pSub may have been released already here
SSqlObj *pSub = pSql->pSubs[i];
if (pSub == NULL) {
continue;
}
- /*
- * here, we cannot set the command = TSDB_SQL_KILL_QUERY. Otherwise, it may cause
- * sub-queries not correctly released and master sql object of super table query reaches an abnormal state.
- */
- rpcCancelRequest(pSub->pRpcCtx);
pSub->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
- tscQueueAsyncRes(pSub);
- }
-
- /*
- * 1. if the subqueries are not launched or partially launched, we need to waiting the launched
- * query return to successfully free allocated resources.
- * 2. if no any subqueries are launched yet, which means the super table query only in parse sql stage,
- * set the res.code, and return.
- */
- const int64_t MAX_WAITING_TIME = 10000; // 10 Sec.
- int64_t stime = taosGetTimestampMs();
-
- while (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command != TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
- taosMsleep(100);
- if (taosGetTimestampMs() - stime > MAX_WAITING_TIME) {
- break;
+ if (pSub->pRpcCtx != NULL) {
+ rpcCancelRequest(pSub->pRpcCtx);
}
+
+ tscQueueAsyncRes(pSub); // async res? not other functions?
}
tscDebug("%p super table query cancelled", pSql);
@@ -623,26 +628,29 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
tscError("%p failed to malloc for query msg", pSql);
- return -1; // todo add test for this
+ return TSDB_CODE_TSC_INVALID_SQL; // todo add test for this
}
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
-
- if (taosArrayGetSize(pQueryInfo->colList) <= 0 && !tscQueryTags(pQueryInfo)) {
- tscError("%p illegal value of numOfCols in query msg: %d", pSql, tscGetNumOfColumns(pTableMeta));
- return -1;
+
+ size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
+ if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo)) {
+ tscError("%p illegal value of numOfCols in query msg: %"PRIu64", table cols:%d", pSql, numOfSrcCols,
+ tscGetNumOfColumns(pTableMeta));
+
+ return TSDB_CODE_TSC_INVALID_SQL;
}
if (pQueryInfo->intervalTime < 0) {
tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->intervalTime);
- return -1;
+ return TSDB_CODE_TSC_INVALID_SQL;
}
if (pQueryInfo->groupbyExpr.numOfGroupCols < 0) {
tscError("%p illegal value of numOfGroupCols in query msg: %d", pSql, pQueryInfo->groupbyExpr.numOfGroupCols);
- return -1;
+ return TSDB_CODE_TSC_INVALID_SQL;
}
SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload;
@@ -708,7 +716,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if (pColFilter->filterstr) {
pFilterMsg->len = htobe64(pColFilter->len);
- memcpy(pMsg, (void *)pColFilter->pz, pColFilter->len + 1);
+ memcpy(pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1));
pMsg += (pColFilter->len + 1); // append the additional filter binary info
} else {
pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi);
@@ -720,7 +728,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) {
tscError("invalid filter info");
- return -1;
+ return TSDB_CODE_TSC_INVALID_SQL;
}
}
}
@@ -729,10 +737,10 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
- if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId)) {
+ if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) {
/* column id is not valid according to the cached table meta, the table meta is expired */
tscError("%p table schema is not matched with parsed sql", pSql);
- return -1;
+ return TSDB_CODE_TSC_INVALID_SQL;
}
pSqlFuncExpr->colInfo.colId = htons(pExpr->colInfo.colId);
@@ -929,8 +937,8 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCMCreateAcctMsg *pAlterMsg = (SCMCreateAcctMsg *)pCmd->payload;
- SSQLToken *pName = &pInfo->pDCLInfo->user.user;
- SSQLToken *pPwd = &pInfo->pDCLInfo->user.passwd;
+ SStrToken *pName = &pInfo->pDCLInfo->user.user;
+ SStrToken *pPwd = &pInfo->pDCLInfo->user.passwd;
strncpy(pAlterMsg->user, pName->z, pName->n);
strncpy(pAlterMsg->pass, pPwd->z, pPwd->n);
@@ -1132,13 +1140,13 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pShowMsg->type = pShowInfo->showType;
if (pShowInfo->showType != TSDB_MGMT_TABLE_VNODES) {
- SSQLToken *pPattern = &pShowInfo->pattern;
+ SStrToken *pPattern = &pShowInfo->pattern;
if (pPattern->type > 0) { // only show tables support wildcard query
strncpy(pShowMsg->payload, pPattern->z, pPattern->n);
pShowMsg->payloadLen = htons(pPattern->n);
}
} else {
- SSQLToken *pEpAddr = &pShowInfo->prefix;
+ SStrToken *pEpAddr = &pShowInfo->prefix;
assert(pEpAddr->n > 0 && pEpAddr->type > 0);
strncpy(pShowMsg->payload, pEpAddr->z, pEpAddr->n);
@@ -1280,7 +1288,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int size = tscEstimateAlterTableMsgLength(pCmd);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
tscError("%p failed to malloc for alter table msg", pSql);
- return -1;
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMAlterTableMsg *pAlterTableMsg = (SCMAlterTableMsg *)pCmd->payload;
@@ -1428,6 +1436,12 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
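+ // if an error has already been recorded, report it through the async callback instead of doing the local merge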
+ int32_t code = pRes->code;
+ if (pRes->code != TSDB_CODE_SUCCESS) {
+ tscQueueAsyncRes(pSql);
+ return code;
+ }
+
pRes->code = tscDoLocalMerge(pSql);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
@@ -1438,7 +1452,7 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
pRes->row = 0;
pRes->completed = (pRes->numOfRows == 0);
- int32_t code = pRes->code;
+ code = pRes->code;
if (pRes->code == TSDB_CODE_SUCCESS) {
(*pSql->fp)(pSql->param, pSql, pRes->numOfRows);
} else {
@@ -1632,7 +1646,7 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
pthread_mutex_unlock(&pObj->mutex);
tscError("%p failed to malloc for heartbeat msg", pSql);
- return -1;
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMHeartBeatMsg *pHeartbeat = (SCMHeartBeatMsg *)pCmd->payload;
@@ -1702,7 +1716,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
assert(pTableMetaInfo->pTableMeta == NULL);
pTableMetaInfo->pTableMeta = (STableMeta *) taosCachePut(tscCacheHandle, pTableMetaInfo->name,
- strlen(pTableMetaInfo->name), pTableMeta, size, tsTableMetaKeepTimer);
+ strlen(pTableMetaInfo->name), pTableMeta, size, tsTableMetaKeepTimer * 1000);
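+ // the keep timer is configured in seconds and is converted to milliseconds for the cache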
// todo handle out of memory case
if (pTableMetaInfo->pTableMeta == NULL) {
@@ -1906,7 +1920,7 @@ int tscProcessShowRsp(SSqlObj *pSql) {
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg, &size);
pTableMetaInfo->pTableMeta = taosCachePut(tscCacheHandle, key, strlen(key), (char *)pTableMeta, size,
- tsTableMetaKeepTimer);
+ tsTableMetaKeepTimer * 1000);
SSchema *pTableSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
if (pQueryInfo->colList == NULL) {
@@ -1946,8 +1960,12 @@ static void createHBObj(STscObj* pObj) {
pSql->fp = tscProcessHeartBeatRsp;
- SQueryInfo *pQueryInfo = NULL;
- tscGetQueryInfoDetailSafely(&pSql->cmd, 0, &pQueryInfo);
+ SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0);
+ if (pQueryInfo == NULL) {
+ pSql->res.code = terrno;
+ return;
+ }
+
pQueryInfo->command = TSDB_SQL_HB;
pSql->cmd.command = pQueryInfo->command;
@@ -2132,8 +2150,7 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf
tscAddSubqueryInfo(&pNew->cmd);
- SQueryInfo *pNewQueryInfo = NULL;
- tscGetQueryInfoDetailSafely(&pNew->cmd, 0, &pNewQueryInfo);
+ SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0);
pNew->cmd.autoCreated = pSql->cmd.autoCreated; // create table if not exists
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) {
@@ -2236,8 +2253,8 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
pNew->cmd.command = TSDB_SQL_STABLEVGROUP;
- SQueryInfo *pNewQueryInfo = NULL;
- if ((code = tscGetQueryInfoDetailSafely(&pNew->cmd, 0, &pNewQueryInfo)) != TSDB_CODE_SUCCESS) {
+ SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0);
+ if (pNewQueryInfo == NULL) {
tscFreeSqlObj(pNew);
return code;
}
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index 1bd885466c..f63923e046 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -201,7 +201,7 @@ TAOS *taos_connect_internal(const char *ip, const char *user, const char *pass,
}
TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) {
- tscDebug("try to create a connection to %s:%u, user:%s db:%s", ip, port, user, db);
+ tscDebug("try to create a connection to %s:%u, user:%s db:%s", ip, port != 0 ? port : tsServerPort , user, db);
if (user == NULL) user = TSDB_DEFAULT_USER;
if (pass == NULL) pass = TSDB_DEFAULT_PASS;
@@ -655,27 +655,30 @@ int* taos_fetch_lengths(TAOS_RES *res) {
char *taos_get_client_info() { return version; }
void taos_stop_query(TAOS_RES *res) {
- if (res == NULL) {
+ SSqlObj *pSql = (SSqlObj *)res;
+ if (pSql == NULL || pSql->signature != pSql) {
return;
}
- SSqlObj *pSql = (SSqlObj *)res;
+ tscDebug("%p start to cancel query", res);
SSqlCmd *pCmd = &pSql->cmd;
- if (pSql->signature != pSql) return;
- tscDebug("%p start to cancel query", res);
-
-
+ // TODO: there is a multi-thread problem here.
+ // The object may have been released by another thread already.
+ // A reference count may fix this problem.
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
- tscKillSTableQuery(pSql);
- }
- if (pSql->cmd.command < TSDB_SQL_LOCAL) {
- rpcCancelRequest(pSql->pRpcCtx);
- }
+ // set the error code for the master pSqlObj first
pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
- tscQueueAsyncRes(pSql);
+
+ if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
+ assert(pSql->pRpcCtx == NULL);
+ tscKillSTableQuery(pSql);
+ } else {
+ if (pSql->cmd.command < TSDB_SQL_LOCAL) {
+ rpcCancelRequest(pSql->pRpcCtx);
+ }
+ }
tscDebug("%p query is cancelled", res);
}
@@ -824,8 +827,11 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
int code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
char *str = (char *)tblNameList;
- SQueryInfo *pQueryInfo = NULL;
- tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo);
+ SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
+ if (pQueryInfo == NULL) {
+ pSql->res.code = terrno;
+ return terrno;
+ }
STableMetaInfo *pTableMetaInfo = tscAddEmptyMetaInfo(pQueryInfo);
@@ -850,7 +856,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
str = nextStr + 1;
len = (int32_t)strtrim(tblName);
- SSQLToken sToken = {.n = len, .type = TK_ID, .z = tblName};
+ SStrToken sToken = {.n = len, .type = TK_ID, .z = tblName};
tSQLGetToken(tblName, &sToken.type);
// Check if the table name available or not
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index d2b5439f8d..2fb264c756 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -12,6 +12,8 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see .
*/
+#define _GNU_SOURCE
+
#include "os.h"
#include "qAst.h"
@@ -93,13 +95,14 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
tscInfo("%" PRId64 ", tags:%d \t %" PRId64 ", tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag);
#endif
- if (elem1.tag < elem2.tag || (elem1.tag == elem2.tag && tsCompare(order, elem1.ts, elem2.ts))) {
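+      // tags are tVariant values (binary/nchar tags are now allowed), so compare them with tVariantCompare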
+ int32_t res = tVariantCompare(&elem1.tag, &elem2.tag);
+ if (res == -1 || (res == 0 && tsCompare(order, elem1.ts, elem2.ts))) {
if (!tsBufNextPos(pSupporter1->pTSBuf)) {
break;
}
numOfInput1++;
- } else if (elem1.tag > elem2.tag || (elem1.tag == elem2.tag && tsCompare(order, elem2.ts, elem1.ts))) {
+ } else if ((res > 0) || (res == 0 && tsCompare(order, elem2.ts, elem1.ts))) {
if (!tsBufNextPos(pSupporter2->pTSBuf)) {
break;
}
@@ -119,8 +122,8 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
win->ekey = elem1.ts;
}
- tsBufAppend(output1, elem1.vnode, elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts));
- tsBufAppend(output2, elem2.vnode, elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts));
+ tsBufAppend(output1, elem1.vnode, &elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts));
+ tsBufAppend(output2, elem2.vnode, &elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts));
} else {
pLimit->offset -= 1;
}
@@ -352,11 +355,12 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
pExpr = tscSqlExprGet(pQueryInfo, 0);
}
- // set the join condition tag column info, to do extract method
+ // set the join condition tag column info, todo extract method
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
assert(pQueryInfo->tagCond.joinInfo.hasJoin);
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
+ // set the tag column id so the executor can extract the correct tag value
pExpr->param[0].i64Key = colId;
pExpr->numOfParams = 1;
}
@@ -433,6 +437,7 @@ int32_t tscCompareTidTags(const void* p1, const void* p2) {
if (t1->vgId != t2->vgId) {
return (t1->vgId > t2->vgId) ? 1 : -1;
}
+
if (t1->tid != t2->tid) {
return (t1->tid > t2->tid) ? 1 : -1;
}
@@ -539,6 +544,7 @@ static bool checkForDuplicateTagVal(SQueryInfo* pQueryInfo, SJoinSupporter* p1,
for(int32_t i = 1; i < p1->num; ++i) {
STidTags* prev = (STidTags*) varDataVal(p1->pIdTagList + (i - 1) * p1->tagSize);
STidTags* p = (STidTags*) varDataVal(p1->pIdTagList + i * p1->tagSize);
+ assert(prev->vgId >= 1 && p->vgId >= 1);
if (doCompare(prev->tag, p->tag, pColSchema->type, pColSchema->bytes) == 0) {
tscError("%p join tags have same value for different table, free all sub SqlObj and quit", pPSqlObj);
@@ -575,6 +581,7 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
while(i < p1->num && j < p2->num) {
STidTags* pp1 = (STidTags*) varDataVal(p1->pIdTagList + i * p1->tagSize);
STidTags* pp2 = (STidTags*) varDataVal(p2->pIdTagList + j * p2->tagSize);
+ assert(pp1->tid != 0 && pp2->tid != 0);
int32_t ret = doCompare(pp1->tag, pp2->tag, pColSchema->type, pColSchema->bytes);
if (ret == 0) {
@@ -623,7 +630,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// keep the results in memory
if (numOfRows > 0) {
- size_t validLen = pSupporter->tagSize * pRes->numOfRows;
+ size_t validLen = (size_t)(pSupporter->tagSize * pRes->numOfRows);
size_t length = pSupporter->totalLen + validLen;
// todo handle memory error
@@ -684,6 +691,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
freeJoinSubqueryObj(pParentSql);
pParentSql->res.code = code;
tscQueueAsyncRes(pParentSql);
+
taosArrayDestroy(s1);
taosArrayDestroy(s2);
return;
@@ -748,7 +756,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
}
if (numOfRows > 0) { // write the compressed timestamp to disk file
- fwrite(pRes->data, pRes->numOfRows, 1, pSupporter->f);
+ fwrite(pRes->data, (size_t)pRes->numOfRows, 1, pSupporter->f);
fclose(pSupporter->f);
pSupporter->f = NULL;
@@ -1143,7 +1151,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
/////////////////////////////////////////////////////////////////////////////////////////
static void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code);
-static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj);
+static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj);
int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter *pSupporter) {
SSqlCmd * pCmd = &pSql->cmd;
@@ -1216,6 +1224,16 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
int32_t tagColId = tscGetJoinTagColIdByUid(pTagCond, pTableMetaInfo->pTableMeta->id.uid);
SSchema* s = tscGetTableColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId);
+ // find the index of the tag column that matches tagColId
+ int32_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta);
+ SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
+ for(int32_t i = 0; i < numOfTags; ++i) {
+ if (pSchema[i].colId == tagColId) {
+ index.columnIndex = i;
+ break;
+ }
+ }
+
int16_t bytes = 0;
int16_t type = 0;
int32_t inter = 0;
@@ -1285,8 +1303,14 @@ int32_t tscHandleMasterJoinQuery(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
assert((pQueryInfo->type & TSDB_QUERY_TYPE_SUBQUERY) == 0);
-
+
+ // todo add test
SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
+ if (pState == NULL) {
+ pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ return pSql->res.code;
+ }
+
pState->numOfTotal = pQueryInfo->numOfTables;
pState->numOfRemain = pState->numOfTotal;
@@ -1300,7 +1324,7 @@ int32_t tscHandleMasterJoinQuery(SSqlObj* pSql) {
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
if (0 == i) {
taosTFree(pState);
- }
+ }
return pSql->res.code;
}
@@ -1330,10 +1354,6 @@ static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs, SSubqueryState
SRetrieveSupport* pSupport = pSub->param;
taosTFree(pSupport->localBuffer);
-
- pthread_mutex_unlock(&pSupport->queryMutex);
- pthread_mutex_destroy(&pSupport->queryMutex);
-
taosTFree(pSupport);
tscFreeSqlObj(pSub);
@@ -1406,14 +1426,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
trs->pParentSql = pSql;
trs->pFinalColModel = pModel;
- pthread_mutexattr_t mutexattr;
- memset(&mutexattr, 0, sizeof(pthread_mutexattr_t));
-
- pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_RECURSIVE_NP);
- pthread_mutex_init(&trs->queryMutex, &mutexattr);
- pthread_mutexattr_destroy(&mutexattr);
-
- SSqlObj *pNew = tscCreateSqlObjForSubquery(pSql, trs, NULL);
+ SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL);
if (pNew == NULL) {
tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
taosTFree(trs->localBuffer);
@@ -1458,15 +1471,16 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
}
static void tscFreeSubSqlObj(SRetrieveSupport *trsupport, SSqlObj *pSql) {
- tscDebug("%p start to free subquery result", pSql);
-
+ tscDebug("%p start to free subquery obj", pSql);
+
+ int32_t index = trsupport->subqueryIndex;
+ SSqlObj *pParentSql = trsupport->pParentSql;
+
+ assert(pSql == pParentSql->pSubs[index]);
+ pParentSql->pSubs[index] = NULL;
+
taos_free_result(pSql);
-
taosTFree(trsupport->localBuffer);
-
- pthread_mutex_unlock(&trsupport->queryMutex);
- pthread_mutex_destroy(&trsupport->queryMutex);
-
taosTFree(trsupport);
}
@@ -1475,23 +1489,11 @@ static void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, i
static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES *tres, int32_t code) {
// set no disk space error info
-#ifdef WINDOWS
- LPVOID lpMsgBuf;
- FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL,
- GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
- (LPTSTR)&lpMsgBuf, 0, NULL);
- tscError("sub:%p failed to flush data to disk:reason:%s", tres, lpMsgBuf);
- LocalFree(lpMsgBuf);
-#else
tscError("sub:%p failed to flush data to disk, reason:%s", tres, tstrerror(code));
-#endif
-
SSqlObj* pParentSql = trsupport->pParentSql;
pParentSql->res.code = code;
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
-
- pthread_mutex_unlock(&trsupport->queryMutex);
tscHandleSubqueryError(trsupport, tres, pParentSql->res.code);
}
@@ -1510,13 +1512,10 @@ static int32_t tscReissueSubquery(SRetrieveSupport *trsupport, SSqlObj *pSql, in
// clear local saved number of results
trsupport->localBuffer->num = 0;
- pthread_mutex_unlock(&trsupport->queryMutex);
-
- tscTrace("%p sub:%p retrieve failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql, pSql,
+ tscError("%p sub:%p retrieve/query failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql, pSql,
tstrerror(code), subqueryIndex, trsupport->numOfRetry);
- SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSql, trsupport, pSql);
-
+ SSqlObj *pNew = tscCreateSTableSubquery(trsupport->pParentSql, trsupport, pSql);
if (pNew == NULL) {
tscError("%p sub:%p failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d",
trsupport->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, trsupport->subqueryIndex);
@@ -1527,8 +1526,15 @@ static int32_t tscReissueSubquery(SRetrieveSupport *trsupport, SSqlObj *pSql, in
return pParentSql->res.code;
}
- taos_free_result(pSql);
- return tscProcessSql(pNew);
+ int32_t ret = tscProcessSql(pNew);
+
+ // if processing the sql failed, let the following code handle pSql
+ if (ret == TSDB_CODE_SUCCESS) {
+ taos_free_result(pSql);
+ return ret;
+ } else {
+ return ret;
+ }
}
void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numOfRows) {
@@ -1548,14 +1554,14 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
*/
pSql->res.numOfRows = 0;
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; // disable retry efforts
- tscDebug("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%d", pParentSql, pSql,
- subqueryIndex, pParentSql->res.code);
+ tscDebug("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%s", pParentSql, pSql,
+ subqueryIndex, tstrerror(pParentSql->res.code));
}
if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query.
tscDebug("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pParentSql, pSql, numOfRows, subqueryIndex);
- tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pParentSql, pSql,
- subqueryIndex, pParentSql->res.code);
+ tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%s", pParentSql, pSql,
+ subqueryIndex, tstrerror(pParentSql->res.code));
} else {
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pParentSql->res.code == TSDB_CODE_SUCCESS) {
if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) {
@@ -1587,10 +1593,10 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
taosTFree(trsupport->pState);
tscFreeSubSqlObj(trsupport, pSql);
-
+
// in case of second stage join subquery, invoke its callback function instead of regular QueueAsyncRes
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0);
-
+
if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
(*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code);
} else { // regular super table query
@@ -1669,7 +1675,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
// only free once
taosTFree(trsupport->pState);
tscFreeSubSqlObj(trsupport, pSql);
-
+
// set the command flag must be after the semaphore been correctly set.
pParentSql->cmd.command = TSDB_SQL_RETRIEVE_LOCALMERGE;
if (pParentSql->res.code == TSDB_CODE_SUCCESS) {
@@ -1685,24 +1691,22 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
int32_t idx = trsupport->subqueryIndex;
SSqlObj * pParentSql = trsupport->pParentSql;
+ assert(tres != NULL);
SSqlObj *pSql = (SSqlObj *)tres;
- if (pSql == NULL) { // sql object has been released in error process, return immediately
- tscDebug("%p subquery has been released, idx:%d, abort", pParentSql, idx);
- return;
- }
-
+// if (pSql == NULL) { // sql object has been released in error process, return immediately
+// tscDebug("%p subquery has been released, idx:%d, abort", pParentSql, idx);
+// return;
+// }
+
SSubqueryState* pState = trsupport->pState;
assert(pState->numOfRemain <= pState->numOfTotal && pState->numOfRemain >= 0 && pParentSql->numOfSubs == pState->numOfTotal);
- // query process and cancel query process may execute at the same time
- pthread_mutex_lock(&trsupport->queryMutex);
-
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
- SCMVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
+ SCMVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
- tscTrace("%p query cancelled or failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s",
+ tscDebug("%p query cancelled/failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s",
pParentSql, pSql, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(numOfRows), tstrerror(pParentSql->res.code));
tscHandleSubqueryError(param, tres, numOfRows);
@@ -1713,13 +1717,13 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
assert(numOfRows == taos_errno(pSql));
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
- tscTrace("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry);
+ tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry);
if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) {
return;
}
} else {
- tscTrace("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(numOfRows));
+ tscDebug("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(numOfRows));
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, numOfRows); // set global code and abort
}
@@ -1764,13 +1768,9 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
(int32_t)pRes->numOfRows, pQueryInfo->groupbyExpr.orderType);
if (ret != 0) { // set no disk space error info, and abort retry
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_NO_DISKSPACE);
-
} else if (pRes->completed) {
tscAllDataRetrievedFromDnode(trsupport, pSql);
- return;
-
} else { // continue fetch data from dnode
- pthread_mutex_unlock(&trsupport->queryMutex);
taos_fetch_rows_a(tres, tscRetrieveFromDnodeCallBack, param);
}
@@ -1779,15 +1779,15 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
}
}
-static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj) {
+static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj) {
const int32_t table_index = 0;
SSqlObj *pNew = createSubqueryObj(pSql, table_index, tscRetrieveDataRes, trsupport, TSDB_SQL_SELECT, prevSqlObj);
if (pNew != NULL) { // the sub query of two-stage super table query
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
+
pQueryInfo->type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY;
-
- assert(pQueryInfo->numOfTables == 1 && pNew->cmd.numOfClause == 1);
+ assert(pQueryInfo->numOfTables == 1 && pNew->cmd.numOfClause == 1 && trsupport->subqueryIndex < pSql->numOfSubs);
// launch subquery for each vnode, so the subquery index equals to the vgroupIndex.
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, table_index);
@@ -1804,7 +1804,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
SSqlObj* pParentSql = trsupport->pParentSql;
SSqlObj* pSql = (SSqlObj *) tres;
-
+
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
assert(pSql->cmd.numOfClause == 1 && pQueryInfo->numOfTables == 1);
@@ -1814,7 +1814,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
// stable query killed or other subquery failed, all query stopped
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
- tscTrace("%p query cancelled or failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s",
+ tscError("%p query cancelled or failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s",
pParentSql, pSql, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(code), tstrerror(pParentSql->res.code));
tscHandleSubqueryError(param, tres, code);
@@ -1832,12 +1832,12 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
assert(code == taos_errno(pSql));
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
- tscTrace("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry);
+ tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry);
if (tscReissueSubquery(trsupport, pSql, code) == TSDB_CODE_SUCCESS) {
return;
}
} else {
- tscTrace("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(code));
+ tscError("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(code));
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort
}
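Both retry branches above record a failure with atomic_val_compare_exchange_32, so only the first failing subquery sets the parent's global error code. A rough equivalent using C11 atomics (editor's sketch; TDengine's own atomic wrappers and error codes are not reproduced, and the hex values below are placeholders):

#include <stdatomic.h>
#include <stdio.h>

#define CODE_SUCCESS 0

static _Atomic int globalCode = CODE_SUCCESS;

/* record the first failure only; later failures leave the code untouched */
static void setGlobalCode(int code) {
    int expected = CODE_SUCCESS;
    atomic_compare_exchange_strong(&globalCode, &expected, code);
}

int main(void) {
    setGlobalCode(0x1001);   /* first failing subquery wins */
    setGlobalCode(0x1002);   /* ignored: the global code is already set */
    printf("global code: 0x%x\n", atomic_load(&globalCode));
    return 0;
}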
@@ -1845,7 +1845,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
return;
}
- tscTrace("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql,
+ tscDebug("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql,
pVgroup->epAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex);
if (pSql->res.qhandle == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode
@@ -1924,8 +1924,14 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
size_t size = taosArrayGetSize(pCmd->pDataBlocks);
assert(size > 0);
- pSql->pSubs = calloc(size, POINTER_BYTES);
+ // the number of already initialized subqueries
+ int32_t numOfSub = 0;
+
pSql->numOfSubs = (uint16_t)size;
+ pSql->pSubs = calloc(size, POINTER_BYTES);
+ if (pSql->pSubs == NULL) {
+ goto _error;
+ }
tscDebug("%p submit data to %" PRIzu " vnode(s)", pSql, size);
@@ -1934,10 +1940,13 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
pState->numOfRemain = pSql->numOfSubs;
pRes->code = TSDB_CODE_SUCCESS;
- int32_t numOfSub = 0;
while(numOfSub < pSql->numOfSubs) {
SInsertSupporter* pSupporter = calloc(1, sizeof(SInsertSupporter));
+ if (pSupporter == NULL) {
+ goto _error;
+ }
+
pSupporter->pSql = pSql;
pSupporter->pState = pState;
pSupporter->index = numOfSub;
@@ -1970,7 +1979,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
if (numOfSub < pSql->numOfSubs) {
tscError("%p failed to prepare subObj structure and launch sub-insertion", pSql);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- return pRes->code; // free all allocated resource
+ goto _error;
}
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
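The multivnode insert path now funnels every allocation failure to a single _error label instead of returning early. A small sketch of that cleanup pattern under hypothetical names (the real code additionally tracks how many supporters were initialized via numOfSub):

#include <stdio.h>
#include <stdlib.h>

typedef struct Supporter { int index; } Supporter;

/* allocate one supporter per sub-request; any failure falls through to _error */
static int launchAll(int total) {
    Supporter **subs = calloc((size_t)total, sizeof(Supporter *));
    if (subs == NULL) goto _error;

    for (int i = 0; i < total; ++i) {
        subs[i] = calloc(1, sizeof(Supporter));
        if (subs[i] == NULL) goto _error;   /* partially built: clean up everything */
        subs[i]->index = i;
    }

    /* ... each sub-request would be submitted here ... */

    for (int i = 0; i < total; ++i) free(subs[i]);
    free(subs);
    return 0;

_error:
    if (subs != NULL) {
        for (int i = 0; i < total; ++i) free(subs[i]);
        free(subs);
    }
    return -1;   /* out of memory */
}

int main(void) {
    printf("launchAll returned %d\n", launchAll(4));
    return 0;
}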
@@ -2068,46 +2077,8 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
doBuildResFromSubqueries(pSql);
tsem_post(&pSql->rspSem);
-
return;
-
- // continue retrieve data from vnode
-// if (!tscHasRemainDataInSubqueryResultSet(pSql)) {
-// tscDebug("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1);
-// SSubqueryState* pState = NULL;
-//
-// // free all sub sqlobj
-// for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
-// SSqlObj* pChildObj = pSql->pSubs[i];
-// if (pChildObj == NULL) {
-// continue;
-// }
-//
-// SJoinSupporter* pSupporter = (SJoinSupporter*)pChildObj->param;
-// pState = pSupporter->pState;
-//
-// tscDestroyJoinSupporter(pChildObj->param);
-// taos_free_result(pChildObj);
-// }
-//
-// free(pState);
-//
-// pRes->completed = true; // set query completed
-// tsem_post(&pSql->rspSem);
-// return;
-// }
-
-// tscFetchDatablockFromSubquery(pSql);
-// if (pRes->code != TSDB_CODE_SUCCESS) {
-// return;
-// }
}
-
-// if (pSql->res.code == TSDB_CODE_SUCCESS) {
-// (*pSql->fp)(pSql->param, pSql, pRes->numOfRows);
-// } else {
-// tscQueueAsyncRes(pSql);
-// }
}
static void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pField) {
@@ -2157,7 +2128,6 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
SSqlRes *pRes = &pSql->res;
assert(pRes->row >= 0 && pRes->row <= pRes->numOfRows);
-
if (pRes->row >= pRes->numOfRows) { // all the results has returned to invoker
taosTFree(pRes->tsrow);
return pRes->tsrow;
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 64a871ff74..b61fd7e8c9 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -373,7 +373,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
if (pSql == NULL || pSql->signature != pSql) {
return;
}
-
+
tscDebug("%p start to free sql object", pSql);
tscPartiallyFreeSqlObj(pSql);
@@ -388,6 +388,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
taosTFree(pSql->sqlstr);
tsem_destroy(&pSql->rspSem);
+
free(pSql);
}
@@ -404,7 +405,7 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
taosTFree(pDataBlock);
}
-SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes,
+SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
uint32_t offset) {
uint32_t needed = pDataBlock->numOfParams + 1;
if (needed > pDataBlock->numOfAllocedParams) {
@@ -485,15 +486,6 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
return TSDB_CODE_SUCCESS;
}
-//void tscFreeUnusedDataBlocks(SDataBlockList* pList) {
-// /* release additional memory consumption */
-// for (int32_t i = 0; i < pList->nSize; ++i) {
-// STableDataBlocks* pDataBlock = pList->pData[i];
-// pDataBlock->pData = realloc(pDataBlock->pData, pDataBlock->size);
-// pDataBlock->nAllocSize = (uint32_t)pDataBlock->size;
-// }
-//}
-
/**
* create the in-memory buffer for each table to keep the submitted data block
* @param initialSize
@@ -518,6 +510,11 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
}
dataBuf->pData = calloc(1, dataBuf->nAllocSize);
+ if (dataBuf->pData == NULL) {
+ tscError("failed to allocated memory, reason:%s", strerror(errno));
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
dataBuf->ordered = true;
dataBuf->prevTS = INT64_MIN;
@@ -742,7 +739,7 @@ bool tscIsInsertData(char* sqlstr) {
int32_t index = 0;
do {
- SSQLToken t0 = tStrGetToken(sqlstr, &index, false, 0, NULL);
+ SStrToken t0 = tStrGetToken(sqlstr, &index, false, 0, NULL);
if (t0.type != TK_LP) {
return t0.type == TK_INSERT || t0.type == TK_IMPORT;
}
@@ -926,17 +923,23 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
}
static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
- int16_t size, int16_t interSize, bool isTagCol) {
+ int16_t size, int16_t interSize, int32_t colType) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);
SSqlExpr* pExpr = calloc(1, sizeof(SSqlExpr));
+ if (pExpr == NULL) {
+ return NULL;
+ }
+
pExpr->functionId = functionId;
-
+
// set the correct columnIndex index
if (pColIndex->columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
pExpr->colInfo.colId = TSDB_TBNAME_COLUMN_INDEX;
+ } else if (pColIndex->columnIndex <= TSDB_UD_COLUMN_INDEX) {
+ pExpr->colInfo.colId = pColIndex->columnIndex;
} else {
- if (isTagCol) {
+ if (TSDB_COL_IS_TAG(colType)) {
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
pExpr->colInfo.colId = pSchema[pColIndex->columnIndex].colId;
tstrncpy(pExpr->colInfo.name, pSchema[pColIndex->columnIndex].name, sizeof(pExpr->colInfo.name));
@@ -948,9 +951,9 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol
}
}
- pExpr->colInfo.flag = isTagCol? TSDB_COL_TAG:TSDB_COL_NORMAL;
-
+ pExpr->colInfo.flag = colType;
pExpr->colInfo.colIndex = pColIndex->columnIndex;
+
pExpr->resType = type;
pExpr->resBytes = size;
pExpr->interBytes = interSize;
@@ -1060,8 +1063,11 @@ void tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy)
if (deepcopy) {
SSqlExpr* p1 = calloc(1, sizeof(SSqlExpr));
+ if (p1 == NULL) {
+ assert(0);
+ }
+
*p1 = *pExpr;
-
for (int32_t j = 0; j < pExpr->numOfParams; ++j) {
tVariantAssign(&p1->param[j], &pExpr->param[j]);
}
@@ -1097,16 +1103,22 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
if (i >= numOfCols || numOfCols == 0) {
SColumn* b = calloc(1, sizeof(SColumn));
+ if (b == NULL) {
+ return NULL;
+ }
+
b->colIndex = *pColIndex;
-
taosArrayInsert(pColumnList, i, &b);
} else {
SColumn* pCol = taosArrayGetP(pColumnList, i);
if (i < numOfCols && (pCol->colIndex.columnIndex > col || pCol->colIndex.tableIndex != pColIndex->tableIndex)) {
SColumn* b = calloc(1, sizeof(SColumn));
+ if (b == NULL) {
+ return NULL;
+ }
+
b->colIndex = *pColIndex;
-
taosArrayInsert(pColumnList, i, &b);
}
}
@@ -1128,7 +1140,10 @@ SColumn* tscColumnClone(const SColumn* src) {
assert(src != NULL);
SColumn* dst = calloc(1, sizeof(SColumn));
-
+ if (dst == NULL) {
+ return NULL;
+ }
+
dst->colIndex = src->colIndex;
dst->numOfFilters = src->numOfFilters;
dst->filterInfo = tscFilterInfoClone(src->filterInfo, src->numOfFilters);
@@ -1183,7 +1198,7 @@ void tscColumnListDestroy(SArray* pColumnList) {
* 'first_part.second_part'
*
*/
-static int32_t validateQuoteToken(SSQLToken* pToken) {
+static int32_t validateQuoteToken(SStrToken* pToken) {
strdequote(pToken->z);
pToken->n = (uint32_t)strtrim(pToken->z);
@@ -1199,7 +1214,7 @@ static int32_t validateQuoteToken(SSQLToken* pToken) {
return TSDB_CODE_SUCCESS;
}
-int32_t tscValidateName(SSQLToken* pToken) {
+int32_t tscValidateName(SStrToken* pToken) {
if (pToken->type != TK_STRING && pToken->type != TK_ID) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -1286,12 +1301,12 @@ void tscIncStreamExecutionCount(void* pStream) {
ps->num += 1;
}
-bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId) {
+bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t numOfParams) {
if (pTableMetaInfo->pTableMeta == NULL) {
return false;
}
- if (colId == TSDB_TBNAME_COLUMN_INDEX) {
+ if (colId == TSDB_TBNAME_COLUMN_INDEX || (colId <= TSDB_UD_COLUMN_INDEX && numOfParams == 2)) {
return true;
}
@@ -1338,6 +1353,10 @@ void tscTagCondCopy(STagCond* dest, const STagCond* src) {
if (pCond->len > 0) {
assert(pCond->cond != NULL);
c.cond = malloc(c.len);
+ if (c.cond == NULL) {
+ assert(0);
+ }
+
memcpy(c.cond, pCond->cond, c.len);
}
@@ -1463,20 +1482,20 @@ STableMetaInfo* tscGetMetaInfo(SQueryInfo* pQueryInfo, int32_t tableIndex) {
return pQueryInfo->pTableMetaInfo[tableIndex];
}
-int32_t tscGetQueryInfoDetailSafely(SSqlCmd* pCmd, int32_t subClauseIndex, SQueryInfo** pQueryInfo) {
+SQueryInfo* tscGetQueryInfoDetailSafely(SSqlCmd* pCmd, int32_t subClauseIndex) {
+ SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);
int32_t ret = TSDB_CODE_SUCCESS;
- *pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);
-
- while ((*pQueryInfo) == NULL) {
+ while ((pQueryInfo) == NULL) {
if ((ret = tscAddSubqueryInfo(pCmd)) != TSDB_CODE_SUCCESS) {
- return ret;
+ terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ return NULL;
}
- (*pQueryInfo) = tscGetQueryInfoDetail(pCmd, subClauseIndex);
+ pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);
}
- return TSDB_CODE_SUCCESS;
+ return pQueryInfo;
}
STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* index) {
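tscGetQueryInfoDetailSafely now hands back the SQueryInfo pointer directly and reports allocation failure through terrno, rather than filling an out-parameter and returning an error code. A self-contained sketch of that calling convention with stand-in names (g_errno plays the role of terrno; the error value is a placeholder):

#include <stdio.h>
#include <stdlib.h>

#define ERR_OUT_OF_MEMORY 0x0216   /* placeholder value */

static int g_errno = 0;            /* stand-in for terrno */

typedef struct QueryInfo { int clause; } QueryInfo;

/* lazily create the requested entry; return NULL and set g_errno on failure */
static QueryInfo *getQueryInfoSafely(QueryInfo **slot, int clause) {
    if (*slot == NULL) {
        *slot = calloc(1, sizeof(QueryInfo));
        if (*slot == NULL) {
            g_errno = ERR_OUT_OF_MEMORY;
            return NULL;
        }
        (*slot)->clause = clause;
    }
    return *slot;
}

int main(void) {
    QueryInfo *slot = NULL;
    QueryInfo *info = getQueryInfoSafely(&slot, 0);
    if (info == NULL) {
        fprintf(stderr, "failed, code 0x%x\n", g_errno);
        return 1;
    }
    printf("clause %d is ready\n", info->clause);
    free(slot);
    return 0;
}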
@@ -1507,6 +1526,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
assert(pQueryInfo->exprList == NULL);
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
+ pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX;
}
int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
@@ -1522,8 +1542,11 @@ int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
pCmd->pQueryInfo = (SQueryInfo**)tmp;
SQueryInfo* pQueryInfo = calloc(1, sizeof(SQueryInfo));
+ if (pQueryInfo == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
tscInitQueryInfo(pQueryInfo);
-
pQueryInfo->msg = pCmd->payload; // pointer to the parent error message buffer
pCmd->pQueryInfo[pCmd->numOfClause++] = pQueryInfo;
@@ -1584,14 +1607,18 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
SVgroupsInfo* vgroupList, SArray* pTagCols) {
void* pAlloc = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES);
if (pAlloc == NULL) {
+ terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
return NULL;
}
pQueryInfo->pTableMetaInfo = pAlloc;
- pQueryInfo->pTableMetaInfo[pQueryInfo->numOfTables] = calloc(1, sizeof(STableMetaInfo));
+ STableMetaInfo* pTableMetaInfo = calloc(1, sizeof(STableMetaInfo));
+ if (pTableMetaInfo == NULL) {
+ terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ return NULL;
+ }
- STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[pQueryInfo->numOfTables];
- assert(pTableMetaInfo != NULL);
+ pQueryInfo->pTableMetaInfo[pQueryInfo->numOfTables] = pTableMetaInfo;
if (name != NULL) {
tstrncpy(pTableMetaInfo->name, name, sizeof(pTableMetaInfo->name));
@@ -1602,10 +1629,18 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
if (vgroupList != NULL) {
size_t size = sizeof(SVgroupsInfo) + sizeof(SCMVgroupInfo) * vgroupList->numOfVgroups;
pTableMetaInfo->vgroupList = malloc(size);
+ if (pTableMetaInfo->vgroupList == NULL) {
+ return NULL;
+ }
+
memcpy(pTableMetaInfo->vgroupList, vgroupList, size);
}
pTableMetaInfo->tagColList = taosArrayInit(4, POINTER_BYTES);
+ if (pTableMetaInfo->tagColList == NULL) {
+ return NULL;
+ }
+
if (pTagCols != NULL) {
tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1);
}
@@ -1671,8 +1706,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
return NULL;
}
- SQueryInfo* pQueryInfo = NULL;
- tscGetQueryInfoDetailSafely(pCmd, 0, &pQueryInfo);
+ SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, 0);
assert(pSql->cmd.clauseIndex == 0);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
@@ -1754,6 +1788,7 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* p
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, int32_t cmd, SSqlObj* pPrevSql) {
SSqlCmd* pCmd = &pSql->cmd;
+
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
if (pNew == NULL) {
tscError("%p new subquery failed, tableIndex:%d", pSql, tableIndex);
@@ -1769,10 +1804,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
pNew->sqlstr = strdup(pSql->sqlstr);
if (pNew->sqlstr == NULL) {
tscError("%p new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql, tableIndex, pTableMetaInfo->vgroupIndex);
-
- free(pNew);
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
- return NULL;
+ goto _error;
}
SSqlCmd* pnCmd = &pNew->cmd;
@@ -1789,9 +1822,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
pnCmd->parseFinished = 1;
if (tscAddSubqueryInfo(pnCmd) != TSDB_CODE_SUCCESS) {
- tscFreeSqlObj(pNew);
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
- return NULL;
+ goto _error;
}
SQueryInfo* pNewQueryInfo = tscGetQueryInfoDetail(pnCmd, 0);
@@ -1816,20 +1848,28 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr;
if (pQueryInfo->groupbyExpr.columnInfo != NULL) {
pNewQueryInfo->groupbyExpr.columnInfo = taosArrayClone(pQueryInfo->groupbyExpr.columnInfo);
+ if (pNewQueryInfo->groupbyExpr.columnInfo == NULL) {
+ terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
}
tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond);
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
pNewQueryInfo->fillVal = malloc(pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
+ if (pNewQueryInfo->fillVal == NULL) {
+ terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
+
memcpy(pNewQueryInfo->fillVal, pQueryInfo->fillVal, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
}
if (tscAllocPayload(pnCmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) {
tscError("%p new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql, tableIndex, pTableMetaInfo->vgroupIndex);
- tscFreeSqlObj(pNew);
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
- return NULL;
+ goto _error;
}
tscColumnListCopy(pNewQueryInfo->colList, pQueryInfo->colList, (int16_t)tableIndex);
@@ -1872,16 +1912,15 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
if (pFinalInfo->pTableMeta == NULL) {
tscError("%p new subquery failed since no tableMeta in cache, name:%s", pSql, name);
- tscFreeSqlObj(pNew);
- if (pPrevSql != NULL) {
+ if (pPrevSql != NULL) { // pass the previous error to client
assert(pPrevSql->res.code != TSDB_CODE_SUCCESS);
terrno = pPrevSql->res.code;
} else {
terrno = TSDB_CODE_TSC_APP_ERROR;
}
- return NULL;
+ goto _error;
}
assert(pNewQueryInfo->numOfTables == 1);
@@ -1906,6 +1945,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
}
return pNew;
+
+_error:
+ tscFreeSqlObj(pNew);
+ return NULL;
}
/**
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 4ffc631566..ef0713c415 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -29,6 +29,7 @@ extern uint16_t tsServerPort;
extern uint16_t tsDnodeShellPort;
extern uint16_t tsDnodeDnodePort;
extern uint16_t tsSyncPort;
+extern uint16_t tsArbitratorPort;
extern int32_t tsStatusInterval;
extern int32_t tsNumOfMnodes;
extern int32_t tsEnableVnodeBak;
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index 2263a5dae1..2a4ac3fc40 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -4,6 +4,7 @@
#include "os.h"
#include "taosmsg.h"
#include "tstoken.h"
+#include "tvariant.h"
typedef struct SDataStatis {
int16_t colId;
@@ -24,10 +25,12 @@ void extractTableName(const char *tableId, char *name);
char* extractDBName(const char *tableId, char *name);
-void extractTableNameFromToken(SSQLToken *pToken, SSQLToken* pTable);
+void extractTableNameFromToken(SStrToken *pToken, SStrToken* pTable);
SSchema tGetTableNameColumnSchema();
+SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const char* name);
+
bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
diff --git a/src/common/inc/tvariant.h b/src/common/inc/tvariant.h
index 4fd6ea5541..e9973bcb95 100644
--- a/src/common/inc/tvariant.h
+++ b/src/common/inc/tvariant.h
@@ -36,7 +36,7 @@ typedef struct tVariant {
};
} tVariant;
-void tVariantCreate(tVariant *pVar, SSQLToken *token);
+void tVariantCreate(tVariant *pVar, SStrToken *token);
void tVariantCreateFromString(tVariant *pVar, char *pz, uint32_t len, uint32_t type);
@@ -46,6 +46,8 @@ void tVariantDestroy(tVariant *pV);
void tVariantAssign(tVariant *pDst, const tVariant *pSrc);
+int32_t tVariantCompare(const tVariant* p1, const tVariant* p2);
+
int32_t tVariantToString(tVariant *pVar, char *dst);
int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool includeLengthPrefix);
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index e9d7a71477..795585e5c9 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -37,6 +37,7 @@ uint16_t tsServerPort = 6030;
uint16_t tsDnodeShellPort = 6030; // udp[6035-6039] tcp[6035]
uint16_t tsDnodeDnodePort = 6035; // udp/tcp
uint16_t tsSyncPort = 6040;
+uint16_t tsArbitratorPort = 6042;
int32_t tsStatusInterval = 1; // second
int32_t tsNumOfMnodes = 3;
int32_t tsEnableVnodeBak = 1;
@@ -54,7 +55,7 @@ int8_t tsDaylight = 0;
char tsTimezone[TSDB_TIMEZONE_LEN] = {0};
char tsLocale[TSDB_LOCALE_LEN] = {0};
char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string
-int32_t tsEnableCoreFile = 1;
+int32_t tsEnableCoreFile = 0;
int32_t tsMaxBinaryDisplayWidth = 30;
/*
@@ -1331,7 +1332,10 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {
*port = atoi(temp+1);
}
- if (*port == 0) *port = tsServerPort;
+ if (*port == 0) {
+ *port = tsServerPort;
+ return -1;
+ }
return 0;
}
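taosGetFqdnPortFromEp keeps falling back to tsServerPort when the endpoint string carries no usable port, but now returns -1 so the caller can tell that the default was substituted. A standalone sketch of the same parsing rule (hypothetical helper; 6030 is assumed as the default, mirroring tsServerPort above):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define DEFAULT_SERVER_PORT 6030

/* split "fqdn[:port]"; return 0 if an explicit port was found, -1 if the default was used */
static int parseEndpoint(const char *ep, char *fqdn, size_t cap, uint16_t *port) {
    *port = 0;
    const char *sep = strchr(ep, ':');
    size_t n = (sep != NULL) ? (size_t)(sep - ep) : strlen(ep);
    if (n >= cap) n = cap - 1;
    memcpy(fqdn, ep, n);
    fqdn[n] = '\0';

    if (sep != NULL) *port = (uint16_t)atoi(sep + 1);
    if (*port == 0) {              /* no usable port: fall back but tell the caller */
        *port = DEFAULT_SERVER_PORT;
        return -1;
    }
    return 0;
}

int main(void) {
    char fqdn[128];
    uint16_t port;
    printf("%d\n", parseEndpoint("node1:6042", fqdn, sizeof(fqdn), &port));  /* 0  */
    printf("%d\n", parseEndpoint("node2", fqdn, sizeof(fqdn), &port));       /* -1 */
    return 0;
}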
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index d80ddb0ee2..01945dbb00 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -4,6 +4,7 @@
#include "tname.h"
#include "tstoken.h"
#include "ttokendef.h"
+#include "tvariant.h"
// todo refactor
UNUSED_FUNC static FORCE_INLINE const char* skipSegments(const char* input, char delim, int32_t num) {
@@ -43,7 +44,30 @@ SSchema tGetTableNameColumnSchema() {
s.bytes = TSDB_TABLE_NAME_LEN - 1 + VARSTR_HEADER_SIZE;
s.type = TSDB_DATA_TYPE_BINARY;
s.colId = TSDB_TBNAME_COLUMN_INDEX;
- strncpy(s.name, TSQL_TBNAME_L, TSDB_COL_NAME_LEN);
+ tstrncpy(s.name, TSQL_TBNAME_L, TSDB_COL_NAME_LEN);
+ return s;
+}
+
+SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const char* name) {
+ SSchema s = {0};
+
+ s.type = pVal->nType;
+ if (s.type == TSDB_DATA_TYPE_BINARY || s.type == TSDB_DATA_TYPE_NCHAR) {
+ s.bytes = (int16_t)(pVal->nLen + VARSTR_HEADER_SIZE);
+ } else {
+ s.bytes = tDataTypeDesc[pVal->nType].nSize;
+ }
+
+ s.colId = TSDB_UD_COLUMN_INDEX;
+ if (name != NULL) {
+ tstrncpy(s.name, name, sizeof(s.name));
+ } else {
+ size_t len = strdequote(exprStr->z);
+ size_t tlen = MIN(sizeof(s.name), len + 1);
+
+ tstrncpy(s.name, exprStr->z, tlen);
+ }
+
return s;
}
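tGetUserSpecifiedColumnSchema sizes the result column from the constant itself: variable-length strings reserve their payload length plus the var-string header, while fixed-width types use the width recorded in tDataTypeDesc. A rough sketch of that sizing rule with assumed widths (the real type table is not reproduced here):

#include <stdio.h>
#include <stdint.h>

#define VARSTR_HEADER_SIZE 2   /* assumed 2-byte length prefix for var-strings */

enum { TYPE_BIGINT, TYPE_DOUBLE, TYPE_BINARY };

static const int16_t fixedSize[] = { 8, 8, 0 };   /* widths of the fixed types above */

/* bytes reserved in the result row for a user-specified constant column */
static int16_t constantColumnBytes(int type, int32_t strLen) {
    if (type == TYPE_BINARY) {
        return (int16_t)(strLen + VARSTR_HEADER_SIZE);
    }
    return fixedSize[type];
}

int main(void) {
    printf("bigint constant : %d bytes\n", constantColumnBytes(TYPE_BIGINT, 0));
    printf("'hello' constant: %d bytes\n", constantColumnBytes(TYPE_BINARY, 5));
    return 0;
}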
@@ -110,7 +134,7 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in
* tablePrefix.columnName
* extract table name and save it in pTable, with only column name in pToken
*/
-void extractTableNameFromToken(SSQLToken* pToken, SSQLToken* pTable) {
+void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) {
const char sep = TS_PATH_DELIMITER[0];
if (pToken == pTable || pToken == NULL || pTable == NULL) {
diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c
index 5460d21252..069d8eb251 100644
--- a/src/common/src/tvariant.c
+++ b/src/common/src/tvariant.c
@@ -12,12 +12,10 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include "os.h"
#include "tvariant.h"
#include "hash.h"
-#include "hashfunc.h"
-#include "os.h"
-#include "hash.h"
#include "taos.h"
#include "taosdef.h"
#include "tstoken.h"
@@ -25,7 +23,7 @@
#include "tutil.h"
// todo support scientific expression number and oct number
-void tVariantCreate(tVariant *pVar, SSQLToken *token) { tVariantCreateFromString(pVar, token->z, token->n, token->type); }
+void tVariantCreate(tVariant *pVar, SStrToken *token) { tVariantCreateFromString(pVar, token->z, token->n, token->type); }
void tVariantCreateFromString(tVariant *pVar, char *pz, uint32_t len, uint32_t type) {
memset(pVar, 0, sizeof(tVariant));
@@ -102,10 +100,9 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32
}
case TSDB_DATA_TYPE_NCHAR: { // here we get the nchar length from raw binary bits length
size_t lenInwchar = len / TSDB_NCHAR_SIZE;
+
pVar->wpz = calloc(1, (lenInwchar + 1) * TSDB_NCHAR_SIZE);
-
- wcsncpy(pVar->wpz, (wchar_t *)pz, lenInwchar);
- pVar->wpz[lenInwchar] = 0;
+ memcpy(pVar->wpz, pz, lenInwchar * TSDB_NCHAR_SIZE);
pVar->nLen = (int32_t)len;
break;
@@ -169,6 +166,50 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
char* n = strdup(p);
taosArrayPush(pDst->arr, &n);
}
+
+ return;
+ }
+
+ pDst->nLen = tDataTypeDesc[pDst->nType].nSize;
+}
+
+int32_t tVariantCompare(const tVariant* p1, const tVariant* p2) {
+ if (p1->nType == TSDB_DATA_TYPE_NULL && p2->nType == TSDB_DATA_TYPE_NULL) {
+ return 0;
+ }
+
+ if (p1->nType == TSDB_DATA_TYPE_NULL) {
+ return -1;
+ }
+
+ if (p2->nType == TSDB_DATA_TYPE_NULL) {
+ return 1;
+ }
+
+ switch (p1->nType) {
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR: {
+ if (p1->nLen == p2->nLen) {
+ return memcmp(p1->pz, p2->pz, p1->nLen);
+ } else {
+ return p1->nLen > p2->nLen? 1:-1;
+ }
+ };
+
+ case TSDB_DATA_TYPE_FLOAT:
+ case TSDB_DATA_TYPE_DOUBLE:
+ if (p1->dKey == p2->dKey) {
+ return 0;
+ } else {
+ return p1->dKey > p2->dKey? 1:-1;
+ }
+
+ default:
+ if (p1->i64Key == p2->i64Key) {
+ return 0;
+ } else {
+ return p1->i64Key > p2->i64Key? 1:-1;
+ }
}
}
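The new tVariantCompare orders NULLs before everything else, compares strings by length and then by bytes, and otherwise compares numerically. A self-contained sketch of the same three-way ordering over a simplified variant (hypothetical struct, not the real tVariant):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

enum { V_NULL, V_INT, V_DOUBLE, V_STR };

typedef struct Variant {
    int         type;
    int64_t     i;
    double      d;
    const char *s;
    int32_t     len;
} Variant;

static int variantCompare(const Variant *p1, const Variant *p2) {
    if (p1->type == V_NULL && p2->type == V_NULL) return 0;
    if (p1->type == V_NULL) return -1;
    if (p2->type == V_NULL) return 1;

    switch (p1->type) {
        case V_STR:                                  /* shorter string sorts first */
            if (p1->len == p2->len) return memcmp(p1->s, p2->s, (size_t)p1->len);
            return p1->len > p2->len ? 1 : -1;
        case V_DOUBLE:
            if (p1->d == p2->d) return 0;
            return p1->d > p2->d ? 1 : -1;
        default:                                     /* integer types */
            if (p1->i == p2->i) return 0;
            return p1->i > p2->i ? 1 : -1;
    }
}

int main(void) {
    Variant a = { .type = V_STR, .s = "abc",  .len = 3 };
    Variant b = { .type = V_STR, .s = "abcd", .len = 4 };
    Variant n = { .type = V_NULL };
    printf("%d %d\n", variantCompare(&a, &b), variantCompare(&n, &a));
    return 0;
}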
@@ -228,7 +269,7 @@ static int32_t doConvertToInteger(tVariant *pVariant, char *pDest, int32_t type,
errno = 0;
char *endPtr = NULL;
- SSQLToken token = {0};
+ SStrToken token = {0};
token.n = tSQLGetToken(pVariant->pz, &token.type);
if (token.type == TK_MINUS || token.type == TK_PLUS) {
@@ -277,7 +318,7 @@ static int32_t doConvertToInteger(tVariant *pVariant, char *pDest, int32_t type,
errno = 0;
wchar_t *endPtr = NULL;
- SSQLToken token = {0};
+ SStrToken token = {0};
token.n = tSQLGetToken(pVariant->pz, &token.type);
if (token.type == TK_MINUS || token.type == TK_PLUS) {
@@ -436,7 +477,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
}
static FORCE_INLINE int32_t convertToDouble(char *pStr, int32_t len, double *value) {
- SSQLToken stoken = {.z = pStr, .n = len};
+ SStrToken stoken = {.z = pStr, .n = len};
if (TK_ILLEGAL == isValidNumber(&stoken)) {
return -1;
@@ -462,7 +503,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
errno = 0;
char *endPtr = NULL;
- SSQLToken token = {0};
+ SStrToken token = {0};
token.n = tSQLGetToken(pVariant->pz, &token.type);
if (token.type == TK_MINUS || token.type == TK_PLUS) {
@@ -479,7 +520,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
return 0;
}
- SSQLToken sToken = {.z = pVariant->pz, .n = pVariant->nLen};
+ SStrToken sToken = {.z = pVariant->pz, .n = pVariant->nLen};
if (TK_ILLEGAL == isValidNumber(&sToken)) {
return -1;
}
@@ -515,7 +556,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
errno = 0;
wchar_t *endPtr = NULL;
- SSQLToken token = {0};
+ SStrToken token = {0};
token.n = tSQLGetToken(pVariant->pz, &token.type);
if (token.type == TK_MINUS || token.type == TK_PLUS) {
diff --git a/src/connector/jdbc/.classpath b/src/connector/jdbc/.classpath
index 39abf1c5e9..a5d95095cc 100644
--- a/src/connector/jdbc/.classpath
+++ b/src/connector/jdbc/.classpath
@@ -28,22 +28,5 @@
- [17 XML classpath entries were deleted in this hunk; their element markup was lost during extraction]
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
index 2ce39b7ee4..86d179eae4 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
@@ -14,6 +14,7 @@
*****************************************************************************/
package com.taosdata.jdbc;
+import java.io.*;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
@@ -30,336 +31,392 @@ import java.sql.SQLXML;
import java.sql.Savepoint;
import java.sql.Statement;
import java.sql.Struct;
-import java.util.Enumeration;
-import java.util.Map;
-import java.util.Properties;
+import java.util.*;
import java.util.concurrent.Executor;
public class TSDBConnection implements Connection {
- private TSDBJNIConnector connector = null;
-
- protected Properties props = null;
-
- private String catalog = null;
-
- private TSDBDatabaseMetaData dbMetaData = null;
-
- private Properties clientInfoProps = new Properties();
-
- private int timeoutMilliseconds = 0;
-
- private String tsCharSet = "";
-
- public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
- this.dbMetaData = meta;
- connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
- Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
- info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
- info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
- }
-
- private void connect(String host, int port, String dbName, String user, String password) throws SQLException {
- this.connector = new TSDBJNIConnector();
- this.connector.connect(host, port, dbName, user, password);
-
- try {
- this.setCatalog(dbName);
- } catch (SQLException e) {
- e.printStackTrace();
- }
-
- this.dbMetaData.setConnection(this);
- }
-
- public TSDBJNIConnector getConnection() {
- return this.connector;
- }
-
- public Statement createStatement() throws SQLException {
- if (!this.connector.isClosed()) {
- return new TSDBStatement(this.connector);
- } else {
- throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
- }
- }
-
- public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException {
- if (this.connector.isClosed()) {
- throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
- }
-
- long id = this.connector.subscribe(topic, sql, restart, 0);
- if (id == 0) {
- throw new SQLException(TSDBConstants.WrapErrMsg("failed to create subscription"));
- }
-
- return new TSDBSubscribe(this.connector, id);
- }
-
- public PreparedStatement prepareStatement(String sql) throws SQLException {
- if (!this.connector.isClosed()) {
- return new TSDBPreparedStatement(this.connector, sql);
- } else {
- throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
- }
- }
-
- public CallableStatement prepareCall(String sql) throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public String nativeSQL(String sql) throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public void setAutoCommit(boolean autoCommit) throws SQLException {
- }
-
- public boolean getAutoCommit() throws SQLException {
- return true;
- }
-
- public void commit() throws SQLException {
- }
-
- public void rollback() throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public void close() throws SQLException {
- if (this.connector != null && !this.connector.isClosed()) {
- this.connector.closeConnection();
- } else {
- throw new SQLException(TSDBConstants.WrapErrMsg("connection is already closed!"));
- }
- }
-
- public boolean isClosed() throws SQLException {
- return this.connector.isClosed();
- }
-
- /**
- * A connection's database is able to provide information describing its tables,
- * its supported SQL grammar, its stored procedures, the capabilities of this
- * connection, etc. This information is made available through a
- * DatabaseMetaData object.
- *
- * @return a DatabaseMetaData object for this connection
- * @exception SQLException
- * if a database access error occurs
- */
- public DatabaseMetaData getMetaData() throws SQLException {
- return this.dbMetaData;
- }
-
- /**
- * This readOnly option is not supported by TDengine. However, the method is intentionally left blank here to
- * support HikariCP connection.
- * @param readOnly
- * @throws SQLException
- */
- public void setReadOnly(boolean readOnly) throws SQLException {
- }
-
- public boolean isReadOnly() throws SQLException {
- return true;
- }
-
- public void setCatalog(String catalog) throws SQLException {
- this.catalog = catalog;
- }
-
- public String getCatalog() throws SQLException {
- return this.catalog;
- }
-
- /**
- * The transaction isolation level option is not supported by TDengine.
- * This method is intentionally left empty to support HikariCP connection.
- * @param level
- * @throws SQLException
- */
- public void setTransactionIsolation(int level) throws SQLException {
- }
-
- /**
- * The transaction isolation level option is not supported by TDengine.
- * @return
- * @throws SQLException
- */
- public int getTransactionIsolation() throws SQLException {
- return Connection.TRANSACTION_NONE;
- }
-
- public SQLWarning getWarnings() throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public void clearWarnings() throws SQLException {
- // left blank to support HikariCP connection
- //todo: implement getWarnings according to the warning messages returned from TDengine
- }
-
- public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
- throws SQLException {
- // This method is implemented in the current way to support Spark
- if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) {
- throw new SQLException(TSDBConstants.INVALID_VARIABLES);
- }
-
- if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) {
- throw new SQLException(TSDBConstants.INVALID_VARIABLES);
- }
-
- return this.prepareStatement(sql);
- }
-
- public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public Map<String, Class<?>> getTypeMap() throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public void setHoldability(int holdability) throws SQLException {
- // intentionally left empty to support druid connection pool.
- }
-
- /**
- * the transaction is not supported by TDengine, so the opened ResultSet Objects will remain open
- * @return
- * @throws SQLException
- */
- public int getHoldability() throws SQLException {
- //intentionally left empty to support HikariCP connection.
- return ResultSet.HOLD_CURSORS_OVER_COMMIT;
- }
-
- public Savepoint setSavepoint() throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public Savepoint setSavepoint(String name) throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public void rollback(Savepoint savepoint) throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public void releaseSavepoint(Savepoint savepoint) throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability)
- throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency,
- int resultSetHoldability) throws SQLException {
- return this.prepareStatement(sql, resultSetType, resultSetConcurrency);
- }
-
- public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency,
- int resultSetHoldability) throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public Clob createClob() throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public Blob createBlob() throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public NClob createNClob() throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public SQLXML createSQLXML() throws SQLException {
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
- }
-
- public boolean isValid(int timeout) throws SQLException {
- return !this.isClosed();
- }
-
- public void setClientInfo(String name, String value) throws SQLClientInfoException {
- clientInfoProps.setProperty(name, value);
- }
-
- public void setClientInfo(Properties properties) throws SQLClientInfoException {
- for (Enumeration