Merge remote-tracking branch 'origin/develop' into feature/crash_gen
Commit a7c9113dbc
@@ -27,7 +27,6 @@ tests/hdfs/
 nmake/
 sln/
 hdfs/
-c/
 taoshebei/
 taosdalipu/
 Target/

@@ -227,6 +227,8 @@ pipeline {
 ./test-all.sh p4
 cd ${WKC}/tests
 ./test-all.sh full jdbc
+cd ${WKC}/tests
+./test-all.sh full unit
 date'''
 }
 }

@@ -108,7 +108,8 @@ mkdir debug && cd debug
 cmake .. && cmake --build .
 ```

-To compile on an ARM processor (aarch64 or aarch32), please add option CPUTYPE as below:
+The TDengine build script can detect the host machine's architecture on the X86-64, X86, arm64, and arm32 platforms.
+You can also specify the CPUTYPE option, such as aarch64 or aarch32, if the detection result is not correct:

 aarch64:
 ```bash

@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
 #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
 #INSTALL(TARGETS shell RUNTIME DESTINATION .)
 IF (TD_MVN_INSTALLED)
-INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.20-dist.jar DESTINATION connector/jdbc)
+INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.21-dist.jar DESTINATION connector/jdbc)
 ENDIF ()
 ELSEIF (TD_DARWIN)
 SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")

@@ -78,29 +78,58 @@ ELSE()
 EXIT ()
 ENDIF ()

-# if generate ARM version:
-# cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
-IF (${CPUTYPE} MATCHES "aarch32")
+IF ("${CPUTYPE}" STREQUAL "")
+MESSAGE(STATUS "The current platform " ${CMAKE_SYSTEM_PROCESSOR} " is detected")
+IF (CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64)|(AMD64)")
+MESSAGE(STATUS "The current platform is amd64")
+MESSAGE(STATUS "Set CPUTYPE to x64")
+SET(CPUTYPE "x64")
+ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)")
+MESSAGE(STATUS "The current platform is x86")
+MESSAGE(STATUS "Set CPUTYPE to x86")
+SET(CPUTYPE "x32")
+ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "armv7l")
+MESSAGE(STATUS "Set CPUTYPE to aarch32")
+SET(CPUTYPE "aarch32")
+MESSAGE(STATUS "Set CPUTYPE to aarch32")
+SET(TD_LINUX TRUE)
+SET(TD_LINUX_32 FALSE)
+SET(TD_ARM_32 TRUE)
+ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
+SET(CPUTYPE "aarch64")
+MESSAGE(STATUS "Set CPUTYPE to aarch64")
+SET(TD_LINUX TRUE)
+SET(TD_LINUX_64 FALSE)
+SET(TD_ARM_64 TRUE)
+ENDIF ()
+
+ELSE ()
+# if generate ARM version:
+# cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
+IF (${CPUTYPE} MATCHES "aarch32")
 SET(TD_LINUX TRUE)
 SET(TD_LINUX_32 FALSE)
 SET(TD_ARM_32 TRUE)
 MESSAGE(STATUS "input cpuType: aarch32")
 ELSEIF (${CPUTYPE} MATCHES "aarch64")
 SET(TD_LINUX TRUE)
 SET(TD_LINUX_64 FALSE)
 SET(TD_ARM_64 TRUE)
 MESSAGE(STATUS "input cpuType: aarch64")
 ELSEIF (${CPUTYPE} MATCHES "mips64")
 SET(TD_LINUX TRUE)
 SET(TD_LINUX_64 FALSE)
 SET(TD_MIPS_64 TRUE)
 MESSAGE(STATUS "input cpuType: mips64")
 ELSEIF (${CPUTYPE} MATCHES "x64")
 MESSAGE(STATUS "input cpuType: x64")
 ELSEIF (${CPUTYPE} MATCHES "x86")
 MESSAGE(STATUS "input cpuType: x86")
 ELSE ()
 MESSAGE(STATUS "input cpuType unknown " ${CPUTYPE})
+ENDIF ()

 ENDIF ()

 # cmake -DOSTYPE=Ningsi

@@ -1,5 +1,5 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | Taos Data</title><meta name='description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.'><meta name='keywords' content='TDengine, Big Data, Open Source, IoT, Connected Cars, Industrial IoT, time-series database, caching, stream computing, message queuing, IT infrastructure monitoring, application performance monitoring, Internet of Things,TAOS Data, Documentation, programming, coding, syntax, frequently asked questions, questions, faq'><meta name='title' content='Documentation | Taos Data'><meta property='og:site_name' content='Taos Data'/><meta property='og:title' content='Documentation | Taos Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/en/documentation/faq/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.' /><link rel='canonical' href='https://www.taosdata.com/en/documentation/faq/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>FAQ</h1>
-<h4>1. When encoutered with the error "failed to connect to server", what can I do?</h4>
+<h4>1. When encountered with the error "failed to connect to server", what can I do?</h4>
 <p>The client may encounter connection errors. Please follow the steps below for troubleshooting:</p>
 <ol>
 <li>On the server side, execute <code>systemctl status taosd</code> to check the status of <em>taosd</em> service. If <em>taosd</em> is not running, start it and retry connecting.</li>

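As a quick sanity check alongside the troubleshooting steps above, the C client can report the reason for a failed connection directly. The following is a minimal sketch, not part of this commit; the host name and credentials are placeholders, and the use of `taos_errstr(NULL)` for connection errors is an assumption about the client library's behavior.

```c
#include <stdio.h>
#include <taos.h>  // TDengine C client header

int main(void) {
    taos_init();
    // Placeholder endpoint and credentials -- replace with your own values.
    TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
    if (conn == NULL) {
        // taos_errstr(NULL) is expected to describe the last connection error.
        printf("failed to connect to server, reason: %s\n", taos_errstr(NULL));
        taos_cleanup();
        return 1;
    }
    printf("connected\n");
    taos_close(conn);
    taos_cleanup();
    return 0;
}
```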
@@ -31,6 +31,20 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data
 * [Create a STable](/model#create-stable): create a super table (STable) for each type of data collection point
 * [Create tables](/model#create-table): use the STable as a template and create a separate table for each concrete data collection point

+## [TAOS SQL](/taos-sql)
+
+* [Supported data types](/taos-sql#data-type): timestamp, integer, floating point, boolean, character, and other data types
+* [Database management](/taos-sql#management): create, drop, and show databases
+* [Table management](/taos-sql#table): create, drop, show, and alter tables
+* [STable management](/taos-sql#super-table): create, drop, show, and alter super tables
+* [Tag management](/taos-sql#tags): add, drop, and modify tags
+* [Data ingestion](/taos-sql#insert): single or multiple records into one table, multiple records into multiple tables, and historical data
+* [Data querying](/taos-sql#select): time ranges, value filters, sorting, manual paging of query results, and more
+* [SQL functions](/taos-sql#functions): aggregation, selection, and computation functions such as avg, min, diff
+* [Time-dimension aggregation](/taos-sql#aggregation): split table data into time windows and aggregate them for down-sampling
+* [Boundary limits](/taos-sql#limitation): limits on databases, tables, SQL, and more
+* [Error codes](/taos-sql/error-code): TDengine 2.0 error codes and their decimal values
+
 ## [Efficient data ingestion](/insert)

 * [Ingestion via SQL](/insert#sql): use SQL insert statements to write one or more records into one or more tables

@@ -94,20 +108,6 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data
 * [Directory structure](/administrator#directories): where TDengine data files, configuration files, and so on are located
 * [Parameter limits and reserved keywords](/administrator#keywords): TDengine parameter limits and the list of reserved keywords

-## [TAOS SQL](/taos-sql)
-
-* [Supported data types](/taos-sql#data-type): timestamp, integer, floating point, boolean, character, and other data types
-* [Database management](/taos-sql#management): create, drop, and show databases
-* [Table management](/taos-sql#table): create, drop, show, and alter tables
-* [STable management](/taos-sql#super-table): create, drop, show, and alter super tables
-* [Tag management](/taos-sql#tags): add, drop, and modify tags
-* [Data ingestion](/taos-sql#insert): single or multiple records into one table, multiple records into multiple tables, and historical data
-* [Data querying](/taos-sql#select): time ranges, value filters, sorting, manual paging of query results, and more
-* [SQL functions](/taos-sql#functions): aggregation, selection, and computation functions such as avg, min, diff
-* [Time-dimension aggregation](/taos-sql#aggregation): split table data into time windows and aggregate them for down-sampling
-* [Boundary limits](/taos-sql#limitation): limits on databases, tables, SQL, and more
-* [Error codes](/taos-sql/error-code): TDengine 2.0 error codes and their decimal values
-
 ## TDengine's technical design

 * [System modules](/architecture/taosd): taosd's functions and module partitioning

@@ -119,6 +119,8 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data
 * [TDengine sample data import tool](https://www.taosdata.com/blog/2020/01/18/1166.html)
 * [TDengine performance comparison test tool](https://www.taosdata.com/blog/2020/01/18/1166.html)
 * [Using TDengine visually from the IDEA database management tool](https://www.taosdata.com/blog/2020/08/27/1767.html)
+* [A cross-platform TDengine GUI management tool built on Electron](https://github.com/skye0207/TDengineGUI)
+* [DataX, an offline data collection/synchronization tool that supports TDengine](https://github.com/alibaba/DataX)

 ## Comparison tests between TDengine and other databases

@@ -178,15 +178,15 @@ The logical structure of the TDengine distributed architecture is as follows:

 **FQDN configuration**: A data node has one or more FQDNs, which can be specified with the "fqdn" parameter in the system configuration file taos.cfg; if it is not specified, the system automatically uses the machine's hostname as its FQDN. If a node cannot be given an FQDN, its fqdn parameter can simply be set to its IP address. However, IP addresses are not recommended, because they can change, and once they do the cluster stops working. The End Point (EP) of a data node is FQDN + Port. When FQDNs are used, DNS must work properly, or the hosts file must be configured on the node and on the machines where applications run.

-**Port configuration:** The externally facing port of a data node is determined by the system configuration parameter serverPort, and the port for communication inside the cluster is serverPort+5. Data replication between data nodes in the cluster occupies one more TCP port, serverPort+10. To handle UDP data efficiently with multiple threads, each internal and external UDP connection occupies 5 consecutive ports, so the total port range of a data node is serverPort to serverPort+10, i.e. 11 TCP/UDP ports. Make sure the firewall keeps these ports open. Each data node can be configured with a different serverPort.
+**Port configuration:** The externally facing port of a data node is determined by the system configuration parameter serverPort, and the port for communication inside the cluster is serverPort+5. Data replication between data nodes in the cluster occupies one more TCP port, serverPort+10. To handle UDP data efficiently with multiple threads, each internal and external UDP connection occupies 5 consecutive ports, so the total port range of a data node is serverPort to serverPort+10, i.e. 11 TCP/UDP ports. (There may also be ports used by the RESTful service and the Arbitrator, in which case there are 13 in total.) Make sure the firewall keeps these ports open for use. Each data node can be configured with a different serverPort.

 **External cluster connection:** A TDengine cluster can hold one, several, or even thousands of data nodes. An application only needs to connect to any one data node in the cluster; the network parameter to provide is the End Point of a data node (FQDN plus the configured port number). When starting the CLI application taos from the command line, the data node's FQDN can be given with -h and its configured port with -P; if the port is not configured, the system configuration parameter serverPort is used.

-**Intra-cluster communication**: Data nodes connect to each other over TCP/UDP. When a data node starts, it obtains the EP information of the dnode where the mnode resides, then connects to the mnode and exchanges information. Obtaining the mnode EP information takes three steps: 1. check whether the mnodeEpList file exists; if it does not exist or cannot be opened to obtain the mnode EP information, go to step 2; 2. check the system configuration file taos.cfg for the node configuration parameters first and second (the nodes specified by these parameters may be ordinary nodes without an mnode, in which case the node, when connected, will try to redirect to the mnode node); if these parameters are missing from taos.cfg or are invalid, go to step 3; 3. set its own EP as the mnode EP and run independently. After obtaining the mnode EP list, the data node initiates a connection; if the connection succeeds, it joins the working cluster; if not, it tries the next entry in the mnode EP list. If all entries have been tried and the connection still fails, it sleeps for a few seconds and tries again.
+**Intra-cluster communication**: Data nodes connect to each other over TCP/UDP. When a data node starts, it obtains the EP information of the dnode where the mnode resides, then connects to the mnode and exchanges information. Obtaining the mnode EP information takes three steps: 1. check whether the mnodeEpSet file exists; if it does not exist or cannot be opened to obtain the mnode EP information, go to step 2; 2. check the system configuration file taos.cfg for the node configuration parameters firstEp and secondEp (the nodes specified by these parameters may be ordinary nodes without an mnode, in which case the node, when connected, will try to redirect to the mnode node); if these parameters are missing from taos.cfg or are invalid, go to step 3; 3. set its own EP as the mnode EP and run independently. After obtaining the mnode EP list, the data node initiates a connection; if the connection succeeds, it joins the working cluster; if not, it tries the next entry in the mnode EP list. If all entries have been tried and the connection still fails, it sleeps for a few seconds and tries again.

 **Choosing the mnode:** TDengine logically has a management node, but there is no separate executable for it; the server side has only one executable, taosd. Which data node becomes the management node is decided automatically by the system, without any manual intervention. The principle: when a data node starts, it checks its own End Point against the mnode EP List it obtained; if it is in the list, the data node considers that it should start the mnode module and become an mnode; otherwise it does not start the mnode module. During operation, because of load balancing, outages, and so on, the mnode may migrate to a new dnode, but this is all transparent: no manual intervention is needed, and changes of configuration parameters are decisions the mnode makes by itself based on resources.

-**Adding a new data node**: Once the system has one data node, it is already a working system. Adding a new node to the cluster takes two steps. Step 1: use the TDengine CLI to connect to an existing working data node and add the new data node's End Point with the "create dnode" command. Step 2: in the new data node's system configuration file taos.cfg, set the first and second parameters to the EPs of any two data nodes already in the cluster. See the user manual for the detailed steps. In this way the cluster is built up step by step.
+**Adding a new data node**: Once the system has one data node, it is already a working system. Adding a new node to the cluster takes two steps. Step 1: use the TDengine CLI to connect to an existing working data node and add the new data node's End Point with the "create dnode" command. Step 2: in the new data node's system configuration file taos.cfg, set the firstEp and secondEp parameters to the EPs of any two data nodes already in the cluster. See the user manual for the detailed steps. In this way the cluster is built up step by step.

 **Redirection**: Whether it is a dnode or taosc, it must first connect to the mnode, but the mnode is created and maintained automatically by the system, so the user does not know which dnode is running the mnode. TDengine only requires connecting to any working dnode in the system. Since every running dnode maintains the list of currently running mnode EPs, when it receives a connection request from a newly started dnode or from taosc and it is not an mnode itself, it replies with the mnode EP List; on receiving this list, taosc or the newly started dnode retries the connection. When the mnode EP List changes, the data nodes quickly obtain the latest list through inter-node messaging and notify taosc.

@@ -209,7 +209,7 @@ The C/C++ API is similar to MySQL's C API. To use it, an application needs to include the TDengine

 - `TAOS_RES* taos_query(TAOS *taos, const char *sql)`

-This API executes an SQL statement, which can be a DQL, DML, or DDL statement. The `taos` parameter is the pointer obtained from `taos_connect`. A NULL return value indicates failure.
+This API executes an SQL statement, which can be a DQL, DML, or DDL statement. The `taos` parameter is the pointer obtained from `taos_connect`. Whether execution failed cannot be judged by checking whether the return value is NULL; instead, use the `taos_errno` function to read the error code from the result set.

 - `int taos_result_precision(TAOS_RES *res)`

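To illustrate the note added above, here is a hedged sketch of how a caller checks the result of `taos_query` with `taos_errno` rather than testing for NULL. It assumes a `TAOS *conn` already opened with `taos_connect` (as in the earlier FAQ sketch); it is an illustration, not code from this commit.

```c
#include <stdio.h>
#include <taos.h>

// Run one statement and report errors via taos_errno/taos_errstr, since a
// non-NULL TAOS_RES does not by itself mean the statement succeeded.
static int run_sql(TAOS *conn, const char *sql) {
    TAOS_RES *res = taos_query(conn, sql);
    int code = taos_errno(res);
    if (code != 0) {
        fprintf(stderr, "query failed: %s (code 0x%x)\n", taos_errstr(res), code);
        taos_free_result(res);
        return -1;
    }
    printf("affected rows: %d\n", taos_affected_rows(res));
    taos_free_result(res);
    return 0;
}
```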
@@ -591,7 +591,8 @@ curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql
 ```json
 {
 "status": "succ",
-"head": ["Time Stamp","current", …],
+"head": ["ts","current", …],
+"column_meta": [["ts",9,8],["current",6,4], …],
 "data": [
 ["2018-10-03 14:38:05.000", 10.3, …],
 ["2018-10-03 14:38:15.000", 12.6, …]

@@ -602,10 +603,23 @@ curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql

 Notes:

-- status: indicates whether the operation succeeded or failed
-- head: the table's schema; if no result set is returned, there is only a single "affected_rows" column
-- data: the actual returned data, presented row by row; if no result set is returned, it is just [[affected_rows]]
-- rows: the total number of rows of data
+- status: indicates whether the operation succeeded or failed.
+- head: the table's schema; if no result set is returned, there is only a single "affected_rows" column. (Since version 2.0.17, it is recommended not to rely on the head field to determine the data types of the columns; use column_meta instead. The head field may be removed from the response in a future version.)
+- column_meta: added to the response since version 2.0.17 to describe the data type of each column in data. Each column is described by three values: column name, column type, and type length. For example, `["current",6,4]` means the column name is "current", the column type is 6, i.e. float, and the type length is 4, i.e. a float represented by 4 bytes. For binary or nchar columns, the type length indicates the maximum content length the column can store, not the actual length of the data in this response; for nchar columns, the type length is the number of unicode characters that can be stored, not bytes.
+- data: the actual returned data, presented row by row; if no result set is returned, it is just [[affected_rows]]. The order of the columns in each row of data is exactly the same as the order in which column_meta describes the columns.
+- rows: the total number of rows of data.
+
+Column types in column_meta:
+* 1: BOOL
+* 2: TINYINT
+* 3: SMALLINT
+* 4: INT
+* 5: BIGINT
+* 6: FLOAT
+* 7: DOUBLE
+* 8: BINARY
+* 9: TIMESTAMP
+* 10: NCHAR

 ### Custom authorization code

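The numeric type codes in the list above map naturally onto a small lookup helper. The following sketch is only an illustration of that table, not an API provided by TDengine.

```c
// Map a column_meta type code (the second element of each column entry)
// to a readable name, following the list above.
static const char *rest_col_type_name(int type_code) {
    switch (type_code) {
        case 1:  return "BOOL";
        case 2:  return "TINYINT";
        case 3:  return "SMALLINT";
        case 4:  return "INT";
        case 5:  return "BIGINT";
        case 6:  return "FLOAT";
        case 7:  return "DOUBLE";
        case 8:  return "BINARY";
        case 9:  return "TIMESTAMP";
        case 10: return "NCHAR";
        default: return "UNKNOWN";
    }
}
```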
@@ -651,7 +665,8 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
 ```json
 {
 "status": "succ",
-"head": ["Time Stamp","current","voltage","phase"],
+"head": ["ts","current","voltage","phase"],
+"column_meta": [["ts",9,8],["current",6,4],["voltage",4,4],["phase",6,4]],
 "data": [
 ["2018-10-03 14:38:05.000",10.3,219,0.31],
 ["2018-10-03 14:38:15.000",12.6,218,0.33]

@@ -671,8 +686,9 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 19
 {
 "status": "succ",
 "head": ["affected_rows"],
+"column_meta": [["affected_rows",4,4]],
 "data": [[1]],
-"rows": 1,
+"rows": 1
 }
 ```

@@ -691,7 +707,8 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
 ```json
 {
 "status": "succ",
-"head": ["column1","column2","column3"],
+"head": ["ts","current","voltage","phase"],
+"column_meta": [["ts",9,8],["current",6,4],["voltage",4,4],["phase",6,4]],
 "data": [
 [1538548685000,10.3,219,0.31],
 [1538548695000,12.6,218,0.33]

@@ -712,7 +729,8 @@ When the HTTP request URL uses `sqlutc`, the timestamps in the returned result set are in UTC
 ```json
 {
 "status": "succ",
-"head": ["column1","column2","column3"],
+"head": ["ts","current","voltage","phase"],
+"column_meta": [["ts",9,8],["current",6,4],["voltage",4,4],["phase",6,4]],
 "data": [
 ["2018-10-03T14:38:05.000+0800",10.3,219,0.31],
 ["2018-10-03T14:38:15.000+0800",12.6,218,0.33]

@@ -726,7 +744,7 @@ When the HTTP request URL uses `sqlutc`, the timestamps in the returned result set are in UTC
 The following lists only the configuration parameters related to the RESTful interface; see the notes in the configuration file for the other system parameters. Note: after the configuration is changed, the taosd service must be restarted for it to take effect.

 - httpPort: the port on which the RESTful service is exposed, bound to 6041 by default
-- httpMaxThreads: the number of threads to start, 2 by default
+- httpMaxThreads: the number of threads to start, 2 by default (since version 2.0.17, the default is half the number of CPU cores, rounded down)
 - restfulRowLimit: the maximum number of rows in the returned result set (JSON format), 10240 by default
 - httpEnableCompress: whether compression is supported; not supported by default; currently TDengine only supports the gzip format
 - httpDebugFlag: log switch; 131: errors and alarms only, 135: debug information, 143: very detailed debug information; 131 by default

@@ -155,11 +155,3 @@ The TDengine client does not yet support the following functions:
 - dbExistsTable(conn, "test"): whether the table test exists
 - dbListTables(conn): list all tables in the connection

-
-## <a class="anchor" id="datax"></a>DataX
-
-[DataX](https://github.com/alibaba/DataX) is a general-purpose offline data collection/synchronization tool open-sourced by Alibaba Group; it can read data from and write data to TDengine simply and efficiently.
-
-* For integrating data reads, see the [TSDBReader plugin documentation](https://github.com/alibaba/DataX/blob/master/tsdbreader/doc/tsdbreader.md)
-* For integrating data writes, see the [TSDBWriter plugin documentation](https://github.com/alibaba/DataX/blob/master/tsdbwriter/doc/tsdbhttpwriter.md)
-

@@ -13,7 +13,7 @@ Cluster management in TDengine is extremely simple: apart from adding and removing nodes, which require manual intervention
 **Step 0**: Plan the FQDNs of all physical nodes in the cluster, add each planned FQDN to the /etc/hostname of its physical node, and modify /etc/hosts on every physical node so that the IP-to-FQDN mappings of all cluster nodes are present. [If DNS is deployed, contact your network administrator to configure it on the DNS server.]

 **Step 1**: If a physical node used for the cluster contains earlier test data, or has had a 1.x version or another version of TDengine installed, remove it first and wipe all data; for the detailed steps see the blog post [Installation and uninstallation of the various TDengine packages](https://www.taosdata.com/blog/2019/08/09/566.html )
-**Note 1:** Because FQDN information is written into files, if the FQDN was not configured or was changed while TDengine was already started, make sure the data is useless or has been backed up, and then clean up the previous data (rm -rf /var/lib/taos/);
+**Note 1:** Because FQDN information is written into files, if the FQDN was not configured or was changed while TDengine was already started, make sure the data is useless or has been backed up, and then clean up the previous data (`rm -rf /var/lib/taos/*`);
 **Note 2:** The client must also be configured so that it can correctly resolve the FQDN of every node, whether through a DNS service or the hosts file.

 **Step 2**: It is recommended to close the firewall on all physical nodes, or at least make sure TCP and UDP ports 6030-6042 are open. It is **strongly recommended** to close the firewall first and configure the ports only after the cluster has been set up;

@@ -432,60 +432,62 @@ All TDengine executables are stored under the _/usr/local/taos/bin_ directory by default

 ## <a class="anchor" id="keywords"></a>TDengine parameter limits and reserved keywords

 - Database name: must not contain "." or special characters, and must not exceed 32 characters
 - Table name: must not contain "." or special characters, and together with the database name it belongs to must not exceed 192 characters
 - Column name: must not contain special characters, and must not exceed 64 characters
 - Database names, table names, and column names must not start with a digit
 - Number of columns in a table: must not exceed 1024
-- Maximum record length: including the 8-byte timestamp, must not exceed 16KB
+- Maximum record length: including the 8-byte timestamp, must not exceed 16KB (each BINARY/NCHAR column additionally occupies 2 bytes of storage)
 - Default maximum length of a single SQL statement: 65480 bytes
 - Number of database replicas: must not exceed 3
 - User name: must not exceed 23 bytes
 - User password: must not exceed 15 bytes
 - Number of tags: must not exceed 128
 - Total tag length: must not exceed 16K bytes
 - Number of records: limited only by storage space
 - Number of tables: limited only by the number of nodes
 - Number of databases: limited only by the number of nodes
 - Number of virtual nodes per database: must not exceed 64

 TDengine currently has nearly 200 internal reserved keywords. Regardless of case, they cannot be used as database names, table names, STable names, data column names, or tag column names. The keywords are listed below:

 | Keyword list | | | | |
 | ---------- | ----------- | ------------ | ---------- | --------- |
-| ABLOCKS | CONNECTION | GROUP | MINUS | SLASH |
-| ABORT | CONNECTIONS | GT | MNODES | SLIDING |
-| ACCOUNT | COPY | ID | MODULES | SMALLINT |
-| ACCOUNTS | COUNT | IF | NCHAR | SPREAD |
-| ADD | CREATE | IGNORE | NE | STABLE |
-| AFTER | CTIME | IMMEDIATE | NONE | STABLES |
-| ALL | DATABASE | IMPORT | NOT | STAR |
-| ALTER | DATABASES | IN | NOTNULL | STATEMENT |
-| AND | DAYS | INITIALLY | NOW | STDDEV |
-| AS | DEFERRED | INSERT | OF | STREAM |
-| ASC | DELIMITERS | INSTEAD | OFFSET | STREAMS |
-| ATTACH | DESC | INTEGER | OR | STRING |
-| AVG | DESCRIBE | INTERVAL | ORDER | SUM |
-| BEFORE | DETACH | INTO | PASS | TABLE |
-| BEGIN | DIFF | IP | PERCENTILE | TABLES |
-| BETWEEN | DISTINCT | IS | PLUS | TAG |
-| BIGINT | DIVIDE | ISNULL | PRAGMA | TAGS |
-| BINARY | DNODE | JOIN | PREV | TBLOCKS |
-| BITAND | DNODES | KEEP | PRIVILEGE | TBNAME |
-| BITNOT | DOT | KEY | QUERIES | TIMES |
-| BITOR | DOUBLE | KILL | QUERY | TIMESTAMP |
-| BOOL | DROP | LAST | RAISE | TINYINT |
-| BOTTOM | EACH | LE | REM | TOP |
-| BY | END | LEASTSQUARES | REPLACE | TRIGGER |
-| CACHE | EQ | LIKE | REPLICA | UMINUS |
-| CASCADE | EXISTS | LIMIT | RESET | UPLUS |
-| CHANGE | EXPLAIN | LINEAR | RESTRICT | USE |
-| CLOG | FAIL | LOCAL | ROW | USER |
-| CLUSTER | FILL | LP | ROWS | USERS |
-| COLON | FIRST | LSHIFT | RP | USING |
-| COLUMN | FLOAT | LT | RSHIFT | VALUES |
-| COMMA | FOR | MATCH | SCORES | VARIABLE |
-| COMP | FROM | MAX | SELECT | VGROUPS |
-| CONCAT | GE | METRIC | SEMI | VIEW |
-| CONFIGS | GLOB | METRICS | SET | WAVG |
-| CONFLICT | GRANTS | MIN | SHOW | WHERE |
+| ABLOCKS | CONNECTIONS | GT | MNODES | SLIDING |
+| ABORT | COPY | ID | MODULES | SLIMIT |
+| ACCOUNT | COUNT | IF | NCHAR | SMALLINT |
+| ACCOUNTS | CREATE | IGNORE | NE | SPREAD |
+| ADD | CTIME | IMMEDIATE | NONE | STABLE |
+| AFTER | DATABASE | IMPORT | NOT | STABLES |
+| ALL | DATABASES | IN | NOTNULL | STAR |
+| ALTER | DAYS | INITIALLY | NOW | STATEMENT |
+| AND | DEFERRED | INSERT | OF | STDDEV |
+| AS | DELIMITERS | INSTEAD | OFFSET | STREAM |
+| ASC | DESC | INTEGER | OR | STREAMS |
+| ATTACH | DESCRIBE | INTERVAL | ORDER | STRING |
+| AVG | DETACH | INTO | PASS | SUM |
+| BEFORE | DIFF | IP | PERCENTILE | TABLE |
+| BEGIN | DISTINCT | IS | PLUS | TABLES |
+| BETWEEN | DIVIDE | ISNULL | PRAGMA | TAG |
+| BIGINT | DNODE | JOIN | PREV | TAGS |
+| BINARY | DNODES | KEEP | PRIVILEGE | TBLOCKS |
+| BITAND | DOT | KEY | QUERIES | TBNAME |
+| BITNOT | DOUBLE | KILL | QUERY | TIMES |
+| BITOR | DROP | LAST | RAISE | TIMESTAMP |
+| BOOL | EACH | LE | REM | TINYINT |
+| BOTTOM | END | LEASTSQUARES | REPLACE | TOP |
+| BY | EQ | LIKE | REPLICA | TRIGGER |
+| CACHE | EXISTS | LIMIT | RESET | UMINUS |
+| CASCADE | EXPLAIN | LINEAR | RESTRICT | UPLUS |
+| CHANGE | FAIL | LOCAL | ROW | USE |
+| CLOG | FILL | LP | ROWS | USER |
+| CLUSTER | FIRST | LSHIFT | RP | USERS |
+| COLON | FLOAT | LT | RSHIFT | USING |
+| COLUMN | FOR | MATCH | SCORES | VALUES |
+| COMMA | FROM | MAX | SELECT | VARIABLE |
+| COMP | GE | METRIC | SEMI | VGROUPS |
+| CONCAT | GLOB | METRICS | SET | VIEW |
+| CONFIGS | GRANTS | MIN | SHOW | WAVG |
+| CONFLICT | GROUP | MINUS | SLASH | WHERE |
+| CONNECTION | | | | |

@@ -1,17 +1,19 @@
 # TAOS SQL

 This document describes the syntax rules supported by TAOS SQL, its main query features, the supported SQL query functions, and common techniques. Reading this document requires a basic knowledge of SQL.

 TAOS SQL is the main tool for users to write data to and query data from TDengine. To help users get started quickly, TAOS SQL provides, to some extent, a style and pattern similar to standard SQL. Strictly speaking, TAOS SQL is not and does not try to provide the SQL standard syntax. In addition, because TDengine does not offer deletion for the time-series structured data it targets, TAOS SQL does not provide any data deletion features.

+TAOS SQL does not support abbreviations of keywords; for example, DESCRIBE cannot be shortened to DESC.
+
 This section's SQL syntax follows the conventions below:

 - Content inside < > must be entered by the user, but do not type the <> themselves
 - [ ] means the content is optional, but do not type the [] themselves
 - | means choose one of the listed options, but do not type the | itself
 - … means the preceding item can be repeated multiple times

 To better illustrate the rules and characteristics of the SQL syntax, this document assumes an example data set. Take smart meters (meters) as an example: suppose each smart meter collects three quantities, current, voltage, and phase. It is modeled as follows:
 ```mysql
 taos> DESCRIBE meters;
 Field | Type | Length | Note |

@@ -23,7 +25,7 @@ taos> DESCRIBE meters;
 location | BINARY | 64 | TAG |
 groupid | INT | 4 | TAG |
 ```
 The data set contains data from 4 smart meters, which, following TDengine's modeling rules, correspond to 4 subtables named d1001, d1002, d1003, and d1004.

 ## <a class="anchor" id="data-type"></a>Supported data types

@@ -142,15 +144,15 @@ TDengine's default timestamp precision is milliseconds, but it can be changed with the configuration parameter enableMic
 ```
 Notes:

 1) The first column of a table must be a TIMESTAMP, and the system automatically makes it the primary key;

 2) The maximum table name length is 192;

-3) Each row of a table must not exceed 16k characters;
+3) Each row of a table must not exceed 16k characters; (note: each BINARY/NCHAR column additionally occupies 2 bytes of storage)

 4) Subtable names may only contain letters, digits, and underscores, and must not start with a digit

 5) When using the binary or nchar data types, the maximum number of bytes must be specified; for example binary(20) means 20 bytes;

 - **Creating a table using a STable as a template**

@@ -402,8 +404,8 @@ SELECT select_expr [, select_expr ...]
 FROM {tb_name_list}
 [WHERE where_condition]
 [INTERVAL (interval_val [, interval_offset])]
+[SLIDING sliding_val]
 [FILL fill_val]
-[SLIDING fill_val]
 [GROUP BY col_list]
 [ORDER BY col_list { DESC | ASC }]
 [SLIMIT limit_val [, SOFFSET offset_val]]

@@ -619,27 +621,30 @@ taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
 Query OK, 1 row(s) in set (0.001091s)
 ```

 - Use * to return all columns, or specify column names. Arithmetic can be applied to numeric columns, and output columns can be given aliases
 - The WHERE clause can filter numeric values with various logical comparisons, or filter strings with wildcards
 - By default, results are sorted by the first column (the timestamp) in ascending order, but descending order can be specified (_c0 refers to the first, timestamp column). Using ORDER BY on other fields is an illegal operation.
 - The LIMIT parameter controls the number of output rows, and OFFSET specifies from which row output starts. LIMIT/OFFSET is applied to the result set after ORDER BY.
+- The SLIMIT parameter controls the number of output rows for each group partitioned by the GROUP BY clause.
 - Results can be exported to a specified file via ">>"

 ### Supported filtering operations

 | Operation | Note | Applicable Data Types |
-| --------- | ----------------------------- | ------------------------------------- |
+| ----------- | ----------------------------- | ------------------------------------- |
 | > | larger than | **`timestamp`** and all numeric types |
 | < | smaller than | **`timestamp`** and all numeric types |
 | >= | larger than or equal to | **`timestamp`** and all numeric types |
 | <= | smaller than or equal to | **`timestamp`** and all numeric types |
 | = | equal to | all types |
 | <> | not equal to | all types |
+| between and | within a certain range | **`timestamp`** and all numeric types |
 | % | match with any char sequences | **`binary`** **`nchar`** |
 | _ | match with a single char | **`binary`** **`nchar`** |

 1. To filter on ranges of several fields at the same time, the AND keyword must be used to join the query conditions; filter conditions on different columns joined with OR are not supported yet.
-2. For filtering on a single field, only one time-filter condition may be set in a statement; but for other (ordinary) columns or tag columns, the ``` OR``` keyword can be used to combine filter conditions. For example: ((value > 20 and value < 30) OR (value < 12)).
+2. For filtering on a single field, only one time-filter condition may be set in a statement; but for other (ordinary) columns or tag columns, the `OR` keyword can be used to combine filter conditions. For example: ((value > 20 AND value < 30) OR (value < 12)).
+3. Since version 2.0.17, filtering supports the BETWEEN AND syntax; for example `WHERE col2 BETWEEN 1.5 AND 3.25` means the condition "1.5 ≤ col2 ≤ 3.25".

 ### SQL examples

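For reference, a small C-client sketch of the BETWEEN AND filter described in item 3 above; the table and column names follow the meters example used throughout this document, and the snippet is illustrative only (it assumes a `TAOS *conn` opened with `taos_connect`).

```c
#include <stdio.h>
#include <taos.h>

// Fetch rows matching a BETWEEN AND filter (supported since 2.0.17)
// and print them with taos_print_row.
static void select_between(TAOS *conn) {
    TAOS_RES *res = taos_query(conn, "SELECT * FROM meters WHERE current BETWEEN 10 AND 12");
    if (taos_errno(res) != 0) {
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
        taos_free_result(res);
        return;
    }
    int         nfields = taos_num_fields(res);
    TAOS_FIELD *fields  = taos_fetch_fields(res);
    TAOS_ROW    row;
    char        line[1024];
    while ((row = taos_fetch_row(res)) != NULL) {
        taos_print_row(line, row, fields, nfields);
        printf("%s\n", line);
    }
    taos_free_result(res);
}
```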
@@ -1160,17 +1165,20 @@ TDengine supports aggregation by time window: data in a table can be partitioned by time window
 SELECT function_list FROM tb_name
 [WHERE where_condition]
 INTERVAL (interval [, offset])
+[SLIDING sliding]
 [FILL ({NONE | VALUE | PREV | NULL | LINEAR})]

 SELECT function_list FROM stb_name
 [WHERE where_condition]
 INTERVAL (interval [, offset])
+[SLIDING sliding]
 [FILL ({ VALUE | PREV | NULL | LINEAR})]
 [GROUP BY tags]
 ```

 - The length of the aggregation window is specified by the INTERVAL keyword, with a minimum interval of 10 milliseconds (10a); an offset is also supported (the offset must be smaller than the interval). In aggregation queries, the aggregation and selection functions that can be executed are limited to single-output functions: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last; functions with multi-row output (such as top, bottom, diff, and arithmetic expressions) cannot be used.
 - The WHERE clause can specify the start and end time of the query and other filter conditions
+- The SLIDING clause specifies the forward increment of the aggregation window
 - The FILL clause specifies the fill mode when data is missing in a time range. The fill modes include:
 * NONE: no filling (the default fill mode).
 * VALUE: fill with a fixed value, which must be specified, e.g. fill(value, 1.23).

@@ -1182,6 +1190,8 @@ SELECT function_list FROM stb_name
 2. In time-window aggregation, the time series in the returned results increases strictly monotonically.
 3. If the query object is a super table, the aggregation functions apply to the data of all tables under that super table that satisfy the value filters. If the query does not use a GROUP BY clause, the results are returned strictly monotonically increasing in time; if the query groups with GROUP BY, the results within each group are not strictly monotonically increasing in time.

+Time-window aggregation is also often used for continuous queries; see the documentation on [Continuous Query](https://www.taosdata.com/cn/documentation/advanced-features#continuous-query).
+
 **Example:** the smart meter table is created with the following statement:

 ```mysql

@@ -1200,11 +1210,11 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P

 ## <a class="anchor" id="limitation"></a>TAOS SQL boundary limits

 - The maximum database name length is 32
-- The maximum table name length is 192, and each row of data is at most 16k characters
+- The maximum table name length is 192, and each row of data is at most 16k characters (note: each BINARY/NCHAR column in a data row additionally occupies 2 bytes of storage)
 - The maximum column name length is 64; at most 1024 columns are allowed and at least 2 are required, the first being the timestamp
 - At most 128 tags are allowed, at least 1, and the total tag length must not exceed 16k characters
 - The maximum SQL statement length is 65480 characters, which can be raised via the system configuration parameter maxSQLLength up to 1M
 - There is no limit on the number of databases, super tables, or tables; they are limited only by system resources

 ## Other TAOS SQL conventions

@@ -1220,3 +1230,4 @@ TAOS SQL supports joining the columns of two tables on the primary key timestamp; it does not yet support
 **Applicability of is not null and the not-empty expression**

 is not null is supported for columns of all types; the not-empty expression <>"" applies only to columns of non-numeric types.
+

@@ -92,15 +92,17 @@ TDengine does not yet support deleting data; this may be supported in the future based on user requirements

 Since 2.0.8.0, TDengine supports updating data that has already been written. To use updates, the database must be created with the UPDATE 1 parameter; afterwards, the INSERT INTO command can be used to update already-written data with the same timestamp. The UPDATE parameter cannot be changed with ALTER DATABASE. For databases created without UPDATE 1, writing data with an existing timestamp neither modifies the earlier data nor reports an error.

+Also note that when UPDATE is set to 0, later data with the same timestamp is silently discarded without an error, and it is still counted in affected rows (so the return value of an INSERT statement cannot be used to deduplicate timestamps). The main reason for this design is that TDengine treats written data as a data stream: regardless of timestamp conflicts, TDengine considers that the originating device genuinely produced that data. The UPDATE parameter only controls how such a stream is handled during persistence: with UPDATE 0, data written first overrides data written later; with UPDATE 1, data written later overrides data written first. Which behavior to choose depends on whether the earlier or the later data should be authoritative in subsequent use and statistics.
+
 ## 10. How do I create a table with more than 1024 columns?

 With version 2.0 and above, 1024 columns are supported by default; versions before 2.0 allowed at most 250 columns. If the limit really must be exceeded, it is recommended to split the wide table logically into several smaller tables according to the characteristics of the data.

-## 10. What is the most efficient way to write data?
+## 11. What is the most efficient way to write data?

 Batch inserts. Each insert statement can write multiple records into one table, or multiple records into multiple tables.

-## 11. What is the most efficient way to write data? How do I fix garbled Chinese characters in nchar data inserted on Windows?
+## 12. What is the most efficient way to write data? How do I fix garbled Chinese characters in nchar data inserted on Windows?

 If nchar data inserted on Windows contains Chinese characters, first make sure the system region is set to China (this can be set in the Control Panel); the `taos` client in cmd should then work properly. If you are developing a Java application in an IDE such as Eclipse or IntelliJ, make sure the file encoding in the IDE is GBK (the default encoding type in Java), and then initialize the client configuration when creating the Connection, with the following statements:
 ```JAVA
@@ -110,7 +112,7 @@ properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
 Connection = DriverManager.getConnection(url, properties);
 ```

-## 12. JDBC error: the excuted SQL is not a DML or a DDL?
+## 13. JDBC error: the excuted SQL is not a DML or a DDL?

 Please update to the latest JDBC driver
 ```JAVA
@@ -121,15 +123,15 @@ Connection = DriverManager.getConnection(url, properties);
 </dependency>
 ```

-## 13. taos connect failed, reason: invalid timestamp
+## 14. taos connect failed, reason: invalid timestamp

 A common cause is that the server and client clocks are not synchronized. This can be fixed by synchronizing with a time server (use the ntpdate command on Linux; on Windows select automatic synchronization in the system time settings).

-## 14. Table names are not displayed in full
+## 15. Table names are not displayed in full

 Because the display width of the taos shell in the terminal is limited, long table names may not be displayed in full; if operations are performed using the truncated table name, a Table does not exist error occurs. This can be fixed by changing the maxBinaryDisplayWidth setting in taos.cfg, by entering the command set max_binary_display_width 100 directly, or by appending \G to the command to change how results are displayed.

-## 15. How do I migrate data?
+## 16. How do I migrate data?

 TDengine uniquely identifies a machine by its hostname, so when moving data files from machine A to machine B, pay attention to the following two things:

@@ -137,7 +139,7 @@ TDengine uniquely identifies a machine by its hostname; when data files are moved from machine A
 - For version 2.0.7.0 and later, go to /var/lib/taos/dnode and fix the FQDN corresponding to the dnodeId in dnodeEps.json, then restart. Make sure this file is identical on all machines.
 - The storage structures of versions 1.x and 2.x are not compatible; a migration tool or a custom application is needed to export and import the data.

-## 16. How do I temporarily adjust the log level in the command line program taos?
+## 17. How do I temporarily adjust the log level in the command line program taos?

 For easier debugging, since version 2.0.16 the command line program taos has two new log-related commands:

@@ -13,9 +13,8 @@ WORKDIR /root/${dirName}/
 RUN /bin/bash install.sh -e no

 ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib"
-ENV LANG=en_US.UTF-8
-ENV LANGUAGE=en_US:en
-ENV LC_ALL=en_US.UTF-8
+ENV LANG=C.UTF-8
+ENV LC_ALL=C.UTF-8
 EXPOSE 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042
 CMD ["taosd"]
 VOLUME [ "/var/lib/taos", "/var/log/taos","/etc/taos/" ]

@@ -270,7 +270,7 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex);
 bool hasMoreVnodesToTry(SSqlObj *pSql);
 bool hasMoreClauseToTry(SSqlObj* pSql);

-void tscFreeQueryInfo(SSqlCmd* pCmd);
+void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta);

 void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp);
 void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows);

@@ -442,6 +442,8 @@ void tscCloseTscObj(void *pObj);
 TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
 void *param, TAOS **taos);
 TAOS_RES* taos_query_h(TAOS* taos, const char *sqlstr, int64_t* res);
+TAOS_RES * taos_query_ra(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param);

 void waitForQueryRsp(void *param, TAOS_RES *tres, int code);

 void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen);

@@ -74,12 +74,16 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para

 // TODO return the correct error code to client in tscQueueAsyncError
 void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param) {
+taos_query_ra(taos, sqlstr, fp, param);
+}
+
+TAOS_RES * taos_query_ra(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param) {
 STscObj *pObj = (STscObj *)taos;
 if (pObj == NULL || pObj->signature != pObj) {
 tscError("bug!!! pObj:%p", pObj);
 terrno = TSDB_CODE_TSC_DISCONNECTED;
 tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
-return;
+return NULL;
 }

 int32_t sqlLen = (int32_t)strlen(sqlstr);

@@ -87,7 +91,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
 tscError("sql string exceeds max length:%d", tsMaxSQLStringLen);
 terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
 tscQueueAsyncError(fp, param, terrno);
-return;
+return NULL;
 }

 nPrintTsc("%s", sqlstr);

@@ -96,12 +100,15 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
 if (pSql == NULL) {
 tscError("failed to malloc sqlObj");
 tscQueueAsyncError(fp, param, TSDB_CODE_TSC_OUT_OF_MEMORY);
-return;
+return NULL;
 }

 doAsyncQuery(pObj, pSql, fp, param, sqlstr, sqlLen);
+
+return pSql;
 }

 static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) {
 if (tres == NULL) {
 return;

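The hunks above turn `taos_query_a` into a thin wrapper over `taos_query_ra`, which additionally returns the underlying request object (pSql) to the caller. Below is a hedged sketch of the public asynchronous call that this code path serves; the SQL text and callback are illustrative only, and `conn` is assumed to come from `taos_connect`.

```c
#include <stdio.h>
#include <taos.h>

// Callback invoked once the asynchronous statement completes; `code`
// carries the error status and `res` the result handle.
static void on_query_done(void *param, TAOS_RES *res, int code) {
    if (code != 0) {
        fprintf(stderr, "async query failed: %s\n", taos_errstr(res));
    } else {
        printf("async query ok\n");
    }
    taos_free_result(res);
}

// taos_query_ra (added in this commit) is the internal variant that also
// hands the request object back; applications normally call taos_query_a.
static void fire_async_query(TAOS *conn) {
    taos_query_a(conn, "SHOW DATABASES", on_query_done, NULL);
}
```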
@@ -2013,6 +2013,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
 if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) {
 return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
 }

 if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
 return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
 }

@@ -2802,7 +2802,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
 numOfFailed += 1;

 // clean up tableMeta in cache
-tscFreeQueryInfo(&pSql->cmd);
+tscFreeQueryInfo(&pSql->cmd, false);
 SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0);
 STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, pSql->cmd.clauseIndex, 0);
 tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);

@@ -30,7 +30,7 @@
 #include "ttokendef.h"

 static void freeQueryInfoImpl(SQueryInfo* pQueryInfo);
-static void clearAllTableMetaInfo(SQueryInfo* pQueryInfo);
+static void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta);

 static void tscStrToLower(char *str, int32_t n) {
 if (str == NULL || n <= 0) { return;}

@@ -367,7 +367,7 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) {
 pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free
 }

-void tscFreeQueryInfo(SSqlCmd* pCmd) {
+void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
 if (pCmd == NULL || pCmd->numOfClause == 0) {
 return;
 }

@@ -376,7 +376,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd) {
 SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i);

 freeQueryInfoImpl(pQueryInfo);
-clearAllTableMetaInfo(pQueryInfo);
+clearAllTableMetaInfo(pQueryInfo, removeMeta);
 tfree(pQueryInfo);
 }

@@ -404,7 +404,7 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool removeMeta) {

 pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, removeMeta);
 pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
-tscFreeQueryInfo(pCmd);
+tscFreeQueryInfo(pCmd, removeMeta);
 }

 void tscFreeSqlResult(SSqlObj* pSql) {

@@ -1847,10 +1847,17 @@ SArray* tscVgroupTableInfoDup(SArray* pVgroupTables) {
 return pa;
 }

-void clearAllTableMetaInfo(SQueryInfo* pQueryInfo) {
+void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta) {
 for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
 STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);

+if (removeMeta) {
+char name[TSDB_TABLE_FNAME_LEN] = {0};
+tNameExtractFullName(&pTableMetaInfo->name, name);
+
+taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+}
+
 tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables);
 tscClearTableMetaInfo(pTableMetaInfo);
 free(pTableMetaInfo);

@@ -2714,7 +2721,11 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild) {
 uint32_t tscGetTableMetaSize(STableMeta* pTableMeta) {
 assert(pTableMeta != NULL);

-int32_t totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags;
+int32_t totalCols = 0;
+if (pTableMeta->tableInfo.numOfColumns >= 0 && pTableMeta->tableInfo.numOfTags >= 0) {
+totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags;
+}
+
 return sizeof(STableMeta) + totalCols * sizeof(SSchema);
 }

@@ -430,10 +430,10 @@ static void doInitGlobalConfig(void) {
 // port
 cfg.option = "serverPort";
 cfg.ptr = &tsServerPort;
-cfg.valType = TAOS_CFG_VTYPE_INT16;
+cfg.valType = TAOS_CFG_VTYPE_UINT16;
 cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
 cfg.minValue = 1;
-cfg.maxValue = 65535;
+cfg.maxValue = 65056;
 cfg.ptrLength = 0;
 cfg.unitType = TAOS_CFG_UTYPE_NONE;
 taosInitConfigOption(cfg);

@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
 ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
 POST_BUILD
 COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
-COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.20-dist.jar ${LIBRARY_OUTPUT_PATH}
+COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.21-dist.jar ${LIBRARY_OUTPUT_PATH}
 COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
 COMMENT "build jdbc driver")
 ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})

@@ -5,7 +5,7 @@

 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
-<version>2.0.20</version>
+<version>2.0.21</version>
 <packaging>jar</packaging>

 <name>JDBCDriver</name>

@@ -3,7 +3,7 @@
 <modelVersion>4.0.0</modelVersion>
 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
-<version>2.0.20</version>
+<version>2.0.21</version>
 <packaging>jar</packaging>
 <name>JDBCDriver</name>
 <url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>

@@ -308,7 +308,7 @@ public class DatabaseMetaDataResultSet implements ResultSet {
                return colMetaData.getColIndex() + 1;
            }
        }
-        throw new SQLException(TSDBConstants.INVALID_VARIABLES);
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
    }

    @Override
@@ -14,16 +14,13 @@
 *****************************************************************************/
package com.taosdata.jdbc;

+import java.sql.SQLException;
+import java.sql.Types;
import java.util.HashMap;
import java.util.Map;

public abstract class TSDBConstants {

-    public static final String STATEMENT_CLOSED = "statement is closed";
-    public static final String UNSUPPORTED_METHOD_EXCEPTION_MSG = "this operation is NOT supported currently!";
-    public static final String INVALID_VARIABLES = "invalid variables";
-    public static final String RESULT_SET_IS_CLOSED = "resultSet is closed";

    public static final String DEFAULT_PORT = "6200";
    public static Map<Integer, String> DATATYPE_MAP = null;
@@ -77,8 +74,65 @@ public abstract class TSDBConstants {
        return WrapErrMsg("unkown error!");
    }

+    public static int taosType2JdbcType(int taosType) throws SQLException {
+        switch (taosType) {
+            case TSDBConstants.TSDB_DATA_TYPE_NULL:
+                return Types.NULL;
+            case TSDBConstants.TSDB_DATA_TYPE_BOOL:
+                return Types.BOOLEAN;
+            case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
+                return Types.TINYINT;
+            case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
+                return Types.SMALLINT;
+            case TSDBConstants.TSDB_DATA_TYPE_INT:
+                return Types.INTEGER;
+            case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
+                return Types.BIGINT;
+            case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
+                return Types.FLOAT;
+            case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
+                return Types.DOUBLE;
+            case TSDBConstants.TSDB_DATA_TYPE_BINARY:
+                return Types.BINARY;
+            case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
+                return Types.TIMESTAMP;
+            case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
+                return Types.NCHAR;
+        }
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE);
+    }
+
+    public static int jdbcType2TaosType(int jdbcType) throws SQLException {
+        switch (jdbcType){
+            case Types.NULL:
+                return TSDBConstants.TSDB_DATA_TYPE_NULL;
+            case Types.BOOLEAN:
+                return TSDBConstants.TSDB_DATA_TYPE_BOOL;
+            case Types.TINYINT:
+                return TSDBConstants.TSDB_DATA_TYPE_TINYINT;
+            case Types.SMALLINT:
+                return TSDBConstants.TSDB_DATA_TYPE_SMALLINT;
+            case Types.INTEGER:
+                return TSDBConstants.TSDB_DATA_TYPE_INT;
+            case Types.BIGINT:
+                return TSDBConstants.TSDB_DATA_TYPE_BIGINT;
+            case Types.FLOAT:
+                return TSDBConstants.TSDB_DATA_TYPE_FLOAT;
+            case Types.DOUBLE:
+                return TSDBConstants.TSDB_DATA_TYPE_DOUBLE;
+            case Types.BINARY:
+                return TSDBConstants.TSDB_DATA_TYPE_BINARY;
+            case Types.TIMESTAMP:
+                return TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP;
+            case Types.NCHAR:
+                return TSDBConstants.TSDB_DATA_TYPE_NCHAR;
+        }
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE);
+    }
+
    static {
        DATATYPE_MAP = new HashMap<>();
+        DATATYPE_MAP.put(0, "NULL");
        DATATYPE_MAP.put(1, "BOOL");
        DATATYPE_MAP.put(2, "TINYINT");
        DATATYPE_MAP.put(3, "SMALLINT");

@@ -90,4 +144,8 @@ public abstract class TSDBConstants {
        DATATYPE_MAP.put(9, "TIMESTAMP");
        DATATYPE_MAP.put(10, "NCHAR");
    }
+
+    public static String jdbcType2TaosTypeName(int type) throws SQLException {
+        return DATATYPE_MAP.get(jdbcType2TaosType(type));
+    }
}
@@ -18,6 +18,7 @@ public class TSDBErrorNumbers {
    public static final int ERROR_INVALID_FOR_EXECUTE = 0x230c; //not a valid sql for execute: (SQL)
    public static final int ERROR_PARAMETER_INDEX_OUT_RANGE = 0x230d; // parameter index out of range
    public static final int ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED = 0x230e; // connection already closed
+    public static final int ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE = 0x230f; //unknown sql type in tdengine

    public static final int ERROR_UNKNOWN = 0x2350; //unknown error

@@ -49,6 +50,7 @@ public class TSDBErrorNumbers {
        errorNumbers.add(ERROR_INVALID_FOR_EXECUTE);
        errorNumbers.add(ERROR_PARAMETER_INDEX_OUT_RANGE);
        errorNumbers.add(ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED);
+        errorNumbers.add(ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE);

        /*****************************************************/
        errorNumbers.add(ERROR_SUBSCRIBE_FAILED);
@@ -20,7 +20,7 @@ import java.sql.Timestamp;
import java.sql.Types;
import java.util.List;

-public class TSDBResultSetMetaData implements ResultSetMetaData {
+public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaData {

    List<ColumnMetaData> colMetaDataList = null;

@@ -28,14 +28,6 @@ public class TSDBResultSetMetaData implements ResultSetMetaData {
        this.colMetaDataList = metaDataList;
    }

-    public <T> T unwrap(Class<T> iface) throws SQLException {
-        throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
-    }
-
-    public boolean isWrapperFor(Class<?> iface) throws SQLException {
-        throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
-    }
-
    public int getColumnCount() throws SQLException {
        return colMetaDataList.size();
    }

@@ -94,7 +86,7 @@ public class TSDBResultSetMetaData implements ResultSetMetaData {
    }

    public String getSchemaName(int column) throws SQLException {
-        throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
    }

    public int getPrecision(int column) throws SQLException {

@@ -125,18 +117,18 @@ public class TSDBResultSetMetaData implements ResultSetMetaData {
    }

    public String getTableName(int column) throws SQLException {
-        throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
    }

    public String getCatalogName(int column) throws SQLException {
-        throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
    }

    public int getColumnType(int column) throws SQLException {
        ColumnMetaData meta = this.colMetaDataList.get(column - 1);
        switch (meta.getColType()) {
            case TSDBConstants.TSDB_DATA_TYPE_BOOL:
-                return java.sql.Types.BIT;
+                return Types.BOOLEAN;
            case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
                return java.sql.Types.TINYINT;
            case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:

@@ -150,13 +142,13 @@ public class TSDBResultSetMetaData implements ResultSetMetaData {
            case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
                return java.sql.Types.DOUBLE;
            case TSDBConstants.TSDB_DATA_TYPE_BINARY:
-                return java.sql.Types.CHAR;
+                return Types.BINARY;
            case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
-                return java.sql.Types.BIGINT;
+                return java.sql.Types.TIMESTAMP;
            case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
-                return java.sql.Types.CHAR;
+                return Types.NCHAR;
        }
-        throw new SQLException(TSDBConstants.INVALID_VARIABLES);
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
    }

    public String getColumnTypeName(int column) throws SQLException {

@@ -173,7 +165,7 @@ public class TSDBResultSetMetaData implements ResultSetMetaData {
    }

    public boolean isDefinitelyWritable(int column) throws SQLException {
-        throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
    }

    public String getColumnClassName(int column) throws SQLException {
@@ -1153,11 +1153,11 @@ public class TSDBResultSetWrapper implements ResultSet {
    }

    public <T> T getObject(int columnIndex, Class<T> type) throws SQLException {
-        throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
    }

    public <T> T getObject(String columnLabel, Class<T> type) throws SQLException {
-        throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
    }

    @Override
@@ -14,12 +14,11 @@
 *****************************************************************************/
package com.taosdata.jdbc;

-import javax.management.OperationsException;
import java.sql.SQLException;

public class TSDBSubscribe {
-    private TSDBJNIConnector connecter = null;
-    private long id = 0;
+    private final TSDBJNIConnector connecter;
+    private final long id;

    TSDBSubscribe(TSDBJNIConnector connecter, long id) throws SQLException {
        if (null != connecter) {
@@ -18,10 +18,10 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
    private final String database;
    private final Statement statement;
    // data
-    private ArrayList<ArrayList<Object>> resultSet = new ArrayList<>();
+    private ArrayList<ArrayList<Object>> resultSet;
    // meta
-    private ArrayList<String> columnNames = new ArrayList<>();
-    private ArrayList<Field> columns = new ArrayList<>();
+    private ArrayList<String> columnNames;
+    private ArrayList<Field> columns;
    private RestfulResultSetMetaData metaData;

    /**

@@ -29,11 +29,36 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
     *
     * @param resultJson: the result set that contains the data, as returned by the SQL statement
     ***/
-    public RestfulResultSet(String database, Statement statement, JSONObject resultJson) {
+    public RestfulResultSet(String database, Statement statement, JSONObject resultJson) throws SQLException {
        this.database = database;
        this.statement = statement;
+        // column metadata
+        JSONArray columnMeta = resultJson.getJSONArray("column_meta");
+        columnNames = new ArrayList<>();
+        columns = new ArrayList<>();
+        for (int colIndex = 0; colIndex < columnMeta.size(); colIndex++) {
+            JSONArray col = columnMeta.getJSONArray(colIndex);
+            String col_name = col.getString(0);
+            int col_type = TSDBConstants.taosType2JdbcType(col.getInteger(1));
+            int col_length = col.getInteger(2);
+            columnNames.add(col_name);
+            columns.add(new Field(col_name, col_type, col_length, ""));
+        }
+        this.metaData = new RestfulResultSetMetaData(this.database, columns, this);
+
        // row data
        JSONArray data = resultJson.getJSONArray("data");
+        resultSet = new ArrayList<>();
+        for (int rowIndex = 0; rowIndex < data.size(); rowIndex++) {
+            ArrayList row = new ArrayList();
+            JSONArray jsonRow = data.getJSONArray(rowIndex);
+            for (int colIndex = 0; colIndex < jsonRow.size(); colIndex++) {
+                row.add(parseColumnData(jsonRow, colIndex, columns.get(colIndex).type));
+            }
+            resultSet.add(row);
+        }
+
+        /*
        int columnIndex = 0;
        for (; columnIndex < data.size(); columnIndex++) {
            ArrayList oneRow = new ArrayList<>();
@@ -52,50 +77,77 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
            columns.add(new Field(name, "", 0, ""));
        }
        this.metaData = new RestfulResultSetMetaData(this.database, columns, this);
+        */
    }

-    /**
-     * Build the result set from the JSON of multiple resultSets
-     *
-     * @param resultJson: the result set that contains the data, as returned by the SQL statement
-     * @param fieldJson: result sets (at most 2) that contain the meta information, as returned by describe xxx
-     **/
-    public RestfulResultSet(String database, Statement statement, JSONObject resultJson, List<JSONObject> fieldJson) {
-        this(database, statement, resultJson);
-        ArrayList<Field> newColumns = new ArrayList<>();
-
-        for (Field column : columns) {
-            Field field = findField(column.name, fieldJson);
-            if (field != null) {
-                newColumns.add(field);
-            } else {
-                newColumns.add(column);
-            }
-        }
-        this.columns = newColumns;
-        this.metaData = new RestfulResultSetMetaData(this.database, this.columns, this);
-    }
-
-    public Field findField(String columnName, List<JSONObject> fieldJsonList) {
-        for (JSONObject fieldJSON : fieldJsonList) {
-            JSONArray fieldDataJson = fieldJSON.getJSONArray("data");
-            for (int i = 0; i < fieldDataJson.size(); i++) {
-                JSONArray field = fieldDataJson.getJSONArray(i);
-                if (columnName.equalsIgnoreCase(field.getString(0))) {
-                    return new Field(field.getString(0), field.getString(1), field.getInteger(2), field.getString(3));
-                }
-            }
-        }
-        return null;
-    }
+    private Object parseColumnData(JSONArray row, int colIndex, int sqlType) {
+        switch (sqlType) {
+            case Types.NULL:
+                return null;
+            case Types.BOOLEAN:
+                return row.getBoolean(colIndex);
+            case Types.TINYINT:
+            case Types.SMALLINT:
+                return row.getShort(colIndex);
+            case Types.INTEGER:
+                return row.getInteger(colIndex);
+            case Types.BIGINT:
+                return row.getBigInteger(colIndex);
+            case Types.FLOAT:
+                return row.getFloat(colIndex);
+            case Types.DOUBLE:
+                return row.getDouble(colIndex);
+            case Types.TIMESTAMP:
+                return new Timestamp(row.getDate(colIndex).getTime());
+            case Types.BINARY:
+            case Types.NCHAR:
+            default:
+                return row.getString(colIndex);
+        }
+    }

+    // /**
+    //  * Build the result set from the JSON of multiple resultSets
+    //  *
+    //  * @param resultJson: the result set that contains the data, as returned by the SQL statement
+    //  * @param fieldJson: result sets (at most 2) that contain the meta information, as returned by describe xxx
+    //  **/
+    // public RestfulResultSet(String database, Statement statement, JSONObject resultJson, List<JSONObject> fieldJson) throws SQLException {
+    //     this(database, statement, resultJson);
+    //     ArrayList<Field> newColumns = new ArrayList<>();
+    //
+    //     for (Field column : columns) {
+    //         Field field = findField(column.name, fieldJson);
+    //         if (field != null) {
+    //             newColumns.add(field);
+    //         } else {
+    //             newColumns.add(column);
+    //         }
+    //     }
+    //     this.columns = newColumns;
+    //     this.metaData = new RestfulResultSetMetaData(this.database, this.columns, this);
+    // }

+    // public Field findField(String columnName, List<JSONObject> fieldJsonList) {
+    //     for (JSONObject fieldJSON : fieldJsonList) {
+    //         JSONArray fieldDataJson = fieldJSON.getJSONArray("data");
+    //         for (int i = 0; i < fieldDataJson.size(); i++) {
+    //             JSONArray field = fieldDataJson.getJSONArray(i);
+    //             if (columnName.equalsIgnoreCase(field.getString(0))) {
+    //                 return new Field(field.getString(0), field.getString(1), field.getInteger(2), field.getString(3));
+    //             }
+    //         }
+    //     }
+    //     return null;
+    // }

    public class Field {
        String name;
-        String type;
+        int type;
        int length;
        String note;

-        public Field(String name, String type, int length, String note) {
+        public Field(String name, int type, int length, String note) {
            this.name = name;
            this.type = type;
            this.length = length;
@@ -5,6 +5,7 @@ import com.taosdata.jdbc.TSDBConstants;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Timestamp;
+import java.sql.Types;
import java.util.ArrayList;

public class RestfulResultSetMetaData implements ResultSetMetaData {

@@ -53,14 +54,14 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {

    @Override
    public boolean isSigned(int column) throws SQLException {
-        String type = this.fields.get(column - 1).type.toUpperCase();
+        int type = this.fields.get(column - 1).type;
        switch (type) {
-            case "TINYINT":
-            case "SMALLINT":
-            case "INT":
-            case "BIGINT":
-            case "FLOAT":
-            case "DOUBLE":
+            case Types.TINYINT:
+            case Types.SMALLINT:
+            case Types.INTEGER:
+            case Types.BIGINT:
+            case Types.FLOAT:
+            case Types.DOUBLE:
                return true;
            default:
                return false;

@@ -89,14 +90,14 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {

    @Override
    public int getPrecision(int column) throws SQLException {
-        String type = this.fields.get(column - 1).type.toUpperCase();
+        int type = this.fields.get(column - 1).type;
        switch (type) {
-            case "FLOAT":
+            case Types.FLOAT:
                return 5;
-            case "DOUBLE":
+            case Types.DOUBLE:
                return 9;
-            case "BINARY":
-            case "NCHAR":
+            case Types.BINARY:
+            case Types.NCHAR:
                return this.fields.get(column - 1).length;
            default:
                return 0;

@@ -105,11 +106,11 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {

    @Override
    public int getScale(int column) throws SQLException {
-        String type = this.fields.get(column - 1).type.toUpperCase();
+        int type = this.fields.get(column - 1).type;
        switch (type) {
-            case "FLOAT":
+            case Types.FLOAT:
                return 5;
-            case "DOUBLE":
+            case Types.DOUBLE:
                return 9;
            default:
                return 0;

@@ -128,36 +129,13 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {

    @Override
    public int getColumnType(int column) throws SQLException {
-        String type = this.fields.get(column - 1).type.toUpperCase();
-        switch (type) {
-            case "BOOL":
-                return java.sql.Types.BOOLEAN;
-            case "TINYINT":
-                return java.sql.Types.TINYINT;
-            case "SMALLINT":
-                return java.sql.Types.SMALLINT;
-            case "INT":
-                return java.sql.Types.INTEGER;
-            case "BIGINT":
-                return java.sql.Types.BIGINT;
-            case "FLOAT":
-                return java.sql.Types.FLOAT;
-            case "DOUBLE":
-                return java.sql.Types.DOUBLE;
-            case "BINARY":
-                return java.sql.Types.BINARY;
-            case "TIMESTAMP":
-                return java.sql.Types.TIMESTAMP;
-            case "NCHAR":
-                return java.sql.Types.NCHAR;
-        }
-        throw new SQLException(TSDBConstants.INVALID_VARIABLES);
+        return this.fields.get(column - 1).type;
    }

    @Override
    public String getColumnTypeName(int column) throws SQLException {
-        String type = fields.get(column - 1).type;
-        return type.toUpperCase();
+        int type = fields.get(column - 1).type;
+        return TSDBConstants.jdbcType2TaosTypeName(type);
    }

    @Override

@@ -177,26 +155,26 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {

    @Override
    public String getColumnClassName(int column) throws SQLException {
-        String type = this.fields.get(column - 1).type;
+        int type = this.fields.get(column - 1).type;
        String columnClassName = "";
        switch (type) {
-            case "BOOL":
+            case Types.BOOLEAN:
                return Boolean.class.getName();
-            case "TINYINT":
-            case "SMALLINT":
+            case Types.TINYINT:
+            case Types.SMALLINT:
                return Short.class.getName();
-            case "INT":
+            case Types.INTEGER:
                return Integer.class.getName();
-            case "BIGINT":
+            case Types.BIGINT:
                return Long.class.getName();
-            case "FLOAT":
+            case Types.FLOAT:
                return Float.class.getName();
-            case "DOUBLE":
+            case Types.DOUBLE:
                return Double.class.getName();
-            case "TIMESTAMP":
+            case Types.TIMESTAMP:
                return Timestamp.class.getName();
-            case "BINARY":
-            case "NCHAR":
+            case Types.BINARY:
+            case Types.NCHAR:
                return String.class.getName();
        }
        return columnClassName;
@@ -151,22 +151,21 @@ public class RestfulStatement extends AbstractStatement {
            throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code")));
        }
        // parse table name from sql
-        String[] tableIdentifiers = parseTableIdentifier(sql);
-        if (tableIdentifiers != null) {
-            List<JSONObject> fieldJsonList = new ArrayList<>();
-            for (String tableIdentifier : tableIdentifiers) {
-                // field meta
-                String fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + tableIdentifier);
-                JSONObject fieldJson = JSON.parseObject(fields);
-                if (fieldJson.getString("status").equals("error")) {
-                    throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + fieldJson.getString("desc") + "\n" + "error code: " + fieldJson.getString("code")));
-                }
-                fieldJsonList.add(fieldJson);
-            }
-            this.resultSet = new RestfulResultSet(database, this, resultJson, fieldJsonList);
-        } else {
+        // String[] tableIdentifiers = parseTableIdentifier(sql);
+        // if (tableIdentifiers != null) {
+        //     List<JSONObject> fieldJsonList = new ArrayList<>();
+        //     for (String tableIdentifier : tableIdentifiers) {
+        //         String fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + tableIdentifier);
+        //         JSONObject fieldJson = JSON.parseObject(fields);
+        //         if (fieldJson.getString("status").equals("error")) {
+        //             throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + fieldJson.getString("desc") + "\n" + "error code: " + fieldJson.getString("code")));
+        //         }
+        //         fieldJsonList.add(fieldJson);
+        //     }
+        //     this.resultSet = new RestfulResultSet(database, this, resultJson, fieldJsonList);
+        // } else {
            this.resultSet = new RestfulResultSet(database, this, resultJson);
-        }
+        // }
        this.affectedRows = 0;
        return resultSet;
    }

@@ -201,7 +200,7 @@ public class RestfulStatement extends AbstractStatement {
    @Override
    public ResultSet getResultSet() throws SQLException {
        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
        return resultSet;
    }
@@ -13,7 +13,6 @@ import java.util.HashMap;
import java.util.Properties;

import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;

public class ResultSetTest {
    static Connection connection;
@@ -48,29 +48,28 @@ public class SubscribeTest {
    @Test
    public void subscribe() {
        try {

            String rawSql = "select * from " + dbName + "." + tName + ";";
            System.out.println(rawSql);
-            TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);
+            // TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);

-            int a = 0;
-            while (true) {
-                TimeUnit.MILLISECONDS.sleep(1000);
-                TSDBResultSet resSet = subscribe.consume();
-                while (resSet.next()) {
-                    for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
-                        System.out.printf(i + ": " + resSet.getString(i) + "\t");
-                    }
-                    System.out.println("\n======" + a + "==========");
-                }
-                a++;
-                if (a >= 2) {
-                    break;
-                }
+            // int a = 0;
+            // while (true) {
+            //     TimeUnit.MILLISECONDS.sleep(1000);
+            //     TSDBResultSet resSet = subscribe.consume();
+            //     while (resSet.next()) {
+            //         for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
+            //             System.out.printf(i + ": " + resSet.getString(i) + "\t");
+            //         }
+            //         System.out.println("\n======" + a + "==========");
+            //     }
+            //     a++;
+            //     if (a >= 2) {
+            //         break;
+            //     }
                // resSet.close();
-            }
-
-            subscribe.close(true);
+            // }
+            //
+            // subscribe.close(true);
        } catch (Exception e) {
            e.printStackTrace();
        }
@@ -10,7 +10,7 @@ import java.util.Random;
public class RestfulJDBCTest {

    private static final String host = "127.0.0.1";
    // private static final String host = "master";
    private static Connection connection;
    private Random random = new Random(System.currentTimeMillis());

@@ -12,7 +12,7 @@ import java.sql.*;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class SQLTest {
    private static final String host = "127.0.0.1";
    // private static final String host = "master";
    private static Connection connection;

    @Test
@@ -323,6 +323,18 @@ public class SQLTest {
        SQLExecutor.executeQuery(connection, sql);
    }

+    @Test
+    public void testCase052() {
+        String sql = "select server_status()";
+        SQLExecutor.executeQuery(connection, sql);
+    }
+
+    @Test
+    public void testCase053() {
+        String sql = "select avg(cpu_taosd), avg(cpu_system), max(cpu_cores), avg(mem_taosd), avg(mem_system), max(mem_total), avg(disk_used), max(disk_total), avg(band_speed), avg(io_read), avg(io_write), sum(req_http), sum(req_select), sum(req_insert) from log.dn1 where ts> now - 60m and ts<= now interval(1m) fill(value, 0)";
+        SQLExecutor.executeQuery(connection, sql);
+    }
+
    @BeforeClass
    public static void before() throws ClassNotFoundException, SQLException {
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
@@ -29,7 +29,7 @@ typedef struct {
static SCheckItem tsCheckItem[TSDB_CHECK_ITEM_MAX] = {{0}};
int64_t tsMinFreeMemSizeForStart = 0;

-static int32_t bindTcpPort(int16_t port) {
+static int32_t bindTcpPort(uint16_t port) {
  SOCKET serverSocket;
  struct sockaddr_in server_addr;

@@ -85,9 +85,9 @@ static int32_t bindUdpPort(int16_t port) {

static int32_t dnodeCheckNetwork() {
  int32_t ret;
-  int16_t startPort = tsServerPort;
+  uint16_t startPort = tsServerPort;

-  for (int16_t port = startPort; port < startPort + 12; port++) {
+  for (uint16_t port = startPort; port < startPort + 12; port++) {
    ret = bindTcpPort(port);
    if (0 != ret) {
      dError("failed to tcp bind port %d, quit", port);
@@ -286,7 +286,7 @@ do { \
#define TSDB_MAX_COMP_LEVEL 2
#define TSDB_DEFAULT_COMP_LEVEL 2

-#define TSDB_MIN_WAL_LEVEL 1
+#define TSDB_MIN_WAL_LEVEL 0
#define TSDB_MAX_WAL_LEVEL 2
#define TSDB_DEFAULT_WAL_LEVEL 1
@@ -6,15 +6,17 @@
  "user": "root",
  "password": "taosdata",
  "thread_count": 4,
-  "thread_count_create_tbl": 1,
+  "thread_count_create_tbl": 4,
  "result_file": "./insert_res.txt",
  "confirm_parameter_prompt": "no",
+  "insert_interval": 0,
+  "num_of_records_per_req": 100,
  "databases": [{
    "dbinfo": {
      "name": "db",
-      "drop": "no",
+      "drop": "yes",
      "replica": 1,
-      "days": 2,
+      "days": 10,
      "cache": 16,
      "blocks": 8,
      "precision": "ms",

@@ -23,6 +25,7 @@
      "maxRows": 4096,
      "comp":2,
      "walLevel":1,
+      "cachelast":0,
      "quorum":1,
      "fsync":3000,
      "update": 0

@@ -30,20 +33,19 @@
    "super_tables": [{
      "name": "stb",
      "child_table_exists":"no",
-      "childtable_count": 1,
+      "childtable_count": 100,
      "childtable_prefix": "stb_",
      "auto_create_table": "no",
      "data_source": "rand",
      "insert_mode": "taosc",
-      "insert_rate": 0,
      "insert_rows": 100000,
      "multi_thread_write_one_tbl": "no",
-      "number_of_tbl_in_one_sql": 1,
+      "number_of_tbl_in_one_sql": 0,
      "rows_per_tbl": 100,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
-      "timestamp_step": 10,
+      "timestamp_step": 1,
      "start_timestamp": "2020-10-01 00:00:00.000",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
@@ -6,13 +6,14 @@
  "user": "root",
  "password": "taosdata",
  "confirm_parameter_prompt": "yes",
-  "databases": "db01",
+  "databases": "dbx",
  "specified_table_query":
-    {"query_interval":1, "concurrent":1,
-     "sqls": [{"sql": "select count(*) from stb01", "result": "./query_res0.txt"}]
+    {"query_interval":1, "concurrent":4,
+     "sqls": [{"sql": "select last_row(*) from stb where color='red'", "result": "./query_res0.txt"},
+              {"sql": "select count(*) from stb_01", "result": "./query_res1.txt"}]
    },
  "super_table_query":
-    {"stblname": "stb01", "query_interval":1, "threads":1,
-     "sqls": [{"sql": "select count(*) from xxxx", "result": "./query_res1.txt"}]
+    {"stblname": "stb", "query_interval":1, "threads":4,
+     "sqls": [{"sql": "select last_row(*) from xxxx", "result": "./query_res2.txt"}]
    }
}
@@ -5,7 +5,7 @@
  "port": 6030,
  "user": "root",
  "password": "taosdata",
-  "databases": "db",
+  "databases": "dbx",
  "specified_table_query":
    {"concurrent":1, "mode":"sync", "interval":5000, "restart":"yes", "keepProgress":"yes",
     "sqls": [{"sql": "select avg(col1) from stb01 where col1 > 1;", "result": "./subscribe_res0.txt"}]
File diff suppressed because it is too large
@@ -769,6 +769,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
    }
    sprintf(tmpBuf, ".select-tbname.tmp");
    (void)remove(tmpBuf);
+    free(tblBuf);
    close(fd);
    return -1;
  }

@@ -1523,6 +1524,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
    }
    sprintf(tmpBuf, ".show-tables.tmp");
    (void)remove(tmpBuf);
+    free(tblBuf);
    close(fd);
    return -1;
  }
@@ -832,12 +832,13 @@ static int32_t mnodeProcessBatchCreateTableMsg(SMnodeMsg *pMsg) {
    return code;
  } else if (code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
    ++pMsg->pBatchMasterMsg->received;
+    pMsg->pBatchMasterMsg->code = code;
    mnodeDestroySubMsg(pMsg);
  }

  if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received
      >= pMsg->pBatchMasterMsg->expected) {
-    dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, TSDB_CODE_SUCCESS);
+    dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, pMsg->pBatchMasterMsg->code);
  }

  return TSDB_CODE_MND_ACTION_IN_PROGRESS;

@@ -916,11 +917,13 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
    return TSDB_CODE_MND_DB_IN_DROPPING;
  }

+#if 0
  if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) {
    mError("msg:%p, app:%p table:%s, failed to drop table, in monitor database", pMsg, pMsg->rpcMsg.ahandle,
           pDrop->name);
    return TSDB_CODE_MND_MONITOR_DB_FORBIDDEN;
  }
+#endif

  if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pDrop->name);
  if (pMsg->pTable == NULL) {

@@ -1906,7 +1909,8 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
    sdbDeleteRow(&desc);

    if (pMsg->pBatchMasterMsg) {
-      ++pMsg->pBatchMasterMsg->successed;
+      ++pMsg->pBatchMasterMsg->received;
+      pMsg->pBatchMasterMsg->code = code;
      if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received
          >= pMsg->pBatchMasterMsg->expected) {
        dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, code);

@@ -2688,6 +2692,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {

    if (pMsg->pBatchMasterMsg) {
      ++pMsg->pBatchMasterMsg->received;
+      pMsg->pBatchMasterMsg->code = code;
      if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received
          >= pMsg->pBatchMasterMsg->expected) {
        dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, code);

@@ -2726,6 +2731,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {

  if (pMsg->pBatchMasterMsg) {
    ++pMsg->pBatchMasterMsg->received;
+    pMsg->pBatchMasterMsg->code = rpcMsg->code;
    if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received
        >= pMsg->pBatchMasterMsg->expected) {
      dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, rpcMsg->code);

@@ -3020,10 +3026,12 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
    return TSDB_CODE_MND_DB_IN_DROPPING;
  }

+#if 0
  if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) {
    mError("msg:%p, app:%p table:%s, failed to alter table, its log db", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableFname);
    return TSDB_CODE_MND_MONITOR_DB_FORBIDDEN;
  }
+#endif

  if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pAlter->tableFname);
  if (pMsg->pTable == NULL) {

@@ -537,6 +537,7 @@ static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) {

    if (pMsg->pBatchMasterMsg) {
      ++pMsg->pBatchMasterMsg->received;
+      pMsg->pBatchMasterMsg->code = pMsg->code;
      if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received
          >= pMsg->pBatchMasterMsg->expected) {
        dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, pMsg->code);

@@ -1002,6 +1003,7 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) {

  if (mnodeMsg->pBatchMasterMsg) {
    ++mnodeMsg->pBatchMasterMsg->received;
+    mnodeMsg->pBatchMasterMsg->code = code;
    if (mnodeMsg->pBatchMasterMsg->successed + mnodeMsg->pBatchMasterMsg->received
        >= mnodeMsg->pBatchMasterMsg->expected) {
      dnodeSendRpcMWriteRsp(mnodeMsg->pBatchMasterMsg, code);

@@ -1024,6 +1026,7 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) {

  if (mnodeMsg->pBatchMasterMsg) {
    ++mnodeMsg->pBatchMasterMsg->received;
+    mnodeMsg->pBatchMasterMsg->code = mnodeMsg->code;
    if (mnodeMsg->pBatchMasterMsg->successed + mnodeMsg->pBatchMasterMsg->received
        >= mnodeMsg->pBatchMasterMsg->expected) {
      dnodeSendRpcMWriteRsp(mnodeMsg->pBatchMasterMsg, mnodeMsg->code);
@@ -83,6 +83,20 @@ extern "C" {
  }                       \
} while (0)

+#define DEFAULT_DOUBLE_COMP(x, y)           \
+  do {                                      \
+    if (isnan(x) && isnan(y)) { return 0; } \
+    if (isnan(x)) { return -1; }            \
+    if (isnan(y)) { return 1; }             \
+    if ((x) == (y)) {                       \
+      return 0;                             \
+    } else {                                \
+      return (x) < (y) ? -1 : 1;            \
+    }                                       \
+  } while (0)
+
+#define DEFAULT_FLOAT_COMP(x, y) DEFAULT_DOUBLE_COMP(x, y)
+
#define ALIGN_NUM(n, align) (((n) + ((align)-1)) & (~((align)-1)))

// align to 8bytes
@@ -34,6 +34,8 @@
#define REST_JSON_DATA_LEN        4
#define REST_JSON_HEAD            "head"
#define REST_JSON_HEAD_LEN        4
+#define REST_JSON_HEAD_INFO       "column_meta"
+#define REST_JSON_HEAD_INFO_LEN   11
#define REST_JSON_ROWS            "rows"
#define REST_JSON_ROWS_LEN        4
#define REST_JSON_AFFECT_ROWS     "affected_rows"
@@ -59,7 +59,9 @@ void httpDispatchToResultQueue(void *param, TAOS_RES *result, int32_t code, int3
    pMsg->fp = fp;
    taosWriteQitem(tsHttpQueue, TAOS_QTYPE_RPC, pMsg);
  } else {
-    (*fp)(param, result, code, rows);
+    taos_stop_query(result);
+    taos_free_result(result);
+    //(*fp)(param, result, code, rows);
  }
}
@@ -75,6 +75,44 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result)
  // head array end
  httpJsonToken(jsonBuf, JsonArrEnd);

+  // column_meta begin
+  httpJsonItemToken(jsonBuf);
+  httpJsonPairHead(jsonBuf, REST_JSON_HEAD_INFO, REST_JSON_HEAD_INFO_LEN);
+  // column_meta array begin
+  httpJsonItemToken(jsonBuf);
+  httpJsonToken(jsonBuf, JsonArrStt);
+
+  if (num_fields == 0) {
+    httpJsonItemToken(jsonBuf);
+    httpJsonToken(jsonBuf, JsonArrStt);
+
+    httpJsonItemToken(jsonBuf);
+    httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN);
+    httpJsonItemToken(jsonBuf);
+    httpJsonInt(jsonBuf, TSDB_DATA_TYPE_INT);
+    httpJsonItemToken(jsonBuf);
+    httpJsonInt(jsonBuf, 4);
+
+    httpJsonToken(jsonBuf, JsonArrEnd);
+  } else {
+    for (int32_t i = 0; i < num_fields; ++i) {
+      httpJsonItemToken(jsonBuf);
+      httpJsonToken(jsonBuf, JsonArrStt);
+
+      httpJsonItemToken(jsonBuf);
+      httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name));
+      httpJsonItemToken(jsonBuf);
+      httpJsonInt(jsonBuf, fields[i].type);
+      httpJsonItemToken(jsonBuf);
+      httpJsonInt(jsonBuf, fields[i].bytes);
+
+      httpJsonToken(jsonBuf, JsonArrEnd);
+    }
+  }
+
+  // column_meta array end
+  httpJsonToken(jsonBuf, JsonArrEnd);
+
  // data begin
  httpJsonItemToken(jsonBuf);
  httpJsonPairHead(jsonBuf, REST_JSON_DATA, REST_JSON_DATA_LEN);
@@ -362,20 +362,10 @@ static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, i
      return (first < second) ? -1 : 1;
    };
    case TSDB_DATA_TYPE_DOUBLE: {
-      double first = GET_DOUBLE_VAL(f1);
-      double second = GET_DOUBLE_VAL(f2);
-      if (first == second) {
-        return 0;
-      }
-      return (first < second) ? -1 : 1;
+      DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2));
    };
    case TSDB_DATA_TYPE_FLOAT: {
-      float first = GET_FLOAT_VAL(f1);
-      float second = GET_FLOAT_VAL(f2);
-      if (first == second) {
-        return 0;
-      }
-      return (first < second) ? -1 : 1;
+      DEFAULT_FLOAT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2));
    };
    case TSDB_DATA_TYPE_BIGINT: {
      int64_t first = *(int64_t *)f1;
@@ -58,6 +58,15 @@ SSqlInfo qSQLParse(const char *pStr) {
        sqlInfo.valid = false;
        goto abort_parse;
      }
+
+      case TK_HEX:
+      case TK_OCT:
+      case TK_BIN:{
+        snprintf(sqlInfo.msg, tListLen(sqlInfo.msg), "unsupported token: \"%s\"", t0.z);
+        sqlInfo.valid = false;
+        goto abort_parse;
+      }
+
      default:
        Parse(pParser, t0.type, t0, &sqlInfo);
        if (sqlInfo.valid == false) {
@@ -48,7 +48,7 @@ tMemBucket *createUnsignedDataBucket(int32_t start, int32_t end, int32_t type) {
    uint64_t k = i;
    int32_t ret = tMemBucketPut(pBucket, &k, 1);
    if (ret != 0) {
-      printf("value out of range:%f", k);
+      printf("value out of range:%" PRId64, k);
    }
  }

@@ -245,7 +245,7 @@ void unsignedDataTest() {
}  // namespace

TEST(testCase, percentileTest) {
  // qsortTest();
  intDataTest();
  bigintDataTest();
  doubleDataTest();
@@ -227,10 +227,10 @@ TEST(testCase, db_table_name) {
  EXPECT_EQ(testValidateName(t60_1), TSDB_CODE_TSC_INVALID_SQL);

  char t61[] = "' ABC '";
-  EXPECT_EQ(testValidateName(t61), TSDB_CODE_SUCCESS);
+  EXPECT_EQ(testValidateName(t61), TSDB_CODE_TSC_INVALID_SQL);

  char t61_1[] = "' ABC '";
-  EXPECT_EQ(testValidateName(t61_1), TSDB_CODE_SUCCESS);
+  EXPECT_EQ(testValidateName(t61_1), TSDB_CODE_TSC_INVALID_SQL);

  char t62[] = " ABC . def ";
  EXPECT_EQ(testValidateName(t62), TSDB_CODE_TSC_INVALID_SQL);

@@ -249,13 +249,13 @@ TEST(testCase, db_table_name) {
  EXPECT_EQ(testValidateName(t65), TSDB_CODE_TSC_INVALID_SQL);

  char t66[] = "' ABC '.' DEF '";
-  EXPECT_EQ(testValidateName(t66), TSDB_CODE_SUCCESS);
+  EXPECT_EQ(testValidateName(t66), TSDB_CODE_TSC_INVALID_SQL);

  char t67[] = "abc . ' DEF '";
  EXPECT_EQ(testValidateName(t67), TSDB_CODE_TSC_INVALID_SQL);

  char t68[] = "' abc '.' DEF '";
-  EXPECT_EQ(testValidateName(t68), TSDB_CODE_SUCCESS);
+  EXPECT_EQ(testValidateName(t68), TSDB_CODE_TSC_INVALID_SQL);

  // do not use key words
  char t69[] = "table.'DEF'";

@@ -265,7 +265,7 @@ TEST(testCase, db_table_name) {
  EXPECT_EQ(testValidateName(t70), TSDB_CODE_TSC_INVALID_SQL);

  char t71[] = "'_abXYZ1234 '.' deFF '";
-  EXPECT_EQ(testValidateName(t71), TSDB_CODE_SUCCESS);
+  EXPECT_EQ(testValidateName(t71), TSDB_CODE_TSC_INVALID_SQL);

  char t72[] = "'_abDEF&^%1234'.' DIef'";
  EXPECT_EQ(testValidateName(t72), TSDB_CODE_TSC_INVALID_SQL);
@@ -1281,7 +1281,7 @@ static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) {
SRpcConn *pConn = rpcSetupConnToServer(pContext);
if (pConn == NULL) {
pContext->code = terrno;
- taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl);
+ taosTmrStart(rpcProcessConnError, 1, pContext, pRpc->tmrCtrl);
return;
}
@@ -37,6 +37,7 @@ typedef struct {
TSKEY keyLast;
int64_t numOfRows;
SSkipList* pData;
+ T_REF_DECLARE()
} STableData;

typedef struct {

@@ -76,7 +77,7 @@ typedef struct {

int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
- int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem);
+ int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem, SArray* pATable);
void tsdbUnTakeMemSnapShot(STsdbRepo* pRepo, SMemTable* pMem, SMemTable* pIMem);
void* tsdbAllocBytes(STsdbRepo* pRepo, int bytes);
int tsdbAsyncCommit(STsdbRepo* pRepo);

@@ -597,7 +597,7 @@ int tsdbRestoreInfo(STsdbRepo *pRepo) {
// Get the data in row
ASSERT(pTable->lastRow == NULL);
STSchema *pSchema = tsdbGetTableSchema(pTable);
- pTable->lastRow = taosTMalloc(schemaTLen(pSchema));
+ pTable->lastRow = taosTMalloc(dataRowMaxBytesFromSchema(pSchema));
if (pTable->lastRow == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbDestroyReadH(&readh);
@@ -124,17 +124,66 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
return 0;
}

- int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem) {
+ int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem, SArray *pATable) {
+ SMemTable *tmem;
+
+ // Get snap object
if (tsdbLockRepo(pRepo) < 0) return -1;

- *pMem = pRepo->mem;
+ tmem = pRepo->mem;
*pIMem = pRepo->imem;
- tsdbRefMemTable(pRepo, *pMem);
+ tsdbRefMemTable(pRepo, tmem);
tsdbRefMemTable(pRepo, *pIMem);

if (tsdbUnlockRepo(pRepo) < 0) return -1;

- if (*pMem != NULL) taosRLockLatch(&((*pMem)->latch));
+ // Copy mem objects and ref needed STableData
+ if (tmem) {
+ taosRLockLatch(&(tmem->latch));
+
+ *pMem = (SMemTable *)calloc(1, sizeof(**pMem));
+ if (*pMem == NULL) {
+ terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+ taosRUnLockLatch(&(tmem->latch));
+ tsdbUnRefMemTable(pRepo, tmem);
+ tsdbUnRefMemTable(pRepo, *pIMem);
+ *pMem = NULL;
+ *pIMem = NULL;
+ return -1;
+ }
+
+ (*pMem)->tData = (STableData **)calloc(tmem->maxTables, sizeof(STableData *));
+ if ((*pMem)->tData == NULL) {
+ terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+ taosRUnLockLatch(&(tmem->latch));
+ free(*pMem);
+ tsdbUnRefMemTable(pRepo, tmem);
+ tsdbUnRefMemTable(pRepo, *pIMem);
+ *pMem = NULL;
+ *pIMem = NULL;
+ return -1;
+ }
+
+ (*pMem)->keyFirst = tmem->keyFirst;
+ (*pMem)->keyLast = tmem->keyLast;
+ (*pMem)->numOfRows = tmem->numOfRows;
+ (*pMem)->maxTables = tmem->maxTables;
+
+ for (size_t i = 0; i < taosArrayGetSize(pATable); i++) {
+ STable * pTable = *(STable **)taosArrayGet(pATable, i);
+ int32_t tid = TABLE_TID(pTable);
+ STableData *pTableData = (tid < tmem->maxTables) ? tmem->tData[tid] : NULL;
+
+ if ((pTableData == NULL) || (TABLE_UID(pTable) != pTableData->uid)) continue;
+
+ (*pMem)->tData[tid] = tmem->tData[tid];
+ T_REF_INC(tmem->tData[tid]);
+ }
+
+ taosRUnLockLatch(&(tmem->latch));
+ }
+
+ tsdbUnRefMemTable(pRepo, tmem);

tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem);
return 0;

@@ -144,8 +193,14 @@ void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem)
tsdbDebug("vgId:%d untake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pMem, pIMem);

if (pMem != NULL) {
- taosRUnLockLatch(&(pMem->latch));
- tsdbUnRefMemTable(pRepo, pMem);
+ for (size_t i = 0; i < pMem->maxTables; i++) {
+ STableData *pTableData = pMem->tData[i];
+ if (pTableData) {
+ tsdbFreeTableData(pTableData);
+ }
+ }
+ free(pMem->tData);
+ free(pMem);
}

if (pIMem != NULL) {
@@ -436,7 +491,7 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable) {
STableData *pTableData = (STableData *)calloc(1, sizeof(*pTableData));
if (pTableData == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
- goto _err;
+ return NULL;
}

pTableData->uid = TABLE_UID(pTable);

@@ -449,21 +504,23 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable) {
tkeyComparFn, pCfg->update ? SL_UPDATE_DUP_KEY : SL_DISCARD_DUP_KEY, tsdbGetTsTupleKey);
if (pTableData->pData == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
- goto _err;
+ free(pTableData);
+ return NULL;
}

- return pTableData;
+ T_REF_INC(pTableData);

- _err:
- tsdbFreeTableData(pTableData);
- return NULL;
+ return pTableData;
}

static void tsdbFreeTableData(STableData *pTableData) {
if (pTableData) {
+ int32_t ref = T_REF_DEC(pTableData);
+ if (ref == 0) {
tSkipListDestroy(pTableData->pData);
free(pTableData);
}
+ }
}

static char *tsdbGetTsTupleKey(const void *data) { return dataRowTuple((SDataRow)data); }
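Editor's note: the memtable hunks above move STableData to per-table reference counting. The snapshot takes a reference on each table it copies while holding the read latch, and tsdbFreeTableData only destroys the skip list once the last reference is released. A minimal sketch of that lifetime rule, using an invented struct and a C11 atomic counter standing in for the T_REF_* macros, whose definitions are not part of this diff:

```c
#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical stand-in for STableData; names and fields are illustrative only. */
typedef struct {
  atomic_int ref;
  void      *rows;   /* stands in for the per-table skip list */
} TableDataSketch;

static TableDataSketch *tableDataNew(void) {
  TableDataSketch *p = calloc(1, sizeof(*p));
  if (p != NULL) atomic_store(&p->ref, 1);   /* creator holds one reference */
  return p;
}

static void tableDataRef(TableDataSketch *p) { atomic_fetch_add(&p->ref, 1); }

static void tableDataUnref(TableDataSketch *p) {
  if (p && atomic_fetch_sub(&p->ref, 1) == 1) {  /* last reference released */
    free(p->rows);
    free(p);
  }
}
```

Under this rule a query holding a snapshot can keep reading a table's rows even after the writer has dropped its own reference.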
@@ -187,13 +187,15 @@ static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS
return pLocalIdList;
}

- static void tsdbMayTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) {
+ static void tsdbMayTakeMemSnapshot(STsdbQueryHandle* pQueryHandle, SArray* psTable) {
assert(pQueryHandle != NULL && pQueryHandle->pMemRef != NULL);

SMemRef* pMemRef = pQueryHandle->pMemRef;
if (pQueryHandle->pMemRef->ref++ == 0) {
- tsdbTakeMemSnapshot(pQueryHandle->pTsdb, (SMemTable**)&(pMemRef->mem), (SMemTable**)&(pMemRef->imem));
+ tsdbTakeMemSnapshot(pQueryHandle->pTsdb, (SMemTable**)&(pMemRef->mem), (SMemTable**)&(pMemRef->imem), psTable);
}

+ taosArrayDestroy(psTable);
}

static void tsdbMayUnTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) {

@@ -242,7 +244,7 @@ int64_t tsdbGetNumOfRowsInMemTable(TsdbQueryHandleT* pHandle) {
return rows;
}

- static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STableGroupInfo* pGroupList, STsdbMeta* pMeta) {
+ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STableGroupInfo* pGroupList, STsdbMeta* pMeta, SArray** psTable) {
size_t sizeOfGroup = taosArrayGetSize(pGroupList->pGroupList);
assert(sizeOfGroup >= 1 && pMeta != NULL);

@@ -252,6 +254,12 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa
return NULL;
}

+ SArray* pTable = taosArrayInit(4, sizeof(STable*));
+ if (pTable == NULL) {
+ taosArrayDestroy(pTableCheckInfo);
+ return NULL;
+ }
+
// todo apply the lastkey of table check to avoid to load header file
for (int32_t i = 0; i < sizeOfGroup; ++i) {
SArray* group = *(SArray**) taosArrayGet(pGroupList->pGroupList, i);

@@ -284,24 +292,40 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa
}

taosArraySort(pTableCheckInfo, tsdbCheckInfoCompar);

+ size_t gsize = taosArrayGetSize(pTableCheckInfo);
+
+ for (int32_t i = 0; i < gsize; ++i) {
+ STableCheckInfo* pInfo = (STableCheckInfo*) taosArrayGet(pTableCheckInfo, i);
+
+ taosArrayPush(pTable, &pInfo->pTableObj);
+ }
+
+ *psTable = pTable;
+
return pTableCheckInfo;
}

- static SArray* createCheckInfoFromCheckInfo(SArray* pTableCheckInfo, TSKEY skey) {
+ static SArray* createCheckInfoFromCheckInfo(SArray* pTableCheckInfo, TSKEY skey, SArray** psTable) {
size_t si = taosArrayGetSize(pTableCheckInfo);
SArray* pNew = taosArrayInit(si, sizeof(STableCheckInfo));
if (pNew == NULL) {
return NULL;
}

+ SArray* pTable = taosArrayInit(si, sizeof(STable*));
+
for (int32_t j = 0; j < si; ++j) {
STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pTableCheckInfo, j);
STableCheckInfo info = { .lastKey = skey, .pTableObj = pCheckInfo->pTableObj};

info.tableId = pCheckInfo->tableId;
taosArrayPush(pNew, &info);
+ taosArrayPush(pTable, &pCheckInfo->pTableObj);
}

+ *psTable = pTable;
+
// it is ordered already, no need to sort again.
taosArraySort(pNew, tsdbCheckInfoCompar);
return pNew;

@@ -332,7 +356,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
goto out_of_memory;
}

- tsdbMayTakeMemSnapshot(pQueryHandle);
+ //tsdbMayTakeMemSnapshot(pQueryHandle);
assert(pCond != NULL && pCond->numOfCols > 0 && pMemRef != NULL);

if (ASCENDING_TRAVERSE(pCond->order)) {

@@ -393,14 +417,18 @@ TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STable
STsdbMeta* pMeta = tsdbGetMeta(tsdb);
assert(pMeta != NULL);

+ SArray* psTable = NULL;
+
// todo apply the lastkey of table check to avoid to load header file
- pQueryHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pQueryHandle, groupList, pMeta);
+ pQueryHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pQueryHandle, groupList, pMeta, &psTable);
if (pQueryHandle->pTableCheckInfo == NULL) {
tsdbCleanupQueryHandle(pQueryHandle);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return NULL;
}

+ tsdbMayTakeMemSnapshot(pQueryHandle, psTable);
+
tsdbDebug("%p total numOfTable:%" PRIzu " in query, %p", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qinfo);
return (TsdbQueryHandleT) pQueryHandle;
}

@@ -2337,12 +2365,18 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM
pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qinfo, pMemRef);

tfree(cond.colList);
- pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pQueryHandle->pTableCheckInfo, pSecQueryHandle->window.skey);
+ SArray* psTable = NULL;
+
+ pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pQueryHandle->pTableCheckInfo, pSecQueryHandle->window.skey, &psTable);
if (pSecQueryHandle->pTableCheckInfo == NULL) {
terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
goto out_of_memory;
}

+ tsdbMayTakeMemSnapshot(pSecQueryHandle, psTable);
+
if (!tsdbNextDataBlock((void*)pSecQueryHandle)) {
// no result in current query, free the corresponding result rows structure
if (type == TSDB_PREV_ROW) {
@@ -44,6 +44,7 @@ enum {
TAOS_CFG_VTYPE_INT8,
TAOS_CFG_VTYPE_INT16,
TAOS_CFG_VTYPE_INT32,
+ TAOS_CFG_VTYPE_UINT16,
TAOS_CFG_VTYPE_FLOAT,
TAOS_CFG_VTYPE_STRING,
TAOS_CFG_VTYPE_IPSTR,
@@ -392,8 +392,8 @@ __compar_fn_t getKeyComparFunc(int32_t keyType) {
int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) {
switch (type) {
case TSDB_DATA_TYPE_INT: DEFAULT_COMP(GET_INT32_VAL(f1), GET_INT32_VAL(f2));
- case TSDB_DATA_TYPE_DOUBLE: DEFAULT_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2));
+ case TSDB_DATA_TYPE_DOUBLE: DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2));
- case TSDB_DATA_TYPE_FLOAT: DEFAULT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2));
+ case TSDB_DATA_TYPE_FLOAT: DEFAULT_FLOAT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2));
case TSDB_DATA_TYPE_BIGINT: DEFAULT_COMP(GET_INT64_VAL(f1), GET_INT64_VAL(f2));
case TSDB_DATA_TYPE_SMALLINT: DEFAULT_COMP(GET_INT16_VAL(f1), GET_INT16_VAL(f2));
case TSDB_DATA_TYPE_TINYINT:
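Editor's note: the doCompare hunk switches FLOAT and DOUBLE columns from the generic integer-style macro to dedicated floating-point comparison macros. The macro bodies are not shown in this diff; the sketch below only illustrates the usual reason for such a change, tolerance-based equality, with invented names rather than the project's macros:

```c
#include <math.h>
#include <float.h>

/* Illustrative only: compare two doubles with a relative tolerance instead of
 * the exact a < b / a > b branches used for integer types. */
static int compareDoubleVal(double a, double b) {
  if (fabs(a - b) <= 4 * DBL_EPSILON * fmax(fabs(a), fabs(b))) {
    return 0;  /* treat nearly-equal values as equal */
  }
  return (a < b) ? -1 : 1;
}
```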
@@ -95,6 +95,23 @@ static void taosReadInt16Config(SGlobalCfg *cfg, char *input_value) {
}
}

+ static void taosReadUInt16Config(SGlobalCfg *cfg, char *input_value) {
+ int32_t value = atoi(input_value);
+ uint16_t *option = (uint16_t *)cfg->ptr;
+ if (value < cfg->minValue || value > cfg->maxValue) {
+ uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%d",
+ cfg->option, input_value, cfg->minValue, cfg->maxValue, *option);
+ } else {
+ if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) {
+ *option = (uint16_t)value;
+ cfg->cfgStatus = TAOS_CFG_CSTATUS_FILE;
+ } else {
+ uWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg->option, input_value,
+ tsCfgStatusStr[cfg->cfgStatus], *option);
+ }
+ }
+ }
+
static void taosReadInt8Config(SGlobalCfg *cfg, char *input_value) {
int32_t value = atoi(input_value);
int8_t *option = (int8_t *)cfg->ptr;

@@ -239,6 +256,9 @@ static void taosReadConfigOption(const char *option, char *value, char *value2,
case TAOS_CFG_VTYPE_INT32:
taosReadInt32Config(cfg, value);
break;
+ case TAOS_CFG_VTYPE_UINT16:
+ taosReadUInt16Config(cfg, value);
+ break;
case TAOS_CFG_VTYPE_FLOAT:
taosReadFloatConfig(cfg, value);
break;

@@ -422,6 +442,9 @@ void taosPrintGlobalCfg() {
case TAOS_CFG_VTYPE_INT32:
uInfo(" %s:%s%d%s", cfg->option, blank, *((int32_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
break;
+ case TAOS_CFG_VTYPE_UINT16:
+ uInfo(" %s:%s%d%s", cfg->option, blank, *((uint16_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
+ break;
case TAOS_CFG_VTYPE_FLOAT:
uInfo(" %s:%s%f%s", cfg->option, blank, *((float *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
break;

@@ -459,6 +482,9 @@ static void taosDumpCfg(SGlobalCfg *cfg) {
case TAOS_CFG_VTYPE_INT32:
printf(" %s:%s%d%s\n", cfg->option, blank, *((int32_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
break;
+ case TAOS_CFG_VTYPE_UINT16:
+ printf(" %s:%s%d%s\n", cfg->option, blank, *((uint16_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
+ break;
case TAOS_CFG_VTYPE_FLOAT:
printf(" %s:%s%f%s\n", cfg->option, blank, *((float *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
break;
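Editor's note: adding TAOS_CFG_VTYPE_UINT16 touches every switch that walks the config table: the reader, the logger, and the dump routine each need a matching branch. A stripped-down sketch of that one-branch-per-value-type pattern, with made-up type and field names rather than the real SGlobalCfg layout:

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Illustrative config entry; not the project's actual structures. */
typedef enum { CFG_INT32, CFG_UINT16, CFG_FLOAT } CfgType;

typedef struct {
  const char *name;
  CfgType     type;
  void       *ptr;
} CfgEntry;

static void cfgRead(CfgEntry *e, const char *input) {
  switch (e->type) {
    case CFG_INT32:  *(int32_t *)e->ptr  = (int32_t)atoi(input);  break;
    case CFG_UINT16: *(uint16_t *)e->ptr = (uint16_t)atoi(input); break;
    case CFG_FLOAT:  *(float *)e->ptr    = (float)atof(input);    break;
  }
}

static void cfgPrint(const CfgEntry *e) {
  switch (e->type) {
    case CFG_INT32:  printf("%s:%d\n", e->name, *(int32_t *)e->ptr);  break;
    case CFG_UINT16: printf("%s:%u\n", e->name, *(uint16_t *)e->ptr); break;
    case CFG_FLOAT:  printf("%s:%f\n", e->name, *(float *)e->ptr);    break;
  }
}
```

Keeping the parse and print branches adjacent, or table-driven, makes it harder to forget one of the switches when the next value type is added.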
@@ -55,9 +55,15 @@ pipeline {
sh '''
cd ${WKC}/tests
./test-all.sh b1
+ date'''
+ sh '''
cd ${WKC}/tests
./test-all.sh full jdbc
date'''
+ sh '''
+ cd ${WKC}/tests
+ ./test-all.sh full unit
+ date'''
}
}
@@ -63,7 +63,9 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
- <version>2.0.18</version>
+ <version>2.0.20</version>
+ <!-- <scope>system</scope>-->
+ <!-- <systemPath>${project.basedir}/src/main/resources/taos-jdbcdriver-2.0.20-dist.jar</systemPath>-->
</dependency>

<dependency>
@@ -6,6 +6,7 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;

import java.util.List;
+ import java.util.Map;

@RequestMapping("/weather")
@RestController

@@ -20,7 +21,7 @@ public class WeatherController {
* @return
*/
@GetMapping("/init")
- public boolean init() {
+ public int init() {
return weatherService.init();
}

@@ -44,19 +45,23 @@ public class WeatherController {
* @return
*/
@PostMapping("/{temperature}/{humidity}")
- public int saveWeather(@PathVariable int temperature, @PathVariable float humidity) {
+ public int saveWeather(@PathVariable float temperature, @PathVariable int humidity) {
return weatherService.save(temperature, humidity);
}

- /**
- * upload multi weather info
- *
- * @param weatherList
- * @return
- */
- @PostMapping("/batch")
- public int batchSaveWeather(@RequestBody List<Weather> weatherList) {
- return weatherService.save(weatherList);
+ @GetMapping("/count")
+ public int count() {
+ return weatherService.count();
+ }
+
+ @GetMapping("/subTables")
+ public List<String> getSubTables() {
+ return weatherService.getSubTables();
+ }
+
+ @GetMapping("/avg")
+ public List<Weather> avg() {
+ return weatherService.avg();
}

}
@@ -4,16 +4,26 @@ import com.taosdata.example.springbootdemo.domain.Weather;
import org.apache.ibatis.annotations.Param;

import java.util.List;
+ import java.util.Map;

public interface WeatherMapper {

- int insert(Weather weather);
-
- int batchInsert(List<Weather> weatherList);
-
- List<Weather> select(@Param("limit") Long limit, @Param("offset")Long offset);
+ void dropDB();

void createDB();

- void createTable();
+ void createSuperTable();
+
+ void createTable(Weather weather);
+
+ List<Weather> select(@Param("limit") Long limit, @Param("offset") Long offset);
+
+ int insert(Weather weather);
+
+ int count();
+
+ List<String> getSubTables();
+
+ List<Weather> avg();

}
@@ -4,28 +4,29 @@
<mapper namespace="com.taosdata.example.springbootdemo.dao.WeatherMapper">

<resultMap id="BaseResultMap" type="com.taosdata.example.springbootdemo.domain.Weather">
- <id column="ts" jdbcType="TIMESTAMP" property="ts" />
- <result column="temperature" jdbcType="INTEGER" property="temperature" />
- <result column="humidity" jdbcType="FLOAT" property="humidity" />
+ <id column="ts" jdbcType="TIMESTAMP" property="ts"/>
+ <result column="temperature" jdbcType="FLOAT" property="temperature"/>
+ <result column="humidity" jdbcType="FLOAT" property="humidity"/>
</resultMap>

- <update id="createDB" >
- create database if not exists test;
+ <update id="dropDB">
+ drop database if exists test
</update>

- <update id="createTable" >
- create table if not exists test.weather(ts timestamp, temperature int, humidity float);
+ <update id="createDB">
+ create database if not exists test
</update>

- <sql id="Base_Column_List">
- ts, temperature, humidity
- </sql>
+ <update id="createSuperTable">
+ create table if not exists test.weather(ts timestamp, temperature float, humidity float) tags(location nchar(64), groupId int)
+ </update>
+
+ <update id="createTable" parameterType="com.taosdata.example.springbootdemo.domain.Weather">
+ create table if not exists test.t#{groupId} using test.weather tags(#{location}, #{groupId})
+ </update>

<select id="select" resultMap="BaseResultMap">
- select
- <include refid="Base_Column_List" />
- from test.weather
- order by ts desc
+ select * from test.weather order by ts desc
<if test="limit != null">
limit #{limit,jdbcType=BIGINT}
</if>

@@ -34,16 +35,26 @@
</if>
</select>

- <insert id="insert" parameterType="com.taosdata.example.springbootdemo.domain.Weather" >
- insert into test.weather (ts, temperature, humidity) values (now, #{temperature,jdbcType=INTEGER}, #{humidity,jdbcType=FLOAT})
+ <insert id="insert" parameterType="com.taosdata.example.springbootdemo.domain.Weather">
+ insert into test.t#{groupId} (ts, temperature, humidity) values (#{ts}, ${temperature}, ${humidity})
</insert>

- <insert id="batchInsert" parameterType="java.util.List" >
- insert into test.weather (ts, temperature, humidity) values
- <foreach separator=" " collection="list" item="weather" index="index" >
- (now + #{index}a, #{weather.temperature}, #{weather.humidity})
- </foreach>
- </insert>
+ <select id="getSubTables" resultType="String">
+ select tbname from test.weather
+ </select>
+
+ <select id="count" resultType="int">
+ select count(*) from test.weather
+ </select>
+
+ <resultMap id="avgResultSet" type="com.taosdata.example.springbootdemo.domain.Weather">
+ <id column="ts" jdbcType="TIMESTAMP" property="ts" />
+ <result column="avg(temperature)" jdbcType="FLOAT" property="temperature" />
+ <result column="avg(humidity)" jdbcType="FLOAT" property="humidity" />
+ </resultMap>
+
+ <select id="avg" resultMap="avgResultSet">
+ select avg(temperature), avg(humidity)from test.weather interval(1m)
+ </select>

</mapper>
@@ -6,12 +6,21 @@ import java.sql.Timestamp;

public class Weather {

- @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8")
+ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8")
private Timestamp ts;
- private int temperature;
+ private float temperature;

private float humidity;
+ private String location;
+ private int groupId;
+
+ public Weather() {
+ }
+
+ public Weather(Timestamp ts, float temperature, float humidity) {
+ this.ts = ts;
+ this.temperature = temperature;
+ this.humidity = humidity;
+ }

public Timestamp getTs() {
return ts;

@@ -21,11 +30,11 @@ public class Weather {
this.ts = ts;
}

- public int getTemperature() {
+ public float getTemperature() {
return temperature;
}

- public void setTemperature(int temperature) {
+ public void setTemperature(float temperature) {
this.temperature = temperature;
}

@@ -36,4 +45,20 @@ public class Weather {
public void setHumidity(float humidity) {
this.humidity = humidity;
}

+ public String getLocation() {
+ return location;
+ }
+
+ public void setLocation(String location) {
+ this.location = location;
+ }
+
+ public int getGroupId() {
+ return groupId;
+ }
+
+ public void setGroupId(int groupId) {
+ this.groupId = groupId;
+ }
}
@@ -5,25 +5,41 @@ import com.taosdata.example.springbootdemo.domain.Weather;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

+ import java.sql.Timestamp;
import java.util.List;
+ import java.util.Map;
+ import java.util.Random;

@Service
public class WeatherService {

@Autowired
private WeatherMapper weatherMapper;
+ private Random random = new Random(System.currentTimeMillis());
+ private String[] locations = {"北京", "上海", "广州", "深圳", "天津"};

- public boolean init() {
+ public int init() {
+ weatherMapper.dropDB();
weatherMapper.createDB();
- weatherMapper.createTable();
- return true;
+ weatherMapper.createSuperTable();
+ long ts = System.currentTimeMillis();
+ long thirtySec = 1000 * 30;
+ int count = 0;
+ for (int i = 0; i < 20; i++) {
+ Weather weather = new Weather(new Timestamp(ts + (thirtySec * i)), 30 * random.nextFloat(), random.nextInt(100));
+ weather.setLocation(locations[random.nextInt(locations.length)]);
+ weather.setGroupId(i % locations.length);
+ weatherMapper.createTable(weather);
+ count += weatherMapper.insert(weather);
+ }
+ return count;
}

public List<Weather> query(Long limit, Long offset) {
return weatherMapper.select(limit, offset);
}

- public int save(int temperature, float humidity) {
+ public int save(float temperature, int humidity) {
Weather weather = new Weather();
weather.setTemperature(temperature);
weather.setHumidity(humidity);

@@ -31,8 +47,15 @@ public class WeatherService {
return weatherMapper.insert(weather);
}

- public int save(List<Weather> weatherList) {
- return weatherMapper.batchInsert(weatherList);
+ public int count() {
+ return weatherMapper.count();
}

+ public List<String> getSubTables() {
+ return weatherMapper.getSubTables();
+ }
+
+ public List<Weather> avg() {
+ return weatherMapper.avg();
+ }
}
@@ -1,12 +1,14 @@
# datasource config - JDBC-JNI
- spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
- spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
- spring.datasource.username=root
- spring.datasource.password=taosdata
+ #spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
+ #spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
+ #spring.datasource.username=root
+ #spring.datasource.password=taosdata

# datasource config - JDBC-RESTful
- #spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
- #spring.datasource.url=jdbc:TAOS-RS://master:6041/test?user=root&password=taosdata
+ spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
+ spring.datasource.url=jdbc:TAOS-RS://master:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
+ spring.datasource.username=root
+ spring.datasource.password=taosdata

spring.datasource.druid.initial-size=5
spring.datasource.druid.min-idle=5
@@ -4,7 +4,7 @@ import com.taosdata.taosdemo.components.DataSourceFactory;
import com.taosdata.taosdemo.components.JdbcTaosdemoConfig;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.DatabaseService;
- import com.taosdata.taosdemo.service.QueryService;
+ import com.taosdata.taosdemo.service.SqlExecuteTask;
import com.taosdata.taosdemo.service.SubTableService;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;

@@ -32,6 +32,17 @@ public class TaosDemoApplication {
}
// 初始化
final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user, config.password);
+ if (config.executeSql != null && !config.executeSql.isEmpty() && !config.executeSql.replaceAll("\\s", "").isEmpty()) {
+ Thread task = new Thread(new SqlExecuteTask(dataSource, config.executeSql));
+ task.start();
+ try {
+ task.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ return;
+ }
+
final DatabaseService databaseService = new DatabaseService(dataSource);
final SuperTableService superTableService = new SuperTableService(dataSource);
final SubTableService subTableService = new SubTableService(dataSource);

@@ -96,7 +107,6 @@ public class TaosDemoApplication {
// 查询


/**********************************************************************************/
// 删除表
if (config.dropTable) {

@@ -42,7 +42,7 @@ public final class JdbcTaosdemoConfig {
public int rate = 10;
public long range = 1000l;
// select task
+ public String executeSql;
// drop task
public boolean dropTable = false;

@@ -89,7 +89,7 @@ public final class JdbcTaosdemoConfig {
System.out.println("-rate The proportion of data out of order. effective only if order is 1. min 0, max 100, default is 10");
System.out.println("-range The range of data out of order. effective only if order is 1. default is 1000 ms");
// query task
- // System.out.println("-sqlFile The select sql file");
+ System.out.println("-executeSql execute a specific sql.");
// drop task
System.out.println("-dropTable Drop data before quit. Default is false");
System.out.println("--help Give this help list");

@@ -207,6 +207,9 @@ public final class JdbcTaosdemoConfig {
range = Integer.parseInt(args[++i]);
}
// select task
+ if ("-executeSql".equals(args[i]) && i < args.length - 1) {
+ executeSql = args[++i];
+ }
+
// drop task
if ("-dropTable".equals(args[i]) && i < args.length - 1) {
@@ -0,0 +1,36 @@
+ package com.taosdata.taosdemo.service;
+
+ import com.taosdata.taosdemo.utils.Printer;
+
+ import javax.sql.DataSource;
+ import java.sql.Connection;
+ import java.sql.ResultSet;
+ import java.sql.SQLException;
+ import java.sql.Statement;
+
+ public class SqlExecuteTask implements Runnable {
+ private final DataSource dataSource;
+ private final String sql;
+
+ public SqlExecuteTask(DataSource dataSource, String sql) {
+ this.dataSource = dataSource;
+ this.sql = sql;
+ }
+
+ @Override
+ public void run() {
+ try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement()) {
+ long start = System.currentTimeMillis();
+ boolean execute = stmt.execute(sql);
+ long end = System.currentTimeMillis();
+ if (execute) {
+ ResultSet rs = stmt.getResultSet();
+ Printer.printResult(rs);
+ } else {
+ Printer.printSql(sql, true, (end - start));
+ }
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ }
@@ -0,0 +1,27 @@
+ package com.taosdata.taosdemo.utils;
+
+ import java.sql.ResultSet;
+ import java.sql.ResultSetMetaData;
+ import java.sql.SQLException;
+
+ public class Printer {
+
+ public static void printResult(ResultSet resultSet) throws SQLException {
+ ResultSetMetaData metaData = resultSet.getMetaData();
+ while (resultSet.next()) {
+ for (int i = 1; i <= metaData.getColumnCount(); i++) {
+ String columnLabel = metaData.getColumnLabel(i);
+ String value = resultSet.getString(i);
+ System.out.printf("%s: %s\t", columnLabel, value);
+ }
+ System.out.println();
+ }
+ }
+
+ public static void printSql(String sql, boolean succeed, long cost) {
+ System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
+ }
+
+ private Printer() {
+ }
+ }
@@ -467,7 +467,6 @@ int main(int argc, char *argv[]) {
const char* passwd = "taosdata";

taos_options(TSDB_OPTION_TIMEZONE, "GMT-8");
- taos_init();

TAOS* taos = taos_connect(host, user, passwd, "", 0);
if (taos == NULL) {

@@ -99,8 +99,6 @@ int main(int argc, char *argv[])
tableList = (STable *)malloc(size);
memset(tableList, 0, size);

- taos_init();

taos = taos_connect(argv[1], "root", "taosdata", NULL, 0);
if (taos == NULL)
taos_error(taos);

@@ -61,11 +61,6 @@ int main(int argc, char *argv[]) {
return 0;
}

- // init TAOS
- if (taos_init()) {
- exit(1);
- }

TAOS *taos = taos_connect(argv[1], "root", "taosdata", NULL, 0);
if (taos == NULL) {
printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/);

@@ -97,15 +92,14 @@ void Test(TAOS *taos, char *qstr, int index) {
// printf("insert row: %i, reason:%s\n", i, taos_errstr(taos));
// }
TAOS_RES *result1 = taos_query(taos, qstr);
- if (result1) {
- printf("insert row: %i\n", i);
- } else {
- printf("failed to insert row: %i, reason:%s\n", i, "null result"/*taos_errstr(result)*/);
+ if (result1 == NULL || taos_errno(result1) != 0) {
+ printf("failed to insert row, reason:%s\n", taos_errstr(result1));
taos_free_result(result1);
exit(1);
+ } else {
+ printf("insert row: %i\n", i);
}
taos_free_result(result1);

}
printf("success to insert rows, total %d rows\n", i);
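Editor's note: the last hunk above makes the example treat a non-NULL result handle as insufficient proof of success and consult taos_errno() on it instead. A small illustrative helper built on the same public client calls shown in the hunk (taos_query, taos_errno, taos_errstr, taos_free_result); the wrapper itself is a sketch, not part of this commit:

```c
#include <stdio.h>
#include <stdlib.h>
#include <taos.h>  /* TDengine client header; assumes the client library is installed */

/* Mirror the corrected pattern: always check taos_errno() on the result,
 * not just whether the pointer is NULL. */
static void execute_or_die(TAOS *taos, const char *sql) {
  TAOS_RES *res = taos_query(taos, sql);
  if (res == NULL || taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    taos_free_result(res);
    exit(1);
  }
  taos_free_result(res);
}
```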
@@ -6,7 +6,7 @@ TARGET=exe
LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt
CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \
-Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \
- -msse4.2 -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99
+ -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99

all: $(TARGET)

@@ -22,12 +22,6 @@ int main(int argc, char *argv[])
return 0;
}

- // init TAOS
- if (taos_init()) {
- printf("failed to init taos\n");
- exit(1);
- }

taos = taos_connect(argv[1], "root", "taosdata", NULL, 0);
if (taos == NULL) {
printf("failed to connect to db, reason:%s\n", taos_errstr(taos));

@@ -54,12 +54,6 @@ int main(int argc, char *argv[])
exit(0);
}

- // init TAOS
- if (taos_init()) {
- printf("failed to init taos\n");
- exit(1);
- }

strcpy(db_name, argv[2]);
strcpy(tbl_name, argv[3]);

@@ -216,12 +216,6 @@ int main(int argc, char *argv[]) {
}
}

- // init TAOS
- if (taos_init()) {
- printf("failed to init taos\n");
- exit(1);
- }

TAOS* taos = taos_connect(host, user, passwd, "", 0);
if (taos == NULL) {
printf("failed to connect to db, reason:%s\n", taos_errstr(taos));
@@ -19,6 +19,10 @@ class TDTestCase:
"double",
"smallint",
"tinyint",
+ "int unsigned",
+ "bigint unsigned",
+ "smallint unsigned",
+ "tinyint unsigned",
"binary(10)",
"nchar(10)",
"timestamp"]

@@ -19,6 +19,10 @@ class TDTestCase:
"double",
"smallint",
"tinyint",
+ "int unsigned",
+ "bigint unsigned",
+ "smallint unsigned",
+ "tinyint unsigned",
"binary(10)",
"nchar(10)",
"timestamp"]

@@ -66,6 +66,14 @@ class TDTestCase:
"alter table dt add column tbcol8 nchar(20)")
tdSql.execute(
"alter table dt add column tbcol9 binary(20)")
+ tdSql.execute(
+ "alter table dt add column tbcol10 tinyint unsigned")
+ tdSql.execute(
+ "alter table dt add column tbcol11 int unsigned")
+ tdSql.execute(
+ "alter table dt add column tbcol12 smallint unsigned")
+ tdSql.execute(
+ "alter table dt add column tbcol13 bigint unsigned")

# restart taosd
tdDnodes.forcestop(1)
@@ -11,15 +11,9 @@

# -*- coding: utf-8 -*-

- import os
- import sys
- sys.path.insert(0, os.getcwd())
from fabric import Connection
- from util.sql import *
- from util.log import *
- import taos
import random
- import threading
+ import time
import logging

class Node:

@@ -76,6 +70,19 @@ class Node:
print("remove taosd error for node %d " % self.index)
logging.exception(e)

+ def forceStopOneTaosd(self):
+ try:
+ self.conn.run("kill -9 $(ps -ax|grep taosd|awk '{print $1}')")
+ except Exception as e:
+ print("kill taosd error on node%d " % self.index)
+
+ def startOneTaosd(self):
+ try:
+ self.conn.run("nohup taosd -c /etc/taos/ > /dev/null 2>&1 &")
+ except Exception as e:
+ print("start taosd error on node%d " % self.index)
+ logging.exception(e)
+
def installTaosd(self, packagePath):
self.conn.put(packagePath, self.homeDir)
self.conn.cd(self.homeDir)

@@ -122,100 +129,51 @@ class Node:

class Nodes:
def __init__(self):
- self.node1 = Node(1, 'root', '52.151.60.239', 'node1', 'r', '/root/')
- self.node2 = Node(2, 'root', '52.183.32.246', 'node1', 'r', '/root/')
- self.node3 = Node(3, 'root', '51.143.46.79', 'node1', 'r', '/root/')
- self.node4 = Node(4, 'root', '52.183.2.76', 'node1', 'r', '/root/')
- self.node5 = Node(5, 'root', '13.66.225.87', 'node1', 'r', '/root/')
+ self.tdnodes = []
+ self.tdnodes.append(Node(0, 'root', '52.143.103.7', 'node1', 'a', '/root/'))
+ self.tdnodes.append(Node(1, 'root', '52.250.48.222', 'node2', 'a', '/root/'))
+ self.tdnodes.append(Node(2, 'root', '51.141.167.23', 'node3', 'a', '/root/'))
+ self.tdnodes.append(Node(3, 'root', '52.247.207.173', 'node4', 'a', '/root/'))
+ self.tdnodes.append(Node(4, 'root', '51.141.166.100', 'node5', 'a', '/root/'))
+
+ def stopOneNode(self, index):
+ self.tdnodes[index].forceStopOneTaosd()
+
+ def startOneNode(self, index):
+ self.tdnodes[index].startOneTaosd()

def stopAllTaosd(self):
- self.node1.stopTaosd()
- self.node2.stopTaosd()
- self.node3.stopTaosd()
+ for i in range(len(self.tdnodes)):
+ self.tdnodes[i].stopTaosd()

def startAllTaosd(self):
- self.node1.startTaosd()
- self.node2.startTaosd()
- self.node3.startTaosd()
+ for i in range(len(self.tdnodes)):
+ self.tdnodes[i].startTaosd()

def restartAllTaosd(self):
- self.node1.restartTaosd()
- self.node2.restartTaosd()
- self.node3.restartTaosd()
+ for i in range(len(self.tdnodes)):
+ self.tdnodes[i].restartTaosd()

def addConfigs(self, configKey, configValue):
- self.node1.configTaosd(configKey, configValue)
- self.node2.configTaosd(configKey, configValue)
- self.node3.configTaosd(configKey, configValue)
+ for i in range(len(self.tdnodes)):
+ self.tdnodes[i].configTaosd(configKey, configValue)

def removeConfigs(self, configKey, configValue):
- self.node1.removeTaosConfig(configKey, configValue)
- self.node2.removeTaosConfig(configKey, configValue)
- self.node3.removeTaosConfig(configKey, configValue)
+ for i in range(len(self.tdnodes)):
+ self.tdnodes[i].removeTaosConfig(configKey, configValue)

def removeAllDataFiles(self):
- self.node1.removeData()
- self.node2.removeData()
- self.node3.removeData()
+ for i in range(len(self.tdnodes)):
+ self.tdnodes[i].removeData()

- class ClusterTest:
- def __init__(self, hostName):
- self.host = hostName
- self.user = "root"
- self.password = "taosdata"
- self.config = "/etc/taos"
- self.dbName = "mytest"
- self.stbName = "meters"
- self.numberOfThreads = 20
- self.numberOfTables = 10000
- self.numberOfRecords = 1000
- self.tbPrefix = "t"
- self.ts = 1538548685000
- self.repeat = 1
-
- def connectDB(self):
- self.conn = taos.connect(
- host=self.host,
- user=self.user,
- password=self.password,
- config=self.config)
-
- def createSTable(self, replica):
- cursor = self.conn.cursor()
- tdLog.info("drop database if exists %s" % self.dbName)
- cursor.execute("drop database if exists %s" % self.dbName)
- tdLog.info("create database %s replica %d" % (self.dbName, replica))
- cursor.execute("create database %s replica %d" % (self.dbName, replica))
- tdLog.info("use %s" % self.dbName)
- cursor.execute("use %s" % self.dbName)
- tdLog.info("drop table if exists %s" % self.stbName)
- cursor.execute("drop table if exists %s" % self.stbName)
- tdLog.info("create table %s(ts timestamp, current float, voltage int, phase int) tags(id int)" % self.stbName)
- cursor.execute("create table %s(ts timestamp, current float, voltage int, phase int) tags(id int)" % self.stbName)
- cursor.close()
-
- def insertData(self, threadID):
- print("Thread %d: starting" % threadID)
- cursor = self.conn.cursor()
- tablesPerThread = int(self.numberOfTables / self.numberOfThreads)
- baseTableID = tablesPerThread * threadID
- for i in range (tablesPerThread):
- cursor.execute("create table %s%d using %s tags(%d)" % (self.tbPrefix, baseTableID + i, self.stbName, baseTableID + i))
- query = "insert into %s%d values" % (self.tbPrefix, baseTableID + i)
- base = self.numberOfRecords * i
- for j in range(self.numberOfRecords):
- query += "(%d, %f, %d, %d)" % (self.ts + base + j, random.random(), random.randint(210, 230), random.randint(0, 10))
- cursor.execute(query)
- cursor.close()
- print("Thread %d: finishing" % threadID)
-
- def run(self):
- threads = []
- tdLog.info("Inserting data")
- for i in range(self.numberOfThreads):
- thread = threading.Thread(target=self.insertData, args=(i,))
- threads.append(thread)
- thread.start()
-
- for i in range(self.numberOfThreads):
- threads[i].join()
+ # kill taosd randomly every 10 mins
+ nodes = Nodes()
+ loop = 0
+ while True:
+ loop = loop + 1
+ index = random.randint(0, 4)
+ print("loop: %d, kill taosd on node%d" %(loop, index))
+ nodes.stopOneNode(index)
+ time.sleep(60)
+ nodes.startOneNode(index)
+ time.sleep(600)
@@ -350,18 +350,27 @@ class ConcurrentInquiry:
         cl.execute("create database if not exists %s;" %self.dbname)
         cl.execute("use %s" % self.dbname)
         for k in range(stableNum):
-            sql="create table %s (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool,c8 binary(20),c9 nchar(20)) \
-                tags(t1 int, t2 float, t3 bigint, t4 smallint, t5 tinyint, t6 double, t7 bool,t8 binary(20),t9 nchar(20))" % (self.stb_prefix+str(k))
+            sql="create table %s (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool,c8 binary(20),c9 nchar(20),c11 int unsigned,c12 smallint unsigned,c13 tinyint unsigned,c14 bigint unsigned) \
+                tags(t1 int, t2 float, t3 bigint, t4 smallint, t5 tinyint, t6 double, t7 bool,t8 binary(20),t9 nchar(20), t11 int unsigned , t12 smallint unsigned , t13 tinyint unsigned , t14 bigint unsigned)" % (self.stb_prefix+str(k))
             cl.execute(sql)
             for j in range(subtableNum):
-                sql = "create table %s using %s tags(%d,%d,%d,%d,%d,%d,%d,'%s','%s')" % \
-                    (self.subtb_prefix+str(k)+'_'+str(j),self.stb_prefix+str(k),j,j/2.0,j%41,j%51,j%53,j*1.0,j%2,'taos'+str(j),'涛思'+str(j))
+                if j % 100 == 0:
+                    sql = "create table %s using %s tags(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)" % \
+                        (self.subtb_prefix+str(k)+'_'+str(j),self.stb_prefix+str(k))
+                else:
+                    sql = "create table %s using %s tags(%d,%d,%d,%d,%d,%d,%d,'%s','%s',%d,%d,%d,%d)" % \
+                        (self.subtb_prefix+str(k)+'_'+str(j),self.stb_prefix+str(k),j,j/2.0,j%41,j%51,j%53,j*1.0,j%2,'taos'+str(j),'涛思'+str(j), j%43, j%23 , j%17 , j%3167)
                 print(sql)
                 cl.execute(sql)
                 for i in range(insertRows):
-                    ret = cl.execute(
-                        "insert into %s values (%d , %d,%d,%d,%d,%d,%d,%d,'%s','%s')" %
-                        (self.subtb_prefix+str(k)+'_'+str(j),t0+i,i%100,i/2.0,i%41,i%51,i%53,i*1.0,i%2,'taos'+str(i),'涛思'+str(i)))
+                    if i % 100 == 0 :
+                        ret = cl.execute(
+                            "insert into %s values (%d , NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)" %
+                            (self.subtb_prefix+str(k)+'_'+str(j), t0+i))
+                    else:
+                        ret = cl.execute(
+                            "insert into %s values (%d , %d,%d,%d,%d,%d,%d,%d,'%s','%s',%d,%d,%d,%d)" %
+                            (self.subtb_prefix+str(k)+'_'+str(j), t0+i, i%100, i/2.0, i%41, i%51, i%53, i*1.0, i%2,'taos'+str(i),'涛思'+str(i), i%43, i%23 , i%17 , i%3167))
         cl.close()
         conn.close()

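The hunk above changes ConcurrentInquiry so that every 100th sub-table gets NULL tags and every 100th record is inserted as an all-NULL row, which exercises the new unsigned columns with missing values as well. A minimal sketch of that row-generation rule follows; the sub-table name and start timestamp are assumed values, and the statements are only printed, not sent to a server.

```python
# Sketch of the "every 100th row is NULL" rule from the hunk above.
# subtb and t0 are hypothetical; nothing is executed against TDengine.
def build_insert(subtb, t0, i):
    if i % 100 == 0:
        return ("insert into %s values (%d , NULL,NULL,NULL,NULL,NULL,NULL,NULL,"
                "NULL,NULL,NULL,NULL,NULL,NULL)" % (subtb, t0 + i))
    return ("insert into %s values (%d , %d,%d,%d,%d,%d,%d,%d,'%s','%s',%d,%d,%d,%d)"
            % (subtb, t0 + i, i % 100, i / 2.0, i % 41, i % 51, i % 53, i * 1.0,
               i % 2, 'taos' + str(i), '涛思' + str(i), i % 43, i % 23, i % 17, i % 3167))

if __name__ == "__main__":
    for i in (0, 1, 100, 101):
        print(build_insert("subtb0_0", 1538548685000, i))
```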
@@ -3,12 +3,16 @@ ulimit -c unlimited

 python3 ./test.py -f insert/basic.py
 python3 ./test.py -f insert/int.py
+python3 ./test.py -f insert/unsignedInt.py
 python3 ./test.py -f insert/float.py
 python3 ./test.py -f insert/bigint.py
+python3 ./test.py -f insert/unsignedBigint.py
 python3 ./test.py -f insert/bool.py
 python3 ./test.py -f insert/double.py
 python3 ./test.py -f insert/smallint.py
+python3 ./test.py -f insert/unsignedSmallint.py
 python3 ./test.py -f insert/tinyint.py
+python3 ./test.py -f insert/unsignedTinyint.py
 python3 ./test.py -f insert/date.py
 python3 ./test.py -f insert/binary.py
 python3 ./test.py -f insert/nchar.py
@@ -151,6 +155,7 @@ python3 ./test.py -f query/filterCombo.py
 python3 ./test.py -f query/queryNormal.py
 python3 ./test.py -f query/queryError.py
 python3 ./test.py -f query/filterAllIntTypes.py
+python3 ./test.py -f query/filterAllUnsignedIntTypes.py
 python3 ./test.py -f query/filterFloatAndDouble.py
 python3 ./test.py -f query/filterOtherTypes.py
 python3 ./test.py -f query/querySort.py

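The shell-script hunks above only register the new unsigned-type cases with the regression driver. For reference, each added line is equivalent to one call like the following; the use of subprocess and the working directory (wherever test.py lives) are assumptions about the harness, while the case names come directly from the hunks.

```python
# Hypothetical Python equivalent of the shell lines added above.
# Must be run from the directory that contains test.py.
import subprocess

new_cases = [
    "insert/unsignedInt.py",
    "insert/unsignedBigint.py",
    "insert/unsignedSmallint.py",
    "insert/unsignedTinyint.py",
    "query/filterAllUnsignedIntTypes.py",
]

for case in new_cases:
    # mirrors: python3 ./test.py -f <case>
    subprocess.run(["python3", "./test.py", "-f", case], check=False)
```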
@@ -34,11 +34,11 @@ class TDTestCase:
         floatData = []

         tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-                    col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         tdSql.execute("create table test1 using test tags('beijing')")
         for i in range(self.rowNum):
-            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
             intData.append(i + 1)
             floatData.append(i + 0.1)

@@ -64,6 +64,14 @@ class TDTestCase:
         tdSql.checkData(0, 0, np.average(floatData))
         tdSql.query("select avg(col6) from test")
         tdSql.checkData(0, 0, np.average(floatData))
+        tdSql.query("select avg(col11) from test")
+        tdSql.checkData(0, 0, np.average(intData))
+        tdSql.query("select avg(col12) from test")
+        tdSql.checkData(0, 0, np.average(intData))
+        tdSql.query("select avg(col13) from test")
+        tdSql.checkData(0, 0, np.average(intData))
+        tdSql.query("select avg(col14) from test")
+        tdSql.checkData(0, 0, np.average(intData))

     def stop(self):
         tdSql.close()

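The new avg() assertions compare the server result for the unsigned columns against numpy over the very data the test inserted. Assuming rowNum is 10, which is what the related count checks expect, the reference values work out as follows:

```python
# Expected values behind the new avg(col11..col14) checks (rowNum assumed to be 10).
import numpy as np

rowNum = 10
intData = [i + 1 for i in range(rowNum)]      # values written to col11..col14
floatData = [i + 0.1 for i in range(rowNum)]  # values written to col5/col6

print(np.average(intData))    # 5.5  -> expected result of avg(col11)..avg(col14)
print(np.average(floatData))  # ~4.6 -> expected result of avg(col5)/avg(col6)
```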
@@ -34,11 +34,11 @@ class TDTestCase:
         floatData = []

         #tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-        #            col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+        #            col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         #tdSql.execute("create table test1 using test tags('beijing')")
         for i in range(self.rowNum):
-            #tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-            #            % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+            #tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+            #            % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
             intData.append(i + 1)
             floatData.append(i + 0.1)

@@ -64,6 +64,14 @@ class TDTestCase:
         tdSql.checkData(0, 0, np.average(floatData))
         tdSql.query("select avg(col6) from test")
         tdSql.checkData(0, 0, np.average(floatData))
+        tdSql.query("select avg(col11) from test")
+        tdSql.checkData(0, 0, np.average(intData))
+        tdSql.query("select avg(col12) from test")
+        tdSql.checkData(0, 0, np.average(intData))
+        tdSql.query("select avg(col13) from test")
+        tdSql.checkData(0, 0, np.average(intData))
+        tdSql.query("select avg(col14) from test")
+        tdSql.checkData(0, 0, np.average(intData))

     def stop(self):
         tdSql.close()

@@ -31,11 +31,11 @@ class TDTestCase:
         tdSql.prepare()

         tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-                    col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         tdSql.execute("create table test1 using test tags('beijing')")
         for i in range(self.rowNum):
-            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

         # bottom verifacation
         tdSql.error("select bottom(ts, 10) from test")

@@ -85,6 +85,26 @@ class TDTestCase:
         tdSql.checkData(0, 1, 0.1)
         tdSql.checkData(1, 1, 1.1)

+        tdSql.query("select bottom(col11, 2) from test")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 1, 1)
+        tdSql.checkData(1, 1, 2)
+
+        tdSql.query("select bottom(col12, 2) from test")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 1, 1)
+        tdSql.checkData(1, 1, 2)
+
+        tdSql.query("select bottom(col13, 2) from test")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 1, 1)
+        tdSql.checkData(1, 1, 2)
+
+        tdSql.query("select bottom(col14, 2) from test")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 1, 1)
+        tdSql.checkData(1, 1, 2)
+
         #TD-2457 bottom + interval + order by
         tdSql.error('select top(col2,1) from test interval(1y) order by col2;')

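bottom(colX, 2) returns the two smallest values of the column, so for unsigned columns filled with 1..10 the new assertions expect 1 and 2 (and 0.1, 1.1 for the float column). A plain-Python sketch of the same expectation:

```python
# What the new bottom(colX, 2) checks expect, computed locally with heapq.
import heapq

col11 = [i + 1 for i in range(10)]     # unsigned column values written by the test
print(heapq.nsmallest(2, col11))       # [1, 2]

col5 = [i + 0.1 for i in range(10)]    # float column values
print(heapq.nsmallest(2, col5))        # [0.1, 1.1]
```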
@@ -31,11 +31,11 @@ class TDTestCase:
         tdSql.execute("use db")

         #tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-        #            col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+        #            col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         #tdSql.execute("create table test1 using test tags('beijing')")
         #for i in range(self.rowNum):
-        #    tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-        #        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+        #    tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+        #        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

         # bottom verifacation
         tdSql.error("select bottom(ts, 10) from test")

@@ -75,6 +75,26 @@ class TDTestCase:
         tdSql.checkData(0, 1, 1)
         tdSql.checkData(1, 1, 2)

+        tdSql.query("select bottom(col11, 2) from test")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 1, 1)
+        tdSql.checkData(1, 1, 2)
+
+        tdSql.query("select bottom(col12, 2) from test")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 1, 1)
+        tdSql.checkData(1, 1, 2)
+
+        tdSql.query("select bottom(col13, 2) from test")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 1, 1)
+        tdSql.checkData(1, 1, 2)
+
+        tdSql.query("select bottom(col14, 2) from test")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 1, 1)
+        tdSql.checkData(1, 1, 2)
+
         tdSql.query("select bottom(col5, 2) from test")
         tdSql.checkRows(2)
         tdSql.checkData(0, 1, 0.1)

@@ -31,11 +31,11 @@ class TDTestCase:
         tdSql.prepare()

         tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-                    col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         tdSql.execute("create table test1 using test tags('beijing')")
         for i in range(self.rowNum):
-            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

         # Count verifacation
         tdSql.query("select count(*) from test")

@@ -62,11 +62,20 @@ class TDTestCase:
         tdSql.query("select count(col9) from test")
         tdSql.checkData(0, 0, 10)

+        tdSql.query("select count(col11) from test")
+        tdSql.checkData(0, 0, 10)
+        tdSql.query("select count(col12) from test")
+        tdSql.checkData(0, 0, 10)
+        tdSql.query("select count(col13) from test")
+        tdSql.checkData(0, 0, 10)
+        tdSql.query("select count(col14) from test")
+        tdSql.checkData(0, 0, 10)
+
         tdSql.execute("alter table test add column col10 int")
         tdSql.query("select count(col10) from test")
         tdSql.checkRows(0)

-        tdSql.execute("insert into test1 values(now, 1, 2, 3, 4, 1.1, 2.2, false, 'test', 'test' 1)")
+        tdSql.execute("insert into test1 values(now, 1, 2, 3, 4, 1.1, 2.2, false, 'test', 'test' , 1, 1, 1, 1, 1)")
         tdSql.query("select count(col10) from test")
         tdSql.checkData(0, 0, 1)

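The col10 part of this test leans on count() ignoring NULLs: col10 is added after the first ten rows already exist, so only the row inserted afterwards carries a value for it. A small plain-Python sketch of that expectation:

```python
# Why select count(col10) from test returns 1 after the alter plus single insert above.
col10 = [None] * 10 + [1]                       # 10 pre-existing rows, then 1 new row
non_null = sum(1 for v in col10 if v is not None)
print(non_null)                                 # 1
```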
@@ -31,11 +31,11 @@ class TDTestCase:
         tdSql.execute("use db")

         #tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-        #            col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+        #            col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         #tdSql.execute("create table test1 using test tags('beijing')")
         #for i in range(self.rowNum):
-        #    tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-        #        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+        #    tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+        #        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

         # Count verifacation
         tdSql.query("select count(*) from test")

@@ -62,6 +62,15 @@ class TDTestCase:
         tdSql.query("select count(col9) from test")
         tdSql.checkData(0, 0, 11)

+        tdSql.query("select count(col11) from test")
+        tdSql.checkData(0, 0, 11)
+        tdSql.query("select count(col12) from test")
+        tdSql.checkData(0, 0, 11)
+        tdSql.query("select count(col13) from test")
+        tdSql.checkData(0, 0, 11)
+        tdSql.query("select count(col14) from test")
+        tdSql.checkData(0, 0, 11)
+
         #tdSql.execute("alter table test add column col10 int")
         #tdSql.query("select count(col10) from test")
         #tdSql.checkRows(0)

@@ -31,9 +31,9 @@ class TDTestCase:
         tdSql.prepare()

         tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-                    col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         tdSql.execute("create table test1 using test tags('beijing')")
-        tdSql.execute("insert into test1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ')" % (self.ts - 1))
+        tdSql.execute("insert into test1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ', 0, 0, 0, 0)" % (self.ts - 1))

         # diff verifacation
         tdSql.query("select diff(col1) from test1")

@@ -55,8 +55,8 @@ class TDTestCase:
         tdSql.checkRows(0)

         for i in range(self.rowNum):
-            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

         tdSql.error("select diff(ts) from test")
         tdSql.error("select diff(ts) from test1")

@@ -72,6 +72,15 @@ class TDTestCase:
         tdSql.error("select diff(col8) from test1")
         tdSql.error("select diff(col9) from test")
         tdSql.error("select diff(col9) from test1")
+        tdSql.error("select diff(col11) from test1")
+        tdSql.error("select diff(col12) from test1")
+        tdSql.error("select diff(col13) from test1")
+        tdSql.error("select diff(col14) from test1")
+        tdSql.error("select diff(col11) from test")
+        tdSql.error("select diff(col12) from test")
+        tdSql.error("select diff(col13) from test")
+        tdSql.error("select diff(col14) from test")

         tdSql.query("select diff(col1) from test1")
         tdSql.checkRows(10)

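diff() is asserted to fail for the unsigned columns, while for the signed columns the test expects ten result rows from the eleven stored values (the all-zero row at ts-1 plus 1..10). The row-count arithmetic, sketched with numpy:

```python
# Why select diff(col1) from test1 is expected to return 10 rows.
import numpy as np

col1 = [0] + [i + 1 for i in range(10)]   # row at ts-1 holds 0, then 1..10
print(np.diff(col1))                      # [1 1 1 1 1 1 1 1 1 1]
print(len(np.diff(col1)))                 # 10 -> matches tdSql.checkRows(10)
```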
@@ -31,7 +31,7 @@ class TDTestCase:
         tdSql.execute("use db")

         #tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-        #            col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+        #            col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         #tdSql.execute("create table test1 using test tags('beijing')")
         #tdSql.execute("insert into test1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ')" % (self.ts - 1))

@@ -55,8 +55,8 @@ class TDTestCase:
         #tdSql.checkRows(0)

         #for i in range(self.rowNum):
-        #    tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-        #        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+        #    tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+        #        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

         tdSql.error("select diff(ts) from test")
         tdSql.error("select diff(ts) from test1")

@@ -71,7 +71,14 @@ class TDTestCase:
         tdSql.error("select diff(col8) from test")
         tdSql.error("select diff(col8) from test1")
         tdSql.error("select diff(col9) from test")
-        tdSql.error("select diff(col9) from test1")
+        tdSql.error("select diff(col11) from test1")
+        tdSql.error("select diff(col12) from test1")
+        tdSql.error("select diff(col13) from test1")
+        tdSql.error("select diff(col14) from test1")
+        tdSql.error("select diff(col11) from test")
+        tdSql.error("select diff(col12) from test")
+        tdSql.error("select diff(col13) from test")
+        tdSql.error("select diff(col14) from test")

         tdSql.query("select diff(col1) from test1")
         tdSql.checkRows(10)

@@ -31,7 +31,7 @@ class TDTestCase:
         tdSql.prepare()

         tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-                    col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         tdSql.execute("create table test1 using test tags('beijing')")
         tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1))

@@ -52,6 +52,18 @@ class TDTestCase:
         tdSql.query("select first(col4) from test1")
         tdSql.checkRows(0)

+        tdSql.query("select first(col11) from test1")
+        tdSql.checkRows(0)
+
+        tdSql.query("select first(col12) from test1")
+        tdSql.checkRows(0)
+
+        tdSql.query("select first(col13) from test1")
+        tdSql.checkRows(0)
+
+        tdSql.query("select first(col14) from test1")
+        tdSql.checkRows(0)
+
         tdSql.query("select first(col5) from test1")
         tdSql.checkRows(0)

@@ -68,8 +80,8 @@ class TDTestCase:
         tdSql.checkRows(0)

         for i in range(self.rowNum):
-            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

         tdSql.query("select first(*) from test1")
         tdSql.checkRows(1)

@@ -91,6 +103,22 @@ class TDTestCase:
         tdSql.checkRows(1)
         tdSql.checkData(0, 0, 1)

+        tdSql.query("select first(col11) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 1)
+
+        tdSql.query("select first(col12) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 1)
+
+        tdSql.query("select first(col13) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 1)
+
+        tdSql.query("select first(col14) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 1)
+
         tdSql.query("select first(col5) from test1")
         tdSql.checkRows(1)
         tdSql.checkData(0, 0, 0.1)

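first(col) returns the earliest non-NULL value of the column, which is why the test expects no result row while only the ts-only row exists and the value 1 once the real rows are in. A hypothetical helper that mirrors the behaviour these assertions rely on:

```python
# Hypothetical mirror of first(col): earliest non-NULL value in timestamp order.
def first_non_null(values):
    return next((v for v in values if v is not None), None)

print(first_non_null([None]))                               # None -> the checkRows(0) case
print(first_non_null([None] + [i + 1 for i in range(10)]))  # 1    -> checkData(0, 0, 1)
```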
@@ -31,7 +31,7 @@ class TDTestCase:
         tdSql.prepare()

         tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-                    col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         tdSql.execute("create table test1 using test tags('beijing')")
         tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1))

@@ -52,6 +52,18 @@ class TDTestCase:
         tdSql.query("select last(col4) from test1")
         tdSql.checkRows(0)

+        tdSql.query("select last(col11) from test1")
+        tdSql.checkRows(0)
+
+        tdSql.query("select last(col12) from test1")
+        tdSql.checkRows(0)
+
+        tdSql.query("select last(col13) from test1")
+        tdSql.checkRows(0)
+
+        tdSql.query("select last(col14) from test1")
+        tdSql.checkRows(0)
+
         tdSql.query("select last(col5) from test1")
         tdSql.checkRows(0)

@@ -68,8 +80,8 @@ class TDTestCase:
         tdSql.checkRows(0)

         for i in range(self.rowNum):
-            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

         tdSql.query("select last(*) from test1")
         tdSql.checkRows(1)

@@ -91,6 +103,22 @@ class TDTestCase:
         tdSql.checkRows(1)
         tdSql.checkData(0, 0, 10)

+        tdSql.query("select last(col11) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 10)
+
+        tdSql.query("select last(col12) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 10)
+
+        tdSql.query("select last(col13) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 10)
+
+        tdSql.query("select last(col14) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 10)
+
         tdSql.query("select last(col5) from test1")
         tdSql.checkRows(1)
         tdSql.checkData(0, 0, 9.1)

@@ -31,7 +31,7 @@ class TDTestCase:
         tdSql.prepare()

         tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-                    col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         tdSql.execute("create table test1 using test tags('beijing')")
         tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1))

@@ -56,6 +56,22 @@ class TDTestCase:
         tdSql.checkRows(1)
         tdSql.checkData(0, 0, None)

+        tdSql.query("select last_row(col11) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, None)
+
+        tdSql.query("select last_row(col12) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, None)
+
+        tdSql.query("select last_row(col13) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, None)
+
+        tdSql.query("select last_row(col14) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, None)
+
         tdSql.query("select last_row(col5) from test1")
         tdSql.checkRows(1)
         tdSql.checkData(0, 0, None)

@@ -77,8 +93,8 @@ class TDTestCase:
         tdSql.checkData(0, 0, None)

         for i in range(self.rowNum):
-            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

         tdSql.query("select last_row(*) from test1")
         tdSql.checkRows(1)

@@ -100,6 +116,22 @@ class TDTestCase:
         tdSql.checkRows(1)
         tdSql.checkData(0, 0, 10)

+        tdSql.query("select last_row(col11) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 10)
+
+        tdSql.query("select last_row(col12) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 10)
+
+        tdSql.query("select last_row(col13) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 10)
+
+        tdSql.query("select last_row(col14) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 10)
+
         tdSql.query("select last_row(col5) from test1")
         tdSql.checkRows(1)
         tdSql.checkData(0, 0, 9.1)

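Taken together, the last and last_row hunks rely on the difference between the two functions: last(col) skips NULLs, while last_row(col) reports the newest row's value even when it is NULL. A hypothetical pair of helpers that mirrors what these assertions expect:

```python
# Hypothetical mirrors of last(col) vs last_row(col) over values in insert order.
def last(values):
    return next((v for v in reversed(values) if v is not None), None)

def last_row(values):
    return values[-1] if values else None

only_ts_row = [None]                       # only the ts-only row exists
print(last_row(only_ts_row))               # None -> checkData(0, 0, None)
full = [None] + [i + 1 for i in range(10)]
print(last(full), last_row(full))          # 10 10 -> both tests expect 10 after the inserts
```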
@@ -31,11 +31,11 @@ class TDTestCase:
         tdSql.prepare()

         tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-                    col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         tdSql.execute("create table test1 using test tags('beijing')")
         for i in range(self.rowNum):
-            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

         # leastsquares verifacation
         tdSql.error("select leastsquares(ts, 1, 1) from test1")

@@ -48,6 +48,10 @@ class TDTestCase:
         tdSql.error("select leastsquares(col7, 1, 1) from test1")
         tdSql.error("select leastsquares(col8, 1, 1) from test1")
         tdSql.error("select leastsquares(col9, 1, 1) from test1")
+        tdSql.error("select leastsquares(col11, 1, 1) from test")
+        tdSql.error("select leastsquares(col12, 1, 1) from test")
+        tdSql.error("select leastsquares(col13, 1, 1) from test")
+        tdSql.error("select leastsquares(col14, 1, 1) from test")

         tdSql.query("select leastsquares(col1, 1, 1) from test1")
         tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')

@@ -61,6 +65,18 @@ class TDTestCase:
         tdSql.query("select leastsquares(col4, 1, 1) from test1")
         tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')

+        tdSql.query("select leastsquares(col11, 1, 1) from test1")
+        tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')
+
+        tdSql.query("select leastsquares(col12, 1, 1) from test1")
+        tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')
+
+        tdSql.query("select leastsquares(col13, 1, 1) from test1")
+        tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')
+
+        tdSql.query("select leastsquares(col14, 1, 1) from test1")
+        tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')
+
         tdSql.query("select leastsquares(col5, 1, 1) from test1")
         tdSql.checkData(0, 0, '{slop:1.000000, intercept:-0.900000}')

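The expected '{slop:1.000000, intercept:0.000000}' strings follow from the inserted data: the columns hold 1..10 and, judging by the expected intercepts, the x sequence used by leastsquares(col, 1, 1) starts at 1 with step 1 (an assumption consistent with both expected results). A numpy sketch that reproduces the two fitted lines:

```python
# Reproducing the leastsquares expectations above with numpy.polyfit.
import numpy as np

x = np.arange(1, 11)                           # start value 1, step value 1 (assumed)
col11 = np.arange(1, 11, dtype=float)          # 1..10, as inserted by the test
slope, intercept = np.polyfit(x, col11, 1)
print(round(slope, 6), round(intercept, 6))    # approximately 1.0 and 0.0

col5 = np.array([i + 0.1 for i in range(10)])  # 0.1..9.1
slope5, intercept5 = np.polyfit(x, col5, 1)
print(round(slope5, 6), round(intercept5, 6))  # approximately 1.0 and -0.9
```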
Some files were not shown because too many files have changed in this diff.