Merge branch 'develop' into feature/TD-5838

wangmm0220 2021-08-16 19:00:00 +08:00
commit 848547426c
120 changed files with 25720 additions and 2142 deletions


@ -15,7 +15,7 @@ steps:
- mkdir debug
- cd debug
- cmake ..
- make -j4
trigger:
event:
- pull_request
@ -39,7 +39,7 @@ steps:
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make -j4
trigger:
event:
- pull_request
@ -66,7 +66,7 @@ steps:
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make -j4
trigger:
event:
- pull_request
@ -91,7 +91,7 @@ steps:
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make -j4
trigger:
event:
- pull_request
@ -116,7 +116,7 @@ steps:
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make -j4
trigger:
event:
- pull_request
@ -142,7 +142,7 @@ steps:
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch32 > /dev/null
- make -j4
trigger:
event:
- pull_request
@ -168,7 +168,7 @@ steps:
- mkdir debug
- cd debug
- cmake ..
- make -j4
trigger:
event:
- pull_request
@ -193,7 +193,7 @@ steps:
- mkdir debug
- cd debug
- cmake ..
- make -j4
trigger:
event:
- pull_request
@ -218,7 +218,7 @@ steps:
- mkdir debug
- cd debug
- cmake ..
- make -j4
trigger:
event:
- pull_request
@ -241,7 +241,7 @@ steps:
- mkdir debug
- cd debug
- cmake ..
- make -j4
trigger:
event:
- pull_request

Jenkinsfile

@ -5,7 +5,7 @@ node {
git url: 'https://github.com/taosdata/TDengine.git'
}
def skipbuild=0
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
@ -33,8 +33,7 @@ def abort_previous(){
milestone(buildNumber)
}
def pre_test(){
sh '''
sudo rmtaos || echo "taosd has not installed"
'''
@ -52,7 +51,13 @@ def pre_test(){
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WKC}
git checkout 2.0
'''
}
else{
sh '''
cd ${WKC}
git checkout develop
@ -75,7 +80,13 @@ def pre_test(){
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WK}
git checkout 2.0
'''
}
else{
sh '''
cd ${WK}
git checkout develop
@ -95,19 +106,17 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
pip3 install ${WKC}/src/connector/python/
'''
return 1
}
pipeline {
agent none
environment{
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC= '/var/lib/jenkins/workspace/TDinternal/community'
}
stages {
stage('pre_build'){
agent{label 'master'}
@ -123,19 +132,22 @@ pipeline {
rm -rf ${WORKSPACE}.tes
cp -r ${WORKSPACE} ${WORKSPACE}.tes
cd ${WORKSPACE}.tes
git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
git checkout master
git pull origin master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
git checkout 2.0
'''
}
else{
sh '''
git checkout develop
git pull origin develop
'''
}
}
@ -145,30 +157,32 @@ pipeline {
'''
script{
env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true)
skipbuild='2'
skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true)
println skipbuild
}
println env.skipstage
sh'''
rm -rf ${WORKSPACE}.tes
'''
}
}
stage('Parallel test stage') {
//only build pr
when {
allOf{
changeRequest()
expression{
return skipbuild.trim() == '2'
}
}
}
parallel {
stage('python_1_s1') {
agent{label " slave1 || slave11 "}
steps {
pre_test()
timeout(time: 55, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
@ -179,11 +193,11 @@ pipeline {
}
}
stage('python_2_s5') {
agent{label " slave5 || slave15 "}
steps {
pre_test()
timeout(time: 55, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
@ -193,9 +207,9 @@ pipeline {
}
}
stage('python_3_s6') {
agent{label " slave6 || slave16 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
date
@ -206,9 +220,9 @@ pipeline {
}
}
stage('test_b1_s2') {
agent{label " slave2 || slave12 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
cd ${WKC}/tests
@ -217,9 +231,8 @@ pipeline {
}
}
}
stage('test_crash_gen_s3') {
agent{label "b2"}
agent{label " slave3 || slave13 "}
steps {
pre_test()
@ -245,7 +258,7 @@ pipeline {
./handle_taosd_val_log.sh
'''
}
timeout(time: 55, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
@ -253,12 +266,10 @@ pipeline {
date
'''
}
}
}
stage('test_valgrind_s4') {
agent{label "b3"}
agent{label " slave4 || slave14 "}
steps {
pre_test()
@ -269,7 +280,7 @@ pipeline {
./handle_val_log.sh
'''
}
timeout(time: 55, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
@ -284,9 +295,9 @@ pipeline {
}
}
stage('test_b4_s7') {
agent{label " slave7 || slave17 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
date
@ -303,9 +314,9 @@ pipeline {
}
}
stage('test_b5_s8') {
agent{label " slave8 || slave18 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
date
@ -316,9 +327,9 @@ pipeline {
}
}
stage('test_b6_s9') {
agent{label " slave9 || slave19 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
date
@ -329,9 +340,9 @@ pipeline {
}
}
stage('test_b7_s10') {
agent{label " slave10 || slave20 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
date
@ -422,5 +433,4 @@ pipeline {
)
}
}
}


@ -180,7 +180,7 @@ IF (TD_WINDOWS)
ADD_DEFINITIONS(-D_MBCS -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE)
SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE)
IF (NOT TD_GODLL)
SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd5999 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()


@ -34,12 +34,22 @@ ENDIF ()
#
# Set compiler options
IF (TD_LINUX)
SET(COMMON_C_FLAGS "${COMMON_FLAGS} -std=gnu99")
ELSE ()
SET(COMMON_C_FLAGS "${COMMON_FLAGS} ")
ENDIF ()
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_C_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_C_FLAGS} ${RELEASE_FLAGS}")
# Set c++ compiler options
IF (TD_WINDOWS)
SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11")
ELSE ()
SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11 -Wno-unused-function")
ENDIF ()
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")


@ -6,7 +6,7 @@
TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, Go, Python 连接器发送 SQL 语句,用户可以通过 TDengine 提供的命令行Command Line Interface, CLI工具 TAOS Shell 手动执行 SQL 即席查询Ad-Hoc Query。TDengine 支持如下查询功能:
- 单列、多列数据查询
- 标签和数值的多种过滤条件:>, <, =, <>, like 等
- 聚合结果的分组Group by、排序Order by、约束输出Limit/Offset
- 数值列及聚合结果的四则运算
- 时间戳对齐的连接查询Join Query: 隐式连接)操作


@ -138,7 +138,7 @@ select * from meters where ts > now - 1d and current > 10;
订阅的`topic`实际上是它的名字因为订阅功能是在客户端API中实现的所以没必要保证它全局唯一但需要它在一台客户端机器上唯一。
如果名`topic`的订阅不存在,参数`restart`没有意义;但如果用户程序创建这个订阅后退出,当它再次启动并重新使用这个`topic`时,`restart`就会被用于决定是从头开始读取数据,还是接续上次的位置进行读取。本例中,如果`restart`是 **true**(非零值),用户程序肯定会读到所有数据。但如果这个订阅之前就存在了,并且已经读取了一部分数据,且`restart`是 **false****0**),用户程序就不会读到之前已经读取的数据了。
`taos_subscribe`的最后一个参数是以毫秒为单位的轮询周期。在同步模式下,如果前后两次调用`taos_consume`的时间间隔小于此时间,`taos_consume`会阻塞,直到间隔超过此时间。异步模式下,这个时间是两次调用回调函数的最小时间间隔。
@ -179,7 +179,8 @@ void print_result(TAOS_RES* res, int blockFetch) {
  } else {
    while ((row = taos_fetch_row(res))) {
      char temp[256];
      taos_print_row(temp, row, fields, num_fields);
      puts(temp);
      nRows++;
    }
  }
@ -211,14 +212,14 @@ taos_unsubscribe(tsub, keep);
则可以在示例代码所在目录执行以下命令来编译并启动示例程序:
```bash
$ make
$ ./subscribe -sql='select * from meters where current > 10;'
```
示例程序启动后,打开另一个终端窗口,启动 TDengine 的 shell 向 **D1001** 插入一条电流为 12A 的数据:
```sql
$ taos
> use test;
> insert into D1001 values(now, 12, 220, 1);
@ -313,7 +314,7 @@ public class SubscribeDemo {
运行示例程序,首先,它会消费符合查询条件的所有历史数据:
```bash
# java -jar subscribe.jar
ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
@ -333,16 +334,16 @@ taos> insert into d1001 values("2020-08-15 12:40:00.000", 12.4, 220, 1);
因为这条数据的电流大于10A示例程序会将其消费
```
ts: 1597466400000 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2
```
## <a class="anchor" id="cache"></a>缓存Cache
TDengine采用时间驱动缓存管理策略First-In-First-OutFIFO又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式Least-Recent-UsedLRU直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候将最早的数据批量写入磁盘。一般意义上来说对于物联网数据的使用用户最为关心最近产生的数据即当前状态。TDengine充分利用了这一特性将最近到达的当前状态数据保存在缓存中。
TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中可以更加快速地响应用户针对最近一条或一批数据的查询分析整体上提供更快的数据库查询响应能力。从这个意义上来说可通过设置合适的配置参数将TDengine作为数据缓存来使用而不需要再部署额外的缓存系统可有效地简化系统架构降低运维的成本。需要注意的是TDengine重启以后系统的缓存将被清空之前缓存的数据均会被批量写入磁盘缓存的数据将不会像专门的key-value缓存系统再将之前缓存的数据重新加载到缓存中。
TDengine分配固定大小的内存空间作为缓存空间缓存空间可根据应用的需求和硬件资源配置。通过适当的设置缓存空间TDengine可以提供极高性能的写入和查询的支持。TDengine中每个虚拟节点virtual node创建时分配独立的缓存池。每个虚拟节点管理自己的缓存池不同虚拟节点间不共享缓存池。每个虚拟节点内部所属的全部表共享该虚拟节点的缓存池。


@ -1,6 +1,72 @@
# Java Connector
## 安装
Java连接器支持的系统有 Linux 64/Windows x64/Windows x86。
**安装前准备:**
- 已安装TDengine服务器端
- 已安装好TDengine应用驱动具体请参照 [安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver) 章节
TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索并下载。
由于 TDengine 的应用驱动是使用C语言开发的使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
- libtaos.so 在 Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so该目录包含在 Linux 自动扫描路径上,无需单独指定。
- taos.dll 在 Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
注意:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client)Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。
### 如何获取 TAOS-JDBCDriver
**maven仓库**
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。
- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [maven.aliyun](https://maven.aliyun.com/mvn/search)
maven 项目中使用如下 pom.xml 配置即可:
```xml
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.18</version>
</dependency>
```
**源码编译打包**
下载 TDengine 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。
### 示例程序
示例程序源码位于install_directory/examples/JDBC有如下目录
- JDBCDemoJDBC示例源程序
- JDBCConnectorCheckerJDBC安装校验源程序及jar包
- Springbootdemospringboot示例源程序
- SpringJdbcTemplateSpringJDBC模板
### 安装验证
运行如下指令:
```bash
cd {install_directory}/examples/JDBC/JDBCConnectorChecker
java -jar JDBCConnectorChecker.jar -host <fqdn>
```
验证通过将打印出成功信息。
## Java连接器的使用
`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTfultaos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful。 JDBC-JNI 通过调用客户端 libtaos.so或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。
@ -20,7 +86,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询taos-jdbcdriver 会自动关闭上一个 ResultSet。
### JDBC-JNI和JDBC-RESTful的对比
<table >
<tr align="center"><th>对比项</th><th>JDBC-JNI</th><th>JDBC-RESTful</th></tr>
@ -51,33 +117,34 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
注意:与 JNI 方式不同RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。
### <a class="anchor" id="version"></a>TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
| 2.0.31 | 2.1.3.0 及以上 | 1.8.x |
| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
### TDengine DataType 和 Java DataType
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
| TDengine DataType | Java DataType |
| ----------------- | ------------------ |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT | java.lang.Short |
| TINYINT | java.lang.Byte |
| BOOL | java.lang.Boolean |
| BINARY | byte array |
| NCHAR | java.lang.String |
### 获取连接
@ -112,12 +179,12 @@ Connection conn = DriverManager.getConnection(jdbcUrl);
**注意**:使用 JDBC-JNI 的 drivertaos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
* libtaos.so
Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so该目录包含在 Linux 自动扫描路径上,无需单独指定。
* taos.dll
Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
> 在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14]Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
JDBC-JNI 的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。
@ -166,8 +233,7 @@ properties 中的配置参数如下:
#### 使用客户端配置文件建立连接
当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。如下所示
1. 在 Java 应用中不指定 hostname 和 port
@ -214,7 +280,7 @@ TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可
例如:在 url 中指定了 password 为 taosdata在 Properties 中指定了 password 为 taosdemo那么JDBC 会使用 url 中的 password 建立连接。
> 更多详细配置请参考[客户端配置](https://www.taosdata.com/cn/documentation/administrator/#client)
### 创建数据库和表
@ -242,8 +308,8 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now
System.out.println("insert " + affectedRows + " rows.");
```
> now 为系统内部函数,默认为客户端所在计算机当前时间。
> `now + 1s` 代表客户端当前时间往后加 1 秒数字后面代表时间单位a(毫秒)s(秒)m(分)h(小时)d(天)w(周)n(月)y(年)。
### 查询数据
@ -464,7 +530,7 @@ conn.close();
```
> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
> 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP)
**Druid**
@ -505,13 +571,13 @@ public static void main(String[] args) throws Exception {
}
```
> 更多 druid 使用问题请查看[官方说明](https://github.com/alibaba/druid)
**注意事项**
* TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。
如下所示,`select server_status()` 执行成功会返回 `1`
```sql
taos> select server_status();
server_status()|
================
@ -521,43 +587,10 @@ Query OK, 1 row(s) in set (0.000141s)
## 在框架中使用
* Spring JdbcTemplate 中使用 taos-jdbcdriver可参考 [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate)
* Springboot + Mybatis 中使用,可参考 [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo)
@ -567,7 +600,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
**原因**:程序没有找到依赖的本地函数库 taos。
**解决方法**windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
**解决方法**Windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下Linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
@ -575,21 +608,5 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
**解决方法**:重新安装 64 位 JDK。
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[3]: https://github.com/taosdata/TDengine
[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
[5]: https://github.com/brettwooldridge/HikariCP
[6]: https://github.com/alibaba/druid
[7]: https://github.com/taosdata/TDengine/issues
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[10]: https://maven.aliyun.com/mvn/search
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
[13]: https://www.taosdata.com/cn/documentation/administrator/#client
[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
[15]: https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF
* 其它问题请参考 [Issues](https://github.com/taosdata/TDengine/issues)


@ -23,7 +23,7 @@ TDengine提供了丰富的应用程序开发接口其中包括C/C++、Java、
* 在没有安装TDengine服务端软件的系统中使用连接器除RESTful外访问 TDengine 数据库需要安装相应版本的客户端安装包来使应用驱动Linux系统中文件名为libtaos.soWindows系统中为taos.dll被安装在系统中否则会产生无法找到相应库文件的错误。
* 所有执行 SQL 语句的 API例如 C/C++ Connector 中的 `tao_query`、`taos_query_a`、`taos_subscribe` 等以及其它语言中与它们对应的API每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。
* 升级到TDengine到2.0.8.0版本的用户必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。详细的版本依赖关系请参见 [taos-jdbcdriver 文档](https://www.taosdata.com/cn/documentation/connector/java#version)。
* 升级 TDengine 到 2.0.8.0 版本的用户,必须更新 JDBC。连接 TDengine 必须升级 taos-jdbcdriver 到 2.0.12 及以上。详细的版本依赖关系请参见 [taos-jdbcdriver 文档](https://www.taosdata.com/cn/documentation/connector/java#version)。
* 无论选用何种编程语言的连接器2.0 及以上版本的 TDengine 推荐数据库应用的每个线程都建立一个独立的连接或基于线程建立连接池以避免连接内的“USE statement”状态量在线程之间相互干扰但连接的查询和写入操作都是线程安全的
## <a class="anchor" id="driver"></a>安装连接器驱动步骤
@ -32,7 +32,7 @@ TDengine提供了丰富的应用程序开发接口其中包括C/C++、Java、
**Linux**
**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载<!-- REPLACE_OPEN_TO_ENTERPRISE__YOU_CAN_GET_CONNECTOR_DRIVER_FROM_TAOS -->**
* X64硬件环境TDengine-client-2.x.x.x-Linux-x64.tar.gz
@ -46,7 +46,7 @@ TDengine提供了丰富的应用程序开发接口其中包括C/C++、Java、
`tar -xzvf TDengine-client-xxxxxxxxx.tar.gz`
其中xxxxxxxxx需要替换为实际版本的字符串。
**3. 执行安装脚本**
@ -68,7 +68,7 @@ TDengine提供了丰富的应用程序开发接口其中包括C/C++、Java、
**Windows x64/x86**
**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载<!-- REPLACE_OPEN_TO_ENTERPRISE__YOU_CAN_GET_CONNECTOR_DRIVER_WINDOWS_FROM_TAOS -->**
* X64硬件环境TDengine-client-2.X.X.X-Windows-x64.exe
@ -99,13 +99,13 @@ TDengine提供了丰富的应用程序开发接口其中包括C/C++、Java、
**2卸载运行unins000.exe可卸载TDengine应用驱动。**
### 安装验证
以上安装和配置完成后并确认TDengine服务已经正常启动运行此时可以执行taos客户端进行登录。
**Linux环境**
Linux shell下直接执行 taos应该就能正常连接到TDengine服务进入到taos shell界面示例如下
```mysql
$ taos
@ -146,7 +146,10 @@ taos>
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **开发中** |
C/C++的API类似于MySQL的C API。应用程序使用时需要包含TDengine头文件 *taos.h*里面列出了提供的API的函数原型。安装后taos.h位于
- Linux`/usr/local/taos/include`
- Windows`C:\TDengine\include`
```C
#include <taos.h>
@ -156,9 +159,22 @@ C/C++的API类似于MySQL的C API。应用程序使用时需要包含TDengine
* 在编译时需要链接TDengine动态库。Linux 为 *libtaos.so* ,安装后,位于 _/usr/local/taos/driver_。Windows为 taos.dll安装后位于 *C:\TDengine*
* 如未特别说明当API的返回值是整数时_0_ 代表成功,其它是代表失败原因的错误码,当返回值是指针时, _NULL_ 表示失败。
* 在 taoserror.h中有所有的错误码以及对应的原因描述。
### 示例程序
使用C/C++连接器的示例代码请参见 https://github.com/taosdata/TDengine/tree/develop/tests/examples/c 。
示例程序源码也可以在安装目录下的 examples/c 路径下找到:
**apitest.c、asyncdemo.c、demo.c、prepare.c、stream.c、subscribe.c**
该目录下有makefile在Linux环境下直接执行make就可以编译得到执行文件。
在一台机器上启动TDengine服务执行这些示例程序按照提示输入TDengine服务的FQDN就可以正常运行并打印出信息。
**提示:**在ARM环境下编译时请将makefile中的-msse4.2去掉这个选项只有在x64/x86硬件平台上才能支持。
### 基础API
基础API用于完成创建数据库连接等工作为其它API的执行提供运行时环境。
@ -187,7 +203,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时需要包含TDengine
- user用户名
- pass密码
- db数据库名字如果用户没有提供也可以正常连接用户可以通过该连接创建新的数据库如果用户提供了数据库名字则说明该数据库用户已经创建好缺省使用该数据库
- portTDengine管理主节点的端口号
返回值为空表示失败。应用程序需要保存返回的参数以便后续API调用。
@ -201,7 +217,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时需要包含TDengine
- `void taos_close(TAOS *taos)`
关闭连接其中`taos`是`taos_connect`函数返回的指针。
### 同步查询API
@ -213,7 +229,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时需要包含TDengine
- `int taos_result_precision(TAOS_RES *res)`
返回结果集时间戳字段的精度,`0` 代表毫秒,`1` 代表微秒,`2` 代表纳秒。
- `TAOS_ROW taos_fetch_row(TAOS_RES *res)`
@ -237,13 +253,13 @@ C/C++的API类似于MySQL的C API。应用程序使用时需要包含TDengine
- `TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)`
获取查询结果集每列数据的属性(列的名称、列的数据类型、列的长度与taos_num_fileds配合使用可用来解析`taos_fetch_row`返回的一个元组(一行)的数据。 `TAOS_FIELD` 的结构如下:
```c
typedef struct taosField {
char name[65]; // 列名
uint8_t type; // 数据类型
  int16_t bytes; // 长度,单位是字节
} TAOS_FIELD;
```
@ -440,18 +456,30 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
取消订阅。 如参数 `keepProgress` 不为0API会保留订阅的进度信息后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。
<!-- REPLACE_OPEN_TO_ENTERPRISE__JAVA_CONNECTOR_DOC -->
## <a class="anchor" id="python"></a>Python Connector
Python连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1963.html)
### 安装
Python连接器支持的系统有Linux 64/Windows x64
安装前准备:
- 已安装好TDengine应用驱动请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)
- 已安装python 2.7 or >= 3.4
- 已安装pip
### Python连接器安装
**Linux**
用户可以在源代码的src/connector/python或者tar.gz的/connector/python文件夹下找到connector安装包。用户可以通过pip命令安装
@ -461,9 +489,10 @@ Python连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/
`pip3 install src/connector/python/`
**Windows**
在已安装Windows TDengine 客户端的情况下, 将文件"C:\TDengine\driver\taos.dll" 拷贝到 "C:\Windows\system32" 目录下, 然后进入Windows *cmd* 命令行界面
```bash
cd C:\TDengine\connector\python
python -m pip install .
```
@ -471,7 +500,39 @@ python -m pip install .
* 如果机器上没有pip命令用户可将src/connector/python下的taos文件夹拷贝到应用程序的目录使用。
对于windows 客户端安装TDengine windows 客户端后将C:\TDengine\driver\taos.dll拷贝到C:\windows\system32目录下即可。
### 示例程序
示例程序源码位于install_directory/examples/Python
**read_example.py Python示例源程序**
用户可以参考read_example.py这个程序来设计用户自己的写入、查询程序。
在安装了对应的应用驱动后通过import taos引入taos类。主要步骤如下
- 通过taos.connect获取TDengineConnection对象这个对象可以一个程序只申请一个在多线程中共享。
- 通过TDengineConnection对象的 .cursor()方法获取一个新的游标对象,这个游标对象必须保证每个线程独享。
- 通过游标对象的execute()方法执行写入或查询的SQL语句
- 如果执行的是写入语句execute返回的是成功写入的行数信息affected rows
- 如果执行的是查询语句则execute执行成功后需要通过fetchall方法去拉取结果集。 具体方法可以参考示例代码。
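下面给出一个串联上述步骤的最小示例(仅为示意:假设本机已启动 TDengine 服务并安装好 taos Python 包,其中数据库名 demo、表名 tb 均为假设的示例名):

```python
import taos

# 建立连接host/user/password/config 请按实际环境修改
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()

# 写入语句execute 返回成功写入的行数affected rows
c1.execute("create database if not exists demo")
c1.execute("use demo")
c1.execute("create table if not exists tb (ts timestamp, temperature int)")
affected = c1.execute("insert into tb values (now, 23)")
print("affected rows:", affected)

# 查询语句execute 成功后通过 fetchall 拉取结果集
c1.execute("select * from tb")
for row in c1.fetchall():
    print(row)

conn.close()
```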
### 安装验证
运行如下指令:
```bash
cd {install_directory}/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py -host <fqdn>
```
验证通过将打印出成功信息。
### Python连接器的使用
#### 代码示例
@ -486,7 +547,7 @@ import taos
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
```
* *host* 是TDengine 服务端所在IP*config* 为客户端配置文件所在目录
* 写入数据
@ -599,18 +660,46 @@ conn.close()
注意与标准连接器的一个区别是RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。
### 安装
RESTful接口不依赖于任何TDengine的库因此客户端不需要安装任何TDengine的库只要客户端的开发语言支持HTTP协议即可。
### 验证
在已经安装TDengine服务器端的情况下可以按照如下方式进行验证。
下面以Ubuntu环境中使用curl工具确认已经安装来验证RESTful接口的正常。
下面示例是列出所有的数据库请把h1.taosdata.com和6041缺省值替换为实际运行的TDengine服务fqdn和端口号
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql
```
返回值结果如下表示验证通过:
```json
{
"status": "succ",
"head": ["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],
"data": [
["log","2020-09-02 17:23:00.039",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,"us","ready"],
],
"rows": 1
}
```
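如果更习惯用脚本做同样的验证,下面是与上述 curl 等价的一个 Python 写法(仅为示意:假设已安装第三方包 requestsURL 请替换为实际运行的 TDengine 服务的 FQDN 和端口):

```python
import requests

# 与上面的 curl 示例等价Basic 授权串即 "root:taosdata" 的 base64 编码
url = "http://h1.taosdata.com:6041/rest/sql"
headers = {"Authorization": "Basic cm9vdDp0YW9zZGF0YQ=="}

resp = requests.post(url, headers=headers, data="show databases;")
print(resp.json())  # 正常时返回包含 status/head/data/rows 等字段的 JSON
```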
### RESTful连接器的使用
#### HTTP请求格式
```
http://<fqdn>:<port>/rest/sql
```
参数说明:
- fqdn集群中的任一台主机FQDN或IP地址
- port配置文件中httpPort配置项缺省为6041
例如http://h1.taos.com:6041/rest/sql 是指向地址为h1.taos.com:6041的URL。
HTTP请求的Header里需带有身份认证信息TDengine支持Basic认证与自定义认证两种机制后续版本将提供标准安全的数字签名机制来做身份验证。
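其中 Basic 认证所需的授权串就是 `{username}:{password}` 做 base64 编码的结果,可以自行生成(仅为示意):

```python
import base64

# 对缺省用户名密码 "root:taosdata" 编码,结果即文档示例中使用的授权串
token = base64.b64encode(b"root:taosdata").decode()
print(token)  # 输出cm9vdDp0YW9zZGF0YQ==
```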
@ -662,8 +751,8 @@ curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql
说明:
- status: 告知操作结果是成功还是失败。
- head: 表的定义如果不返回结果集则仅有一列“affected_rows”。从 2.0.17.0 版本开始,建议不要依赖 head 返回值来判断数据列类型,而推荐使用 column_meta。在未来版本中有可能会从返回值中去掉 head 这一项。)
- column_meta: 从 2.0.17.0 版本开始,返回值中增加这一项来说明 data 里每一列的数据类型。具体每个列会用三个值来说明,分别为:列名、列类型、类型长度。例如`["current",6,4]`表示列名为“current”列类型为 6也即 float 类型;类型长度为 4也即对应 4 个字节表示的 float。如果列类型为 binary 或 nchar则类型长度表示该列最多可以保存的内容长度而不是本次返回值中的具体数据长度。当列类型是 nchar 的时候,其类型长度表示可以保存的 unicode 字符数量,而不是 bytes。
- data: 具体返回的数据,一行一行的呈现,如果不返回结果集,那么就仅有[[affected_rows]]。data 中每一行的数据列顺序,与 column_meta 中描述数据列的顺序完全一致。
- rows: 表明总共多少行数据。
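结合上面的字段说明,下面用一小段 Python 演示如何按 column_meta 解析返回结果(仅为示意:假设服务端为 2.0.17.0 及以上版本、返回值中包含 column_metaresp 为 /rest/sql 返回并经 JSON 解析后的字典):

```python
def print_result(resp):
    # column_meta 的每一项为 [列名, 列类型编号, 类型长度]
    names = [meta[0] for meta in resp["column_meta"]]
    print("\t".join(names))
    # data 中每一行的列顺序与 column_meta 完全一致
    for row in resp["data"]:
        print("\t".join(str(v) for v in row))
    print("rows:", resp["rows"])
```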
@ -684,16 +773,16 @@ column_meta 中的列类型说明:
HTTP请求中需要带有授权码`<TOKEN>`,用于身份识别。授权码通常由管理员提供,可简单的通过发送`HTTP GET`请求来获取授权码,操作如下:
```bash
curl http://<fqdn>:<port>/rest/login/<username>/<password>
```
其中,`fqdn` 是 TDengine 数据库的 FQDN 或 IP 地址,`port` 是 TDengine 服务的端口号,`username`为数据库用户名,`password`为数据库密码,返回值为`JSON`格式,各字段含义如下:
- status请求结果的标志位
- code返回值代码
- desc授权码
获取授权码示例:
@ -802,10 +891,10 @@ HTTP请求URL采用`sqlutc`时返回结果集的时间戳将采用UTC时间
下面仅列出一些与RESTful接口有关的配置参数其他系统参数请看配置文件里的说明。注意配置修改后需要重启taosd服务才能生效
- 对外提供RESTful服务的端口号默认绑定到 6041实际取值是 serverPort + 11因此可以通过修改 serverPort 参数的设置来修改)
- httpMaxThreads: 启动的线程数量默认为22.0.17.0版本开始默认值改为CPU核数的一半向下取整
- restfulRowLimit: 返回结果集JSON格式的最大条数默认值为10240
- httpEnableCompress: 是否支持压缩默认不支持目前TDengine仅支持gzip压缩格式
- httpDebugFlag: 日志开关,默认131。131仅错误和报警信息135调试信息143非常详细的调试信息
## <a class="anchor" id="csharp"></a>CSharp Connector
@ -817,6 +906,12 @@ C#连接器支持的系统有Linux 64/Windows x64/Windows x86
* .NET接口文件TDengineDriver.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。
* 在Windows系统上C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作后续版本将提供ORMDapper框架驱动。
### 示例程序
示例程序源码位于install_directory/examples/C#,有:
- TDengineTest.csC#示例源程序
### 安装验证
运行install_directory/examples/C#/C#Checker/C#Checker.exe
@ -856,32 +951,52 @@ https://www.taosdata.com/blog/2020/11/02/1901.html
Go连接器支持的系统有
| **CPU类型** | x6464bit | | | aarch64 | aarch32 |
| --------------- | ------------ | -------- | -------- | -------- | ---------- |
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **开发中** |
安装前准备:
- 已安装好TDengine应用驱动参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)
### 示例程序
使用 Go 连接器的示例代码请参考 https://github.com/taosdata/TDengine/tree/develop/tests/examples/go 以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)。
示例程序源码也位于安装目录下的 examples/go/taosdemo.go 文件中
**提示建议Go版本是1.13及以上,并开启模块支持:**
```sh
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.io,direct
```
在taosdemo.go所在目录下进行编译和执行
```sh
go mod init *demo*
go build
./demo -h fqdn -p serverPort
```
### Go连接器的使用
TDengine提供了GO驱动程序包`taosSql``taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengine。
```go
import (
"database/sql"
_ "github.com/taosdata/driver-go/taosSql"
"database/sql"
_ "github.com/taosdata/driver-go/taosSql"
)
```
**提示**:下划线与双引号之间必须有一个空格。
### 常用API
- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
该API用来打开DB返回一个类型为\*DB的对象一般情况下DRIVER_NAME设置为字符串`taosSql`dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`如果客户想要用多个goroutine并发访问TDengine, 那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine
**注意** 该API成功创建的时候并没有做权限等检查只有在真正执行Query或者Exec的时候才能真正的去创建连接并同时检查user/password/host/port是不是合法。另外由于整个驱动程序大部分实现都下沉到taosSql所依赖的libtaos动态库中。所以sql.Open本身特别轻量。
- `func (db *DB) Exec(query string, args ...interface{}) (Result, error)`
@ -958,11 +1073,11 @@ npm install td2.0-connector
- 安装Visual Studio相关[Visual Studio Build 工具](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) 或者 [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community)
- 安装 [Python](https://www.python.org/downloads/) 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7`
- 进入`cmd`命令行界面,`npm config set msvs_version 2017`
如果以上步骤不能成功执行可以参考微软的node.js用户手册[Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules)
如果在Windows 10 ARM 上使用ARM64 Node.js,还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64"。
### 示例程序
@ -979,7 +1094,7 @@ Node-example-raw.js
1. 新建安装验证目录,例如:`~/tdengine-test`拷贝github上nodejsChecker.js源程序。下载地址https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js
2. 在命令行中执行以下命令:
```bash
npm init -y
@ -991,23 +1106,19 @@ node nodejsChecker.js host=localhost
### Node.js连接器的使用
以下是Node.js 连接器的一些基本使用方法,详细的使用方法可参考[TDengine Node.js connector](http://docs.taosdata.com/node)
#### 建立连接
使用node.js连接器时必须先`require td2.0-connector`,然后使用 `taos.connect` 函数建立到服务端的连接。例如如下代码:
```javascript
const taos = require('td2.0-connector');
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
var conn = taos.connect({host:"taosdemo.com", user:"root", password:"taosdata", config:"/etc/taos",port:6030})
var cursor = conn.cursor(); // Initializing a new cursor
```
建立了一个到hostname为taosdemo.com端口为6030TDengine的默认端口号的连接。连接指定了用户名root和密码taosdata。taos.connect 函数必须提供的参数是`host`其它参数在没有提供的情况下会使用如下的默认值。taos.connect 返回了`cursor`对象使用cursor来执行SQL语句。
#### 执行SQL和插入数据
@ -1061,6 +1172,14 @@ promise.then(function(result) {
result.pretty();
})
```
#### 关闭连接
在完成插入、查询等操作后,要关闭连接。代码如下:
```js
conn.close();
```
#### 异步函数
异步查询数据库的操作和上面类似,只需要在`cursor.execute`, `TaosQuery.execute`等函数后面加上`_a`。


@ -23,7 +23,7 @@ sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tde
#### 配置数据源
用户可以直接通过 localhost:3000 的网址,登录 Grafana 服务器(用户名/密码admin/admin,通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示:
![img](page://images/connections/add_datasource1.jpg)
@ -35,7 +35,7 @@ sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tde
![img](page://images/connections/add_datasource3.jpg)
* Host TDengine 集群的中任意一台服务器的 IP 地址与 TDengine RESTful 接口的端口号(6041),默认 http://localhost:6041
* UserTDengine 用户名。
* PasswordTDengine 用户密码。
@ -64,7 +64,7 @@ sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tde
#### 导入 Dashboard
在 Grafana 插件目录 /usr/local/taos/connector/grafanaplugin/dashboard 下提供了一个 `tdengine-grafana.json` 可导入的 dashboard。
点击左侧 `Import` 按钮,并上传 `tdengine-grafana.json` 文件:
@ -140,13 +140,13 @@ conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","ro
- dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE)将数据框iris写入表test中overwrite必须设置为falseappend必须设为TRUE,且数据框iris要与表test的结构一致。
- dbGetQuery(conn, "select count(*) from test"):查询语句
- dbSendUpdate(conn, "use db")执行任何非查询sql语句。例如dbSendUpdate(conn, "use db") 写入数据dbSendUpdate(conn, "insert into t1 values(now, 99)")等。
- dbReadTable(conn, "test")读取表test中数据
- dbDisconnect(conn):关闭连接
- dbRemoveTable(conn, "test")删除表test
TDengine客户端暂不支持如下函数
- dbExistsTable(conn, "test")是否存在表test
- dbListTables(conn):显示连接中的所有表


@ -1,6 +1,6 @@
# TDengine 集群安装、管理
多个TDengine服务器也就是多个taosd的运行实例可以组成一个集群以保证TDengine的高可靠运行并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看TDengine整体架构一章。而且在安装集群之前,建议先按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章安装并体验单节点功能。
集群的每个数据节点是由End Point来唯一标识的End Point是由FQDN(Fully Qualified Domain Name)外加Port组成比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname可通过Linux命令`hostname -f`获取如何配置FQDN请参考[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。端口是这个数据节点对外服务的端口号缺省是6030但可以通过taos.cfg里配置参数serverPort进行修改。一个物理节点可能配置了多个hostname, TDengine会自动获取第一个但也可以通过taos.cfg里配置参数fqdn进行指定。如果习惯IP地址直接访问可以将参数fqdn设置为本节点的IP地址。
@ -12,7 +12,7 @@ TDengine的集群管理极其简单除添加和删除节点需要人工干预
**第零步**规划集群所有物理节点的FQDN将规划好的FQDN分别添加到每个物理节点的/etc/hostname修改每个物理节点的/etc/hosts将所有集群物理节点的IP与FQDN的对应添加好。【如部署了DNS请联系网络管理员在DNS上做好相关配置】
**第一步**如果搭建集群的物理节点中存有之前的测试数据、装过1.X的版本或者装过其他版本的TDengine请先将其删除并清空所有数据具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html )
**第一步**如果搭建集群的物理节点中存有之前的测试数据、装过1.X的版本或者装过其他版本的TDengine请先将其删除并清空所有数据(如果需要保留原有数据,请联系涛思交付团队进行旧版本升级、数据迁移),具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html )
**注意1**因为FQDN的信息会写进文件如果之前没有配置或者更改FQDN且启动了TDengine。请一定在确保数据无用或者备份的前提下清理一下之前的数据`rm -rf /var/lib/taos/*`
**注意2**客户端也需要配置确保它可以正确解析每个节点的FQDN配置不管是通过DNS服务还是 Host 文件。
@ -23,23 +23,23 @@ TDengine的集群管理极其简单除添加和删除节点需要人工干预
**第四步**:检查所有数据节点,以及应用程序所在物理节点的网络设置:
1. 每个物理节点上执行命令`hostname -f`查看和确认所有节点的hostname是不相同的(应用驱动所在节点无需做此项检查)
2. 每个物理节点上执行`ping host`其中host是其他物理节点的hostname看能否ping通其它物理节点如果不能ping通需要检查网络设置或/etc/hosts文件(Windows系统默认路径为C:\Windows\system32\drivers\etc\hosts)或DNS的配置。如果无法ping通是无法组成集群的
3. 从应用运行的物理节点ping taosd运行的数据节点如果无法ping通应用是无法连接taosd的请检查应用所在物理节点的DNS设置或hosts文件
4. 每个数据节点的End Point就是输出的hostname外加端口号比如h1.taosdata.com:6030
**第五步**修改TDengine的配置文件所有节点的文件/etc/taos/taos.cfg都需要修改。假设准备启动的第一个数据节点End Point为 h1.taosdata.com:6030其与集群配置相关参数如下
```
// firstEp 是每个数据节点首次启动后连接的第一个数据节点
firstEp h1.taosdata.com:6030
// 必须配置为本数据节点的FQDN如果本机只有一个hostname可注释掉本项
fqdn h1.taosdata.com
// 配置本数据节点的端口号缺省是6030
serverPort 6030
// 副本数为偶数的时候,需要配置请参考《Arbitrator的使用》的部分
arbitrator ha.taosdata.com:6042
```
@ -53,7 +53,7 @@ arbitrator ha.taosdata.com:6042
| 2 | mnodeEqualVnodeNum | 一个mnode等同于vnode消耗的个数 |
| 3 | offlineThreshold | dnode离线阈值超过该时间将导致Dnode离线 |
| 4 | statusInterval | dnode向mnode报告状态时长 |
| 5 | arbitrator | 系统中裁决器的End Point |
| 6 | timezone | 时区 |
| 7 | balance | 是否启动负载均衡 |
| 8 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 |
@ -87,7 +87,7 @@ taos>
1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd注意每个物理节点都需要在 taos.cfg 文件中将 firstEP 参数配置为新集群首个节点的 End Point——在本例中是 h1.taos.com:6030
2. 在第一个数据节点使用CLI程序taos登录进TDengine系统执行命令
```
CREATE DNODE "h2.taos.com:6030";
@ -101,7 +101,7 @@ taos>
SHOW DNODES;
```
查看新节点是否被成功加入。如果该被加入的数据节点处于离线状态,请做两个检查:
- 查看该数据节点的taosd是否正常工作如果没有正常运行需要先检查为什么
- 查看该数据节点taosd日志文件taosdlog.0里前面几行日志(一般在/var/log/taos目录)看日志里输出的该数据节点fqdn以及端口号是否为刚添加的End Point。如果不一致需要将正确的End Point添加进去。
@ -121,7 +121,7 @@ taos>
### 添加数据节点
执行CLI程序taos使用root账号登录进系统执行
```
CREATE DNODE "fqdn:port";
@ -131,13 +131,13 @@ CREATE DNODE "fqdn:port";
### 删除数据节点
执行CLI程序taos使用root账号登录进TDengine系统执行
```mysql
DROP DNODE "fqdn:port | dnodeID";
```
通过"fqdn:port"或"dnodeID"来指定一个具体的节点都是可以的。其中fqdn是被删除的节点的FQDNport是其对外服务器的端口号dnodeID可以通过SHOW DNODES获得。
<font color=green>**【注意】**</font>
@ -147,25 +147,41 @@ DROP DNODE "fqdn:port";
- 一个数据节点被drop之后其他节点都会感知到这个dnodeID的删除操作任何集群中的节点都不会再接收此dnodeID的请求。
- dnodeID是集群自动分配的不得人工指定。它在生成时是递增的不会重复。
### 手动迁移数据节点
手动将某个vnode迁移到指定的dnode。
执行CLI程序taos使用root账号登录进TDengine系统执行
```mysql
ALTER DNODE <source-dnodeId> BALANCE "VNODE:<vgId>-DNODE:<dest-dnodeId>";
```
其中source-dnodeId是源dnodeId也就是待迁移的vnode所在的dnodeIDvgId可以通过SHOW VGROUPS获得列表的第一列dest-dnodeId是目标dnodeId。
<font color=green>**【注意】**</font>
- 只有在集群的自动负载均衡选项关闭时(balance设置为0),才允许手动迁移。
- 只有处于正常工作状态的vnode才能被迁移master/slave当处于offline/unsynced/syncing状态时是不能迁移的。
- 迁移前务必核实目标dnode的资源足够CPU、内存、硬盘。
### 查看数据节点
执行CLI程序taos使用root账号登录进TDengine系统执行
```mysql
SHOW DNODES;
```
它将列出集群中所有的dnode每个dnode的IDend_point(fqdn:port)状态(ready, offline等vnode数目还未使用的vnode数目等信息。在添加或删除一个数据节点后可以使用该命令查看。
### 查看虚拟节点组
为充分利用多核技术并提供scalability数据需要分片处理。因此TDengine会将一个DB的数据切分成多份存放在多个vnode里。这些vnode可能分布在多个数据节点dnode里这样就实现了水平扩展。一个vnode仅仅属于一个DB但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况自动进行分配的无需任何人工干预。
执行CLI程序taos使用root账号登录进TDengine系统执行
```mysql
SHOW VGROUPS;
```
@ -173,9 +189,9 @@ SHOW VGROUPS;
TDengine通过多副本的机制来提供系统的高可用性包括vnode和mnode的高可用性。
vnode的副本数是与DB关联的一个集群里可以有多个DB根据运营的需求每个DB可以配置不同的副本数。创建数据库时通过参数replica 指定副本数缺省为1。如果副本数为1系统的可靠性无法保证只要数据所在的节点宕机就将无法提供服务。集群的节点数必须大于等于副本数否则创建表时将返回错误"more dnodes are needed"。比如下面的命令将创建副本数为3的数据库demo
```mysql
CREATE DATABASE demo replica 3;
```
@ -183,20 +199,19 @@ CREATE DATABASE demo replica 3;
一个数据节点dnode里可能有多个DB的数据因此一个dnode离线时可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作那么该vnode group就无法对外服务无法插入或读取数据这样会影响到它所属的DB的一部分表的读写操作。
因为vnode的引入无法简单给出结论“集群中过半数据节点dnode工作集群就应该工作”。但是对于简单的情形很好下结论。比如副本数为3只有三个dnode那如果仅有一个节点不工作整个集群还是可以正常工作的但如果有两个数据节点不工作那整个集群就无法正常工作了。
## <a class="anchor" id="mnode"></a>Mnode的高可用性
TDengine集群是由mnode (taosd的一个模块管理节点) 负责管理的为保证mnode的高可用可以配置多个mnode副本副本数由系统配置参数numOfMnodes决定有效范围为1-3。为保证元数据的强一致性mnode副本之间是通过同步的方式进行数据复制的。
一个集群有多个数据节点dnode但一个dnode至多运行一个mnode实例。多个dnode情况下哪个dnode可以作为mnode呢这是完全由系统根据整个系统资源情况自动指定的。用户可通过CLI程序taos在TDengine的console里执行如下命令
```mysql
SHOW MNODES;
```
来查看mnode列表该列表将列出mnode所处的dnode的End Point和角色(master, slave, unsynced 或offline)。当集群中第一个数据节点启动时该数据节点一定会运行一个mnode实例否则该数据节点dnode无法正常工作因为一个系统是必须有至少一个mnode的。如果numOfMnodes配置为2启动第二个dnode时该dnode也将运行一个mnode实例。
为保证mnode服务的高可用性numOfMnodes必须设置为2或更大。因为mnode保存的元数据必须是强一致的如果numOfMnodes大于2复制参数quorum自动设为2也就是说至少要保证有两个副本写入数据成功才通知客户端应用写入成功。
@ -210,7 +225,7 @@ SHOW MNODES;
- 当一个数据节点从集群中移除时,系统将自动把该数据节点上的数据转移到其他数据节点,无需任何人工干预。
- 如果一个数据节点过热数据量过大系统将自动进行负载均衡将该数据节点的一些vnode自动挪到其他节点。
当上述三种情况发生时,系统将启动各个数据节点的负载计算,从而决定如何挪动。
**【提示】负载均衡由参数balance控制它决定是否启动自动负载均衡。**
@ -225,7 +240,7 @@ SHOW MNODES;
## <a class="anchor" id="arbitrator"></a>Arbitrator的使用
如果副本数为偶数,当一个 vnode group 里一半或超过一半的 vnode 不工作时,是无法从中选出 master 的。同理,一半或超过一半的 mnode 不工作时,是无法选出 mnode 的 master 的因为存在“split brain”问题。为解决这个问题TDengine 引入了 Arbitrator 的概念。Arbitrator 模拟一个 vnode 或 mnode 在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含 Arbitrator 在内,超过半数的 vnode 或 mnode 工作,那么该 vnode group 或 mnode 组就可以正常的提供数据插入或查询服务。比如对于副本数为 2 的情形,如果一个节点 A 离线,但另外一个节点 B 正常,而且能连接到 Arbitrator那么节点 B 就能正常工作。
总之在目前版本下TDengine 建议在双副本环境要配置 Arbitrator以提升系统的可用性。
@ -235,3 +250,9 @@ Arbitrator 的执行程序名为 tarbitrator。该程序对系统资源几乎没
3. 修改每个 taosd 实例的配置文件,在 taos.cfg 里将参数 arbitrator 设置为 tarbitrator 程序所对应的 End Point。如果该参数配置了当副本数为偶数时系统将自动连接配置的 Arbitrator。如果副本数为奇数即使配置了 Arbitrator系统也不会去建立连接。
4. 在配置文件中配置了的 Arbitrator会出现在 `SHOW DNODES;` 指令的返回结果中,对应的 role 列的值会是“arb”。
查看集群 Arbitrator 的状态【2.0.14.0 以后支持】
```mysql
SHOW DNODES;
```


@ -1,4 +1,4 @@
# TDengine的运营与维护
## <a class="anchor" id="planning"></a>容量规划
@ -28,12 +28,28 @@ taosd 内存总量 = vnode 内存 + mnode 内存 + 查询内存
最后,如果内存充裕,可以考虑加大 Blocks 的配置,这样更多数据将保存在内存里,提高查询速度。
#### 客户端内存需求
客户端应用采用 taosc 客户端驱动连接服务端,会有内存需求的开销。
客户端的内存开销主要由写入过程中的 SQL 语句、表的元数据信息缓存、以及结构性开销构成。系统最大容纳的表数量为 N每个通过超级表创建的表的 meta data 开销约 256 字节),最大并行写入线程数量 T最大 SQL 语句长度 S通常是 1 Mbytes。由此可以进行客户端内存开销的估算单位 MBytes
```
M = (T * S * 3 + (N / 4096) + 100)
```
举例如下:用户最大并发写入线程数 100子表数总数 10,000,000那么客户端的内存最低要求是
```
100 * 1 * 3 + (10000000 / 4096) + 100 ≈ 2841 (MBytes)
```
即配置 3 GBytes 内存是最低要求。
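按上面的公式,也可以写一个简单的估算函数用于容量规划(仅为示意,参数单位与上文公式保持一致):

```python
def client_memory_mb(threads, sql_len_mb=1, tables=0):
    """按 M = T * S * 3 + N / 4096 + 100 估算客户端内存需求(单位 MBytes"""
    return threads * sql_len_mb * 3 + tables / 4096 + 100

# 上文示例100 个最大并发写入线程、1000 万张子表
print(round(client_memory_mb(100, 1, 10_000_000)))  # ≈ 2841同样得出至少需配置 3 GBytes
```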
### CPU 需求
CPU 的需求取决于如下两方面:
* **数据插入** TDengine 单核每秒能至少处理一万个插入请求。每个插入请求可以带多条记录,一次插入一条记录与插入 10 条记录,消耗的计算资源差别很小。因此每次插入,条数越大,插入效率越高。如果一个插入请求带 200 条以上记录,单核就能达到每秒插入 100 万条记录的速度。但对前端数据采集的要求越高,因为需要缓存记录,然后一批插入。
* **查询需求** TDengine 提供高效的查询,但是每个场景的查询差异很大,查询频次变化也很大,难以给出客观数字。需要用户针对自己的场景,写一些查询语句,才能确定。
因此仅对数据插入而言CPU 是可以估算出来的,但查询所耗的计算资源无法估算。在实际运营过程中,不建议 CPU 使用率超过 50%,超过后,需要增加新的节点,以获得更多计算资源。
@ -96,51 +112,170 @@ TDengine系统后台服务由taosd提供可以在配置文件taos.cfg里修
taosd -C
```
下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节,而且这些参数的缺省配置都是可以工作的,一般无需设置。**注意:配置文件参数修改后,需要重启*taosd*服务,或客户端应用才能生效。**
| **#** | **配置参数名称** | **内部** | **S\|C** | **单位** | **含义** | **取值范围** | **缺省值** | **备注** |
| ----- | ----------------------- | -------- | -------- | -------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
| 1 | firstEp | | **SC** | | taosd启动时主动连接的集群中首个dnode的end point | | localhost:6030 | |
| 2 | secondEp | YES | **SC** | | taosd启动时如果firstEp连接不上尝试连接集群中第二个dnode的end point | | 无 | |
| 3 | fqdn | | **SC** | | 数据节点的FQDN。如果习惯IP地址访问可设置为该节点的IP地址。 | | 缺省为操作系统配置的第一个hostname。 | 这个参数值的长度需要控制在 96 个字符以内。 |
| 4 | serverPort | | **SC** | | taosd启动后对外服务的端口号 | | 6030 | RESTful服务使用的端口号是在此基础上+11即默认值为6041。 |
| 5 | logDir | | **SC** | | 日志文件目录,客户端和服务器的运行日志将写入该目录 | | /var/log/taos | |
| 6 | scriptDir | YES | **S** | | | | | |
| 7 | dataDir | | **S** | | 数据文件目录,所有的数据文件都将写入该目录 | | /var/lib/taos | |
| 8 | arbitrator | | **S** | | 系统中裁决器的end point | | 空 | |
| 9 | numOfThreadsPerCore | | **SC** | | 每个CPU核生成的队列消费者线程数量 | | 1.0 | |
| 10 | ratioOfQueryThreads | | **S** | | 设置查询线程的最大数量 | 0表示只有1个查询线程1表示最大和CPU核数相等的查询线程2表示最大建立2倍CPU核数的查询线程。 | 1 | 该值可以为小数即0.5表示最大建立CPU核数一半的查询线程。 |
| 11 | numOfMnodes | | **S** | | 系统中管理节点个数 | | 3 | |
| 12 | vnodeBak | | **S** | | 删除vnode时是否备份vnode目录 | 01是 | 1 | |
| 13 | telemetryReporting | | **S** | | 是否允许 TDengine 采集和上报基本使用信息 | 0不允许1允许 | 1 | |
| 14 | balance | | **S** | | 是否启动负载均衡 | 01 | 1 | |
| 15 | balanceInterval | YES | **S** | 秒 | 管理节点在正常运行状态下,检查负载均衡的时间间隔 | 1-30000 | 300 | |
| 16 | role | | **S** | | dnode的可选角色 | 0any既可作为mnode也可分配vnode1mgmt只能作为mnode不能分配vnode2dnode不能作为mnode只能分配vnode | 0 | |
| 17 | maxTmrCtrl | | **SC** | 个 | 定时器个数 | 8-2048 | 512 | |
| 18 | monitorInterval | | **S** | 秒 | 监控数据库记录系统参数CPU/内存)的时间间隔 | 1-600 | 30 | |
| 19 | offlineThreshold | | **S** | 秒 | dnode离线阈值超过该时间将导致dnode离线 | 5-7200000 | 86400*1010天 | |
| 20 | rpcTimer | | **SC** | 毫秒 | rpc重试时长 | 100-3000 | 300 | |
| 21 | rpcMaxTime | | **SC** | 秒 | rpc等待应答最大时长 | 100-7200 | 600 | |
| 22 | statusInterval | | **S** | 秒 | dnode向mnode报告状态间隔 | 1-10 | 1 | |
| 23 | shellActivityTimer | | **SC** | 秒 | shell客户端向mnode发送心跳间隔 | 1-120 | 3 | |
| 24 | tableMetaKeepTimer | | **S** | 秒 | 表的元数据cache时长 | 1-8640000 | 7200 | |
| 25 | minSlidingTime | | **S** | 毫秒 | 最小滑动窗口时长 | 10-1000000 | 10 | 支持us补值后这个值就是1us了。 |
| 26 | minIntervalTime | | **S** | 毫秒 | 时间窗口最小值 | 1-1000000 | 10 | |
| 27 | stream | | **S** | | 是否启用连续查询(流计算功能) | 0不允许1允许 | 1 | |
| 28 | maxStreamCompDelay | | **S** | 毫秒 | 连续查询启动最大延迟 | 10-1000000000 | 20000 | 为避免多个stream同时执行占用太多系统资源程序中对stream的执行时间人为增加了一些随机的延时。maxFirstStreamCompDelay 是stream第一次执行前最少要等待的时间。streamCompDelayRatio 是延迟时间的计算系数,它乘以查询的 interval 后为延迟时间基准。maxStreamCompDelay是延迟时间基准的上限。实际延迟时间为一个不超过延迟时间基准的随机值。stream某次计算失败后需要重试retryStreamCompDelay是重试的等待时间基准。实际重试等待时间为不超过等待时间基准的随机值。 |
| 29 | maxFirstStreamCompDelay | | **S** | 毫秒 | 第一次连续查询启动最大延迟 | 10-1000000000 | 10000 | |
| 30 | retryStreamCompDelay | | **S** | 毫秒 | 连续查询重试等待间隔 | 10-1000000000 | 10 | |
| 31 | streamCompDelayRatio | | **S** | | 连续查询的延迟时间计算系数 | 0.1-0.9 | 0.1 | |
| 32 | maxVgroupsPerDb | | **S** | | 每个DB中 能够使用的最大vnode个数 | 0-8192 | | |
| 33 | maxTablesPerVnode | | **S** | | 每个vnode中能够创建的最大表个数 | | 1000000 | |
| 34 | minTablesPerVnode | YES | **S** | | 每个vnode中必须创建的最小表个数 | | 100 | |
| 35 | tableIncStepPerVnode | YES | **S** | | 每个vnode中超过最小表数后递增步长 | | 1000 | |
| 36 | cache | | **S** | MB | 内存块的大小 | | 16 | |
| 37 | blocks | | **S** | | 每个vnodetsdb中有多少cache大小的内存块。因此一个vnode的用的内存大小粗略为cache * blocks | | 6 | |
| 38 | days | | **S** | 天 | 数据文件存储数据的时间跨度 | | 10 | |
| 39 | keep | | **S** | 天 | 数据保留的天数 | | 3650 | |
| 40 | minRows | | **S** | | 文件块中记录的最小条数 | | 100 | |
| 41 | maxRows | | **S** | | 文件块中记录的最大条数 | | 4096 | |
| 42 | quorum | | **S** | | 异步写入成功所需应答之法定数 | 1-3 | 1 | |
| 43 | comp | | **S** | | 文件压缩标志位 | 0关闭1:一阶段压缩2:两阶段压缩 | 2 | |
| 44 | walLevel | | **S** | | WAL级别 | 1写wal, 但不执行fsync; 2写wal, 而且执行fsync | 1 | |
| 45 | fsync | | **S** | 毫秒 | 当wal设置为2时执行fsync的周期 | 最小为0表示每次写入立即执行fsync最大为180000三分钟 | 3000 | |
| 46 | replica | | **S** | | 副本个数 | 1-3 | 1 | |
| 47 | mqttHostName | YES | **S** | | mqtt uri | | | [mqtt://username:password@hostname:1883/taos/](mqtt://username:password@hostname:1883/taos/) |
| 48 | mqttPort | YES | **S** | | mqtt port | | | 1883 |
| 49 | mqttTopic | YES | **S** | | mqtt topic | | | /test |
| 50 | compressMsgSize | | **S** | bytes | Threshold above which messages exchanged between client and server are compressed. To enable compression, a value of 64330 bytes is recommended, i.e. only message bodies larger than 64330 bytes are compressed. | `0`: compress all messages; >0: compress only messages larger than this value; -1: no compression | -1 | |
| 51 | maxSQLLength | | **C** | bytes | Maximum length allowed for a single SQL statement | 65480-1048576 | 65480 | |
| 52 | maxNumOfOrderedRes | | **SC** | | Maximum number of records allowed for time-ordered queries on super tables | | 100,000 | |
| 53 | timezone | | **SC** | | Time zone | | obtained dynamically from the system | |
| 54 | locale | | **SC** | | System locale and encoding | | obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or via the API | |
| 55 | charset | | **SC** | | Character-set encoding | | obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or via the API | |
| 56 | maxShellConns | | **S** | | Maximum number of connections a dnode accepts | 10-50000000 | 5000 | |
| 57 | maxConnections | | **S** | | Maximum number of dnode connections allowed for one database connection | 1-100000 | 5000 | In practical testing, if this is left unconfigured, running 50 worker threads produced "Network unavailable" errors |
| 58 | minimalLogDirGB | | **SC** | GB | Stop writing logs when the free disk space of the log directory falls below this value | | 0.1 | |
| 59 | minimalTmpDirGB | | **SC** | GB | Stop writing temporary files when the free disk space of the temporary-file directory falls below this value | | 0.1 | |
| 60 | minimalDataDirGB | | **S** | GB | Stop writing time-series data when the free disk space of the data directory falls below this value | | 0.1 | |
| 61 | mnodeEqualVnodeNum | | **S** | | Number of vnodes one mnode is counted as consuming | | 4 | |
| 62 | http | | **S** | | Switch for the server's internal HTTP service | 0: disable the HTTP service; 1: enable it | 1 | |
| 63 | mqtt | YES | **S** | | Switch for the server's internal MQTT service | 0: disable the MQTT service; 1: enable it | 0 | |
| 64 | monitor | | **S** | | Switch for the server's internal system monitoring, which collects the load of the physical node (CPU, memory, disk, network bandwidth, HTTP request volume) and stores the records in the `LOG` database | 0: disable monitoring; 1: enable it | 0 | |
| 65 | httpEnableRecordSql | | **S** | | Internal use; records SQL calls made through the RESTful interface | | 0 | The generated files (httpnote.0/httpnote.1) live in the same directory as the server logs. |
| 66 | httpMaxThreads | | **S** | | Number of threads for the RESTful interface | | 2 | |
| 67 | telegrafUseFieldNum | YES | | | | | | |
| 68 | restfulRowLimit | | **S** | | Maximum number of records returned by a single RESTful request | | 10240 | at most 10,000,000 |
| 69 | numOfLogLines | | **SC** | | Maximum number of lines in a single log file | | 10,000,000 | |
| 70 | asyncLog | | **SC** | | Log write mode | 0: synchronous; 1: asynchronous | 1 | |
| 71 | logKeepDays | | **SC** | days | Maximum retention time of log files | | 0 | When greater than 0, log files are renamed to taosdlog.xxx, where xxx is the timestamp of the file's last modification. |
| 72 | debugFlag | | **SC** | | Runtime log switch | 131: errors and warnings; 135: errors, warnings, and debug; 143: errors, warnings, debug, and trace | 131 or 135 (modules have different defaults) | |
| 73 | mDebugFlag | | **S** | | Log switch for the management module | as above | 135 | |
| 74 | dDebugFlag | | **SC** | | Log switch for the dnode module | as above | 135 | |
| 75 | sDebugFlag | | **SC** | | Log switch for the sync module | as above | 135 | |
| 76 | wDebugFlag | | **SC** | | Log switch for the WAL module | as above | 135 | |
| 77 | sdbDebugFlag | | **SC** | | Log switch for the sdb module | as above | 135 | |
| 78 | rpcDebugFlag | | **SC** | | Log switch for the rpc module | as above | | |
| 79 | tmrDebugFlag | | **SC** | | Log switch for the timer module | as above | | |
| 80 | cDebugFlag | | **C** | | Log switch for the client module | as above | | |
| 81 | jniDebugFlag | | **C** | | Log switch for the JNI module | as above | | |
| 82 | odbcDebugFlag | | **C** | | Log switch for the ODBC module | as above | | |
| 83 | uDebugFlag | | **SC** | | Log switch for the shared-utility module | as above | | |
| 84 | httpDebugFlag | | **S** | | Log switch for the HTTP module | as above | | |
| 85 | mqttDebugFlag | | **S** | | Log switch for the MQTT module | as above | | |
| 86 | monitorDebugFlag | | **S** | | Log switch for the monitoring module | as above | | |
| 87 | qDebugFlag | | **SC** | | Log switch for the query module | as above | | |
| 88 | vDebugFlag | | **SC** | | Log switch for the vnode module | as above | | |
| 89 | tsdbDebugFlag | | **S** | | Log switch for the TSDB module | as above | | |
| 90 | cqDebugFlag | | **SC** | | Log switch for the continuous-query module | as above | | |
| 91 | tscEnableRecordSql | | **C** | | Whether to record client SQL statements to a file | 0: no; 1: yes | 0 | The generated files (tscnote-xxxx.0/tscnote-xxxx.1, where xxxx is the pid) live in the same directory as the client logs. |
| 92 | enableCoreFile | | **SC** | | Whether to generate a core file when the service crashes | 0: no; 1: yes | 1 | The core file location depends on how taosd was started: 1. started via systemctl start taosd, the core file is placed in the root directory; 2. started manually, it is placed in the directory taosd was run from. |
| 93 | gitinfo | YES | **SC** | | | 1 | | |
| 94 | gitinfoofInternal | YES | **SC** | | | 2 | | |
| 95 | Buildinfo | YES | **SC** | | | 3 | | |
| 96 | version | YES | **SC** | | | 4 | | |
| 98 | maxBinaryDisplayWidth | | **C** | | Upper limit on the display width of binary and nchar fields in the taos shell; anything beyond the limit is hidden | 5 - | 30 | The effective limit is computed as follows: if the field value is longer than maxBinaryDisplayWidth, the limit is the larger of the **field-name length** and **maxBinaryDisplayWidth**; otherwise it is the larger of the **field-name length** and the **field-value length**. The option can be changed at runtime in the shell with set max_binary_display_width nn. |
| 99 | queryBufferSize | | **S** | MB | Amount of memory reserved for all concurrent queries. | | | As a rule of thumb, multiply the expected maximum number of concurrent queries by the number of tables involved, then by 170. (Before version 2.0.15 this parameter was expressed in bytes.) |
| 100 | ratioOfQueryCores | | **S** | | Sets the maximum number of query threads. | | | Minimum 0: only a single query thread; maximum 2: up to twice the number of CPU cores. The default 1 means as many query threads as CPU cores. Fractional values are allowed, e.g. 0.5 means at most half as many query threads as CPU cores. |
| 101 | update | | **S** | | Allow updating existing rows | 0 \| 1 | 0 | since version 2.0.8.0 |
| 102 | cacheLast | | **S** | | Whether to cache the most recent data of sub-tables in memory | 0: off; 1: cache the last row of each sub-table; 2: cache the last non-NULL value of every column; 3: both | 0 | Not supported in taos.cfg before version 2.1.2.0 (or 2.0.20.7 in the 2.0 series). |
| 103 | numOfCommitThreads | YES | **S** | | Sets the maximum number of commit (write) threads | | | |
| 104 | maxWildCardsLength | | **C** | bytes | Maximum length allowed for the wildcard string of the LIKE operator | 0-16384 | 100 | Added in version 2.1.6.1. |
**Note:** TDengine uses 13 consecutive TCP and UDP ports starting at serverPort; be sure to open them in the firewall. With the default configuration this means opening ports 6030 through 6042 (13 in total), for both TCP and UDP. For details see [TDengine 2.0 port description](https://www.taosdata.com/cn/documentation/faq#port).
Data from different application scenarios often has different characteristics: retention period, replica count, collection frequency, record size, number of collection points, compression, and so on can all differ. To get the best storage efficiency, TDengine provides the following storage-related configuration parameters (they can be given as arguments to the create database statement, or written in taos.cfg to set the defaults used when new databases are created):
- days: time span covered by a single data file, in days. Default: 10.
- keep: number of days data is retained in the database. Default: 3650. (Changeable via alter database.)
- minRows: minimum number of records in a file block. Default: 100.
- maxRows: maximum number of records in a file block. Default: 4096.
- comp: file compression flag. 0: off; 1: one-stage compression; 2: two-stage compression. Default: 2. (Changeable via alter database.)
- wal: WAL level. 1: write the WAL without calling fsync; 2: write the WAL and call fsync. Default: 1. (In taos.cfg the parameter must be written as walLevel.)
- fsync: interval at which fsync is executed when wal is set to 2; 0 means fsync on every write. In milliseconds. Default: 3000.
- cache: size of a memory block, in megabytes (MB). Default: 16.
- blocks: number of cache-sized memory blocks per vnode (TSDB); a vnode therefore uses roughly cache * blocks of memory. Default: 4. (Changeable via alter database.)
- replica: number of replicas; range 1-3. Default: 1. (Changeable via alter database.)
- quorum: number of confirmations required for a write to be acknowledged in a multi-replica setup; 1 or 2. Default: 1. (Changeable via alter database.)
- precision: timestamp precision; ms for milliseconds, us for microseconds. Default: ms. (Not supported in taos.cfg before version 2.1.2.0 / 2.0.20.7.)
- cacheLast: whether to cache the most recent data of sub-tables in memory. 0: off; 1: cache the last row of each sub-table; 2: cache the last non-NULL value of every column; 3: both. Default: 0. (Changeable via alter database.) (Values 0-3 are supported since version 2.1.2.0; before that only [0, 1]; versions before 2.0.11.0 do not support this parameter in SQL at all, and versions before 2.1.2.0 / 2.0.20.7 do not support it in taos.cfg.)
- update: whether updates are allowed. 0: no; 1: yes. Default: 0.
| **#** | **Parameter** | **Unit** | **Meaning** | **Range** | **Default** |
| ----- | ---------------- | -------- | ------------------------------------------------------------ | ------------------------------------------------ | ---------- |
| 1 | days | days | Time span covered by a single data file | | 10 |
| 2 | keep | days | (Changeable via alter database<!-- REPLACE_OPEN_TO_ENTERPRISE__KEEP_PARAM_DESCRIPTION_IN_PARAM_LIST -->) Number of days data is retained in the database. | | 3650 |
| 3 | cache | MB | Size of a memory block | | 16 |
| 4 | blocks | | (Changeable via alter database) Number of cache-sized memory blocks per vnode (TSDB); a vnode therefore uses roughly cache * blocks of memory. | | 4 |
| 5 | quorum | | (Changeable via alter database) Number of confirmations required for a write in a multi-replica setup | 1-2 | 1 |
| 6 | minRows | | Minimum number of records in a file block | | 100 |
| 7 | maxRows | | Maximum number of records in a file block | | 4096 |
| 8 | comp | | (Changeable via alter database) File compression flag | 0: off; 1: one-stage compression; 2: two-stage compression | 2 |
| 9 | walLevel | | (Named wal when used as a database parameter; written as walLevel in taos.cfg) WAL level | 1: write the WAL without fsync; 2: write the WAL and fsync | 1 |
| 10 | fsync | ms | Interval at which fsync is executed when wal is set to 2; 0 means fsync on every write. | | 3000 |
| 11 | replica | | (Changeable via alter database) Number of replicas | 1-3 | 1 |
| 12 | precision | | Timestamp precision. (Not supported in taos.cfg before version 2.1.2.0 / 2.0.20.7.) (Nanosecond precision supported since version 2.1.5.0.) | ms: milliseconds; us: microseconds; ns: nanoseconds | ms |
| 13 | update | | Whether updates are allowed | 0: no; 1: yes | 0 |
| 14 | cacheLast | | (Changeable via alter database) Whether to cache the most recent data of sub-tables in memory. (Values 0-3 supported since 2.1.2.0; before that only [0, 1]; not supported in SQL before 2.0.11.0; not supported in taos.cfg before 2.1.2.0 / 2.0.20.7.) | 0: off; 1: cache the last row; 2: cache the last non-NULL value of each column; 3: both | 0 |
A single application may have several kinds of data with different characteristics. The best design is to put tables with the same characteristics into one database, so an application uses multiple databases, each configured with its own storage parameters; this way the system performs optimally. When creating a database, an application may specify any of the storage parameters above, and the specified values override the corresponding system defaults. For example:
```mysql
CREATE DATABASE demo DAYS 10 CACHE 32 BLOCKS 8 REPLICA 3 UPDATE 1;
```
This SQL creates a database demo in which each data file stores 10 days of data, memory blocks are 32 MB each, every vnode uses 8 memory blocks, there are 3 replicas, and updates are allowed, while all other parameters follow the system configuration.
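The effective settings of every database can then be checked from the taos shell, for example:

```mysql
SHOW DATABASES;
```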
Once a database has been created, only some of its parameters can be modified (the change takes effect immediately); the rest cannot be changed:
| **Parameter** | **Alterable** | **Range** | **Example Syntax** |
| ----------- | ------------ | ------------------------------------------------------------ | ------------------------------------- |
| name | | | |
| create time | | | |
| ntables | | | |
| vgroups | | | |
| replica | **YES** | 1-1 with one dnode online; 1-2 with two; 1-3 with three or more | ALTER DATABASE <dbname> REPLICA *n* |
| quorum | **YES** | 1-2 | ALTER DATABASE <dbname> QUORUM *n* |
| days | | | |
| keep | **YES** | days-365000 | ALTER DATABASE <dbname> KEEP *n* |
| cache | | | |
| blocks | **YES** | 3-1000 | ALTER DATABASE <dbname> BLOCKS *n* |
| minrows | | | |
| maxrows | | | |
| wal | | | |
| fsync | | | |
| comp | **YES** | 0-2 | ALTER DATABASE <dbname> COMP *n* |
| precision | | | |
| status | | | |
| update | | | |
| cachelast | **YES** | 0 \| 1 \| 2 \| 3 | ALTER DATABASE <dbname> CACHELAST *n* |
**Note:** before version 2.1.3.0, the server had to be restarted for parameters changed via ALTER DATABASE to take effect.
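As a minimal sketch, assuming the `demo` database created in the example above, the alterable parameters can be changed on the fly like this:

```mysql
ALTER DATABASE demo KEEP 365;     -- extend data retention to one year
ALTER DATABASE demo BLOCKS 16;    -- give each vnode more memory blocks
ALTER DATABASE demo CACHELAST 1;  -- cache the last row of each sub-table
```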
When a new dnode joins a TDengine cluster, several cluster-related parameters must match the configuration of the existing cluster, otherwise the dnode cannot join. The following parameters are checked:
- numOfMnodes: number of management nodes in the cluster. Default: 3. (Since version 2.0.20.11 in the 2.0 series, and 2.1.6.0 in 2.1 and later, the default is 1.)
@ -172,7 +307,7 @@ ALTER DNODE <dnode_id> <config>
alter dnode 1 debugFlag 135;
```
## <a class="anchor" id="client"></a>Client and Application Driver Configuration
The foreground interactive client application of TDengine is taos; together with the application drivers it shares the same configuration file, taos.cfg, with taosd. When running taos, use the -c option to point at a configuration directory, e.g. taos -c /home/cfg uses the parameters in /home/cfg/taos.cfg (the default directory is /etc/taos). For more on using taos, see its help output, `taos --help`. This section describes the parameters used by the taos client application in taos.cfg.
@ -182,15 +317,15 @@ TDengine系统的前台交互客户端应用程序为taos以及应用驱动
taos -C or taos --dump-config
```
Client and application driver configuration parameters:
- firstEp: the end point of the first taosd instance in the cluster that taos connects to at startup; default localhost:6030.
- secondEp: if firstEp is unreachable at startup, taos tries secondEp instead.
- locale: system locale and encoding.
Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or via the API.
TDengine provides a dedicated field type, nchar, for storing wide characters in non-ASCII encodings such as Chinese, Japanese, and Korean. Data written to an nchar field is uniformly encoded as UCS4-LE before being sent to the server. Note that correctness of the encoding is the client's responsibility: to store non-ASCII text such as Chinese, Japanese, or Korean in nchar fields, the client's encoding must be configured correctly.
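For instance (a hypothetical sketch, assuming the client locale/charset are configured correctly), Chinese text survives a round trip through an nchar column:

```mysql
CREATE TABLE messages (ts TIMESTAMP, msg NCHAR(64));
INSERT INTO messages VALUES (NOW, '温度正常');
```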
@ -198,9 +333,9 @@ taos -C 或 taos --dump-config
On Linux, a locale name has the form <language>\_<region>.<charset>, e.g. zh_CN.UTF-8, where zh stands for Chinese, CN for mainland China, and UTF-8 for the character set. The character-set encoding tells the client how to convert locally encoded strings. Linux and Mac OSX determine the system's character encoding through the locale; because the locale format used by Windows is not POSIX-compliant, Windows uses a separate parameter, charset, to specify the encoding. charset can also be used on Linux.
- charset: character-set encoding.
Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or via the API.
If charset is not set in the configuration file, then on Linux taos reads the system's current locale at startup and extracts the charset from it. If reading the locale fails, it falls back to the charset setting; if that also fails, startup is aborted.
@ -260,7 +395,7 @@ taos -C 或 taos --dump-config
- maxBinaryDisplayWidth
Upper limit on the display width of binary and nchar fields in the taos shell; anything beyond the limit is hidden. Default: 30. It can be changed at runtime with the taos shell command set max_binary_display_width nn, as in the example below.
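For example, to widen the limit to 60 characters for the current shell session:

```mysql
set max_binary_display_width 60;
```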
## <a class="anchor" id="user"></a>User Management
@ -315,7 +450,7 @@ TDengine也支持在shell对已存在的表从CSV文件中进行数据导入。C
```mysql
insert into tb1 file 'path/data.csv';
```
注意如果CSV文件首行存在描述信息请手动删除后再导入
**注意如果CSV文件首行存在描述信息请手动删除后再导入。如某列为空填NULL无引号。**
For example, suppose a sub-table d1001 exists with the following schema:
@ -343,7 +478,7 @@ taos> DESCRIBE d1001
'2018-10-11 06:38:05.000',17.30000,219,0.32000
'2018-10-12 06:38:05.000',18.30000,219,0.31000
```
Then the data can be imported with:
```mysql
taos> insert into d1001 file '~/data.csv';
@ -360,7 +495,7 @@ TDengine提供了方便的数据库导入导出工具taosdump。用户可以将t
**Export a CSV file per table**
To export the data of a table or an STable, run in the taos shell:
```mysql
select * from <tb_name> >> data.csv;
@ -370,7 +505,9 @@ select * from <tb_name> >> data.csv;
**Export data with taosdump**
TDengine also ships a convenient export tool, taosdump. With taosdump you can export all databases, a single database, or a single table of a database; all data or only a time range of it; or even just the table definitions.
For usage details, see the blog post: [TDengine DUMP Tool User Guide](https://www.taosdata.com/blog/2020/03/09/1334.html).
## <a class="anchor" id="status"></a>System Connections and Task/Query Management
@ -435,46 +572,100 @@ COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会
After TDengine is installed, the following directories and files are created in the operating system by default:
| **Directory/File** | **Description** |
| ------------------------- | ------------------------------------------------------------ |
| /usr/local/taos/bin | TDengine executables. Each executable is also soft-linked into /usr/bin. |
| /usr/local/taos/connector | TDengine connectors for various languages. |
| /usr/local/taos/driver | TDengine dynamic libraries, soft-linked into /usr/lib. |
| /usr/local/taos/examples | TDengine application examples in various languages. |
| /usr/local/taos/include | Header files of the C API that TDengine exposes. |
| /etc/taos/taos.cfg | Default TDengine [configuration file]. |
| /var/lib/taos | Default TDengine data directory. The location can be changed via the [configuration file]. |
| /var/log/taos | Default TDengine log directory. The location can be changed via the [configuration file]. |
**Executables**
All TDengine executables are installed under _/usr/local/taos/bin_ by default, including:
- *taosd*: the TDengine server executable
- *taos*: the TDengine shell executable
- *taosdump*: data import/export tool
- *taosdemo*: TDengine testing tool
- remove.sh: script to uninstall TDengine; use with care. It is linked to the **rmtaos** command in /usr/bin. It removes the installation directory /usr/local/taos but keeps /etc/taos, /var/lib/taos, and /var/log/taos.
Different data and log directories can be configured by editing the system configuration file taos.cfg.
## Starting, Stopping, and Uninstalling TDengine
TDengine uses the Linux systemd/systemctl/service machinery to manage starting, stopping, and restarting. The TDengine service process is taosd; by default, TDengine starts automatically after system boot. A DBA can stop, start, or restart the service manually via systemd/systemctl/service.
Taking systemctl as an example:
- start the service: `systemctl start taosd`
- stop the service: `systemctl stop taosd`
- restart the service: `systemctl restart taosd`
- check service status: `systemctl status taosd`
If the service process is active, the status command prints information like:
```
......
Active: active (running)
......
```
If the background service process is stopped, the status command prints:
```
......
Active: inactive (dead)
......
```
To uninstall TDengine, simply run:
```
rmtaos
```
**Warning: running this command removes TDengine completely. Use it with great care.**
## <a class="anchor" id="keywords"></a>TDengine Parameter Limits and Reserved Keywords
**Naming rules**
1. Legal characters: ASCII letters, digits, and underscore
2. Names may begin with a letter or an underscore, but not with a digit
3. Names are case-insensitive
**Legal characters in passwords**
`[a-zA-Z0-9!?$%^&*()_+={[}]:;@~#|<,>.?/]`
i.e. everything except single and double quotation marks, backtick, backslash, and the space character.
- Database name: must not contain "." or special characters; at most 32 characters
- Table name: must not contain "." or special characters; together with the database name, at most 192 characters; each data row at most 16k characters
- Table column names: must not contain special characters; at most 64 characters
- Database, table, and column names must not begin with a digit; the legal character set is ASCII letters, digits, and underscore
- Number of columns per table: at most 1024, at least 2, and the first column must be a timestamp
- Maximum record length: including the 8-byte timestamp, at most 16KB (each BINARY/NCHAR column additionally takes 2 bytes of storage)
- Default maximum length of a single SQL statement: 65480 bytes; configurable up to 1048576 bytes via the maxSQLLength system parameter
- Number of database replicas: at most 3
- User name: at most 23 bytes
- User password: at most 15 bytes
- Number of tags: at most 128; zero tags are allowed
- Total tag length: at most 16K bytes
- Number of records: limited only by storage space
- Number of virtual nodes in a single database: at most 64
- Number of databases, super tables, and tables: not limited by the system itself, only by system resources
- A SELECT query may return at most 1024 columns (function calls in the statement may also occupy column slots); if the limit is exceeded, explicitly specify fewer output columns to avoid execution errors.
TDengine currently has nearly 200 internal reserved keywords, which cannot be used as database, table, STable, column, or tag names regardless of case. The list:
@ -519,3 +710,102 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
| CONNS | ID | NOTNULL | STABLE | WAL |
| COPY | IF | NOW | STABLES | WHERE |
## Diagnostics and Miscellaneous
#### Network Connectivity Diagnostics
When a client application cannot reach the server, the connectivity of each port between client and server must be checked so the fault can be narrowed down.
Network connectivity diagnostics are currently supported between Linux and Linux, and between Linux and Windows.
Steps:
1. If the port range to be tested overlaps with a running taosd instance's port range, stop that taosd instance first
2. On the server, run `taos -n server -P <port>` to start listening, in server mode, on the port range based at the given port
3. On the client, run `taos -n client -h <fqdn of server> -P <port>` to send test packets, in client mode, to the given server and port
A correctly working server prints output like:
```bash
# taos -n server -P 6000
12/21 14:50:13.522509 0x7f536f455200 UTL work as server, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000
12/21 14:50:13.522659 0x7f5352242700 UTL TCP server at port:6000 is listening
12/21 14:50:13.522727 0x7f5351240700 UTL TCP server at port:6001 is listening
...
...
...
12/21 14:50:13.523954 0x7f5342fed700 UTL TCP server at port:6011 is listening
12/21 14:50:13.523989 0x7f53437ee700 UTL UDP server at port:6010 is listening
12/21 14:50:13.524019 0x7f53427ec700 UTL UDP server at port:6011 is listening
12/21 14:50:22.192849 0x7f5352242700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6000
12/21 14:50:22.192993 0x7f5352242700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6000
12/21 14:50:22.237082 0x7f5351a41700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6000
12/21 14:50:22.237203 0x7f5351a41700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6000
12/21 14:50:22.237450 0x7f5351240700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6001
12/21 14:50:22.237576 0x7f5351240700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6001
12/21 14:50:22.281038 0x7f5350a3f700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6001
12/21 14:50:22.281141 0x7f5350a3f700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6001
...
...
...
12/21 14:50:22.677443 0x7f5342fed700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6011
12/21 14:50:22.677576 0x7f5342fed700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6011
12/21 14:50:22.721144 0x7f53427ec700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6011
12/21 14:50:22.721261 0x7f53427ec700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6011
```
A correctly working client prints output like:
```bash
# taos -n client -h 172.27.0.7 -P 6000
12/21 14:50:22.192434 0x7fc95d859200 UTL work as client, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000
12/21 14:50:22.192472 0x7fc95d859200 UTL server ip:172.27.0.7 is resolved from host:172.27.0.7
12/21 14:50:22.236869 0x7fc95d859200 UTL successed to test TCP port:6000
12/21 14:50:22.237215 0x7fc95d859200 UTL successed to test UDP port:6000
...
...
...
12/21 14:50:22.676891 0x7fc95d859200 UTL successed to test TCP port:6010
12/21 14:50:22.677240 0x7fc95d859200 UTL successed to test UDP port:6010
12/21 14:50:22.720893 0x7fc95d859200 UTL successed to test TCP port:6011
12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011
```
Reading the printed error messages carefully helps the administrator find the cause and fix the problem.
#### Startup Status and RPC Diagnostics
`taos -n startup -h <fqdn of server>`
Determining whether a taosd server has started successfully is a situation database administrators run into often, especially when several servers form a cluster and each instance's startup must be verified. Besides locating problems by searching the taosd log files, you can run `taos -n startup -h <fqdn of server>` to diagnose the startup state of a taosd process.
When a cluster of servers takes a long time to start, this command can be used to check the startup status of each server's taosd instance and pinpoint the problem precisely.
`taos -n rpc -h <fqdn of server>`
This command checks whether the ports of an already-started taosd instance are reachable. If taosd misbehaves or stops responding, `taos -n rpc -h <fqdn of server>` initiates an RPC exchange with the given FQDN to see whether taosd receives it, which distinguishes a network problem from a taosd failure.
#### sync and arbitrator Diagnostics
```
taos -n sync -P 6040 -h <fqdn of server>
taos -n sync -P 6042 -h <fqdn of server>
```
These commands check whether the sync port works, i.e. whether the server's sync module functions correctly. In addition, -P 6042 checks whether the arbitrator is configured properly and can work for the given server.
#### Server Logs
The log switch of the taosd server, debugFlag, defaults to 131; for debugging it usually needs to be raised to 135 or 143.
Once set to 135 or 143, log files grow quickly, dramatically so under heavy write or query load, and important entries (configuration, errors) can easily be flushed out if everything is kept in one log. The server therefore stores important logs separately from the rest:
- taosinfo holds the important information logs
- taosdlog holds everything else
The maximum length of the taosinfo log file is governed by numOfLogLines, and one taosd instance keeps at most two such files.
taosd writes its logs to disk asynchronously, which avoids a large performance hit from disk-write pressure; the downside is that in extreme cases a small number of log lines may be lost.

View File

@ -34,16 +34,16 @@ taos> DESCRIBE meters;
- The time format is ```YYYY-MM-DD HH:mm:ss.MS```, with a default resolution of milliseconds, e.g. ```2017-08-12 18:25:58.128```
- The internal function now returns the client's current time
- When inserting a record with timestamp now, the current time of the client submitting the record is used
- Epoch Time: a timestamp can also be a long integer, counting milliseconds since 1970-01-01 00:00:00.000 (UTC/GMT). (Accordingly, if the database's time precision is set to "microseconds", a long-integer timestamp counts microseconds since 1970-01-01 00:00:00.000 (UTC/GMT); the same logic applies to nanosecond precision.)
- Timestamps support arithmetic, e.g. now-2h means two hours before the query time (the last two hours). The unit after the number can be b (nanoseconds), u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), or w (weeks). For example, `select * from t1 where ts > now-2w and ts <= now-1w` queries exactly the full week before last. When specifying the window of a down-sampling interval, the units n (calendar month) and y (calendar year) are also available.
TDengine's default timestamp precision is milliseconds, but microseconds and nanoseconds can be selected through the PRECISION parameter of CREATE DATABASE (nanosecond precision is supported since version 2.1.5.0).
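A minimal sketch (the database name `power` is illustrative):

```mysql
CREATE DATABASE power DAYS 10 KEEP 365 PRECISION 'us';
```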
Ordinary tables in TDengine may use the following 10 data types.
| # | **Type** | **Bytes** | **Description** |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
| 1 | TIMESTAMP | 8 | Timestamp. Default precision is milliseconds; microseconds and nanoseconds are supported. Counted from 1970-01-01 00:00:00.000 (UTC/GMT); earlier times are not allowed. (This range restriction was removed in version 2.0.18.0.) (Nanosecond precision is supported since version 2.1.5.0.) |
| 2 | INT | 4 | Integer, range [-2^31+1, 2^31-1]; -2^31 is reserved for NULL |
| 3 | BIGINT | 8 | Long integer, range [-2^63+1, 2^63-1]; -2^63 is reserved for NULL |
| 4 | FLOAT | 4 | Float, 6-7 significant digits, range [-3.4E38, 3.4E38] |
@ -206,7 +206,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
Shows all tables of the current database.
Note: wildcards can be used in like for name matching; the wildcard string may be at most 20 bytes long. (Since version 2.1.6.1 this limit is relaxed to 100 bytes and is configurable via the maxWildCardsLength parameter in taos.cfg. Very long wildcard strings are discouraged, as they can severely hurt the performance of the LIKE operation.)
Wildcards: 1) '%' matches zero to any number of characters; 2) '\_' matches exactly one character.
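For example, assuming sub-tables d1001 through d1003 exist in the current database:

```mysql
SHOW TABLES LIKE 'd100_';  -- '_' matches exactly one character
SHOW TABLES LIKE 'd%';     -- '%' matches any number of characters
```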
@ -389,7 +389,7 @@ INSERT INTO
INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (1626164208000, 10.15, 217, 0.33);
```
**Note:**
1) In the second example, the two rows use differently formatted timestamps in the first column. A string-format timestamp is unaffected by the database's precision setting, while a long-integer timestamp is affected by it: the timestamp in the example is written 1626164208000 under millisecond precision, but must be written 1626164208000000 under microsecond precision and 1626164208000000000 under nanosecond precision (see the sketch after these notes).
2) When inserting multiple records in one statement, do not set every first-column timestamp to NOW; otherwise the records in the statement share one timestamp and may overwrite each other, so that not all rows are saved correctly. The reason is that NOW resolves to the actual execution time of the SQL statement, and every NOW token in one statement resolves to exactly the same value.
3) The oldest insertable timestamp is the current server time minus keep (the data retention in days); the newest is the current server time plus days (the time span of a data file, in days). keep and days can both be specified when the database is created and default to 3650 and 10 days respectively.
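To make note 1 concrete, a sketch reusing the long-integer timestamp from the example above; the same instant is written differently depending on the database's precision:

```mysql
-- millisecond-precision database:
INSERT INTO d1001 VALUES (1626164208000, 10.15, 217, 0.33);
-- microsecond-precision database, same instant:
INSERT INTO d1001 VALUES (1626164208000000, 10.15, 217, 0.33);
```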
@ -435,6 +435,17 @@ INSERT INTO
INSERT INTO d1001 FILE '/tmp/csvfile.csv';
```
- **Insert records from a file, creating tables automatically**
Since version 2.1.5.0, data inserted from a CSV file can automatically create missing tables from a super-table template. For example:
```mysql
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile.csv';
```
Records can also be inserted into several tables, with automatic table creation, in a single statement. For example:
```mysql
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile_21001.csv'
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
```
**Writing historical records**: use the IMPORT command or the INSERT command; IMPORT has exactly the same syntax and behavior as INSERT.
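Since IMPORT shares INSERT's syntax, a historical row can be backfilled, for example, as:

```mysql
IMPORT INTO d1001 VALUES ('2018-10-01 06:38:05.000', 10.3, 219, 0.31);
```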
**Note:** insert-type SQL statements are parsed in a streaming fashion, so the correct parts before an error are still executed. In the SQL below, the INSERT statement is invalid, but d1001 is still created.
@ -942,6 +953,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
### Selection Functions
All selection functions can output the ts column or tag columns (including tbname) alongside the selected value, which makes it easy to tell which data row the value came from, as the sketch below shows.
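For example, assuming the `meters` super table with columns (ts, current, voltage, phase) used in earlier examples, this reveals which row and which sub-table the minimum came from:

```mysql
SELECT MIN(current), ts, tbname FROM meters;
```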
- **MIN**
```mysql
SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
@ -1215,6 +1228,37 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
Query OK, 1 row(s) in set (0.001042s)
```
- **INTERP**
```mysql
SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR})];
```
Function: returns the record of the specified field of a table/super table at the specified time cross-section.
Return type: same as the queried field.
Applicable fields: all.
Applies to: **tables and super tables**.
Note: (added in version 2.0.15.0) INTERP requires a time cross-section to be specified; if no data exists exactly at that instant, a value is interpolated according to the FILL setting. The WHERE clause may carry additional filters, e.g. on tags or tbname.
Restriction: INTERP does not yet support FILL(NEXT).
Example:
```mysql
taos> select interp(*) from meters where ts='2017-7-14 10:42:00.005' fill(prev);
interp(ts) | interp(f1) | interp(f2) | interp(f3) |
====================================================================
2017-07-14 10:42:00.005 | 5 | 9 | 6 |
Query OK, 1 row(s) in set (0.002912s)
taos> select interp(*) from meters where tbname in ('t1') and ts='2017-7-14 10:42:00.005' fill(prev);
interp(ts) | interp(f1) | interp(f2) | interp(f3) |
====================================================================
2017-07-14 10:42:00.005 | 5 | 6 | 7 |
Query OK, 1 row(s) in set (0.002005s)
```
### Computation Functions
- **DIFF**

View File

@ -6,7 +6,7 @@ TDengine can quickly integrate with [Grafana](https://www.grafana.com/), an open
### Install Grafana
TDengine currently supports Grafana 6.2 and above. You can download and install the package from Grafana website according to the current operating system. The download address is as follows:
https://grafana.com/grafana/download.
@ -64,7 +64,7 @@ According to the default prompt, query the average system memory usage at the sp
#### Import Dashboard
A `tdengine-grafana.json` importable dashboard is provided under the Grafana plug-in directory `/usr/local/taos/connector/grafanaplugin/dashboard`.
Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file:

View File

@ -218,8 +218,4 @@ use telegraf
Switch to the telegraf database, then run show tables, describe table, and similar commands to explore in detail what data the telegraf database holds.
For TDengine query syntax, see the [official TDengine documentation](https://www.taosdata.com/cn/documentation/taos-sql/).
## Adding More Monitored Hosts
As the overview explained, this miniDevops setup already provides a time-series database and a visualization system. To monitor additional machines, simply apply the telegraf or prometheus configuration changes described above on each machine, and data collection plus visualization are in place.

View File

@ -29,15 +29,16 @@ extern "C" {
#include "tsched.h"
#include "tsclient.h"
#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_SUPER_TABLE))
#define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE))
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo) || UTIL_TABLE_IS_TMP_TABLE(metaInfo)))
#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE))
#pragma pack(push,1)
@ -221,7 +222,7 @@ void tscExprDestroy(SArray* pExprInfo);
int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaInfo, SExprInfo*** pExpr, int32_t* num);
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta);
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta, uint64_t id);
SColumn* tscColumnClone(const SColumn* src);
void tscColumnCopy(SColumn* pDest, const SColumn* pSrc);
@ -320,7 +321,7 @@ void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex);
bool hasMoreVnodesToTry(SSqlObj *pSql);
bool hasMoreClauseToTry(SSqlObj* pSql);
void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta);
void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeCachedMeta, uint64_t id);
void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp);
void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp);
@ -359,7 +360,7 @@ bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
STblCond* tsGetTableFilter(SArray* filters, uint64_t uid, int16_t idx);
void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id);
void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id);
#ifdef __cplusplus
}

View File

@ -84,9 +84,14 @@ typedef struct SParamInfo {
} SParamInfo;
typedef struct SBoundColumn {
bool hasVal; // denote if current column has bound or not
int32_t offset; // all column offset value
int32_t offset; // all column offset value
int32_t toffset; // first part offset for SDataRow TODO: get offset from STSchema on future
uint8_t valStat; // denotes whether the column is bound (VAL_STAT_HAS = 0: has value; VAL_STAT_NONE = 1: no value)
} SBoundColumn;
typedef enum {
VAL_STAT_HAS = 0x0, // 0 means has val
VAL_STAT_NONE = 0x01, // 1 means no val
} EValStat;
typedef struct {
uint16_t schemaColIdx;
@ -99,32 +104,106 @@ typedef enum _COL_ORDER_STATUS {
ORDER_STATUS_ORDERED = 1,
ORDER_STATUS_DISORDERED = 2,
} EOrderStatus;
typedef struct SParsedDataColInfo {
int16_t numOfCols;
int16_t numOfBound;
int32_t * boundedColumns; // bounded column idx according to schema
uint16_t flen; // TODO: get from STSchema
uint16_t allNullLen; // TODO: get from STSchema
uint16_t extendedVarLen;
int32_t * boundedColumns; // bound column idx according to schema
SBoundColumn * cols;
SBoundIdxInfo *colIdxInfo;
int8_t orderStatus; // bounded columns:
int8_t orderStatus; // bound columns
} SParsedDataColInfo;
#define IS_DATA_COL_ORDERED(s) ((s) == (int8_t)ORDER_STATUS_ORDERED)
#define IS_DATA_COL_ORDERED(spd) ((spd->orderStatus) == (int8_t)ORDER_STATUS_ORDERED)
typedef struct {
SSchema * pSchema;
int16_t sversion;
int32_t flen;
uint16_t nCols;
void * buf;
void * pDataBlock;
SSubmitBlk *pSubmitBlk;
int32_t dataLen; // len of SDataRow
int32_t kvLen; // len of SKVRow
} SMemRowInfo;
typedef struct {
uint8_t memRowType;
uint8_t compareStat; // 0 unknown, 1 need compare, 2 no need
TDRowTLenT dataRowInitLen;
TDRowTLenT kvRowInitLen;
SMemRowInfo *rowInfo;
} SMemRowBuilder;
typedef struct {
TDRowLenT allNullLen;
} SMemRowHelper;
typedef enum {
ROW_COMPARE_UNKNOWN = 0,
ROW_COMPARE_NEED = 1,
ROW_COMPARE_NO_NEED = 2,
} ERowCompareStat;
int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec);
int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols,
int32_t allNullLen);
void destroyMemRowBuilder(SMemRowBuilder *pBuilder);
/**
 * @brief Get the append offset and column id of a bound column.
 *
 * @param pSchema    table schema
 * @param memRowType row type (SMEM_ROW_DATA or SMEM_ROW_KV)
 * @param spd        bound-column info parsed from the statement
 * @param idx        the absolute bound index of columns
 * @param toffset    output: offset at which the value is appended
 * @param colId      output: column id of the bound column
 */
static FORCE_INLINE void tscGetMemRowAppendInfo(SSchema *pSchema, uint8_t memRowType, SParsedDataColInfo *spd,
int32_t idx, int32_t *toffset, int16_t *colId) {
int32_t schemaIdx = 0;
if (IS_DATA_COL_ORDERED(spd)) {
schemaIdx = spd->boundedColumns[idx];
if (isDataRowT(memRowType)) {
*toffset = (spd->cols + schemaIdx)->toffset; // the offset of firstPart
} else {
*toffset = idx * sizeof(SColIdx); // the offset of SColIdx
}
} else {
ASSERT(idx == (spd->colIdxInfo + idx)->boundIdx);
schemaIdx = (spd->colIdxInfo + idx)->schemaColIdx;
if (isDataRowT(memRowType)) {
*toffset = (spd->cols + schemaIdx)->toffset;
} else {
*toffset = ((spd->colIdxInfo + idx)->finalIdx) * sizeof(SColIdx);
}
}
*colId = pSchema[schemaIdx].colId;
}
/**
 * @brief Append one column value to a mem row; applicable when values are consumed by multiple columns.
 *
 * @param row           target mem row
 * @param value         column value to append
 * @param isCopyVarData in some scenarios the var-type value is copied into the row before calling tdAppend***ColVal()
 * @param colId         column id
 * @param colType       column type
 * @param toffset       offset at which the value is appended
 * @param pBuilder      row builder that tracks per-row data/kv lengths
 * @param rowNum        index of the row within the current batch
 */
static FORCE_INLINE void tscAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId,
int8_t colType, int32_t toffset, SMemRowBuilder *pBuilder,
int32_t rowNum) {
tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset);
if (pBuilder->compareStat == ROW_COMPARE_NEED) {
SMemRowInfo *pRowInfo = pBuilder->rowInfo + rowNum;
tdGetColAppendDeltaLen(value, colType, &pRowInfo->dataLen, &pRowInfo->kvLen);
}
}
// Applicable to consume by one row
static FORCE_INLINE void tscAppendMemRowColValEx(SMemRow row, const void *value, bool isCopyVarData, int16_t colId,
int8_t colType, int32_t toffset, int32_t *dataLen, int32_t *kvLen,
uint8_t compareStat) {
tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset);
if (compareStat == ROW_COMPARE_NEED) {
tdGetColAppendDeltaLen(value, colType, dataLen, kvLen);
}
}
typedef struct STableDataBlocks {
SName tableName;
int8_t tsSource; // where does the UNIX timestamp come from, server or client
@ -146,7 +225,7 @@ typedef struct STableDataBlocks {
uint32_t numOfAllocedParams;
uint32_t numOfParams;
SParamInfo * params;
SMemRowHelper rowHelper;
SMemRowBuilder rowBuilder;
} STableDataBlocks;
typedef struct {
@ -368,7 +447,7 @@ void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBloc
void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pParent);
void destroyTableNameList(SInsertStatementParam* pInsertParam);
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta, uint64_t id);
/**
* free query result of the sql object
@ -435,8 +514,398 @@ int16_t getNewResColId(SSqlCmd* pCmd);
int32_t schemaIdxCompar(const void *lhs, const void *rhs);
int32_t boundIdxCompar(const void *lhs, const void *rhs);
int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen);
int32_t getExtendedRowSize(STableComInfo *tinfo);
static FORCE_INLINE int32_t getExtendedRowSize(STableDataBlocks *pBlock) {
ASSERT(pBlock->rowSize == pBlock->pTableMeta->tableInfo.rowSize);
return pBlock->rowSize + TD_MEM_ROW_DATA_HEAD_SIZE + pBlock->boundColumnInfo.extendedVarLen;
}
static FORCE_INLINE void checkAndConvertMemRow(SMemRow row, int32_t dataLen, int32_t kvLen) {
if (isDataRow(row)) {
if (kvLen < (dataLen * KVRatioConvert)) {
memRowSetConvert(row);
}
} else if (kvLen > dataLen) {
memRowSetConvert(row);
}
}
static FORCE_INLINE void initSMemRow(SMemRow row, uint8_t memRowType, STableDataBlocks *pBlock, int16_t nBoundCols) {
memRowSetType(row, memRowType);
if (isDataRowT(memRowType)) {
dataRowSetVersion(memRowDataBody(row), pBlock->pTableMeta->sversion);
dataRowSetLen(memRowDataBody(row), (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBlock->boundColumnInfo.flen));
} else {
ASSERT(nBoundCols > 0);
memRowSetKvVersion(row, pBlock->pTableMeta->sversion);
kvRowSetNCols(memRowKvBody(row), nBoundCols);
kvRowSetLen(memRowKvBody(row), (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols));
}
}
/**
* TODO: Move to tdataformat.h and refactor when STSchema available.
* - fetch flen and toffset from STSChema and remove param spd
*/
static FORCE_INLINE void convertToSDataRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols,
SParsedDataColInfo *spd) {
ASSERT(isKvRow(src));
SKVRow kvRow = memRowKvBody(src);
SDataRow dataRow = memRowDataBody(dest);
memRowSetType(dest, SMEM_ROW_DATA);
dataRowSetVersion(dataRow, memRowKvVersion(src));
dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + spd->flen));
int32_t kvIdx = 0;
for (int i = 0; i < nCols; ++i) {
SSchema *schema = pSchema + i;
void * val = tdGetKVRowValOfColEx(kvRow, schema->colId, &kvIdx);
tdAppendDataColVal(dataRow, val != NULL ? val : getNullValue(schema->type), true, schema->type,
(spd->cols + i)->toffset);
}
}
// TODO: Move to tdataformat.h and refactor when STSchema available.
static FORCE_INLINE void convertToSKVRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols, int nBoundCols,
SParsedDataColInfo *spd) {
ASSERT(isDataRow(src));
SDataRow dataRow = memRowDataBody(src);
SKVRow kvRow = memRowKvBody(dest);
memRowSetType(dest, SMEM_ROW_KV);
memRowSetKvVersion(kvRow, dataRowVersion(dataRow));
kvRowSetNCols(kvRow, nBoundCols);
kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols));
int32_t toffset = 0, kvOffset = 0;
for (int i = 0; i < nCols; ++i) {
if ((spd->cols + i)->valStat == VAL_STAT_HAS) {
SSchema *schema = pSchema + i;
toffset = (spd->cols + i)->toffset;
void *val = tdGetRowDataOfCol(dataRow, schema->type, toffset + TD_DATA_ROW_HEAD_SIZE);
tdAppendKvColVal(kvRow, val, true, schema->colId, schema->type, kvOffset);
kvOffset += sizeof(SColIdx);
}
}
}
// TODO: Move to tdataformat.h and refactor when STSchema available.
static FORCE_INLINE void convertSMemRow(SMemRow dest, SMemRow src, STableDataBlocks *pBlock) {
STableMeta * pTableMeta = pBlock->pTableMeta;
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
SSchema * pSchema = tscGetTableSchema(pTableMeta);
SParsedDataColInfo *spd = &pBlock->boundColumnInfo;
ASSERT(dest != src);
if (isDataRow(src)) {
// TODO: Can we use pBlock -> numOfParam directly?
ASSERT(spd->numOfBound > 0);
convertToSKVRow(dest, src, pSchema, tinfo.numOfColumns, spd->numOfBound, spd);
} else {
convertToSDataRow(dest, src, pSchema, tinfo.numOfColumns, spd);
}
}
static bool isNullStr(SStrToken *pToken) {
return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) &&
(strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0));
}
static FORCE_INLINE int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
errno = 0;
*value = strtold(pToken->z, endPtr);
// not a valid integer number, return error
if ((*endPtr - pToken->z) != pToken->n) {
return TK_ILLEGAL;
}
return pToken->type;
}
static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE;
static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE;
static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, SMemRow row, char *msg, char **str,
bool primaryKey, int16_t timePrec, int32_t toffset, int16_t colId,
int32_t *dataLen, int32_t *kvLen, uint8_t compareStat) {
int64_t iv;
int32_t ret;
char * endptr = NULL;
if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z);
}
switch (pSchema->type) {
case TSDB_DATA_TYPE_BOOL: { // bool
if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) {
if (strncmp(pToken->z, "true", pToken->n) == 0) {
tscAppendMemRowColValEx(row, &TRUE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
} else if (strncmp(pToken->z, "false", pToken->n) == 0) {
tscAppendMemRowColValEx(row, &FALSE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z);
}
} else if (pToken->type == TK_INTEGER) {
iv = strtoll(pToken->z, NULL, 10);
tscAppendMemRowColValEx(row, ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset,
dataLen, kvLen, compareStat);
} else if (pToken->type == TK_FLOAT) {
double dv = strtod(pToken->z, NULL);
tscAppendMemRowColValEx(row, ((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset,
dataLen, kvLen, compareStat);
} else {
return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z);
}
}
break;
}
case TSDB_DATA_TYPE_TINYINT:
if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z);
} else if (!IS_VALID_TINYINT(iv)) {
return tscInvalidOperationMsg(msg, "data overflow", pToken->z);
}
uint8_t tmpVal = (uint8_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
case TSDB_DATA_TYPE_UTINYINT:
if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z);
} else if (!IS_VALID_UTINYINT(iv)) {
return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z);
}
uint8_t tmpVal = (uint8_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
case TSDB_DATA_TYPE_SMALLINT:
if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z);
} else if (!IS_VALID_SMALLINT(iv)) {
return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z);
}
int16_t tmpVal = (int16_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
case TSDB_DATA_TYPE_USMALLINT:
if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z);
} else if (!IS_VALID_USMALLINT(iv)) {
return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z);
}
uint16_t tmpVal = (uint16_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
case TSDB_DATA_TYPE_INT:
if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid int data", pToken->z);
} else if (!IS_VALID_INT(iv)) {
return tscInvalidOperationMsg(msg, "int data overflow", pToken->z);
}
int32_t tmpVal = (int32_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
case TSDB_DATA_TYPE_UINT:
if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z);
} else if (!IS_VALID_UINT(iv)) {
return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z);
}
uint32_t tmpVal = (uint32_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
case TSDB_DATA_TYPE_BIGINT:
if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z);
} else if (!IS_VALID_BIGINT(iv)) {
return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z);
}
tscAppendMemRowColValEx(row, &iv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
case TSDB_DATA_TYPE_UBIGINT:
if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z);
} else if (!IS_VALID_UBIGINT((uint64_t)iv)) {
return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z);
}
uint64_t tmpVal = (uint64_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
case TSDB_DATA_TYPE_FLOAT:
if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
double dv;
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
}
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) ||
isnan(dv)) {
return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
}
float tmpVal = (float)dv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
case TSDB_DATA_TYPE_DOUBLE:
if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
double dv;
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
}
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) {
return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
}
tscAppendMemRowColValEx(row, &dv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
case TSDB_DATA_TYPE_BINARY:
// binary data cannot be null-terminated char string, otherwise the last char of the string is lost
if (pToken->type == TK_NULL) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else { // too long values will return invalid sql, not be truncated automatically
if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor
return tscInvalidOperationMsg(msg, "string data overflow", pToken->z);
}
// STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n);
char *rowEnd = memRowEnd(row);
STR_WITH_SIZE_TO_VARSTR(rowEnd, pToken->z, pToken->n);
tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
case TSDB_DATA_TYPE_NCHAR:
if (pToken->type == TK_NULL) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
} else {
// if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long'
int32_t output = 0;
char * rowEnd = memRowEnd(row);
if (!taosMbsToUcs4(pToken->z, pToken->n, (char *)varDataVal(rowEnd), pSchema->bytes - VARSTR_HEADER_SIZE,
&output)) {
char buf[512] = {0};
snprintf(buf, tListLen(buf), "%s", strerror(errno));
return tscInvalidOperationMsg(msg, buf, pToken->z);
}
varDataSetLen(rowEnd, output);
tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
case TSDB_DATA_TYPE_TIMESTAMP: {
if (pToken->type == TK_NULL) {
if (primaryKey) {
// When building SKVRow primaryKey, we should not skip even with NULL value.
int64_t tmpVal = 0;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
} else {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
compareStat);
}
} else {
int64_t tmpVal;
if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z);
}
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
}
break;
}
}
return TSDB_CODE_SUCCESS;
}
#ifdef __cplusplus
}

View File

@ -351,7 +351,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (pSql->pStream == NULL) {
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
if (pQueryInfo != NULL && TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
tscDebug("0x%" PRIx64 " continue parse sql after get table-meta", pSql->self);
code = tsParseSql(pSql, false);
@ -381,7 +381,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
} else {
if (pSql->retryReason != TSDB_CODE_SUCCESS) {
tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", pSql->self);
tscResetSqlCmd(pCmd, false);
pSql->retryReason = TSDB_CODE_SUCCESS;
} else {
tscDebug("0x%" PRIx64 " cached table-meta, continue validate sql statement and send query", pSql->self);

View File

@ -38,43 +38,60 @@ enum {
TSDB_USE_CLI_TS = 1,
};
static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE;
static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE;
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows);
static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema,
char *str, char **end);
int32_t getExtendedRowSize(STableComInfo *tinfo) {
return tinfo->rowSize + PAYLOAD_HEADER_LEN + PAYLOAD_COL_HEAD_LEN * tinfo->numOfColumns;
}
int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen) {
pHelper->allNullLen = allNullColsLen; // TODO: get allNullColsLen when creating or altering table meta
if (pHelper->allNullLen == 0) {
for (uint16_t i = 0; i < nCols; ++i) {
uint8_t type = pSSchema[i].type;
int32_t typeLen = TYPE_BYTES[type];
pHelper->allNullLen += typeLen;
if (TSDB_DATA_TYPE_BINARY == type) {
pHelper->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES);
} else if (TSDB_DATA_TYPE_NCHAR == type) {
int len = VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE;
pHelper->allNullLen += len;
}
int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols,
int32_t allNullLen) {
ASSERT(nRows >= 0 && nCols > 0 && (nBoundCols <= nCols));
if (nRows > 0) {
// already init(bind multiple rows by single column)
if (pBuilder->compareStat == ROW_COMPARE_NEED && (pBuilder->rowInfo != NULL)) {
return TSDB_CODE_SUCCESS;
}
}
return 0;
}
static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
errno = 0;
*value = strtold(pToken->z, endPtr);
// not a valid integer number, return error
if ((*endPtr - pToken->z) != pToken->n) {
return TK_ILLEGAL;
if (nBoundCols == 0) { // file input
pBuilder->memRowType = SMEM_ROW_DATA;
pBuilder->compareStat = ROW_COMPARE_NO_NEED;
return TSDB_CODE_SUCCESS;
} else {
float boundRatio = ((float)nBoundCols / (float)nCols);
if (boundRatio < KVRatioKV) {
pBuilder->memRowType = SMEM_ROW_KV;
pBuilder->compareStat = ROW_COMPARE_NO_NEED;
return TSDB_CODE_SUCCESS;
} else if (boundRatio > KVRatioData) {
pBuilder->memRowType = SMEM_ROW_DATA;
pBuilder->compareStat = ROW_COMPARE_NO_NEED;
return TSDB_CODE_SUCCESS;
}
pBuilder->compareStat = ROW_COMPARE_NEED;
if (boundRatio < KVRatioPredict) {
pBuilder->memRowType = SMEM_ROW_KV;
} else {
pBuilder->memRowType = SMEM_ROW_DATA;
}
}
return pToken->type;
pBuilder->dataRowInitLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen;
pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx);
if (nRows > 0) {
pBuilder->rowInfo = tcalloc(nRows, sizeof(SMemRowInfo));
if (pBuilder->rowInfo == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
for (int i = 0; i < nRows; ++i) {
(pBuilder->rowInfo + i)->dataLen = pBuilder->dataRowInitLen;
(pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen;
}
}
return TSDB_CODE_SUCCESS;
}
int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
@ -146,10 +163,6 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
return TSDB_CODE_SUCCESS;
}
static bool isNullStr(SStrToken* pToken) {
return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) &&
(strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0));
}
int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
int16_t timePrec) {
int64_t iv;
@ -400,342 +413,6 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
return TSDB_CODE_SUCCESS;
}
static FORCE_INLINE TDRowLenT tsSetPayloadColValue(char *payloadStart, char *payload, int16_t columnId,
uint8_t columnType, const void *value, uint16_t valueLen, TDRowTLenT tOffset) {
payloadColSetId(payload, columnId);
payloadColSetType(payload, columnType);
memcpy(POINTER_SHIFT(payloadStart,tOffset), value, valueLen);
return valueLen;
}
static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *payloadStart, char *primaryKeyStart,
char *payload, char *msg, char **str, bool primaryKey, int16_t timePrec,
TDRowTLenT tOffset, TDRowLenT *sizeAppend, TDRowLenT *dataRowColDeltaLen,
TDRowLenT *kvRowColLen) {
int64_t iv;
int32_t ret;
char * endptr = NULL;
if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z);
}
switch (pSchema->type) {
case TSDB_DATA_TYPE_BOOL: { // bool
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_BOOL), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
} else {
if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) {
if (strncmp(pToken->z, "true", pToken->n) == 0) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &TRUE_VALUE,
TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]);
} else if (strncmp(pToken->z, "false", pToken->n) == 0) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &FALSE_VALUE,
TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]);
} else {
return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z);
}
} else if (pToken->type == TK_INTEGER) {
iv = strtoll(pToken->z, NULL, 10);
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]);
} else if (pToken->type == TK_FLOAT) {
double dv = strtod(pToken->z, NULL);
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]);
} else {
return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z);
}
}
break;
}
case TSDB_DATA_TYPE_TINYINT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_TINYINT), TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z);
} else if (!IS_VALID_TINYINT(iv)) {
return tscInvalidOperationMsg(msg, "data overflow", pToken->z);
}
uint8_t tmpVal = (uint8_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TINYINT]);
}
break;
case TSDB_DATA_TYPE_UTINYINT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_UTINYINT), TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z);
} else if (!IS_VALID_UTINYINT(iv)) {
return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z);
}
uint8_t tmpVal = (uint8_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT]);
}
break;
case TSDB_DATA_TYPE_SMALLINT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_SMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z);
} else if (!IS_VALID_SMALLINT(iv)) {
return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z);
}
int16_t tmpVal = (int16_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT]);
}
break;
case TSDB_DATA_TYPE_USMALLINT:
if (isNullStr(pToken)) {
*sizeAppend =
tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_USMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z);
} else if (!IS_VALID_USMALLINT(iv)) {
return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z);
}
uint16_t tmpVal = (uint16_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT]);
}
break;
case TSDB_DATA_TYPE_INT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_INT), TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid int data", pToken->z);
} else if (!IS_VALID_INT(iv)) {
return tscInvalidOperationMsg(msg, "int data overflow", pToken->z);
}
int32_t tmpVal = (int32_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_INT]);
}
break;
case TSDB_DATA_TYPE_UINT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_UINT), TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z);
} else if (!IS_VALID_UINT(iv)) {
return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z);
}
uint32_t tmpVal = (uint32_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UINT]);
}
break;
case TSDB_DATA_TYPE_BIGINT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_BIGINT), TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z);
} else if (!IS_VALID_BIGINT(iv)) {
return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z);
}
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &iv,
TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BIGINT]);
}
break;
case TSDB_DATA_TYPE_UBIGINT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_UBIGINT), TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z);
} else if (!IS_VALID_UBIGINT((uint64_t)iv)) {
return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z);
}
uint64_t tmpVal = (uint64_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT]);
}
break;
case TSDB_DATA_TYPE_FLOAT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_FLOAT), TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset);
} else {
double dv;
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
}
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) ||
isnan(dv)) {
return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
}
float tmpVal = (float)dv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_FLOAT]);
}
break;
case TSDB_DATA_TYPE_DOUBLE:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_DOUBLE), TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset);
} else {
double dv;
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
}
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) {
return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
}
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &dv,
TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE]);
}
break;
case TSDB_DATA_TYPE_BINARY:
// binary data cannot be treated as a null-terminated char string, otherwise the last char of the string is lost
if (pToken->type == TK_NULL) {
payloadColSetId(payload, pSchema->colId);
payloadColSetType(payload, pSchema->type);
memcpy(POINTER_SHIFT(payloadStart, tOffset), getNullValue(TSDB_DATA_TYPE_BINARY), VARSTR_HEADER_SIZE + CHAR_BYTES);
*sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + CHAR_BYTES);
} else { // overly long values return an invalid sql error and are not truncated automatically
if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor
return tscInvalidOperationMsg(msg, "string data overflow", pToken->z);
}
// STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n);
payloadColSetId(payload, pSchema->colId);
payloadColSetType(payload, pSchema->type);
varDataSetLen(POINTER_SHIFT(payloadStart,tOffset), pToken->n);
memcpy(varDataVal(POINTER_SHIFT(payloadStart,tOffset)), pToken->z, pToken->n);
*sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + pToken->n);
*dataRowColDeltaLen += (TDRowLenT)(pToken->n - CHAR_BYTES);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + pToken->n);
}
break;
case TSDB_DATA_TYPE_NCHAR:
if (pToken->type == TK_NULL) {
payloadColSetId(payload, pSchema->colId);
payloadColSetType(payload, pSchema->type);
memcpy(POINTER_SHIFT(payloadStart,tOffset), getNullValue(TSDB_DATA_TYPE_NCHAR), VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
*sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
} else {
// if the converted output length exceeds pColumnModel->bytes, return error: 'Argument list too long'
int32_t output = 0;
payloadColSetId(payload, pSchema->colId);
payloadColSetType(payload, pSchema->type);
if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(POINTER_SHIFT(payloadStart,tOffset)),
pSchema->bytes - VARSTR_HEADER_SIZE, &output)) {
char buf[512] = {0};
snprintf(buf, tListLen(buf), "%s", strerror(errno));
return tscInvalidOperationMsg(msg, buf, pToken->z);
}
varDataSetLen(POINTER_SHIFT(payloadStart,tOffset), output);
*sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + output);
*dataRowColDeltaLen += (TDRowLenT)(output - sizeof(uint32_t));
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + output);
}
break;
case TSDB_DATA_TYPE_TIMESTAMP: {
if (pToken->type == TK_NULL) {
if (primaryKey) {
// When building the SKVRow primary key, it must not be skipped even if the value is NULL.
int64_t tmpVal = 0;
*sizeAppend = tsSetPayloadColValue(payloadStart, primaryKeyStart, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]);
} else {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_TIMESTAMP),
TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset);
}
} else {
int64_t tmpVal;
if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z);
}
*sizeAppend = tsSetPayloadColValue(payloadStart, primaryKey ? primaryKeyStart : payload, pSchema->colId,
pSchema->type, &tmpVal, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]);
}
break;
}
}
return TSDB_CODE_SUCCESS;
}
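Each numeric case above repeats one parse-validate-narrow pattern. A minimal sketch of that pattern, reusing the helpers visible in this file (the wrapper name parseInt32Cell is hypothetical):

static int32_t parseInt32Cell(SStrToken *pToken, int32_t *out, char *msg) {
  int64_t iv = 0;
  int32_t ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
  if (ret != TSDB_CODE_SUCCESS) {
    return tscInvalidOperationMsg(msg, "invalid int data", pToken->z);
  }
  if (!IS_VALID_INT(iv)) {  // range check before narrowing
    return tscInvalidOperationMsg(msg, "int data overflow", pToken->z);
  }
  *out = (int32_t)iv;  // safe cast: the value is proven to fit in 32 bits
  return TSDB_CODE_SUCCESS;
}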
/*
* The server time and client time must not be mixed up in one sql string.
* A sort operation is not needed if server time is used.
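A minimal sketch of the per-block rule that tsCheckTimestamp enforces, assuming the tsSource field used later in this file; the constant TSDB_USE_CLI_TS and the -1 "unset" marker are assumptions of the sketch:

static int32_t checkTsSource(STableDataBlocks *pBlock, int32_t src) {
  // src is TSDB_USE_SERVER_TS (NOW) or TSDB_USE_CLI_TS (explicit timestamp)
  if (pBlock->tsSource == -1) {
    pBlock->tsSource = src;  // the first row of the block fixes the source
    return TSDB_CODE_SUCCESS;
  }
  return (pBlock->tsSource == src) ? TSDB_CODE_SUCCESS
                                   : TSDB_CODE_TSC_INVALID_TIME_STAMP;
}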
@ -777,31 +454,24 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
int32_t index = 0;
SStrToken sToken = {0};
SMemRowHelper *pHelper = &pDataBlocks->rowHelper;
char * payload = pDataBlocks->pData + pDataBlocks->size;
char *row = pDataBlocks->pData + pDataBlocks->size; // skip the SSubmitBlk header
SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo;
SSchema * schema = tscGetTableSchema(pDataBlocks->pTableMeta);
STableMeta * pTableMeta = pDataBlocks->pTableMeta;
SSchema * schema = tscGetTableSchema(pTableMeta);
SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder;
int32_t dataLen = pBuilder->dataRowInitLen;
int32_t kvLen = pBuilder->kvRowInitLen;
bool isParseBindParam = false;
TDRowTLenT dataRowLen = pHelper->allNullLen;
TDRowTLenT kvRowLen = TD_MEM_ROW_KV_VER_SIZE;
TDRowTLenT payloadValOffset = 0;
TDRowLenT colValOffset = 0;
ASSERT(dataRowLen > 0);
payloadSetNCols(payload, spd->numOfBound);
payloadValOffset = payloadValuesOffset(payload); // rely on payloadNCols
// payloadSetTLen(payload, payloadValOffset);
char *kvPrimaryKeyStart = payload + PAYLOAD_HEADER_LEN; // primaryKey in 1st column tuple
char *kvStart = kvPrimaryKeyStart + PAYLOAD_COL_HEAD_LEN; // the column tuple behind the primaryKey
initSMemRow(row, pBuilder->memRowType, pDataBlocks, spd->numOfBound);
// 1. set the parsed value from sql string
for (int i = 0; i < spd->numOfBound; ++i) {
// the start position in the data block buffer of the current value in the sql string
int32_t colIndex = spd->boundedColumns[i];
char *start = payload + spd->cols[colIndex].offset;
char *start = row + spd->cols[colIndex].offset;
SSchema *pSchema = &schema[colIndex]; // get colId here
@ -810,6 +480,9 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
*str += index;
if (sToken.type == TK_QUESTION) {
if (!isParseBindParam) {
isParseBindParam = true;
}
if (pInsertParam->insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, "? only allowed in binding insertion", *str);
}
@ -860,53 +533,44 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
sToken.n -= 2 + cnt;
}
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
TDRowLenT dataRowDeltaColLen = 0; // when combining the data as SDataRow, the delta length relative to the all-NULL columns
TDRowLenT kvRowColLen = 0;
TDRowLenT colValAppended = 0;
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
int32_t toffset = -1;
int16_t colId = -1;
tscGetMemRowAppendInfo(schema, pBuilder->memRowType, spd, i, &toffset, &colId);
if (!IS_DATA_COL_ORDERED(spd->orderStatus)) {
ASSERT(spd->colIdxInfo != NULL);
if(!isPrimaryKey) {
kvStart = POINTER_SHIFT(kvPrimaryKeyStart, spd->colIdxInfo[i].finalIdx * PAYLOAD_COL_HEAD_LEN);
} else {
ASSERT(spd->colIdxInfo[i].finalIdx == 0);
}
}
// the primary key is located in the 1st column
int32_t ret = tsParseOneColumnKV(pSchema, &sToken, payload, kvPrimaryKeyStart, kvStart, pInsertParam->msg, str,
isPrimaryKey, timePrec, payloadValOffset + colValOffset, &colValAppended,
&dataRowDeltaColLen, &kvRowColLen);
int32_t ret = tsParseOneColumnKV(pSchema, &sToken, row, pInsertParam->msg, str, isPrimaryKey, timePrec, toffset,
colId, &dataLen, &kvLen, pBuilder->compareStat);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
if (isPrimaryKey) {
if (tsCheckTimestamp(pDataBlocks, payloadValues(payload)) != TSDB_CODE_SUCCESS) {
TSKEY tsKey = memRowKey(row);
if (tsCheckTimestamp(pDataBlocks, (const char *)&tsKey) != TSDB_CODE_SUCCESS) {
tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z);
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
payloadColSetOffset(kvPrimaryKeyStart, colValOffset);
} else {
payloadColSetOffset(kvStart, colValOffset);
if (IS_DATA_COL_ORDERED(spd->orderStatus)) {
kvStart += PAYLOAD_COL_HEAD_LEN; // move to next column
}
}
}
if (!isParseBindParam) {
// 2. check and set convert flag
if (pBuilder->compareStat == ROW_COMPARE_NEED) {
checkAndConvertMemRow(row, dataLen, kvLen);
}
colValOffset += colValAppended;
kvRowLen += kvRowColLen;
dataRowLen += dataRowDeltaColLen;
// 3. set the null value for the columns that are not assigned values
if ((spd->numOfBound < spd->numOfCols) && isDataRow(row) && !isNeedConvertRow(row)) {
SDataRow dataRow = memRowDataBody(row);
for (int32_t i = 0; i < spd->numOfCols; ++i) {
if (spd->cols[i].valStat == VAL_STAT_NONE) {
tdAppendDataColVal(dataRow, getNullValue(schema[i].type), true, schema[i].type, spd->cols[i].toffset);
}
}
}
}
if (kvRowLen < dataRowLen) {
payloadSetType(payload, SMEM_ROW_KV);
} else {
payloadSetType(payload, SMEM_ROW_DATA);
}
*len = (int32_t)(payloadValOffset + colValOffset);
payloadSetTLen(payload, *len);
*len = getExtendedRowSize(pDataBlocks);
return TSDB_CODE_SUCCESS;
}
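The dataLen/kvLen totals accumulated above drive the row-format choice made in checkAndConvertMemRow; a sketch of the decision, with the helper name illustrative and the constants taken from this file:

static uint8_t chooseMemRowType(int32_t dataLen, int32_t kvLen) {
  // KV rows store (colId, value) pairs and win when many columns are NULL or
  // unbound; data rows store a dense fixed-layout tuple and win otherwise.
  return (kvLen < dataLen) ? SMEM_ROW_KV : SMEM_ROW_DATA;
}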
@ -957,11 +621,13 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
int32_t precision = tinfo.precision;
int32_t extendedRowSize = getExtendedRowSize(&tinfo);
initSMemRowHelper(&pDataBlock->rowHelper, tscGetTableSchema(pDataBlock->pTableMeta),
tscGetNumOfColumns(pDataBlock->pTableMeta), 0);
int32_t extendedRowSize = getExtendedRowSize(pDataBlock);
if (TSDB_CODE_SUCCESS !=
(code = initMemRowBuilder(&pDataBlock->rowBuilder, 0, tinfo.numOfColumns, pDataBlock->boundColumnInfo.numOfBound,
pDataBlock->boundColumnInfo.allNullLen))) {
return code;
}
while (1) {
index = 0;
sToken = tStrGetToken(*str, &index, false);
@ -991,9 +657,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
index = 0;
sToken = tStrGetToken(*str, &index, false);
if (sToken.n == 0 || sToken.type != TK_RP) {
tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str);
code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return code;
return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str);
}
*str += index;
@ -1012,19 +676,37 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) {
pColInfo->numOfCols = numOfCols;
pColInfo->numOfBound = numOfCols;
pColInfo->orderStatus = ORDER_STATUS_ORDERED; // default is ORDERED for non-bound mode
pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t));
pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn));
pColInfo->colIdxInfo = NULL;
pColInfo->flen = 0;
pColInfo->allNullLen = 0;
int32_t nVar = 0;
for (int32_t i = 0; i < pColInfo->numOfCols; ++i) {
uint8_t type = pSchema[i].type;
if (i > 0) {
pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset;
pColInfo->cols[i].toffset = pColInfo->flen;
}
pColInfo->flen += TYPE_BYTES[type];
switch (type) {
case TSDB_DATA_TYPE_BINARY:
pColInfo->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES);
++nVar;
break;
case TSDB_DATA_TYPE_NCHAR:
pColInfo->allNullLen += (VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
++nVar;
break;
default:
break;
}
pColInfo->cols[i].hasVal = true;
pColInfo->boundedColumns[i] = i;
}
pColInfo->allNullLen += pColInfo->flen;
pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT));
}
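A worked example of the bookkeeping above, for a hypothetical schema (ts TIMESTAMP, v INT, s BINARY(20)):

/* cols[i].offset accumulates raw schema bytes (row-wise value layout), while
 * cols[i].toffset accumulates flen (fixed-width slot layout), so:
 *   flen           = TYPE_BYTES[TIMESTAMP] + TYPE_BYTES[INT] + TYPE_BYTES[BINARY]
 *   allNullLen     = flen + VARSTR_HEADER_SIZE + CHAR_BYTES  // one var column
 *   extendedVarLen = 1 * sizeof(VarDataOffsetT)              // nVar == 1
 */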
int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) {
@ -1124,35 +806,29 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk
if (dataBuf->tsSource == TSDB_USE_SERVER_TS) {
assert(dataBuf->ordered);
}
// allocate memory
size_t nAlloc = nRows * sizeof(SBlockKeyTuple);
if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) {
size_t nRealAlloc = nAlloc + 10 * sizeof(SBlockKeyTuple);
char * tmp = trealloc(pBlkKeyInfo->pKeyTuple, nRealAlloc);
if (tmp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp;
pBlkKeyInfo->maxBytesAlloc = (int32_t)nRealAlloc;
}
memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc);
int32_t extendedRowSize = getExtendedRowSize(dataBuf);
SBlockKeyTuple *pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
char * pBlockData = pBlocks->data;
TDRowTLenT totolPayloadTLen = 0;
TDRowTLenT payloadTLen = 0;
int n = 0;
while (n < nRows) {
pBlkKeyTuple->skey = payloadTSKey(pBlockData);
pBlkKeyTuple->skey = memRowKey(pBlockData);
pBlkKeyTuple->payloadAddr = pBlockData;
payloadTLen = payloadTLen(pBlockData);
#if 0
ASSERT(payloadNCols(pBlockData) <= 4096);
ASSERT(payloadTLen(pBlockData) < 65536);
#endif
totolPayloadTLen += payloadTLen;
// next loop
pBlockData += payloadTLen;
pBlockData += extendedRowSize;
++pBlkKeyTuple;
++n;
}
@ -1169,7 +845,6 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk
TSKEY tj = (pBlkKeyTuple + j)->skey;
if (ti == tj) {
totolPayloadTLen -= payloadTLen(pBlkKeyTuple + j);
++j;
continue;
}
@ -1185,17 +860,15 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk
pBlocks->numOfRows = i + 1;
}
dataBuf->size = sizeof(SSubmitBlk) + totolPayloadTLen;
dataBuf->size = sizeof(SSubmitBlk) + pBlocks->numOfRows * extendedRowSize;
dataBuf->prevTS = INT64_MIN;
return 0;
}
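The timestamp dedup above keeps the first row of each duplicate key and compacts the tuple array in place; the same idea as a standalone sketch (function name illustrative):

static int32_t dedupSortedRows(SBlockKeyTuple *tuples, int32_t nRows) {
  int32_t i = 0;
  for (int32_t j = 1; j < nRows; ++j) {
    if (tuples[j].skey == tuples[i].skey) {
      continue;  // duplicate timestamp: drop the later row
    }
    tuples[++i] = tuples[j];  // keep the first occurrence, compact leftwards
  }
  return i + 1;  // the new number of rows
}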
static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta);
int32_t maxNumOfRows;
int32_t code = tscAllocateMemIfNeed(dataBuf, getExtendedRowSize(&tinfo), &maxNumOfRows);
int32_t code = tscAllocateMemIfNeed(dataBuf, getExtendedRowSize(dataBuf), &maxNumOfRows);
if (TSDB_CODE_SUCCESS != code) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@ -1533,7 +1206,7 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
pColInfo->numOfBound = 0;
memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * nCols);
for (int32_t i = 0; i < nCols; ++i) {
pColInfo->cols[i].hasVal = false;
pColInfo->cols[i].valStat = VAL_STAT_NONE;
}
int32_t code = TSDB_CODE_SUCCESS;
@ -1572,12 +1245,12 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
int32_t nScanned = 0, t = lastColIdx + 1;
while (t < nCols) {
if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
if (pColInfo->cols[t].hasVal == true) {
if (pColInfo->cols[t].valStat == VAL_STAT_HAS) {
code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z);
goto _clean;
}
pColInfo->cols[t].hasVal = true;
pColInfo->cols[t].valStat = VAL_STAT_HAS;
pColInfo->boundedColumns[pColInfo->numOfBound] = t;
++pColInfo->numOfBound;
findColumnIndex = true;
@ -1595,12 +1268,12 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
int32_t nRemain = nCols - nScanned;
while (t < nRemain) {
if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
if (pColInfo->cols[t].hasVal == true) {
if (pColInfo->cols[t].valStat == VAL_STAT_HAS) {
code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z);
goto _clean;
}
pColInfo->cols[t].hasVal = true;
pColInfo->cols[t].valStat = VAL_STAT_HAS;
pColInfo->boundedColumns[pColInfo->numOfBound] = t;
++pColInfo->numOfBound;
findColumnIndex = true;
@ -1835,7 +1508,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean;
}
if (dataBuf->boundColumnInfo.cols[0].hasVal == false) {
if (dataBuf->boundColumnInfo.cols[0].valStat == VAL_STAT_NONE) {
code = tscInvalidOperationMsg(pInsertParam->msg, "primary timestamp column can not be null", NULL);
goto _clean;
}
@ -1922,7 +1595,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
if (pSql->parseRetry < 1 && (ret == TSDB_CODE_TSC_SQL_SYNTAX_ERROR || ret == TSDB_CODE_TSC_INVALID_OPERATION)) {
tscDebug("0x%"PRIx64 " parse insert sql statement failed, code:%s, clear meta cache and retry ", pSql->self, tstrerror(ret));
tscResetSqlCmd(pCmd, true);
tscResetSqlCmd(pCmd, true, pSql->self);
pSql->parseRetry++;
if ((ret = tsInsertInitialCheck(pSql)) == TSDB_CODE_SUCCESS) {
@ -1939,7 +1612,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
if (ret == TSDB_CODE_TSC_INVALID_OPERATION && pSql->parseRetry < 1 && sqlInfo.type == TSDB_SQL_SELECT) {
tscDebug("0x%"PRIx64 " parse query sql statement failed, code:%s, clear meta cache and retry ", pSql->self, tstrerror(ret));
tscResetSqlCmd(pCmd, true);
tscResetSqlCmd(pCmd, true, pSql->self);
pSql->parseRetry++;
ret = tscValidateSqlInfo(pSql, &sqlInfo);
@ -2046,15 +1719,18 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
goto _error;
}
tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(&tinfo), &maxRows);
tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(pTableDataBlock), &maxRows);
tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW);
if (tokenBuf == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
initSMemRowHelper(&pTableDataBlock->rowHelper, tscGetTableSchema(pTableDataBlock->pTableMeta),
tscGetNumOfColumns(pTableDataBlock->pTableMeta), 0);
if (TSDB_CODE_SUCCESS !=
(ret = initMemRowBuilder(&pTableDataBlock->rowBuilder, 0, tinfo.numOfColumns, pTableDataBlock->numOfParams,
pTableDataBlock->boundColumnInfo.allNullLen))) {
goto _error;
}
while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {


@ -299,7 +299,7 @@ static int fillColumnsNull(STableDataBlocks* pBlock, int32_t rowNum) {
SSchema *schema = (SSchema*)pBlock->pTableMeta->schema;
for (int32_t i = 0; i < spd->numOfCols; ++i) {
if (!spd->cols[i].hasVal) { // current column does not have any value to insert, set it to null
if (spd->cols[i].valStat == VAL_STAT_NONE) { // current column does not have any value to insert, set it to null
for (int32_t n = 0; n < rowNum; ++n) {
char *ptr = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * n + offset;
@ -1694,7 +1694,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) {
SHashObj* hashList = pCmd->insertParam.pTableBlockHashList;
pCmd->insertParam.pTableBlockHashList = NULL;
tscResetSqlCmd(pCmd, false);
tscResetSqlCmd(pCmd, false, pSql->self);
pCmd->insertParam.pTableBlockHashList = hashList;
}


@ -18,11 +18,11 @@
#include "tsclient.h"
#include "tsocket.h"
#include "ttimer.h"
#include "tutil.h"
#include "taosmsg.h"
#include "tcq.h"
#include "taos.h"
#include "tscUtil.h"
void tscSaveSlowQueryFp(void *handle, void *tmrId);
TAOS *tscSlowQueryConn = NULL;
@ -227,16 +227,16 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
SHeartBeatMsg *pHeartbeat = pMsg;
int allocedQueriesNum = pHeartbeat->numOfQueries;
int allocedStreamsNum = pHeartbeat->numOfStreams;
pHeartbeat->numOfQueries = 0;
SQueryDesc *pQdesc = (SQueryDesc *)pHeartbeat->pData;
// The lock has been extracted up into the tscBuildHeartBeatMsg function.
int64_t now = taosGetTimestampMs();
SSqlObj *pSql = pObj->sqlList;
while (pSql) {
/*
* avoid sqlobj may not be correctly removed from sql list
@ -248,41 +248,55 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
}
tstrncpy(pQdesc->sql, pSql->sqlstr, sizeof(pQdesc->sql));
//pQdesc->useconds = htobe64(pSql->res.useconds);
pQdesc->stime = htobe64(pSql->stime);
pQdesc->queryId = htonl(pSql->queryId);
pQdesc->useconds = htobe64(now - pSql->stime);
pQdesc->qId = htobe64(pSql->res.qId);
pQdesc->sqlObjId = htobe64(pSql->self);
pQdesc->pid = pHeartbeat->pid;
pQdesc->numOfSub = pSql->subState.numOfSub;
// todo race condition
pQdesc->stableQuery = 0;
char *p = pQdesc->subSqlInfo;
int32_t remainLen = sizeof(pQdesc->subSqlInfo);
if (pQdesc->numOfSub == 0) {
snprintf(p, remainLen, "N/A");
} else {
int32_t len;
for (int32_t i = 0; i < pQdesc->numOfSub; ++i) {
len = snprintf(p, remainLen, "[%d]0x%" PRIx64 "(%c) ", i,
pSql->pSubs[i]->self,
pSql->subState.states[i] ? 'C' : 'I');
if (len > remainLen) {
break;
// SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
// if (pQueryInfo != NULL) {
// pQdesc->stableQuery = (pQueryInfo->stableQuery)?1:0;
// } else {
// pQdesc->stableQuery = 0;
// }
if (pSql->pSubs != NULL && pSql->subState.states != NULL) {
for (int32_t i = 0; i < pQdesc->numOfSub; ++i) {
SSqlObj *psub = pSql->pSubs[i];
int64_t self = (psub != NULL)? psub->self : 0;
int32_t len = snprintf(p, remainLen, "[%d]0x%" PRIx64 "(%c) ", i, self, pSql->subState.states[i] ? 'C' : 'I');
if (len > remainLen) {
break;
}
remainLen -= len;
p += len;
}
remainLen -= len;
p += len;
}
}
pQdesc->numOfSub = htonl(pQdesc->numOfSub);
taosGetFqdn(pQdesc->fqdn);
pHeartbeat->numOfQueries++;
pQdesc++;
pSql = pSql->next;
if (pHeartbeat->numOfQueries >= allocedQueriesNum) break;
if (pHeartbeat->numOfQueries >= allocedQueriesNum) {
break;
}
}
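The subSqlInfo loop above bounds its snprintf appends; the guard as a sketch (helper name illustrative). Note that len >= remain is the safe comparison, since snprintf returns the length it would have written, not the length actually written:

static void appendSubInfo(char *p, int32_t remain, int32_t numOfSub) {
  for (int32_t i = 0; i < numOfSub; ++i) {
    int32_t len = snprintf(p, remain, "[%d] ", i);
    if (len >= remain) {
      break;  // output would have been truncated: stop appending
    }
    remain -= len;
    p += len;
  }
}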
pHeartbeat->numOfStreams = 0;


@ -1944,20 +1944,6 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) {
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
}
bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
if (pQueryInfo == NULL) {
return false;
}
if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY
&& (pQueryInfo->type & TSDB_QUERY_TYPE_TABLE_QUERY) != TSDB_QUERY_TYPE_TABLE_QUERY) {
return false;
}
if (tscNumOfExprs(pQueryInfo) == 1){
return true;
}
return false;
}
static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) {
size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < numOfExprs; ++i) {
@ -2047,8 +2033,11 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
const char* msg1 = "too many items in selection clause";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "not support query expression";
const char* msg4 = "only support distinct one column or tag";
const char* msg4 = "not support distinct mixed with proj/agg func";
const char* msg5 = "invalid function name";
const char* msg6 = "not support distinct mixed with join";
const char* msg7 = "not support distinct mixed with groupby";
const char* msg8 = "not support distinct in nest query";
// queries with too many result columns do not support order by
if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) {
@ -2060,17 +2049,22 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
}
bool hasDistinct = false;
bool hasAgg = false;
size_t numOfExpr = taosArrayGetSize(pSelNodeList);
int32_t distIdx = -1;
for (int32_t i = 0; i < numOfExpr; ++i) {
int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo);
tSqlExprItem* pItem = taosArrayGet(pSelNodeList, i);
if (hasDistinct == false) {
hasDistinct = (pItem->distinct == true);
distIdx = hasDistinct ? i : -1;
}
int32_t type = pItem->pNode->type;
if (type == SQL_NODE_SQLFUNCTION) {
hasAgg = true;
if (hasDistinct) break;
pItem->pNode->functionId = isValidFunction(pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n);
SUdfInfo* pUdfInfo = NULL;
if (pItem->pNode->functionId < 0) {
@ -2106,10 +2100,22 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
}
}
//TODO(dengyihao), refactor as function
//handle distinct func mixed with other func
if (hasDistinct == true) {
if (!isValidDistinctSql(pQueryInfo) ) {
if (distIdx != 0 || hasAgg) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
if (joinQuery) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
if (pQueryInfo->groupbyExpr.numOfGroupCols != 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
if (pQueryInfo->pDownstream != NULL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
pQueryInfo->distinct = true;
}
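Illustrative statements for the distinct rules enforced above (hypothetical table and column names):

/* select distinct c1 from st;                   -- accepted: distinct leads, no agg
 * select distinct c1, count(*) from st;         -- rejected: mixed with agg (msg4)
 * select distinct c1 from st group by c2;       -- rejected: with groupby   (msg7)
 * select distinct c1 from (select c1 from st);  -- rejected: nest query     (msg8)
 */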
@ -3725,7 +3731,8 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
if (pRight->tokenId != TK_SET || !serializeExprListToVariant(pRight->Expr.paramList, &pVal, colType, timePrecision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
pColumnFilter->pz = (int64_t)calloc(1, pVal->nLen + 1);
pColumnFilter->pz = (int64_t)calloc(1, pVal->nLen);
pColumnFilter->len = pVal->nLen;
pColumnFilter->filterstr = 1;
memcpy((char *)(pColumnFilter->pz), (char *)(pVal->pz), pVal->nLen);
@ -5746,36 +5753,41 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) {
pQueryInfo->order.order = TSDB_ORDER_ASC;
if (isTopBottomQuery(pQueryInfo)) {
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
} else { // in case of select tbname from super_table, the default order column can not be the primary ts column
pQueryInfo->order.orderColId = INT32_MIN; // todo define a macro
}
/* for super table query, set default ascending order for group output */
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
}
if (pQueryInfo->distinct) {
pQueryInfo->order.order = TSDB_ORDER_ASC;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
}
int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) {
const char* msg0 = "only support order by primary timestamp";
const char* msg1 = "invalid column name";
const char* msg2 = "order by primary timestamp, first tag or groupby column in groupby clause allowed";
const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed";
const char* msg0 = "only one column allowed in orderby";
const char* msg1 = "invalid column name in orderby clause";
const char* msg2 = "too many order by columns";
const char* msg3 = "only primary timestamp/tbname/first tag in groupby clause allowed";
const char* msg4 = "only tag in groupby clause allowed in order clause";
const char* msg5 = "only primary timestamp/column in top/bottom function allowed as order column";
const char* msg6 = "only primary timestamp allowed as the second order column";
const char* msg7 = "only primary timestamp/column in groupby clause allowed as order column";
const char* msg8 = "only column in groupby clause allowed as order column";
setDefaultOrderInfo(pQueryInfo);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pQueryInfo->distinct == true) {
pQueryInfo->order.order = TSDB_ORDER_ASC;
pQueryInfo->order.orderColId = 0;
return TSDB_CODE_SUCCESS;
}
if (pSqlNode->pSortOrder == NULL) {
if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) {
return TSDB_CODE_SUCCESS;
}
SArray* pSortorder = pSqlNode->pSortOrder;
char* pMsgBuf = tscGetErrorMsgPayload(pCmd);
SArray* pSortOrder = pSqlNode->pSortOrder;
/*
* for a table query, only one order option (or none) is allowed, which is the
@ -5783,19 +5795,19 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
*
* for super table query, the order option must be less than 3.
*/
size_t size = taosArrayGetSize(pSortorder);
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
size_t size = taosArrayGetSize(pSortOrder);
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) {
if (size > 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
return invalidOperationMsg(pMsgBuf, msg0);
}
} else {
if (size > 2) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return invalidOperationMsg(pMsgBuf, msg2);
}
}
// handle the first part of order by
tVariant* pVar = taosArrayGet(pSortorder, 0);
tVariant* pVar = taosArrayGet(pSortOrder, 0);
// e.g., order by 1 asc, return directly without further check.
if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) {
@ -5807,7 +5819,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query
if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return invalidOperationMsg(pMsgBuf, msg1);
}
bool orderByTags = false;
@ -5819,7 +5831,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
// it is a tag column
if (pQueryInfo->groupbyExpr.columnInfo == NULL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return invalidOperationMsg(pMsgBuf, msg4);
}
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
if (relTagIndex == pColIndex->colIndex) {
@ -5840,13 +5852,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
orderByGroupbyCol = true;
}
}
if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomQuery(pQueryInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return invalidOperationMsg(pMsgBuf, msg3);
} else { // order by top/bottom result value column is not supported in case of interval query.
assert(!(orderByTags && orderByTS && orderByGroupbyCol));
}
size_t s = taosArrayGetSize(pSortorder);
size_t s = taosArrayGetSize(pSortOrder);
if (s == 1) {
if (orderByTags) {
pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
@ -5865,7 +5878,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
pExpr = tscExprGet(pQueryInfo, 1);
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return invalidOperationMsg(pMsgBuf, msg5);
}
tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
@ -5883,9 +5896,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
addPrimaryTsColIntoResult(pQueryInfo, pCmd);
}
}
}
if (s == 2) {
} else {
tVariantListItem *pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
if (orderByTags) {
pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
@ -5902,22 +5913,23 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
tVariant* pVar2 = &pItem->pVar;
SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz};
if (getColumnIndexByName(&cname, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return invalidOperationMsg(pMsgBuf, msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return invalidOperationMsg(pMsgBuf, msg6);
} else {
tVariantListItem* p1 = taosArrayGet(pSortorder, 1);
tVariantListItem* p1 = taosArrayGet(pSortOrder, 1);
pQueryInfo->order.order = p1->sortOrder;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
}
} else { // meter query
if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
} else if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { // check order by clause for normal table & temp table
if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(pMsgBuf, msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) {
bool validOrder = false;
SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
@ -5925,21 +5937,24 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
validOrder = (pColIndex->colIndex == index.columnIndex);
}
if (!validOrder) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return invalidOperationMsg(pMsgBuf, msg7);
}
tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId;
pQueryInfo->groupbyExpr.orderType = p1->sortOrder;
}
if (isTopBottomQuery(pQueryInfo)) {
bool validOrder = false;
SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
validOrder = (pColIndex->colIndex == index.columnIndex);
if (pColIndex->colIndex == index.columnIndex) {
return invalidOperationMsg(pMsgBuf, msg8);
}
} else {
/* order of top/bottom query in interval is not valid */
SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
@ -5947,13 +5962,8 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
pExpr = tscExprGet(pQueryInfo, 1);
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return invalidOperationMsg(pMsgBuf, msg5);
}
validOrder = true;
}
if (!validOrder) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
@ -5963,6 +5973,18 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return TSDB_CODE_SUCCESS;
}
tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->order.order = pItem->sortOrder;
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
} else {
// handle the temp table order by clause. You can order by any single column of a temp table created by an
// inner subquery.
assert(UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo) && taosArrayGetSize(pSqlNode->pSortOrder) == 1);
if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(pMsgBuf, msg1);
}
tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->order.order = pItem->sortOrder;
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
@ -8666,12 +8688,12 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS
pSub->pUdfInfo = pUdfInfo;
pSub->udfCopy = true;
pSub->pDownstream = pQueryInfo;
int32_t code = validateSqlNode(pSql, p, pSub);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
pSub->pDownstream = pQueryInfo;
// create dummy table meta info
STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo));
@ -8730,8 +8752,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg8 = "condition missing for join query";
const char* msg9 = "not support 3 level select";
int32_t code = TSDB_CODE_SUCCESS;
SSqlCmd* pCmd = &pSql->cmd;
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@ -8753,7 +8774,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
}
if (pSqlNode->from->type == SQL_NODE_FROM_SUBQUERY) {
clearAllTableMetaInfo(pQueryInfo, false);
clearAllTableMetaInfo(pQueryInfo, false, pSql->self);
pQueryInfo->numOfTables = 0;
// parse the subquery in the first place
@ -8781,6 +8802,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, false, timeWindowQuery, true) !=
TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
@ -9022,8 +9044,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo);
pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo);
pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo);
//pQueryInfo->globalMerge = tscIsTwoStageSTableQuery(pQueryInfo, 0);
pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);


@ -409,7 +409,7 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
if ((TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY |
TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) &&
!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) ||
(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY))) {
(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->distinct)) {
// do nothing in case of super table subquery
} else {
pSql->retry += 1;
@ -502,10 +502,22 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
}
rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? (int32_t)pRes->numOfRows : pRes->code;
if (pRes->code == TSDB_CODE_RPC_FQDN_ERROR) {
tscAllocPayload(pCmd, TSDB_FQDN_LEN + 64);
// handle three situations
// 1. epset retry: only return the last failed ep
// 2. no epset retry, like 'taos -h invalidFqdn': return invalidFqdn
// 3. other situations: not expected
if (pEpSet) {
char buf[TSDB_FQDN_LEN + 64] = {0};
tscAllocPayload(pCmd, sizeof(buf));
sprintf(tscGetErrorMsgPayload(pCmd), "%s\"%s\"", tstrerror(pRes->code),pEpSet->fqdn[(pEpSet->inUse)%(pEpSet->numOfEps)]);
} else if (pCmd->command >= TSDB_SQL_MGMT) {
SRpcEpSet tEpset;
SRpcCorEpSet *pCorEpSet = pSql->pTscObj->tscCorMgmtEpSet;
taosCorBeginRead(&pCorEpSet->version);
tEpset = pCorEpSet->epSet;
taosCorEndRead(&pCorEpSet->version);
sprintf(tscGetErrorMsgPayload(pCmd), "%s\"%s\"", tstrerror(pRes->code),tEpset.fqdn[(tEpset.inUse)%(tEpset.numOfEps)]);
} else {
sprintf(tscGetErrorMsgPayload(pCmd), "%s", tstrerror(pRes->code));
}
@ -525,6 +537,7 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
}
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
int64_t st = taosGetTimestampUs();
SSchedMsg schedMsg = {0};
schedMsg.fp = doProcessMsgFromServer;
@ -543,6 +556,11 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
schedMsg.msg = NULL;
taosScheduleTask(tscQhandle, &schedMsg);
int64_t et = taosGetTimestampUs();
if (et - st > 100) {
tscDebug("add message to task queue, elapsed time:%"PRId64, et - st);
}
}
int doBuildAndSendMsg(SSqlObj *pSql) {
@ -897,16 +915,16 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
SQueryAttr query = {{0}};
tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql);
query.vgId = pTableMeta->vgId;
SArray* tableScanOperator = createTableScanPlan(&query);
SArray* queryOperator = createExecOperatorPlan(&query);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload;
tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version));
@ -2269,6 +2287,10 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
pMsg = buf;
}
if (pParentCmd->pTableMetaMap == NULL) {
pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
}
for (int32_t i = 0; i < pMultiMeta->numOfTables; i++) {
STableMetaMsg *pMetaMsg = (STableMetaMsg *)pMsg;
int32_t code = tableMetaMsgConvert(pMetaMsg);
@ -2605,7 +2627,7 @@ int tscProcessDropDbRsp(SSqlObj *pSql) {
int tscProcessDropTableRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self);
tscRemoveCachedTableMeta(pTableMetaInfo, pSql->self);
tfree(pTableMetaInfo->pTableMeta);
return 0;
}
@ -2990,13 +3012,11 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
}
// remove stored tableMeta info in hash table
tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self);
tscResetSqlCmd(pCmd, true, pSql->self);
pCmd->pTableMetaMap = tscCleanupTableMetaMap(pCmd->pTableMetaMap);
pCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
SArray* pNameList = taosArrayInit(1, POINTER_BYTES);
SArray* vgroupList = taosArrayInit(1, POINTER_BYTES);
char* n = strdup(name);


@ -113,7 +113,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
pQueryInfo->command = TSDB_SQL_SELECT;
pSql->fp = tscProcessStreamQueryCallback;
pSql->fetchFp = tscProcessStreamQueryCallback;
executeQuery(pSql, pQueryInfo);
tscIncStreamExecutionCount(pStream);
@ -142,6 +142,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
if(pSql == NULL) {
return ;
}
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
tscDebug("0x%"PRIx64" add into timer", pSql->self);
@ -186,14 +187,16 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
}
// launch stream computing in a new thread
SSchedMsg schedMsg = {0};
schedMsg.fp = tscProcessStreamLaunchQuery;
schedMsg.ahandle = pStream;
schedMsg.thandle = (void *)1;
schedMsg.msg = NULL;
taosScheduleTask(tscQhandle, &schedMsg);
}
static void cbParseSql(void* param, TAOS_RES* res, int code);
static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) {
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
@ -201,24 +204,26 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
tscError("0x%"PRIx64" stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql->self,
pStream, numOfRows, retryDelay);
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0);
SSqlObj* pSql = pStream->pSql;
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
tscFreeSqlResult(pSql);
tscFreeSubobj(pSql);
tfree(pSql->pSubs);
pSql->subState.numOfSub = 0;
taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_SUCCESS) {
cbParseSql(pStream, pSql, code);
} else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
tscDebug("0x%"PRIx64" CQ taso_open_stream IN Process", pSql->self);
} else {
tscError("0x%"PRIx64" open stream failed, code:%s", pSql->self, tstrerror(code));
taosReleaseRef(tscObjRef, pSql->self);
free(pStream);
}
tfree(pTableMetaInfo->pTableMeta);
tscFreeSqlResult(pStream->pSql);
tscFreeSubobj(pStream->pSql);
tfree(pStream->pSql->pSubs);
pStream->pSql->subState.numOfSub = 0;
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
return;
// tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
// return;
}
taos_fetch_rows_a(tres, tscProcessStreamRetrieveResult, param);
@ -555,7 +560,6 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
if (code != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:%s", pSql->self, pSql->sqlstr, pCmd->payload, tstrerror(code));
pStream->fp(pStream->param, NULL, NULL);
return;
}
@ -582,9 +586,10 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
// set stime with ltime if ltime > stime
const char* dstTable = pStream->dstTable? pStream->dstTable: "";
tscDebug(" CQ table=%s ltime is %"PRId64, dstTable, pStream->ltime);
tscDebug("0x%"PRIx64" CQ table %s ltime is %"PRId64, pSql->self, dstTable, pStream->ltime);
if(pStream->ltime != INT64_MIN && pStream->ltime > pStream->stime) {
tscWarn(" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" if ltime>0 ", dstTable, pStream->stime, pStream->ltime);
tscWarn("0x%"PRIx64" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" if ltime > 0", pSql->self, dstTable, pStream->stime, pStream->ltime);
pStream->stime = pStream->ltime;
}
@ -592,7 +597,6 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
pCmd->command = TSDB_SQL_SELECT;
tscAddIntoStreamList(pStream);
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("0x%"PRIx64" stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql->self,
@ -659,10 +663,9 @@ void cbParseSql(void* param, TAOS_RES* res, int code) {
char sql[128] = "";
sprintf(sql, "select last_row(*) from %s;", pStream->dstTable);
taos_query_a(pSql->pTscObj, sql, fpStreamLastRow, param);
return ;
}
TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
int64_t stime, void *param, void (*callback)(void *), void* cqhandle) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) return NULL;
@ -697,14 +700,12 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
pStream->param = param;
pStream->pSql = pSql;
pStream->cqhandle = cqhandle;
pSql->pStream = pStream;
pSql->param = pStream;
pSql->maxRetry = TSDB_MAX_REPLICA;
tscSetStreamDestTable(pStream, dstTable);
pSql->pStream = pStream;
pSql->param = pStream;
pSql->maxRetry = TSDB_MAX_REPLICA;
pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
if (pSql->sqlstr == NULL) {
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
@ -725,14 +726,13 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
pSql->fp = cbParseSql;
pSql->fetchFp = cbParseSql;
registerSqlObj(pSql);
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_SUCCESS) {
cbParseSql(pStream, pSql, code);
} else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
tscDebug(" CQ taso_open_stream IN Process. sql=%s", sqlstr);
tscDebug("0x%"PRIx64" CQ taso_open_stream IN Process", pSql->self);
} else {
tscError("0x%"PRIx64" open stream failed, sql:%s, code:%s", pSql->self, sqlstr, tstrerror(code));
taosReleaseRef(tscObjRef, pSql->self);
@ -743,7 +743,7 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
return pStream;
}
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
int64_t stime, void *param, void (*callback)(void *)) {
return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback, NULL);
}


@ -2404,8 +2404,8 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1);
tscColumnCopy(x, pCol);
} else {
SColumn *p = tscColumnClone(pCol);
taosArrayPush(pNewQueryInfo->colList, &p);
SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex};
tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss);
}
}
}
@ -2727,16 +2727,10 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
int32_t code = pParentSql->res.code;
if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) {
// remove the cached tableMeta and vgroup id list, and then parse the sql again
SSqlCmd* pParentCmd = &pParentSql->cmd;
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0);
tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self);
tscResetSqlCmd( &pParentSql->cmd, true, pParentSql->self);
pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap);
pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
pParentSql->res.code = TSDB_CODE_SUCCESS;
pParentSql->retry++;
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
tstrerror(code), pParentSql->retry);
@ -3040,7 +3034,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
assert(code == taos_errno(pSql));
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && (code != TSDB_CODE_TDB_INVALID_TABLE_ID)) {
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && (code != TSDB_CODE_TDB_INVALID_TABLE_ID && code != TSDB_CODE_VND_INVALID_VGROUP_ID)) {
tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(code), trsupport->numOfRetry);
int32_t sent = 0;
@ -3151,7 +3145,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
numOfFailed += 1;
// clean up tableMeta in cache
tscFreeQueryInfo(&pSql->cmd, false);
tscFreeQueryInfo(&pSql->cmd, false, pSql->self);
SQueryInfo* pQueryInfo = tscGetQueryInfoS(&pSql->cmd);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, 0);
tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
@ -3173,7 +3167,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
}
pParentObj->res.code = TSDB_CODE_SUCCESS;
tscResetSqlCmd(&pParentObj->cmd, false);
tscResetSqlCmd(&pParentObj->cmd, false, pParentObj->self);
// in case of insert, re-parse the sql string and build a new submit data block for two reasons:
// 1. the table Id (tid & uid) may have been updated, so the submit block needs to be updated accordingly.


@ -821,17 +821,22 @@ static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SFilterInfo* p
// filter data if needed
if (pFilterInfo) {
//doSetFilterColumnInfo(pFilterInfo, numOfFilterCols, pBlock);
doSetFilterColInfo(pFilterInfo, pBlock);
filterSetColFieldData(pFilterInfo, pBlock->info.numOfCols, pBlock->pDataBlock);
bool gotNchar = false;
filterConverNcharColumns(pFilterInfo, pBlock->info.rows, &gotNchar);
int8_t* p = calloc(pBlock->info.rows, sizeof(int8_t));
int8_t* p = NULL;
//bool all = doFilterDataBlock(pFilterInfo, numOfFilterCols, pBlock->info.rows, p);
bool all = filterExecute(pFilterInfo, pBlock->info.rows, p);
bool all = filterExecute(pFilterInfo, pBlock->info.rows, &p, NULL, 0);
if (gotNchar) {
filterFreeNcharColumns(pFilterInfo);
}
if (!all) {
doCompactSDataBlock(pBlock, pBlock->info.rows, p);
if (p) {
doCompactSDataBlock(pBlock, pBlock->info.rows, p);
} else {
pBlock->info.rows = 0;
pBlock->pBlockStatis = NULL; // clean the block statistics info
}
}
tfree(p);
@ -1228,11 +1233,9 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
if (pCond && pCond->cond) {
createQueryFilter(pCond->cond, pCond->len, &pFilters);
}
//createInputDataFlterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo);
}
SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilters);
pOutput->precision = pSqlObjList[0]->res.precision;
SSchema* schema = NULL;
@ -1332,12 +1335,13 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) {
pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free
}
void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeCachedMeta, uint64_t id) {
if (pCmd == NULL) {
return;
}
SQueryInfo* pQueryInfo = pCmd->pQueryInfo;
while(pQueryInfo != NULL) {
SQueryInfo* p = pQueryInfo->sibling;
@ -1346,7 +1350,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
SQueryInfo* pUpQueryInfo = taosArrayGetP(pQueryInfo->pUpstream, i);
freeQueryInfoImpl(pUpQueryInfo);
clearAllTableMetaInfo(pUpQueryInfo, removeMeta);
clearAllTableMetaInfo(pUpQueryInfo, removeCachedMeta, id);
if (pUpQueryInfo->pQInfo != NULL) {
qDestroyQueryInfo(pUpQueryInfo->pQInfo);
pUpQueryInfo->pQInfo = NULL;
@ -1362,7 +1366,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
}
freeQueryInfoImpl(pQueryInfo);
clearAllTableMetaInfo(pQueryInfo, removeMeta);
clearAllTableMetaInfo(pQueryInfo, removeCachedMeta, id);
if (pQueryInfo->pQInfo != NULL) {
qDestroyQueryInfo(pQueryInfo->pQInfo);
@ -1391,7 +1395,7 @@ void destroyTableNameList(SInsertStatementParam* pInsertParam) {
tfree(pInsertParam->pTableNameList);
}
void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta, uint64_t id) {
pCmd->command = 0;
pCmd->numOfCols = 0;
pCmd->count = 0;
@ -1405,19 +1409,8 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
tfree(pCmd->insertParam.tagData.data);
pCmd->insertParam.tagData.dataLen = 0;
tscFreeQueryInfo(pCmd, clearCachedMeta);
if (pCmd->pTableMetaMap != NULL) {
STableMetaVgroupInfo* p = taosHashIterate(pCmd->pTableMetaMap, NULL);
while (p) {
taosArrayDestroy(p->vgroupIdList);
tfree(p->pTableMeta);
p = taosHashIterate(pCmd->pTableMetaMap, p);
}
taosHashCleanup(pCmd->pTableMetaMap);
pCmd->pTableMetaMap = NULL;
}
tscFreeQueryInfo(pCmd, clearCachedMeta, id);
pCmd->pTableMetaMap = tscCleanupTableMetaMap(pCmd->pTableMetaMap);
}
void* tscCleanupTableMetaMap(SHashObj* pTableMetaMap) {
@ -1513,8 +1506,6 @@ void tscFreeSqlObj(SSqlObj* pSql) {
tscFreeMetaSqlObj(&pSql->metaRid);
tscFreeMetaSqlObj(&pSql->svgroupRid);
tscFreeSubobj(pSql);
SSqlCmd* pCmd = &pSql->cmd;
int32_t cmd = pCmd->command;
if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_GLOBALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
@ -1522,6 +1513,8 @@ void tscFreeSqlObj(SSqlObj* pSql) {
tscRemoveFromSqlList(pSql);
}
tscFreeSubobj(pSql);
pSql->signature = NULL;
pSql->fp = NULL;
tfree(pSql->sqlstr);
@ -1532,9 +1525,8 @@ void tscFreeSqlObj(SSqlObj* pSql) {
pSql->self = 0;
tscFreeSqlResult(pSql);
tscResetSqlCmd(pCmd, false);
tscResetSqlCmd(pCmd, false, pSql->self);
memset(pCmd->payload, 0, (size_t)pCmd->allocSize);
tfree(pCmd->payload);
pCmd->allocSize = 0;
@ -1808,101 +1800,6 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
return TSDB_CODE_SUCCESS;
}
static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) {
SSchema* pSchema = pBuilder->pSchema;
char* p = (char*)pBuilder->buf;
int toffset = 0;
uint16_t nCols = pBuilder->nCols;
uint8_t memRowType = payloadType(p);
uint16_t nColsBound = payloadNCols(p);
if (pBuilder->nCols <= 0 || nColsBound <= 0) {
return NULL;
}
char* pVals = POINTER_SHIFT(p, payloadValuesOffset(p));
SMemRow* memRow = (SMemRow)pBuilder->pDataBlock;
memRowSetType(memRow, memRowType);
// ----------------- Raw payload structure for row:
/* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
* | |<----------------- flen ------------->|<--- value part --->|
* |SMemRowType| dataTLen | nCols | colId | colType | offset | ... | value |...|...|... |
* +-----------+----------+----------+--------------------------------------|--------------------|
* | uint8_t | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ... |.......|...|...|... |
* +-----------+----------+----------+--------------------------------------+--------------------|
* 1. offsets in the column data tuple are relative to the value part, to avoid uint16_t overflow.
* 2. dataTLen: total length including the header and body.
*/
if (memRowType == SMEM_ROW_DATA) {
SDataRow trow = (SDataRow)memRowDataBody(memRow);
dataRowSetLen(trow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBuilder->flen));
dataRowSetVersion(trow, pBuilder->sversion);
p = (char*)payloadBody(pBuilder->buf);
uint16_t i = 0, j = 0;
while (j < nCols) {
if (i >= nColsBound) {
break;
}
int16_t colId = payloadColId(p);
if (colId == pSchema[j].colId) {
// ASSERT(payloadColType(p) == pSchema[j].type);
tdAppendColVal(trow, POINTER_SHIFT(pVals, payloadColOffset(p)), pSchema[j].type, toffset);
toffset += TYPE_BYTES[pSchema[j].type];
p = payloadNextCol(p);
++i;
++j;
} else if (colId < pSchema[j].colId) {
p = payloadNextCol(p);
++i;
} else {
tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset);
toffset += TYPE_BYTES[pSchema[j].type];
++j;
}
}
while (j < nCols) {
tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset);
toffset += TYPE_BYTES[pSchema[j].type];
++j;
}
#if 0 // no need anymore
while (i < nColsBound) {
p = payloadNextCol(p);
++i;
}
#endif
} else if (memRowType == SMEM_ROW_KV) {
SKVRow kvRow = (SKVRow)memRowKvBody(memRow);
kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nColsBound));
kvRowSetNCols(kvRow, nColsBound);
memRowSetKvVersion(memRow, pBuilder->sversion);
p = (char*)payloadBody(pBuilder->buf);
int i = 0;
while (i < nColsBound) {
int16_t colId = payloadColId(p);
uint8_t colType = payloadColType(p);
tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals, payloadColOffset(p)), colId, colType, &toffset);
//toffset += sizeof(SColIdx);
p = payloadNextCol(p);
++i;
}
} else {
ASSERT(0);
}
int32_t rowTLen = memRowTLen(memRow);
pBuilder->pDataBlock = (char*)pBuilder->pDataBlock + rowTLen; // next row
pBuilder->pSubmitBlk->dataLen += rowTLen;
return memRow;
}
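The builder removed above relies on the payload columns and the table schema both being sorted by ascending colId: matching columns advance together, payload-only columns are skipped, and schema-only columns are filled with NULLs. A minimal, self-contained sketch of that two-pointer merge (PayloadCol, mergeByColId, and the printf output are illustrative stand-ins, not TDengine APIs):

#include <stdint.h>
#include <stdio.h>

typedef struct { int16_t colId; int64_t val; } PayloadCol;  /* hypothetical */

static void mergeByColId(const PayloadCol *pay, int nPay,
                         const int16_t *schemaIds, int nSchema) {
  int i = 0, j = 0;
  while (j < nSchema) {
    if (i < nPay && pay[i].colId == schemaIds[j]) {
      printf("col %d -> %lld\n", (int)schemaIds[j], (long long)pay[i].val);
      ++i; ++j;                         /* bound column: take the payload value */
    } else if (i < nPay && pay[i].colId < schemaIds[j]) {
      ++i;                              /* payload column not in the schema: skip */
    } else {
      printf("col %d -> NULL\n", (int)schemaIds[j]);  /* unbound column: fill NULL */
      ++j;
    }
  }
}

int main(void) {
  PayloadCol pay[] = {{1, 10}, {3, 30}};
  int16_t schema[] = {1, 2, 3, 4};
  mergeByColId(pay, 2, schema, 4);      /* cols 2 and 4 come out as NULL */
  return 0;
}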
// Erase the empty space reserved for binary data
static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SInsertStatementParam* insertParam,
SBlockKeyTuple* blkKeyTuple) {
@ -1934,10 +1831,11 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI
int32_t schemaSize = sizeof(STColumn) * numOfCols;
pBlock->schemaLen = schemaSize;
} else {
for (int32_t j = 0; j < tinfo.numOfColumns; ++j) {
flen += TYPE_BYTES[pSchema[j].type];
if (IS_RAW_PAYLOAD(insertParam->payloadType)) {
for (int32_t j = 0; j < tinfo.numOfColumns; ++j) {
flen += TYPE_BYTES[pSchema[j].type];
}
}
pBlock->schemaLen = 0;
}
@ -1964,18 +1862,19 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI
pBlock->dataLen += memRowTLen(memRow);
}
} else {
SMemRowBuilder rowBuilder;
rowBuilder.pSchema = pSchema;
rowBuilder.sversion = pTableMeta->sversion;
rowBuilder.flen = flen;
rowBuilder.nCols = tinfo.numOfColumns;
rowBuilder.pDataBlock = pDataBlock;
rowBuilder.pSubmitBlk = pBlock;
rowBuilder.buf = p;
for (int32_t i = 0; i < numOfRows; ++i) {
rowBuilder.buf = (blkKeyTuple + i)->payloadAddr;
tdGenMemRowFromBuilder(&rowBuilder);
char* payload = (blkKeyTuple + i)->payloadAddr;
if (isNeedConvertRow(payload)) {
convertSMemRow(pDataBlock, payload, pTableDataBlock);
TDRowTLenT rowTLen = memRowTLen(pDataBlock);
pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen);
pBlock->dataLen += rowTLen;
} else {
TDRowTLenT rowTLen = memRowTLen(payload);
memcpy(pDataBlock, payload, rowTLen);
pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen);
pBlock->dataLen += rowTLen;
}
}
}
@ -1988,9 +1887,9 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI
static int32_t getRowExpandSize(STableMeta* pTableMeta) {
int32_t result = TD_MEM_ROW_DATA_HEAD_SIZE;
int32_t columns = tscGetNumOfColumns(pTableMeta);
SSchema* pSchema = tscGetTableSchema(pTableMeta);
for (int32_t i = 0; i < columns; i++) {
if (IS_VAR_DATA_TYPE((pSchema + i)->type)) {
result += TYPE_BYTES[TSDB_DATA_TYPE_BINARY];
}
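getRowExpandSize budgets the worst-case growth when a row is rewritten into SDataRow form: assuming TD_MEM_ROW_DATA_HEAD_SIZE is 5 bytes (the 1-byte type flag plus the 4-byte data-row head) and TYPE_BYTES[TSDB_DATA_TYPE_BINARY] is the 2-byte VarDataOffsetT slot, a schema with three variable-length columns expands each row by at most 5 + 3 * 2 = 11 bytes.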
@ -2036,7 +1935,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
if (pBlocks->numOfRows > 0) {
// the maximum expanded size in byte when a row-wise data is converted to SDataRow format
int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
int32_t expandSize = isRawPayload ? getRowExpandSize(pOneTableBlock->pTableMeta) : 0;
STableDataBlocks* dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
@ -2049,7 +1948,8 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
return ret;
}
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize +
sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
if (dataBuf->nAllocSize < destSize) {
dataBuf->nAllocSize = (uint32_t)(destSize * 1.5);
@ -2093,7 +1993,9 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey);
}
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
int32_t len = pBlocks->numOfRows *
(isRawPayload ? (pOneTableBlock->rowSize + expandSize) : getExtendedRowSize(pOneTableBlock)) +
sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
pBlocks->tid = htonl(pBlocks->tid);
pBlocks->uid = htobe64(pBlocks->uid);
@ -3469,20 +3371,15 @@ SArray* tscVgroupTableInfoDup(SArray* pVgroupTables) {
return pa;
}
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta) {
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta, uint64_t id) {
for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
if (removeMeta) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tscRemoveCachedTableMeta(pTableMetaInfo, id);
}
tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables);
tscClearTableMetaInfo(pTableMetaInfo);
free(pTableMetaInfo);
}
tfree(pQueryInfo->pTableMetaInfo);
@ -3549,10 +3446,12 @@ void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo) {
}
tfree(pTableMetaInfo->pTableMeta);
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
tscColumnListDestroy(pTableMetaInfo->tagColList);
pTableMetaInfo->tagColList = NULL;
free(pTableMetaInfo);
}
void tscResetForNextRetrieve(SSqlRes* pRes) {
@ -3722,7 +3621,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->prjOffset = pQueryInfo->prjOffset;
pNewQueryInfo->numOfTables = 0;
pNewQueryInfo->pTableMetaInfo = NULL;
pNewQueryInfo->bufLen = pQueryInfo->bufLen;
pNewQueryInfo->distinct = pQueryInfo->distinct;
pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
if (pNewQueryInfo->buf == NULL) {
@ -3950,13 +3850,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
// todo refactor
tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self);
SSqlCmd* pParentCmd = &pParentSql->cmd;
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0);
tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self);
pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap);
pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
tscResetSqlCmd(&pParentSql->cmd, true, pParentSql->self);
pParentSql->res.code = TSDB_CODE_SUCCESS;
pParentSql->retry++;
@ -3975,7 +3869,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
return;
}
SQueryInfo *pQueryInfo = tscGetQueryInfo(pParentCmd);
SQueryInfo *pQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
executeQuery(pParentSql, pQueryInfo);
return;
}
@ -5099,7 +4993,7 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
return info;
}
void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) {
void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id) {
char fname[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, fname);

View File

@ -186,6 +186,7 @@ typedef void *SDataRow;
#define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t))
#define dataRowLen(r) (*(TDRowLenT *)(r)) // 0~65535
#define dataRowEnd(r) POINTER_SHIFT(r, dataRowLen(r))
#define dataRowVersion(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)))
#define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE)
#define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r)))
@ -201,14 +202,18 @@ void tdFreeDataRow(SDataRow row);
void tdInitDataRow(SDataRow row, STSchema *pSchema);
SDataRow tdDataRowDup(SDataRow row);
// the offset here does not include the dataRow header length
static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) {
static FORCE_INLINE int tdAppendDataColVal(SDataRow row, const void *value, bool isCopyVarData, int8_t type,
int32_t offset) {
ASSERT(value != NULL);
int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE;
if (IS_VAR_DATA_TYPE(type)) {
*(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row);
memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value));
if (isCopyVarData) {
memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value));
}
dataRowLen(row) += varDataTLen(value);
} else {
if (offset == 0) {
@ -223,6 +228,12 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t t
return 0;
}
// the offset here does not include the dataRow header length
static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) {
return tdAppendDataColVal(row, value, true, type, offset);
}
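The isCopyVarData flag added above enables a reserve-then-copy pattern: with false, the var-data offset is recorded and dataRowLen still advances, but the bytes themselves are left for a later pass. A standalone sketch of the same idea (Row and appendVar are hypothetical illustration types, not TDengine code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { char buf[64]; uint16_t len; } Row;  /* hypothetical */

/* Record the offset and grow the length; copy only if asked,
 * mirroring the isCopyVarData flag above. */
static uint16_t appendVar(Row *r, const void *val, uint16_t valLen, int copyNow) {
  uint16_t off = r->len;            /* where the value lives (or will live) */
  if (copyNow) memcpy(r->buf + off, val, valLen);
  r->len += valLen;                 /* bookkeeping advances either way */
  return off;
}

int main(void) {
  Row r = { .len = 0 };
  uint16_t off = appendVar(&r, NULL, 3, 0);  /* reserve the slot only */
  memcpy(r.buf + off, "abc", 3);             /* a second pass fills it in */
  printf("len=%u byte0=%c\n", (unsigned)r.len, r.buf[off]);
  return 0;
}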
// NOTE: the offset here includes the header size
static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t offset) {
if (IS_VAR_DATA_TYPE(type)) {
@ -472,9 +483,10 @@ static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) {
}
// the offset here does not include the kvRow header length
static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t *offset) {
static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, bool isCopyValData, int16_t colId, int8_t type,
int32_t offset) {
ASSERT(value != NULL);
int32_t toffset = *offset + TD_KV_ROW_HEAD_SIZE;
int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE;
SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset);
char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row));
@ -482,10 +494,12 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t
pColIdx->offset = kvRowLen(row); // offset of pColIdx including the TD_KV_ROW_HEAD_SIZE
if (IS_VAR_DATA_TYPE(type)) {
memcpy(ptr, value, varDataTLen(value));
if (isCopyValData) {
memcpy(ptr, value, varDataTLen(value));
}
kvRowLen(row) += varDataTLen(value);
} else {
if (*offset == 0) {
if (offset == 0) {
ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP);
TKEY tvalue = tdGetTKEY(*(TSKEY *)value);
memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]);
@ -494,7 +508,6 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t
}
kvRowLen(row) += TYPE_BYTES[type];
}
*offset += sizeof(SColIdx);
return 0;
}
@ -589,12 +602,24 @@ typedef void *SMemRow;
#define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE)
#define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE)
#define SMEM_ROW_DATA 0U // SDataRow
#define SMEM_ROW_KV 1U // SKVRow
#define SMEM_ROW_DATA 0x0U // SDataRow
#define SMEM_ROW_KV 0x01U // SKVRow
#define SMEM_ROW_CONVERT 0x80U // SMemRow convert flag
#define memRowType(r) (*(uint8_t *)(r))
#define KVRatioKV (0.2f) // all bool
#define KVRatioPredict (0.4f)
#define KVRatioData (0.75f) // all bigint
#define KVRatioConvert (0.9f)
#define memRowType(r) ((*(uint8_t *)(r)) & 0x01)
#define memRowSetType(r, t) ((*(uint8_t *)(r)) = (t)) // set the whole byte in case of dirty memory
#define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT)) // highest bit
#define isDataRowT(t) (SMEM_ROW_DATA == (((uint8_t)(t)) & 0x01))
#define isDataRow(r) (SMEM_ROW_DATA == memRowType(r))
#define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01))
#define isKvRow(r) (SMEM_ROW_KV == memRowType(r))
#define isNeedConvertRow(r) (((*(uint8_t *)(r)) & 0x80) == SMEM_ROW_CONVERT)
#define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag
#define memRowKvBody(r) \
@ -611,6 +636,14 @@ typedef void *SMemRow;
#define memRowLen(r) (isDataRow(r) ? memRowDataLen(r) : memRowKvLen(r))
#define memRowTLen(r) (isDataRow(r) ? memRowDataTLen(r) : memRowKvTLen(r)) // using uint32_t/int32_t to store the TLen
static FORCE_INLINE char *memRowEnd(SMemRow row) {
if (isDataRow(row)) {
return (char *)dataRowEnd(memRowDataBody(row));
} else {
return (char *)kvRowEnd(memRowKvBody(row));
}
}
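The reworked flag byte packs two facts into one uint8_t: bit 0 picks the encoding (SMEM_ROW_DATA vs SMEM_ROW_KV) and bit 7 (SMEM_ROW_CONVERT) marks a row whose conversion is still pending, which is why memRowType() now masks with 0x01. A small self-contained sketch of the same bit tests (the ROW_* constants mirror the SMEM_ROW_* values above):

#include <stdint.h>
#include <stdio.h>

enum { ROW_DATA = 0x0U, ROW_KV = 0x01U, ROW_CONVERT = 0x80U };

int main(void) {
  uint8_t flag = ROW_KV | ROW_CONVERT;  /* a KV row whose conversion is pending */
  printf("type=%u needConvert=%d\n", flag & 0x01U, (flag & 0x80U) == ROW_CONVERT);
  flag &= 0x7FU;                        /* clear the convert bit, keep the type */
  printf("type=%u needConvert=%d\n", flag & 0x01U, (flag & 0x80U) == ROW_CONVERT);
  return 0;
}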
#define memRowDataVersion(r) dataRowVersion(memRowDataBody(r))
#define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE))
#define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version
@ -628,7 +661,6 @@ typedef void *SMemRow;
} \
} while (0)
#define memRowSetType(r, t) (memRowType(r) = (t))
#define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l))
#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowSetKvVersion(r, v))
#define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r))
@ -661,12 +693,12 @@ static FORCE_INLINE void *tdGetMemRowDataOfColEx(void *row, int16_t colId, int8_
}
}
static FORCE_INLINE int tdAppendMemColVal(SMemRow row, const void *value, int16_t colId, int8_t type, int32_t offset,
int32_t *kvOffset) {
static FORCE_INLINE int tdAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId,
int8_t type, int32_t offset) {
if (isDataRow(row)) {
tdAppendColVal(memRowDataBody(row), value, type, offset);
tdAppendDataColVal(memRowDataBody(row), value, isCopyVarData, type, offset);
} else {
tdAppendKvColVal(memRowKvBody(row), value, colId, type, kvOffset);
tdAppendKvColVal(memRowKvBody(row), value, isCopyVarData, colId, type, offset);
}
return 0;
}
@ -688,6 +720,30 @@ static FORCE_INLINE int32_t tdGetColAppendLen(uint8_t rowType, const void *value
return len;
}
/**
* 1. calculate the delta against AllNullLen for SDataRow.
* 2. calculate the real length for SKVRow.
*/
static FORCE_INLINE void tdGetColAppendDeltaLen(const void *value, int8_t colType, int32_t *dataLen, int32_t *kvLen) {
switch (colType) {
case TSDB_DATA_TYPE_BINARY: {
int32_t varLen = varDataLen(value);
*dataLen += (varLen - CHAR_BYTES);
*kvLen += (varLen + sizeof(SColIdx));
break;
}
case TSDB_DATA_TYPE_NCHAR: {
int32_t varLen = varDataLen(value);
*dataLen += (varLen - TSDB_NCHAR_SIZE);
*kvLen += (varLen + sizeof(SColIdx));
break;
}
default: {
*kvLen += (TYPE_BYTES[colType] + sizeof(SColIdx));
break;
}
}
}
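For a BINARY value whose varDataLen is 10 bytes, the switch above grows *dataLen by 10 - CHAR_BYTES (the all-NULL baseline already budgets one char) and *kvLen by 10 + sizeof(SColIdx); assuming CHAR_BYTES is 1 and a 4-byte SColIdx, that is +9 and +14 respectively, letting the writer pick the cheaper encoding per row.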
typedef struct {
int16_t colId;
@ -703,7 +759,7 @@ static FORCE_INLINE void setSColInfo(SColInfo* colInfo, int16_t colId, uint8_t c
SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2);
#if 0
// ----------------- Raw payload structure for row:
/* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
* | |<----------------- flen ------------->|<--- value part --->|
@ -749,6 +805,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch
static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SHIFT(pCol, PAYLOAD_COL_HEAD_LEN); }
#endif
#ifdef __cplusplus
}
#endif

View File

@ -41,6 +41,7 @@ extern char tsArbitrator[];
extern int8_t tsArbOnline;
extern int64_t tsArbOnlineTimestamp;
extern int32_t tsDnodeId;
extern int64_t tsDnodeStartTime;
// common
extern int tsRpcTimer;
@ -59,6 +60,7 @@ extern char tsLocale[];
extern char tsCharset[]; // default encode string
extern int8_t tsEnableCoreFile;
extern int32_t tsCompressMsgSize;
extern int32_t tsMaxNumOfDistinctResults;
extern char tsTempDir[];
//query buffer management

View File

@ -851,7 +851,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch
int16_t k;
for (k = 0; k < nKvNCols; ++k) {
SColInfo *pColInfo = taosArrayGet(stashRow, k);
tdAppendKvColVal(kvRow, pColInfo->colVal, pColInfo->colId, pColInfo->colType, &toffset);
tdAppendKvColVal(kvRow, pColInfo->colVal, true, pColInfo->colId, pColInfo->colType, toffset);
toffset += sizeof(SColIdx);
}
ASSERT(kvLen == memRowTLen(tRow));
}

View File

@ -46,6 +46,7 @@ int8_t tsArbOnline = 0;
int64_t tsArbOnlineTimestamp = TSDB_ARB_DUMMY_TIME;
char tsEmail[TSDB_FQDN_LEN] = {0};
int32_t tsDnodeId = 0;
int64_t tsDnodeStartTime = 0;
// common
int32_t tsRpcTimer = 300;
@ -86,6 +87,9 @@ int32_t tsMaxNumOfOrderedResults = 100000;
// 10 ms for sliding time; the value will be adjusted when the time precision changes
int32_t tsMinSlidingTime = 10;
// the maximum number of distinct query results
int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
// 1 us for interval time range, changed accordingly
int32_t tsMinIntervalTime = 1;
@ -543,6 +547,17 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "maxNumOfDistinctRes";
cfg.ptr = &tsMaxNumOfDistinctResults;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 10*10000;
cfg.maxValue = 10000*10000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
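Since the option is registered with TSDB_CFG_CTYPE_B_CONFIG, it should be settable from the configuration file like the neighbouring entries; a hedged taos.cfg example (the value must stay inside the 100000..100000000 range registered above):

maxNumOfDistinctRes  2000000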
cfg.option = "numOfMnodes";
cfg.ptr = &tsNumOfMnodes;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@ -991,7 +1006,7 @@ static void doInitGlobalConfig(void) {
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = TSDB_MAX_ALLOWED_SQL_LEN;
cfg.maxValue = TSDB_MAX_FIELD_LEN;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_BYTE;
taosInitConfigOption(cfg);

View File

@ -70,12 +70,11 @@ SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFil
memcpy(pFilter, src, sizeof(SColumnFilterInfo) * numOfFilters);
for (int32_t j = 0; j < numOfFilters; ++j) {
if (pFilter[j].filterstr) {
size_t len = (size_t) pFilter[j].len + 1 * TSDB_NCHAR_SIZE;
pFilter[j].pz = (int64_t) calloc(1, len);
memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t)len);
memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t) pFilter[j].len);
}
}

@ -1 +1 @@
Subproject commit a44ec1ca493ad01b2bf825b6418f69e11f548206
Subproject commit 4a4d79099b076b8ff12d5b4fdbcba54049a6866d

View File

@ -0,0 +1,570 @@
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBDriver;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.After;
import org.junit.Test;
import java.sql.*;
import java.util.Properties;
import java.text.Format;
import java.text.SimpleDateFormat;
public class TimestampPrecisonInNanoRestTest {
private static final String host = "127.0.0.1";
private static final String ns_timestamp_db = "ns_precision_test";
private static final long timestamp1 = System.currentTimeMillis();
private static final long timestamp2 = timestamp1 * 1000_000 + 123455;
private static final long timestamp3 = (timestamp1 + 10) * 1000_000 + 123456;
private static final Format format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
private static final String date1 = format.format(new Date(timestamp1));
private static final String date4 = format.format(new Date(timestamp1 + 10L));
private static final String date2 = date1 + "123455";
private static final String date3 = date4 + "123456";
private static Connection conn;
@BeforeClass
public static void beforeClass() throws SQLException {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
conn = DriverManager.getConnection(url, properties);
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists " + ns_timestamp_db);
stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'");
stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)");
stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)");
stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)");
stmt.close();
}
@After
public void afterEach() throws SQLException {
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists " + ns_timestamp_db);
stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'");
stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)");
stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)");
stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)");
stmt.close();
}
@AfterClass
public static void afterClass() {
try {
if (conn != null)
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
private void checkCount(long count, ResultSet rs) throws SQLException {
if (count == 0) {
Assert.fail();
}
rs.next();
long test_count = rs.getLong(1);
Assert.assertEquals(count, test_count);
}
private void checkTime(long ts, ResultSet rs) throws SQLException {
rs.next();
int nanos = rs.getTimestamp(1).getNanos();
Assert.assertEquals(ts % 1000_000_000l, nanos);
long test_ts = rs.getLong(1);
Assert.assertEquals(ts / 1000_000l, test_ts);
}
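checkTime pins down the contract the tests below rely on: over the REST connection the driver reports the millisecond part of a nanosecond timestamp through getLong() and the sub-second remainder through Timestamp.getNanos(). The same split, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* timestamp2-style value: a millisecond epoch scaled to ns plus 123455 ns */
  int64_t ns = 1623254400123LL * 1000000 + 123455;
  printf("getLong  -> %lld\n", (long long)(ns / 1000000));     /* 1623254400123 */
  printf("getNanos -> %lld\n", (long long)(ns % 1000000000));  /* 123123455 */
  return 0;
}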
@Test
public void canInsertTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'");
checkTime(timestamp3, rs);
rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'");
checkTime(timestamp3, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canImportTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() {
try (Statement stmt = conn.createStatement()) {
stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date1 + "123123\", \"" + date1 + "123123\", 127)");
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'");
checkTime(timestamp1 * 1000_000l + 123123l, rs);
rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'");
checkTime(timestamp1 * 1000_000l + 123123l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canInsertTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'");
checkTime(timestamp2, rs);
rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'");
checkTime(timestamp2, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canImportTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() {
try (Statement stmt = conn.createStatement()) {
long timestamp4 = timestamp1 * 1000_000 + 123123;
stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp4 + ", " + timestamp4 + ", 127)");
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'");
checkTime(timestamp4, rs);
rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'");
checkTime(timestamp4, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canSelectLastRowFromWeatherForFirstCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select last(ts) from " + ns_timestamp_db + ".weather");
checkTime(timestamp3, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canSelectLastRowFromWeatherForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select last(ts2) from " + ns_timestamp_db + ".weather");
checkTime(timestamp3, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canSelectFirstRowFromWeatherForFirstCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select first(ts) from " + ns_timestamp_db + ".weather");
checkTime(timestamp2, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canSelectFirstRowFromWeatherForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select first(ts2) from " + ns_timestamp_db + ".weather");
checkTime(timestamp2, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLargerThanInDateTypeForFirstCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'");
checkTime(timestamp3, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLargerThanInDateTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'");
checkTime(timestamp3, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLargerThanInNumberTypeForFirstCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'");
checkTime(timestamp3, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLargerThanInNumberTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'");
checkTime(timestamp3, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLargerThanOrEqualToInDateTypeForFirstCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "'");
checkCount(2l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLargerThanOrEqualToInDateTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "'");
checkCount(2l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLargerThanOrEqualToInNumberTypeForFirstCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + timestamp2 + "'");
checkCount(2l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLargerThanOrEqualToInNumberTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + timestamp2 + "'");
checkCount(2l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLessThanInDateTypeForFirstCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'");
checkTime(timestamp2, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLessThanInDateTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'");
checkTime(timestamp2, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLessThanInNumberTypeForFirstCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'");
checkTime(timestamp2, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLessThanInNumberTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'");
checkTime(timestamp2, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLessThanOrEqualToInDateTypeForFirstCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "'");
checkCount(2l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLessThanOrEqualToInDateTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "'");
checkCount(2l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLessThanOrEqualToInNumberTypeForFirstCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "'");
checkCount(2l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryLessThanOrEqualToInNumberTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "'");
checkCount(2l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryBetweenAndInDateTypeForFirstCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'");
checkTime(timestamp3, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryBetweenAndInDateTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'");
checkTime(timestamp3, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryBetweenAndInNumberTypeForFirstCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'");
checkTime(timestamp3, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryBetweenAndInNumberTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'");
checkTime(timestamp3, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryNotEqualToInDateTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'");
checkTime(timestamp2, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryNotEqualToInNumberTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'");
checkTime(timestamp2, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryNotEqualInDateTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'");
checkTime(timestamp2, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canQueryNotEqualInNumberTypeForSecondCol() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'");
checkCount(1l, rs);
rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'");
checkTime(timestamp2, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol(){
try (Statement stmt = conn.createStatement()) {
stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(now + 1000b, now - 1000b, 128)");
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather");
checkCount(3l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canIntervalAndSlidingAcceptNsUnitForFirstCol(){
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)");
rs.next();
long sum = rs.getLong(2);
Assert.assertEquals(127l, sum);
rs.next();
sum = rs.getLong(2);
Assert.assertEquals(128l, sum);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void canIntervalAndSlidingAcceptNsUnitForSecondCol(){
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)");
rs.next();
long sum = rs.getLong(2);
Assert.assertEquals(127l, sum);
rs.next();
sum = rs.getLong(2);
Assert.assertEquals(128l, sum);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void testDataOutOfRangeExceptionForFirstCol() {
try (Statement stmt = conn.createStatement()) {
stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(123456789012345678, 1234567890123456789, 127)");
} catch (SQLException e) {
Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage());
}
}
@Test
public void testDataOutOfRangeExceptionForSecondCol() {
try (Statement stmt = conn.createStatement()) {
stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(1234567890123456789, 123456789012345678, 127)");
} catch (SQLException e) {
Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage());
}
}
@Test
public void willAutomaticallyFillToNsUnitWithZerosForFirstCol() {
try (Statement stmt = conn.createStatement()) {
stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)");
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "000000'");
checkCount(1l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void willAutomaticallyFillToNsUnitWithZerosForSecondCol() {
try (Statement stmt = conn.createStatement()) {
stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)");
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "000000'");
checkCount(1l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void willAutomaticallyDropDigitExceedNsDigitNumberForFirstCol() {
try (Statement stmt = conn.createStatement()) {
stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)");
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "999999'");
checkCount(1l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void willAutomaticallyDropDigitExceedNsDigitNumberForSecondCol() {
try (Statement stmt = conn.createStatement()) {
stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)");
ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "999999'");
checkCount(1l, rs);
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -109,6 +109,24 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0)
return res;
}
function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
while (currOffset < data.length) {
let len = data.readIntLE(currOffset, 2);
let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column;
if (dataEntry[0] == 255) {
res.push(null)
} else {
res.push(dataEntry.toString("utf-8"));
}
currOffset += nbytes;
}
return res;
}
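convertBinary above and convertNchar below detect SQL NULL through sentinel bytes in the fetched buffer: 0xFF in the first payload byte for BINARY, and 0xFF 0xFF in the first two bytes for NCHAR. A self-contained sketch of the same checks:

#include <stdint.h>
#include <stdio.h>

/* mirrors the sentinel tests in convertBinary / convertNchar */
static int binaryIsNull(const uint8_t *entry) { return entry[0] == 0xFF; }
static int ncharIsNull(const uint8_t *entry)  { return entry[0] == 0xFF && entry[1] == 0xFF; }

int main(void) {
  uint8_t binNull[]   = { 0xFF };
  uint8_t ncharNull[] = { 0xFF, 0xFF };
  printf("%d %d\n", binaryIsNull(binNull), ncharIsNull(ncharNull));  /* 1 1 */
  return 0;
}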
function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
@ -117,7 +135,11 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0)
while (currOffset < data.length) {
let len = data.readIntLE(currOffset, 2);
let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column;
res.push(dataEntry.toString("utf-8"));
if (dataEntry[0] == 255 && dataEntry[1] == 255) {
res.push(null)
} else {
res.push(dataEntry.toString("utf-8"));
}
currOffset += nbytes;
}
return res;
@ -132,7 +154,7 @@ let convertFunctions = {
[FieldTypes.C_BIGINT]: convertBigint,
[FieldTypes.C_FLOAT]: convertFloat,
[FieldTypes.C_DOUBLE]: convertDouble,
[FieldTypes.C_BINARY]: convertNchar,
[FieldTypes.C_BINARY]: convertBinary,
[FieldTypes.C_TIMESTAMP]: convertTimestamp,
[FieldTypes.C_NCHAR]: convertNchar
}

View File

@ -47,7 +47,8 @@ class TaosTimestamp extends Date {
super(Math.floor(date / 1000));
this.precisionExtras = date % 1000;
} else if (precision === 2) {
super(parseInt(date / 1000000));
// use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000, which is not the expected value
super(parseInt(BigInt(date) / 1000000n));
// use BigInt to fix: 1625801548423914405 % 1000000 = 914496, which is not the expected value (914405)
this.precisionExtras = parseInt(BigInt(date) % 1000000n);
} else {

View File

@ -1,6 +1,6 @@
{
"name": "td2.0-connector",
"version": "2.0.9",
"version": "2.0.10",
"description": "A Node.js connector for TDengine.",
"main": "tdengine.js",
"directories": {

View File

@ -0,0 +1,33 @@
const taos = require('../tdengine');
var conn = taos.connect({ host: "localhost" });
var c1 = conn.cursor();
function checkData(data, row, col, expect) {
let checkdata = data[row][col];
if (checkdata == expect) {
// console.log('check pass')
}
else {
console.log('check failed, expect ' + expect + ', but is ' + checkdata)
}
}
c1.execute('drop database if exists testnodejsnchar')
c1.execute('create database testnodejsnchar')
c1.execute('use testnodejsnchar');
c1.execute('create table tb (ts timestamp, value float, text binary(200))')
c1.execute("insert into tb values('2021-06-10 00:00:00', 24.7, '中文10000000000000000000000');") -
c1.execute('insert into tb values(1623254400150, 24.7, NULL);')
c1.execute('import into tb values(1623254400300, 24.7, "中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000");')
sql = 'select * from tb;'
console.log('*******************************************')
c1.execute(sql);
data = c1.fetchall();
console.log(data)
//check data about insert data
checkData(data, 0, 2, '中文10000000000000000000000')
checkData(data, 1, 2, null)
checkData(data, 2, 2, '中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000')

View File

@ -166,7 +166,6 @@ int32_t dnodeInitSystem() {
taosInitGlobalCfg();
taosReadGlobalLogCfg();
taosSetCoreDump();
taosInitNotes();
dnodeInitTmr();
if (dnodeCreateDir(tsLogDir) < 0) {
@ -188,6 +187,8 @@ int32_t dnodeInitSystem() {
dInfo("start to initialize TDengine");
taosInitNotes();
if (dnodeInitComponents() != 0) {
return -1;
}
@ -195,6 +196,7 @@ int32_t dnodeInitSystem() {
dnodeSetRunStatus(TSDB_RUN_STATUS_RUNING);
moduleStart();
tsDnodeStartTime = taosGetTimestampMs();
dnodeReportStep("TDengine", "initialized successfully", 1);
dInfo("TDengine is initialized successfully");

View File

@ -42,6 +42,8 @@ int32_t main(int32_t argc, char *argv[]) {
}
} else if (strcmp(argv[i], "-C") == 0) {
dump_config = 1;
} else if (strcmp(argv[i], "--force-keep-file") == 0) {
tsdbForceKeepFile = true;
} else if (strcmp(argv[i], "--compact-mnode-wal") == 0) {
tsCompactMnodeWal = 1;
} else if (strcmp(argv[i], "-V") == 0) {

View File

@ -471,6 +471,7 @@ typedef struct {
bool stableQuery; // super table query or not
bool topBotQuery; // TODO: use a bitwise flag
bool interpQuery; // interp query or not
bool groupbyColumn; // denotes a group-by on a normal column
bool hasTagResults; // if there are tag values in final result or not
bool timeWindowInterpo;// if the time window start/end required interpolation
@ -878,7 +879,7 @@ typedef struct {
uint64_t sqlObjId;
int32_t pid;
char fqdn[TSDB_FQDN_LEN];
bool stableQuery;
uint8_t stableQuery;
int32_t numOfSub;
char subSqlInfo[TSDB_SHOW_SUBQUERY_LEN]; // includes subqueries' indexes, Obj IDs and states (C-complete/I-incomplete)
} SQueryDesc;

View File

@ -41,9 +41,16 @@ typedef struct {
int64_t avail;
} SFSMeta;
typedef struct {
int64_t size;
int64_t used;
int64_t free;
int16_t nAvailDisks; // # of available disks
} STierMeta;
int tfsInit(SDiskCfg *pDiskCfg, int ndisk);
void tfsDestroy();
void tfsUpdateInfo(SFSMeta *pFSMeta);
void tfsUpdateInfo(SFSMeta *pFSMeta, STierMeta *tierMetas, int8_t numLevels);
void tfsGetMeta(SFSMeta *pMeta);
void tfsAllocDisk(int expLevel, int *level, int *id);
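A hedged usage sketch for the widened tfsUpdateInfo() signature (the tier count of 3 and the zero-initialisation are assumptions for illustration; only the declarations above are taken from the header):

#include <string.h>
#include "tfs.h"  /* SFSMeta and STierMeta as declared above */

void refreshFsStats(void) {
  SFSMeta   fsMeta;
  STierMeta tiers[3];
  memset(&fsMeta, 0, sizeof(fsMeta));
  memset(tiers, 0, sizeof(tiers));
  tfsUpdateInfo(&fsMeta, tiers, 3);  /* overall totals plus per-tier size/used/free */
}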

View File

@ -32,7 +32,7 @@ typedef enum {
typedef struct {
int8_t msgType;
int8_t sver;
int8_t sver; // sver 2 for WAL SDataRow/SMemRow compatibility
int8_t reserved[2];
int32_t len;
uint64_t version;

View File

@ -64,6 +64,10 @@ void printHelp() {
exit(EXIT_SUCCESS);
}
char DARWINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
char g_password[MAX_PASSWORD_SIZE];
void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
wordexp_t full_path;
for (int i = 1; i < argc; i++) {
@ -77,10 +81,21 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
}
}
// for password
else if (strcmp(argv[i], "-p") == 0) {
arguments->is_use_passwd = true;
else if (strncmp(argv[i], "-p", 2) == 0) {
strcpy(tsOsName, "Darwin");
printf(DARWINCLIENT_VERSION, tsOsName, taos_get_client_info());
if (strlen(argv[i]) == 2) {
printf("Enter password: ");
if (scanf("%s", g_password) > 1) {
fprintf(stderr, "password read error\n");
}
getchar();
} else {
tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE);
}
arguments->password = g_password;
}
// for management port
else if (strcmp(argv[i], "-P") == 0) {
if (i < argc - 1) {
arguments->port = atoi(argv[++i]);

View File

@ -65,7 +65,15 @@ extern TAOS *taos_connect_auth(const char *ip, const char *user, const char *aut
*/
TAOS *shellInit(SShellArguments *_args) {
printf("\n");
printf(CLIENT_VERSION, tsOsName, taos_get_client_info());
if (!_args->is_use_passwd) {
#ifdef TD_WINDOWS
strcpy(tsOsName, "Windows");
#elif defined(TD_DARWIN)
strcpy(tsOsName, "Darwin");
#endif
printf(CLIENT_VERSION, tsOsName, taos_get_client_info());
}
fflush(stdout);
// set options before initializing
@ -73,9 +81,7 @@ TAOS *shellInit(SShellArguments *_args) {
taos_options(TSDB_OPTION_TIMEZONE, _args->timezone);
}
if (_args->is_use_passwd) {
if (_args->password == NULL) _args->password = getpass("Enter password: ");
} else {
if (!_args->is_use_passwd) {
_args->password = TSDB_DEFAULT_PASS;
}

View File

@ -34,7 +34,7 @@ static char doc[] = "";
static char args_doc[] = "";
static struct argp_option options[] = {
{"host", 'h', "HOST", 0, "TDengine server FQDN to connect. The default host is localhost."},
{"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."},
{"password", 'p', 0, 0, "The password to use when connecting to the server."},
{"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."},
{"user", 'u', "USER", 0, "The user name to use when connecting to the server."},
{"auth", 'A', "Auth", 0, "The auth string to use when connecting to the server."},
@ -65,8 +65,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
arguments->host = arg;
break;
case 'p':
arguments->is_use_passwd = true;
if (arg) arguments->password = arg;
break;
case 'P':
if (arg) {
@ -173,12 +171,41 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
/* Our argp parser. */
static struct argp argp = {options, parse_opt, args_doc, doc};
char LINUXCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
char g_password[MAX_PASSWORD_SIZE];
static void parse_password(
int argc, char *argv[], SShellArguments *arguments) {
for (int i = 1; i < argc; i++) {
if (strncmp(argv[i], "-p", 2) == 0) {
strcpy(tsOsName, "Linux");
printf(LINUXCLIENT_VERSION, tsOsName, taos_get_client_info());
if (strlen(argv[i]) == 2) {
printf("Enter password: ");
if (scanf("%20s", g_password) > 1) {
fprintf(stderr, "password reading error\n");
}
getchar();
} else {
tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE);
}
arguments->password = g_password;
arguments->is_use_passwd = true;
}
}
}
void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
static char verType[32] = {0};
sprintf(verType, "version: %s\n", version);
argp_program_version = verType;
if (argc > 1) {
parse_password(argc, argv, arguments);
}
argp_parse(&argp, argc, argv, 0, 0, arguments);
if (arguments->abort) {
#ifndef _ALPINE

View File

@ -71,7 +71,9 @@ int checkVersion() {
// Global configurations
SShellArguments args = {
.host = NULL,
#ifndef TD_WINDOWS
.password = NULL,
#endif
.user = NULL,
.database = NULL,
.timezone = NULL,

View File

@ -19,6 +19,9 @@
extern char configDir[];
char WINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
void printVersion() {
printf("version: %s\n", version);
}
@ -65,6 +68,8 @@ void printHelp() {
exit(EXIT_SUCCESS);
}
char g_password[MAX_PASSWORD_SIZE];
void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
for (int i = 1; i < argc; i++) {
// for host
@ -77,11 +82,20 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
}
}
// for password
else if (strcmp(argv[i], "-p") == 0) {
arguments->is_use_passwd = true;
if (i < argc - 1 && argv[i + 1][0] != '-') {
arguments->password = argv[++i];
}
else if (strncmp(argv[i], "-p", 2) == 0) {
arguments->is_use_passwd = true;
strcpy(tsOsName, "Windows");
printf(WINCLIENT_VERSION, tsOsName, taos_get_client_info());
if (strlen(argv[i]) == 2) {
printf("Enter password: ");
if (scanf("%s", g_password) > 1) {
fprintf(stderr, "password read error!\n");
}
getchar();
} else {
tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE);
}
arguments->password = g_password;
}
// for management port
else if (strcmp(argv[i], "-P") == 0) {

File diff suppressed because it is too large

View File

@ -206,9 +206,9 @@ static struct argp_option options[] = {
{"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0},
{"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0},
#ifdef _TD_POWER_
{"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0},
{"password", 'p', 0, 0, "User password to connect to server. Default is powerdb.", 0},
#else
{"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0},
{"password", 'p', 0, 0, "User password to connect to server. Default is taosdata.", 0},
#endif
{"port", 'P', "PORT", 0, "Port to connect", 0},
{"cversion", 'v', "CVERION", 0, "client version", 0},
@ -248,12 +248,14 @@ static struct argp_option options[] = {
{0}
};
#define MAX_PASSWORD_SIZE 20
/* Used by main to communicate with parse_opt. */
typedef struct arguments {
// connection option
char *host;
char *user;
char *password;
char password[MAX_PASSWORD_SIZE];
uint16_t port;
char cversion[12];
uint16_t mysqlFlag;
@ -376,7 +378,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
g_args.user = arg;
break;
case 'p':
g_args.password = arg;
break;
case 'P':
g_args.port = atoi(arg);
@ -554,6 +555,23 @@ static void parse_precision_first(
}
}
static void parse_password(
int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
if (strncmp(argv[i], "-p", 2) == 0) {
if (strlen(argv[i]) == 2) {
printf("Enter password: ");
if(scanf("%20s", arguments->password) > 1) {
errorPrint("%s() LN%d, password read error!\n", __func__, __LINE__);
}
} else {
tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE);
}
argv[i] = "";
}
}
}
static void parse_timestamp(
int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
@ -616,9 +634,10 @@ int main(int argc, char *argv[]) {
int ret = 0;
/* Parse our arguments; every option seen by parse_opt will be
reflected in arguments. */
if (argc > 2) {
if (argc > 1) {
parse_precision_first(argc, argv, &g_args);
parse_timestamp(argc, argv, &g_args);
parse_password(argc, argv, &g_args);
}
argp_parse(&argp, argc, argv, 0, 0, &g_args);

View File

@ -253,6 +253,10 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) {
int32_t connId = htonl(pHBMsg->connId);
SConnObj *pConn = mnodeAccquireConn(connId, connInfo.user, connInfo.clientIp, connInfo.clientPort);
if (pConn == NULL) {
pHBMsg->pid = htonl(pHBMsg->pid);
pConn = mnodeCreateConn(connInfo.user, connInfo.clientIp, connInfo.clientPort, pHBMsg->pid, pHBMsg->appName);
}
if (pConn == NULL) {
// do not close existing links, otherwise

View File

@ -65,7 +65,14 @@ int32_t mnodeProcessWrite(SMnodeMsg *pMsg) {
return TSDB_CODE_MND_MSG_NOT_PROCESSED;
}
int32_t code = mnodeInitMsg(pMsg);
int32_t code = grantCheck(TSDB_GRANT_TIME);
if (code != TSDB_CODE_SUCCESS) {
mError("msg:%p, app:%p type:%s not processed, reason:%s", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType],
tstrerror(code));
return code;
}
code = mnodeInitMsg(pMsg);
if (code != TSDB_CODE_SUCCESS) {
mError("msg:%p, app:%p type:%s not processed, reason:%s", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType],
tstrerror(code));

View File

@ -28,8 +28,11 @@ typedef struct {
int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize);
int32_t taosGetCpuCores();
void taosGetSystemInfo();
bool taosReadProcIO(int64_t* rchars, int64_t* wchars);
bool taosGetProcIO(float *readKB, float *writeKB);
bool taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes);
bool taosGetBandSpeed(float *bandSpeedKb);
void taosGetDisk();
bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) ;

View File

@ -164,6 +164,10 @@ void taosKillSystem() {
exit(0);
}
int32_t taosGetCpuCores() {
return sysconf(_SC_NPROCESSORS_ONLN);
}
void taosGetSystemInfo() {
// taosGetProcInfos();
@ -185,12 +189,25 @@ void taosGetSystemInfo() {
taosGetSystemLocale();
}
bool taosReadProcIO(int64_t *rchars, int64_t *wchars) {
if (rchars) *rchars = 0;
if (wchars) *wchars = 0;
return true;
}
bool taosGetProcIO(float *readKB, float *writeKB) {
*readKB = 0;
*writeKB = 0;
return true;
}
bool taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes) {
if (bytes) *bytes = 0;
if (rbytes) *rbytes = 0;
if (tbytes) *tbytes = 0;
return true;
}
bool taosGetBandSpeed(float *bandSpeedKb) {
*bandSpeedKb = 0;
return true;

View File

@ -277,7 +277,7 @@ static void taosGetSystemLocale() { // get and set default locale
}
}
static int32_t taosGetCpuCores() { return (int32_t)sysconf(_SC_NPROCESSORS_ONLN); }
int32_t taosGetCpuCores() { return (int32_t)sysconf(_SC_NPROCESSORS_ONLN); }
bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) {
static uint64_t lastSysUsed = 0;
@ -332,7 +332,7 @@ int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) {
}
}
static bool taosGetCardInfo(int64_t *bytes) {
bool taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes) {
*bytes = 0;
FILE *fp = fopen(tsSysNetFile, "r");
if (fp == NULL) {
@ -347,9 +347,9 @@ static bool taosGetCardInfo(int64_t *bytes) {
while (!feof(fp)) {
memset(line, 0, len);
int64_t rbytes = 0;
int64_t o_rbytes = 0;
int64_t rpackts = 0;
int64_t tbytes = 0;
int64_t o_tbytes = 0;
int64_t tpackets = 0;
int64_t nouse1 = 0;
int64_t nouse2 = 0;
@ -374,8 +374,10 @@ static bool taosGetCardInfo(int64_t *bytes) {
sscanf(line,
"%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64
" %" PRId64,
nouse0, &rbytes, &rpackts, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &tbytes, &tpackets);
*bytes += (rbytes + tbytes);
nouse0, &o_rbytes, &rpackts, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &o_tbytes, &tpackets);
if (rbytes) *rbytes = o_rbytes;
if (tbytes) *tbytes = o_tbytes;
*bytes += (o_rbytes + o_tbytes);
}
tfree(line);
@ -390,7 +392,7 @@ bool taosGetBandSpeed(float *bandSpeedKb) {
int64_t curBytes = 0;
time_t curTime = time(NULL);
if (!taosGetCardInfo(&curBytes)) {
if (!taosGetCardInfo(&curBytes, NULL, NULL)) {
return false;
}
@ -420,7 +422,7 @@ bool taosGetBandSpeed(float *bandSpeedKb) {
return true;
}
static bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) {
bool taosReadProcIO(int64_t *rchars, int64_t *wchars) {
FILE *fp = fopen(tsProcIOFile, "r");
if (fp == NULL) {
uError("open file:%s failed", tsProcIOFile);
@ -441,10 +443,10 @@ static bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) {
break;
}
if (strstr(line, "rchar:") != NULL) {
sscanf(line, "%s %" PRId64, tmp, readbyte);
sscanf(line, "%s %" PRId64, tmp, rchars);
readIndex++;
} else if (strstr(line, "wchar:") != NULL) {
sscanf(line, "%s %" PRId64, tmp, writebyte);
sscanf(line, "%s %" PRId64, tmp, wchars);
readIndex++;
} else {
}

View File

@ -115,7 +115,7 @@ static void taosGetSystemLocale() {
}
}
static int32_t taosGetCpuCores() {
int32_t taosGetCpuCores() {
SYSTEM_INFO info;
GetSystemInfo(&info);
return (int32_t)info.dwNumberOfProcessors;
@ -146,6 +146,13 @@ int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) {
}
}
bool taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes) {
if (bytes) *bytes = 0;
if (rbytes) *rbytes = 0;
if (tbytes) *tbytes = 0;
return true;
}
bool taosGetBandSpeed(float *bandSpeedKb) {
*bandSpeedKb = 0;
return true;

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_HTTPMETRICSHANDLE_H
#define TDENGINE_HTTPMETRICSHANDLE_H
#include "http.h"
#include "httpInt.h"
#include "httpUtil.h"
#include "httpResp.h"
void metricsInitHandle(HttpServer* httpServer);
bool metricsProcessRequest(struct HttpContext* httpContext);
#endif // TDENGINE_HTTPMETRICSHANDLE_H

View File

@ -0,0 +1,184 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "taoserror.h"
#include "tfs.h"
#include "httpMetricsHandle.h"
#include "dnode.h"
#include "httpLog.h"
static HttpDecodeMethod metricsDecodeMethod = {"metrics", metricsProcessRequest};
void metricsInitHandle(HttpServer* pServer) {
httpAddMethod(pServer, &metricsDecodeMethod);
}
bool metricsProcessRequest(HttpContext* pContext) {
httpDebug("context:%p, fd:%d, user:%s, process admin grant msg", pContext, pContext->fd, pContext->user);
JsonBuf* jsonBuf = httpMallocJsonBuf(pContext);
if (jsonBuf == NULL) {
httpError("failed to allocate memory for metrics");
httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY);
return false;
}
httpInitJsonBuf(jsonBuf, pContext);
httpWriteJsonBufHead(jsonBuf);
httpJsonToken(jsonBuf, JsonObjStt);
{
char* keyDisks = "tags";
httpJsonPairHead(jsonBuf, keyDisks, (int32_t)strlen(keyDisks));
httpJsonToken(jsonBuf, JsonArrStt);
{
httpJsonItemToken(jsonBuf);
httpJsonToken(jsonBuf, JsonObjStt);
char* keyTagName = "name";
char* keyTagValue = "value";
httpJsonPairOriginString(jsonBuf, keyTagName, (int32_t)strlen(keyTagName), "\"dnode_id\"",
(int32_t)strlen("\"dnode_id\""));
int32_t dnodeId = dnodeGetDnodeId();
httpJsonPairIntVal(jsonBuf, keyTagValue, (int32_t)strlen(keyTagValue), dnodeId);
httpJsonToken(jsonBuf, JsonObjEnd);
}
httpJsonToken(jsonBuf, JsonArrEnd);
}
{
if (tsDnodeStartTime != 0) {
int64_t now = taosGetTimestampMs();
int64_t upTime = now-tsDnodeStartTime;
char* keyUpTime = "up_time";
httpJsonPairInt64Val(jsonBuf, keyUpTime, (int32_t)strlen(keyUpTime), upTime);
}
}
{
int32_t cpuCores = taosGetCpuCores();
char* keyCpuCores = "cpu_cores";
httpJsonPairIntVal(jsonBuf, keyCpuCores, (int32_t)strlen(keyCpuCores), cpuCores);
float sysCpuUsage = 0;
float procCpuUsage = 0;
bool succeeded = taosGetCpuUsage(&sysCpuUsage, &procCpuUsage);
if (!succeeded) {
httpError("failed to get cpu usage");
} else {
if (sysCpuUsage <= procCpuUsage) {
sysCpuUsage = procCpuUsage + 0.1f;
}
char* keyCpuSystem = "cpu_system";
char* keyCpuEngine = "cpu_engine";
httpJsonPairFloatVal(jsonBuf, keyCpuSystem, (int32_t)strlen(keyCpuSystem), sysCpuUsage);
httpJsonPairFloatVal(jsonBuf, keyCpuEngine, (int32_t)strlen(keyCpuEngine), procCpuUsage);
}
}
{
float sysMemoryUsedMB = 0;
bool succeeded = taosGetSysMemory(&sysMemoryUsedMB);
if (!succeeded) {
httpError("failed to get sys memory info");
} else {
char* keyMemSystem = "mem_system";
httpJsonPairFloatVal(jsonBuf, keyMemSystem, (int32_t)strlen(keyMemSystem), sysMemoryUsedMB);
}
float procMemoryUsedMB = 0;
succeeded = taosGetProcMemory(&procMemoryUsedMB);
if (!succeeded) {
httpError("failed to get proc memory info");
} else {
char* keyMemEngine = "mem_engine";
httpJsonPairFloatVal(jsonBuf, keyMemEngine, (int32_t)strlen(keyMemEngine), procMemoryUsedMB);
}
}
{
int64_t bytes = 0, rbytes = 0, tbytes = 0;
bool succeeded = taosGetCardInfo(&bytes, &rbytes, &tbytes);
if (!succeeded) {
httpError("failed to get network info");
} else {
char* keyNetIn = "net_in";
char* keyNetOut = "net_out";
httpJsonPairInt64Val(jsonBuf, keyNetIn, (int32_t)strlen(keyNetIn), rbytes);
httpJsonPairInt64Val(jsonBuf, keyNetOut, (int32_t)strlen(keyNetOut), tbytes);
}
}
{
int64_t rchars = 0;
int64_t wchars = 0;
bool succeeded = taosReadProcIO(&rchars, &wchars);
if (!succeeded) {
httpError("failed to get io info");
} else {
char* keyIORead = "io_read";
char* keyIOWrite = "io_write";
httpJsonPairInt64Val(jsonBuf, keyIORead, (int32_t)strlen(keyIORead), rchars);
httpJsonPairInt64Val(jsonBuf, keyIOWrite, (int32_t)strlen(keyIOWrite), wchars);
}
}
{
const int8_t numTiers = 3;
SFSMeta fsMeta;
STierMeta* tierMetas = calloc(numTiers, sizeof(STierMeta));
tfsUpdateInfo(&fsMeta, tierMetas, numTiers);
{
char* keyDiskUsed = "disk_used";
char* keyDiskTotal = "disk_total";
httpJsonPairInt64Val(jsonBuf, keyDiskTotal, (int32_t)strlen(keyDiskTotal), fsMeta.tsize);
httpJsonPairInt64Val(jsonBuf, keyDiskUsed, (int32_t)strlen(keyDiskUsed), fsMeta.used);
char* keyDisks = "disks";
httpJsonPairHead(jsonBuf, keyDisks, (int32_t)strlen(keyDisks));
httpJsonToken(jsonBuf, JsonArrStt);
for (int i = 0; i < numTiers; ++i) {
httpJsonItemToken(jsonBuf);
httpJsonToken(jsonBuf, JsonObjStt);
char* keyDataDirLevelUsed = "datadir_used";
char* keyDataDirLevelTotal = "datadir_total";
httpJsonPairInt64Val(jsonBuf, keyDataDirLevelUsed, (int32_t)strlen(keyDataDirLevelUsed), tierMetas[i].used);
httpJsonPairInt64Val(jsonBuf, keyDataDirLevelTotal, (int32_t)strlen(keyDataDirLevelTotal), tierMetas[i].size);
httpJsonToken(jsonBuf, JsonObjEnd);
}
httpJsonToken(jsonBuf, JsonArrEnd);
}
free(tierMetas);
}
{
SStatisInfo info = dnodeGetStatisInfo();
{
char* keyReqHttp = "req_http";
char* keyReqSelect = "req_select";
char* keyReqInsert = "req_insert";
httpJsonPairInt64Val(jsonBuf, keyReqHttp, (int32_t)strlen(keyReqHttp), info.httpReqNum);
httpJsonPairInt64Val(jsonBuf, keyReqSelect, (int32_t)strlen(keyReqSelect), info.queryReqNum);
httpJsonPairInt64Val(jsonBuf, keyReqInsert, (int32_t)strlen(keyReqInsert), info.submitReqNum);
}
}
httpJsonToken(jsonBuf, JsonObjEnd);
httpWriteJsonBufEnd(jsonBuf);
pContext->reqType = HTTP_REQTYPE_OTHERS;
httpFreeJsonBuf(pContext);
return false;
}
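Each metric above follows one pattern: choose a key string, then emit it with the matching httpJsonPair*Val helper inside the enclosing JSON object. A hedged sketch of adding one more scalar metric the same way (the key name and value source are hypothetical, not part of this patch):
{
  char*   keyVnodes = "vnodes";   // hypothetical key, for illustration only
  int32_t numOfVnodes = 0;        // would come from a dnode accessor
  httpJsonPairIntVal(jsonBuf, keyVnodes, (int32_t)strlen(keyVnodes), numOfVnodes);
}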

View File

@ -101,13 +101,17 @@ char *httpGetStatusDesc(int32_t statusCode) {
}
static void httpCleanupString(HttpString *str) {
free(str->str);
str->str = NULL;
str->pos = 0;
str->size = 0;
if (str->str) {
free(str->str);
str->str = NULL;
str->pos = 0;
str->size = 0;
}
}
static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) {
char *new_str = NULL;
if (str->size == 0) {
str->pos = 0;
str->size = len + 1;
@ -115,7 +119,16 @@ static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) {
} else if (str->pos + len + 1 >= str->size) {
str->size += len;
str->size *= 4;
str->str = realloc(str->str, str->size);
new_str = realloc(str->str, str->size);
if (new_str == NULL && str->str) {
// if realloc fails, the original block is left unchanged
// and must be freed explicitly; see man 3 realloc
free(str->str);
}
str->str = new_str;
} else {
}
@ -317,7 +330,7 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const
static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) {
HttpContext *pContext = parser->pContext;
HttpString * buf = &parser->body;
HttpString *buf = &parser->body;
if (parser->parseCode != TSDB_CODE_SUCCESS) return -1;
if (buf->size <= 0) {
@ -326,6 +339,7 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) {
}
int32_t newSize = buf->pos + len + 1;
char *newStr = NULL;
if (newSize >= buf->size) {
if (buf->size >= HTTP_BUFFER_SIZE) {
httpError("context:%p, fd:%d, failed parse body, exceeding buffer size %d", pContext, pContext->fd, buf->size);
@ -336,7 +350,12 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) {
newSize = MAX(newSize, HTTP_BUFFER_INIT);
newSize *= 4;
newSize = MIN(newSize, HTTP_BUFFER_SIZE);
buf->str = realloc(buf->str, newSize);
newStr = realloc(buf->str, newSize);
if (newStr == NULL && buf->str) {
free(buf->str);
}
buf->str = newStr;
buf->size = newSize;
if (buf->str == NULL) {
@ -374,13 +393,20 @@ static HTTP_PARSER_STATE httpTopStack(HttpParser *parser) {
static int32_t httpPushStack(HttpParser *parser, HTTP_PARSER_STATE state) {
HttpStack *stack = &parser->stacks;
int8_t *newStacks = NULL;
if (stack->size == 0) {
stack->pos = 0;
stack->size = 32;
stack->stacks = malloc(stack->size * sizeof(int8_t));
} else if (stack->pos + 1 > stack->size) {
stack->size *= 2;
stack->stacks = realloc(stack->stacks, stack->size * sizeof(int8_t));
newStacks = realloc(stack->stacks, stack->size * sizeof(int8_t));
if (newStacks == NULL && stack->stacks) {
free(stack->stacks);
}
stack->stacks = newStacks;
} else {
}
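Both realloc call sites above apply the same fix: capture the result in a temporary and free the old buffer when growth fails, because a failed realloc leaves the original allocation live and it would otherwise leak. A minimal standalone sketch of the pattern (the helper name is hypothetical):
static void *reallocOrFree(void *old, size_t newSize) {
  void *p = realloc(old, newSize);
  if (p == NULL && old != NULL) {
    free(old);  // realloc failed; release the untouched original block
  }
  return p;     // NULL signals out-of-memory to the caller
}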

View File

@ -30,6 +30,7 @@
#include "httpGcHandle.h"
#include "httpRestHandle.h"
#include "httpTgHandle.h"
#include "httpMetricsHandle.h"
#ifndef _ADMIN
void adminInitHandle(HttpServer* pServer) {}
@ -52,7 +53,7 @@ int32_t httpInitSystem() {
gcInitHandle(&tsHttpServer);
tgInitHandle(&tsHttpServer);
opInitHandle(&tsHttpServer);
metricsInitHandle(&tsHttpServer);
return 0;
}

View File

@ -188,13 +188,17 @@ bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferS
bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) {
HttpSqlCmds *multiCmds = pContext->multiCmds;
if (cmdSize > HTTP_MAX_CMD_SIZE) {
if (cmdSize <= 0 || cmdSize > HTTP_MAX_CMD_SIZE) {
httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user,
cmdSize, HTTP_MAX_CMD_SIZE);
return false;
}
multiCmds->cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd));
HttpSqlCmd *new_cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd));
if (new_cmds == NULL && multiCmds->cmds) {
free(multiCmds->cmds);
}
multiCmds->cmds = new_cmds;
if (multiCmds->cmds == NULL) {
httpError("context:%p, fd:%d, user:%s, malloc cmds:%d error", pContext, pContext->fd, pContext->user, cmdSize);
return false;
@ -208,13 +212,17 @@ bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) {
bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int32_t bufferSize) {
HttpSqlCmds *multiCmds = pContext->multiCmds;
if (bufferSize > HTTP_MAX_BUFFER_SIZE) {
if (bufferSize <= 0 || bufferSize > HTTP_MAX_BUFFER_SIZE) {
httpError("context:%p, fd:%d, user:%s, mulitcmd buffer size:%d large then %d", pContext, pContext->fd,
pContext->user, bufferSize, HTTP_MAX_BUFFER_SIZE);
return false;
}
multiCmds->buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize);
char *new_buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize);
if (new_buffer == NULL && multiCmds->buffer) {
free(multiCmds->buffer);
}
multiCmds->buffer = new_buffer;
if (multiCmds->buffer == NULL) {
httpError("context:%p, fd:%d, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->user, bufferSize);
return false;

View File

@ -333,6 +333,9 @@ enum OPERATOR_TYPE_E {
OP_Distinct = 20,
OP_Join = 21,
OP_StateWindow = 22,
OP_AllTimeWindow = 23,
OP_AllMultiTableTimeInterval = 24,
OP_Order = 25,
};
typedef struct SOperatorInfo {
@ -420,7 +423,6 @@ typedef struct STableScanInfo {
int32_t *rowCellInfoOffset;
SExprInfo *pExpr;
SSDataBlock block;
bool loadExternalRows; // load external rows (prev & next rows)
int32_t numOfOutput;
int64_t elapsedTime;
@ -511,7 +513,13 @@ typedef struct SStateWindowOperatorInfo {
int32_t start;
char* prevData; // previous data
bool reptScan;
} SStateWindowOperatorInfo ;
} SStateWindowOperatorInfo;
typedef struct SDistinctDataInfo {
int32_t index;
int32_t type;
int32_t bytes;
} SDistinctDataInfo;
typedef struct SDistinctOperatorInfo {
SHashObj *pSet;
@ -519,7 +527,9 @@ typedef struct SDistinctOperatorInfo {
bool recordNullVal; // has already recorded the null value, no need to try again
int64_t threshold;
int64_t outputCapacity;
int32_t colIndex;
int32_t totalBytes;
char* buf;
SArray* pDistinctDataInfo;
} SDistinctOperatorInfo;
struct SGlobalMerger;
@ -544,6 +554,13 @@ typedef struct SMultiwayMergeInfo {
SArray *udfInfo;
} SMultiwayMergeInfo;
// todo support the disk-based sort
typedef struct SOrderOperatorInfo {
int32_t colIndex;
int32_t order;
SSDataBlock *pDataBlock;
} SOrderOperatorInfo;
void appendUpstream(SOperatorInfo* p, SOperatorInfo* pUpstream);
SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime, int32_t reverseTime);
@ -554,11 +571,13 @@ SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera
SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream);
SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createAllTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createAllMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
@ -571,6 +590,7 @@ SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator
int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter);
SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUpstream, SSchema* pSchema, int32_t numOfOutput);
SOperatorInfo* createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal);
SSDataBlock* doGlobalAggregate(void* param, bool* newgroup);
SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup);
@ -578,7 +598,6 @@ SSDataBlock* doSLimit(void* param, bool* newgroup);
int32_t doCreateFilterInfo(SColumnInfo* pCols, int32_t numOfCols, int32_t numOfFilterCols, SSingleColumnFilterInfo** pFilterInfo, uint64_t qId);
void doSetFilterColumnInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, SSDataBlock* pBlock);
void doSetFilterColInfo(SFilterInfo *pFilters, SSDataBlock* pBlock);
bool doFilterDataBlock(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, int32_t numOfRows, int8_t* p);
void doCompactSDataBlock(SSDataBlock* pBlock, int32_t numOfRows, int8_t* p);

View File

@ -227,6 +227,8 @@ typedef int (*__col_compar_fn_t)(tOrderDescriptor *, int32_t numOfRows, int32_t
void tColDataQSort(tOrderDescriptor *, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType);
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn);
int32_t compare_sa(tOrderDescriptor *, int32_t numOfRows, int32_t idx1, int32_t idx2, char *data);
int32_t compare_sd(tOrderDescriptor *, int32_t numOfRows, int32_t idx1, int32_t idx2, char *data);

View File

@ -31,10 +31,11 @@ extern "C" {
#define FILTER_DEFAULT_GROUP_UNIT_SIZE 2
#define FILTER_DUMMY_EMPTY_OPTR 127
#define FILTER_DUMMY_RANGE_OPTR 126
#define MAX_NUM_STR_SIZE 40
#define FILTER_RM_UNIT_MIN_ROWS 100
enum {
FLD_TYPE_COLUMN = 1,
FLD_TYPE_VALUE = 2,
@ -70,6 +71,12 @@ enum {
FI_STATUS_CLONED = 8,
};
enum {
FI_STATUS_BLK_ALL = 1,
FI_STATUS_BLK_EMPTY = 2,
FI_STATUS_BLK_ACTIVE = 4,
};
enum {
RANGE_TYPE_UNIT = 1,
RANGE_TYPE_VAR_HASH = 2,
@ -98,7 +105,7 @@ typedef struct SFilterColRange {
typedef bool (*rangeCompFunc) (const void *, const void *, const void *, const void *, __compar_fn_t);
typedef int32_t(*filter_desc_compare_func)(const void *, const void *);
typedef bool(*filter_exec_func)(void *, int32_t, int8_t*);
typedef bool(*filter_exec_func)(void *, int32_t, int8_t**, SDataStatis *, int16_t);
typedef struct SFilterRangeCompare {
int64_t s;
@ -197,6 +204,7 @@ typedef struct SFilterComUnit {
void *colData;
void *valData;
void *valData2;
uint16_t colId;
uint16_t dataSize;
uint8_t dataType;
uint8_t optr;
@ -225,6 +233,10 @@ typedef struct SFilterInfo {
uint8_t *unitFlags; // got result
SFilterRangeCtx **colRange;
filter_exec_func func;
uint8_t blkFlag;
uint16_t blkGroupNum;
uint16_t *blkUnits;
int8_t *blkUnitRes;
SFilterPCtx pctx;
} SFilterInfo;
@ -265,12 +277,13 @@ typedef struct SFilterInfo {
#define CHK_RET(c, r) do { if (c) { return r; } } while (0)
#define CHK_JMP(c) do { if (c) { goto _return; } } while (0)
#define CHK_LRETV(c,...) do { if (c) { qError(__VA_ARGS__); return; } } while (0)
#define CHK_LRET(c, r,...) do { if (c) { qError(__VA_ARGS__); return r; } } while (0)
#define CHK_LRET(c, r,...) do { if (c) { if (r) {qError(__VA_ARGS__); } else { qDebug(__VA_ARGS__); } return r; } } while (0)
#define FILTER_GET_FIELD(i, id) (&((i)->fields[(id).type].fields[(id).idx]))
#define FILTER_GET_COL_FIELD(i, idx) (&((i)->fields[FLD_TYPE_COLUMN].fields[idx]))
#define FILTER_GET_COL_FIELD_TYPE(fi) (((SSchema *)((fi)->desc))->type)
#define FILTER_GET_COL_FIELD_SIZE(fi) (((SSchema *)((fi)->desc))->bytes)
#define FILTER_GET_COL_FIELD_ID(fi) (((SSchema *)((fi)->desc))->colId)
#define FILTER_GET_COL_FIELD_DESC(fi) ((SSchema *)((fi)->desc))
#define FILTER_GET_COL_FIELD_DATA(fi, ri) ((char *)(fi)->data + ((SSchema *)((fi)->desc))->bytes * (ri))
#define FILTER_GET_VAL_FIELD_TYPE(fi) (((tVariant *)((fi)->desc))->nType)
@ -280,10 +293,12 @@ typedef struct SFilterInfo {
#define FILTER_GROUP_UNIT(i, g, uid) ((i)->units + (g)->unitIdxs[uid])
#define FILTER_UNIT_LEFT_FIELD(i, u) FILTER_GET_FIELD(i, (u)->left)
#define FILTER_UNIT_RIGHT_FIELD(i, u) FILTER_GET_FIELD(i, (u)->right)
#define FILTER_UNIT_RIGHT2_FIELD(i, u) FILTER_GET_FIELD(i, (u)->right2)
#define FILTER_UNIT_DATA_TYPE(u) ((u)->compare.type)
#define FILTER_UNIT_COL_DESC(i, u) FILTER_GET_COL_FIELD_DESC(FILTER_UNIT_LEFT_FIELD(i, u))
#define FILTER_UNIT_COL_DATA(i, u, ri) FILTER_GET_COL_FIELD_DATA(FILTER_UNIT_LEFT_FIELD(i, u), ri)
#define FILTER_UNIT_COL_SIZE(i, u) FILTER_GET_COL_FIELD_SIZE(FILTER_UNIT_LEFT_FIELD(i, u))
#define FILTER_UNIT_COL_ID(i, u) FILTER_GET_COL_FIELD_ID(FILTER_UNIT_LEFT_FIELD(i, u))
#define FILTER_UNIT_VAL_DATA(i, u) FILTER_GET_VAL_FIELD_DATA(FILTER_UNIT_RIGHT_FIELD(i, u))
#define FILTER_UNIT_COL_IDX(u) ((u)->left.idx)
#define FILTER_UNIT_OPTR(u) ((u)->compare.optr)
@ -309,8 +324,8 @@ typedef struct SFilterInfo {
extern int32_t filterInitFromTree(tExprNode* tree, SFilterInfo **pinfo, uint32_t options);
extern bool filterExecute(SFilterInfo *info, int32_t numOfRows, int8_t* p);
extern int32_t filterSetColFieldData(SFilterInfo *info, int16_t colId, void *data);
extern bool filterExecute(SFilterInfo *info, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols);
extern int32_t filterSetColFieldData(SFilterInfo *info, int32_t numOfCols, SArray* pDataBlock);
extern int32_t filterGetTimeRange(SFilterInfo *info, STimeWindow *win);
extern int32_t filterConverNcharColumns(SFilterInfo* pFilterInfo, int32_t rows, bool *gotNchar);
extern int32_t filterFreeNcharColumns(SFilterInfo* pFilterInfo);

View File

@ -39,7 +39,6 @@
#define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId)
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? (_q)->pExpr1[1].base.param[0].i64:1)
int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr);
@ -60,6 +59,7 @@ SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t index, int32_t
void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr);
void* freeColumnInfo(SColumnInfo* pColumnInfo, int32_t numOfCols);
int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, bool stable);
static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int32_t slot) {
assert(pResultRowInfo != NULL && slot >= 0 && slot < pResultRowInfo->size);
@ -70,7 +70,7 @@ static FORCE_INLINE char* getPosInResultPage(SQueryAttr* pQueryAttr, tFilePage*
int32_t offset) {
assert(rowOffset >= 0 && pQueryAttr != NULL);
int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery);
int32_t numOfRows = (int32_t)getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery);
return ((char *)page->data) + rowOffset + offset * numOfRows;
}

View File

@ -3708,27 +3708,59 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
}
} else {
// no data generated yet
if (pCtx->size == 1) {
if (pCtx->size < 1) {
return;
}
// check the timestamp in input buffer
TSKEY skey = GET_TS_DATA(pCtx, 0);
TSKEY ekey = GET_TS_DATA(pCtx, 1);
// no data generated yet
if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) {
return;
}
assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
if (type == TSDB_FILL_PREV) {
if (skey > pCtx->startTs) {
return;
}
if (pCtx->size > 1) {
TSKEY ekey = GET_TS_DATA(pCtx, 1);
if (ekey > skey && ekey <= pCtx->startTs) {
skey = ekey;
}
}
assignVal(pCtx->pOutput, pCtx->pInput, pCtx->outputBytes, pCtx->inputType);
} else if (type == TSDB_FILL_NEXT) {
char* val = ((char*)pCtx->pInput) + pCtx->inputBytes;
TSKEY ekey = skey;
char* val = NULL;
if (ekey < pCtx->startTs) {
if (pCtx->size > 1) {
ekey = GET_TS_DATA(pCtx, 1);
if (ekey < pCtx->startTs) {
return;
}
val = ((char*)pCtx->pInput) + pCtx->inputBytes;
} else {
return;
}
} else {
val = (char*)pCtx->pInput;
}
assignVal(pCtx->pOutput, val, pCtx->outputBytes, pCtx->inputType);
} else if (type == TSDB_FILL_LINEAR) {
if (pCtx->size <= 1) {
return;
}
TSKEY ekey = GET_TS_DATA(pCtx, 1);
// no data generated yet
if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) {
return;
}
assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
char *start = GET_INPUT_DATA(pCtx, 0);
char *end = GET_INPUT_DATA(pCtx, 1);
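The reworked branches reduce to a row-selection rule around pCtx->startTs: FILL_PREV takes the latest row at or before the target timestamp, FILL_NEXT the earliest row at or after it, and FILL_LINEAR still requires two rows straddling it. A hedged restatement of that rule as a standalone helper (hypothetical, for illustration; the real code reads the input buffer directly):
static int pickInterpRow(int32_t fillType, TSKEY startTs, const TSKEY *ts, int32_t nRows) {
  if (nRows < 1) return -1;
  if (fillType == TSDB_FILL_PREV) {
    if (ts[0] > startTs) return -1;  // nothing at or before the target
    return (nRows > 1 && ts[1] > ts[0] && ts[1] <= startTs) ? 1 : 0;
  }
  if (fillType == TSDB_FILL_NEXT) {
    if (ts[0] >= startTs) return 0;  // first row already qualifies
    return (nRows > 1 && ts[1] >= startTs) ? 1 : -1;
  }
  if (fillType == TSDB_FILL_LINEAR) {
    return (nRows > 1 && ts[0] < startTs && ts[1] > startTs) ? 0 : -1;
  }
  return -1;
}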
@ -4030,12 +4062,15 @@ static void mergeTableBlockDist(SResultRowCellInfo* pResInfo, const STableBlockD
pDist->maxRows = pSrc->maxRows;
pDist->minRows = pSrc->minRows;
int32_t numSteps = tsMaxRowsInFileBlock/TSDB_BLOCK_DIST_STEP_ROWS;
pDist->dataBlockInfos = taosArrayInit(numSteps, sizeof(SFileBlockInfo));
taosArraySetSize(pDist->dataBlockInfos, numSteps);
int32_t maxSteps = TSDB_MAX_MAX_ROW_FBLOCK/TSDB_BLOCK_DIST_STEP_ROWS;
if (TSDB_MAX_MAX_ROW_FBLOCK % TSDB_BLOCK_DIST_STEP_ROWS != 0) {
++maxSteps;
}
pDist->dataBlockInfos = taosArrayInit(maxSteps, sizeof(SFileBlockInfo));
taosArraySetSize(pDist->dataBlockInfos, maxSteps);
}
size_t steps = taosArrayGetSize(pDist->dataBlockInfos);
size_t steps = taosArrayGetSize(pSrc->dataBlockInfos);
for (int32_t i = 0; i < steps; ++i) {
int32_t srcNumBlocks = ((SFileBlockInfo*)taosArrayGet(pSrc->dataBlockInfos, i))->numBlocksOfStep;
SFileBlockInfo* blockInfo = (SFileBlockInfo*)taosArrayGet(pDist->dataBlockInfos, i);
@ -4047,9 +4082,9 @@ void block_func_merge(SQLFunctionCtx* pCtx) {
STableBlockDist info = {0};
int32_t len = *(int32_t*) pCtx->pInput;
blockDistInfoFromBinary(((char*)pCtx->pInput) + sizeof(int32_t), len, &info);
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
mergeTableBlockDist(pResInfo, &info);
taosArrayDestroy(info.dataBlockInfos);
pResInfo->numOfRes = 1;
pResInfo->hasResult = DATA_SET_FLAG;

File diff suppressed because it is too large

View File

@ -12,7 +12,6 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qExtbuffer.h"
#include "os.h"
#include "qAggMain.h"
#include "queryLog.h"
@ -21,6 +20,8 @@
#include "taosmsg.h"
#include "tulog.h"
#include "qExecutor.h"
#include "qExtbuffer.h"
#include "tcompare.h"
#define COLMODEL_GET_VAL(data, schema, allrow, rowId, colId) \
(data + (schema)->pFields[colId].offset * (allrow) + (rowId) * (schema)->pFields[colId].field.bytes)
@ -767,6 +768,60 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
free(buf);
}
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) {
assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols);
int32_t bytes = pSchema[index].bytes;
int32_t size = bytes + sizeof(int32_t);
char* buf = calloc(1, size * numOfRows);
for(int32_t i = 0; i < numOfRows; ++i) {
char* dest = buf + size * i;
memcpy(dest, ((char*)pCols[index]) + bytes * i, bytes);
*(int32_t*)(dest+bytes) = i;
}
qsort(buf, numOfRows, size, compareFn);
int32_t prevLength = 0;
char* p = NULL;
for(int32_t i = 0; i < numOfCols; ++i) {
int32_t bytes1 = pSchema[i].bytes;
if (i == index) {
for(int32_t j = 0; j < numOfRows; ++j){
char* src = buf + (j * size);
char* dest = (char*) pCols[i] + (j * bytes1);
memcpy(dest, src, bytes1);
}
} else {
// make sure the memory buffer is large enough
if (prevLength < bytes1) {
char *tmp = realloc(p, bytes1 * numOfRows);
assert(tmp);
p = tmp;
prevLength = bytes1;
}
memcpy(p, pCols[i], bytes1 * numOfRows);
for(int32_t j = 0; j < numOfRows; ++j){
char* dest = (char*) pCols[i] + bytes1 * j;
int32_t newPos = *(int32_t*)(buf + (j * size) + bytes);
char* src = p + (newPos * bytes1);
memcpy(dest, src, bytes1);
}
}
}
tfree(buf);
tfree(p);
}
/*
* deep copy of sschema
*/

View File

@ -206,6 +206,12 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR
} else {
assert(pFillInfo->currentKey == ts);
initBeforeAfterDataBuf(pFillInfo, prev);
if (pFillInfo->type == TSDB_FILL_NEXT && (pFillInfo->index + 1) < pFillInfo->numOfRows) {
initBeforeAfterDataBuf(pFillInfo, next);
++pFillInfo->index;
copyCurrentRowIntoBuf(pFillInfo, srcData, *next);
--pFillInfo->index;
}
// assign rows to dst buffer
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
@ -227,6 +233,12 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR
} else if (pFillInfo->type == TSDB_FILL_LINEAR) {
assignVal(output, src, pCol->col.bytes, pCol->col.type);
memcpy(*prev + pCol->col.offset, src, pCol->col.bytes);
} else if (pFillInfo->type == TSDB_FILL_NEXT) {
if (*next) {
assignVal(output, *next + pCol->col.offset, pCol->col.bytes, pCol->col.type);
} else {
setNull(output, pCol->col.type, pCol->col.bytes);
}
} else {
assignVal(output, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
}

View File

@ -1521,7 +1521,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
int32_t type = FILTER_UNIT_DATA_TYPE(unit);
int32_t len = 0;
int32_t tlen = 0;
char str[256] = {0};
char str[512] = {0};
SFilterField *left = FILTER_UNIT_LEFT_FIELD(info, unit);
SSchema *sch = left->desc;
@ -1540,6 +1540,24 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
}
strcat(str, "]");
if (unit->compare.optr2) {
strcat(str, " && ");
sprintf(str + strlen(str), "[%d][%s] %s [", sch->colId, sch->name, gOptrStr[unit->compare.optr2].str);
if (unit->right2.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != TSDB_RELATION_IN) {
SFilterField *right = FILTER_UNIT_RIGHT2_FIELD(info, unit);
char *data = right->data;
if (IS_VAR_DATA_TYPE(type)) {
tlen = varDataLen(data);
data += VARSTR_HEADER_SIZE;
}
converToStr(str + strlen(str), type, data, tlen > 32 ? 32 : tlen, &tlen);
} else {
strcat(str, "NULL");
}
strcat(str, "]");
}
qDebug("%s", str); //TODO
}
@ -1556,37 +1574,63 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
return;
}
qDebug("%s - RANGE info:", msg);
if (options == 1) {
qDebug("%s - RANGE info:", msg);
qDebug("RANGE Num:%u", info->colRangeNum);
for (uint16_t i = 0; i < info->colRangeNum; ++i) {
SFilterRangeCtx *ctx = info->colRange[i];
qDebug("Column ID[%d] RANGE: isnull[%d],notnull[%d],range[%d]", ctx->colId, ctx->isnull, ctx->notnull, ctx->isrange);
if (ctx->isrange) {
SFilterRangeNode *r = ctx->rs;
while (r) {
char str[256] = {0};
int32_t tlen = 0;
if (FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_NULL)) {
strcat(str,"(NULL)");
} else {
FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? strcat(str,"(") : strcat(str,"[");
converToStr(str + strlen(str), ctx->type, &r->ra.s, tlen > 32 ? 32 : tlen, &tlen);
FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? strcat(str,")") : strcat(str,"]");
}
strcat(str, " - ");
if (FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_NULL)) {
strcat(str, "(NULL)");
} else {
FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? strcat(str,"(") : strcat(str,"[");
converToStr(str + strlen(str), ctx->type, &r->ra.e, tlen > 32 ? 32 : tlen, &tlen);
FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? strcat(str,")") : strcat(str,"]");
}
qDebug("range: %s", str);
qDebug("RANGE Num:%u", info->colRangeNum);
for (uint16_t i = 0; i < info->colRangeNum; ++i) {
SFilterRangeCtx *ctx = info->colRange[i];
qDebug("Column ID[%d] RANGE: isnull[%d],notnull[%d],range[%d]", ctx->colId, ctx->isnull, ctx->notnull, ctx->isrange);
if (ctx->isrange) {
SFilterRangeNode *r = ctx->rs;
while (r) {
char str[256] = {0};
int32_t tlen = 0;
if (FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_NULL)) {
strcat(str,"(NULL)");
} else {
FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? strcat(str,"(") : strcat(str,"[");
converToStr(str + strlen(str), ctx->type, &r->ra.s, tlen > 32 ? 32 : tlen, &tlen);
FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? strcat(str,")") : strcat(str,"]");
}
strcat(str, " - ");
if (FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_NULL)) {
strcat(str, "(NULL)");
} else {
FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? strcat(str,"(") : strcat(str,"[");
converToStr(str + strlen(str), ctx->type, &r->ra.e, tlen > 32 ? 32 : tlen, &tlen);
FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? strcat(str,")") : strcat(str,"]");
}
qDebug("range: %s", str);
r = r->next;
r = r->next;
}
}
}
return;
}
qDebug("%s - Block Filter info:", msg);
if (FILTER_GET_FLAG(info->blkFlag, FI_STATUS_BLK_ALL)) {
qDebug("Flag:%s", "ALL");
return;
} else if (FILTER_GET_FLAG(info->blkFlag, FI_STATUS_BLK_EMPTY)) {
qDebug("Flag:%s", "EMPTY");
return;
} else if (FILTER_GET_FLAG(info->blkFlag, FI_STATUS_BLK_ACTIVE)){
qDebug("Flag:%s", "ACTIVE");
}
qDebug("GroupNum:%d", info->blkGroupNum);
uint16_t *unitIdx = info->blkUnits;
for (uint16_t i = 0; i < info->blkGroupNum; ++i) {
qDebug("Group[%d] UnitNum: %d:", i, *unitIdx);
uint16_t unitNum = *(unitIdx++);
for (uint16_t m = 0; m < unitNum; ++m) {
qDebug("uidx[%d]", *(unitIdx++));
}
}
}
}
@ -1674,6 +1718,8 @@ void filterFreeInfo(SFilterInfo *info) {
CHK_RETV(info == NULL);
tfree(info->cunits);
tfree(info->blkUnitRes);
tfree(info->blkUnits);
for (int32_t i = 0; i < FLD_TYPE_MAX; ++i) {
for (uint16_t f = 0; f < info->fields[i].num; ++f) {
@ -2485,6 +2531,8 @@ int32_t filterGenerateComInfo(SFilterInfo *info) {
uint16_t n = 0;
info->cunits = malloc(info->unitNum * sizeof(*info->cunits));
info->blkUnitRes = malloc(sizeof(*info->blkUnitRes) * info->unitNum);
info->blkUnits = malloc(sizeof(*info->blkUnits) * (info->unitNum + 1) * info->groupNum);
for (uint16_t i = 0; i < info->unitNum; ++i) {
SFilterUnit *unit = &info->units[i];
@ -2493,6 +2541,7 @@ int32_t filterGenerateComInfo(SFilterInfo *info) {
info->cunits[i].rfunc = filterGetRangeCompFuncFromOptrs(unit->compare.optr, unit->compare.optr2);
info->cunits[i].optr = FILTER_UNIT_OPTR(unit);
info->cunits[i].colData = NULL;
info->cunits[i].colId = FILTER_UNIT_COL_ID(info, unit);
if (unit->right.type == FLD_TYPE_VALUE) {
info->cunits[i].valData = FILTER_UNIT_VAL_DATA(info, unit);
@ -2541,36 +2590,317 @@ int32_t filterUpdateComUnits(SFilterInfo *info) {
}
static FORCE_INLINE bool filterExecuteImplAll(void *info, int32_t numOfRows, int8_t* p) {
int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t numOfCols, int32_t numOfRows) {
int32_t rmUnit = 0;
memset(info->blkUnitRes, 0, sizeof(*info->blkUnitRes) * info->unitNum);
for (int32_t k = 0; k < info->unitNum; ++k) {
int32_t index = -1;
SFilterComUnit *cunit = &info->cunits[k];
if (FILTER_NO_MERGE_DATA_TYPE(cunit->dataType)) {
continue;
}
for(int32_t i = 0; i < numOfCols; ++i) {
if (pDataStatis[i].colId == cunit->colId) {
index = i;
break;
}
}
if (index == -1) {
continue;
}
if (pDataStatis[index].numOfNull <= 0) {
if (cunit->optr == TSDB_RELATION_ISNULL) {
info->blkUnitRes[k] = -1;
rmUnit = 1;
continue;
}
if (cunit->optr == TSDB_RELATION_NOTNULL) {
info->blkUnitRes[k] = 1;
rmUnit = 1;
continue;
}
} else {
if (pDataStatis[index].numOfNull == numOfRows) {
if (cunit->optr == TSDB_RELATION_ISNULL) {
info->blkUnitRes[k] = 1;
rmUnit = 1;
continue;
}
info->blkUnitRes[k] = -1;
rmUnit = 1;
continue;
}
}
if (cunit->optr == TSDB_RELATION_ISNULL || cunit->optr == TSDB_RELATION_NOTNULL
|| cunit->optr == TSDB_RELATION_IN || cunit->optr == TSDB_RELATION_LIKE
|| cunit->optr == TSDB_RELATION_NOT_EQUAL) {
continue;
}
SDataStatis* pDataBlockst = &pDataStatis[index];
void *minVal, *maxVal;
if (cunit->dataType == TSDB_DATA_TYPE_FLOAT) {
float minv = (float)(*(double *)(&pDataBlockst->min));
float maxv = (float)(*(double *)(&pDataBlockst->max));
minVal = &minv;
maxVal = &maxv;
} else {
minVal = &pDataBlockst->min;
maxVal = &pDataBlockst->max;
}
bool minRes = false, maxRes = false;
if (cunit->rfunc >= 0) {
minRes = (*gRangeCompare[cunit->rfunc])(minVal, minVal, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
maxRes = (*gRangeCompare[cunit->rfunc])(maxVal, maxVal, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
if (minRes && maxRes) {
info->blkUnitRes[k] = 1;
rmUnit = 1;
} else if ((!minRes) && (!maxRes)) {
minRes = filterDoCompare(gDataCompare[cunit->func], TSDB_RELATION_LESS_EQUAL, minVal, cunit->valData);
maxRes = filterDoCompare(gDataCompare[cunit->func], TSDB_RELATION_GREATER_EQUAL, maxVal, cunit->valData2);
if (minRes && maxRes) {
continue;
}
info->blkUnitRes[k] = -1;
rmUnit = 1;
}
} else {
minRes = filterDoCompare(gDataCompare[cunit->func], cunit->optr, minVal, cunit->valData);
maxRes = filterDoCompare(gDataCompare[cunit->func], cunit->optr, maxVal, cunit->valData);
if (minRes && maxRes) {
info->blkUnitRes[k] = 1;
rmUnit = 1;
} else if ((!minRes) && (!maxRes)) {
if (cunit->optr == TSDB_RELATION_EQUAL) {
minRes = filterDoCompare(gDataCompare[cunit->func], TSDB_RELATION_GREATER, minVal, cunit->valData);
maxRes = filterDoCompare(gDataCompare[cunit->func], TSDB_RELATION_LESS, maxVal, cunit->valData);
if (minRes || maxRes) {
info->blkUnitRes[k] = -1;
rmUnit = 1;
}
continue;
}
info->blkUnitRes[k] = -1;
rmUnit = 1;
}
}
}
CHK_LRET(rmUnit == 0, TSDB_CODE_SUCCESS, "no block filter applied");
info->blkGroupNum = info->groupNum;
uint16_t *unitNum = info->blkUnits;
uint16_t *unitIdx = unitNum + 1;
int32_t all = 0, empty = 0;
for (uint32_t g = 0; g < info->groupNum; ++g) {
SFilterGroup *group = &info->groups[g];
*unitNum = group->unitNum;
all = 0;
empty = 0;
for (uint32_t u = 0; u < group->unitNum; ++u) {
uint16_t uidx = group->unitIdxs[u];
if (info->blkUnitRes[uidx] == 1) {
--(*unitNum);
all = 1;
continue;
} else if (info->blkUnitRes[uidx] == -1) {
*unitNum = 0;
empty = 1;
break;
}
*(unitIdx++) = uidx;
}
if (*unitNum == 0) {
--info->blkGroupNum;
assert(empty || all);
if (empty) {
FILTER_SET_FLAG(info->blkFlag, FI_STATUS_BLK_EMPTY);
} else {
FILTER_SET_FLAG(info->blkFlag, FI_STATUS_BLK_ALL);
goto _return;
}
continue;
}
unitNum = unitIdx;
++unitIdx;
}
if (info->blkGroupNum) {
FILTER_CLR_FLAG(info->blkFlag, FI_STATUS_BLK_EMPTY);
FILTER_SET_FLAG(info->blkFlag, FI_STATUS_BLK_ACTIVE);
}
_return:
filterDumpInfoToString(info, "Block Filter", 2);
return TSDB_CODE_SUCCESS;
}
bool filterExecuteBasedOnStatisImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
uint16_t *unitIdx = NULL;
*p = calloc(numOfRows, sizeof(int8_t));
for (int32_t i = 0; i < numOfRows; ++i) {
//FILTER_UNIT_CLR_F(info);
unitIdx = info->blkUnits;
for (uint32_t g = 0; g < info->blkGroupNum; ++g) {
uint16_t unitNum = *(unitIdx++);
for (uint32_t u = 0; u < unitNum; ++u) {
SFilterComUnit *cunit = &info->cunits[*(unitIdx + u)];
void *colData = (char *)cunit->colData + cunit->dataSize * i;
//if (FILTER_UNIT_GET_F(info, uidx)) {
// p[i] = FILTER_UNIT_GET_R(info, uidx);
//} else {
uint8_t optr = cunit->optr;
if (isNull(colData, cunit->dataType)) {
(*p)[i] = optr == TSDB_RELATION_ISNULL ? true : false;
} else {
if (optr == TSDB_RELATION_NOTNULL) {
(*p)[i] = 1;
} else if (optr == TSDB_RELATION_ISNULL) {
(*p)[i] = 0;
} else if (cunit->rfunc >= 0) {
(*p)[i] = (*gRangeCompare[cunit->rfunc])(colData, colData, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
} else {
(*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData);
}
//FILTER_UNIT_SET_R(info, uidx, p[i]);
//FILTER_UNIT_SET_F(info, uidx);
}
if ((*p)[i] == 0) {
break;
}
}
if ((*p)[i]) {
break;
}
unitIdx += unitNum;
}
if ((*p)[i] == 0) {
all = false;
}
}
return all;
}
int32_t filterExecuteBasedOnStatis(SFilterInfo *info, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols, bool* all) {
if (statis && numOfRows >= FILTER_RM_UNIT_MIN_ROWS) {
info->blkFlag = 0;
filterRmUnitByRange(info, statis, numOfCols, numOfRows);
if (info->blkFlag) {
if (FILTER_GET_FLAG(info->blkFlag, FI_STATUS_BLK_ALL)) {
*all = true;
goto _return;
} else if (FILTER_GET_FLAG(info->blkFlag, FI_STATUS_BLK_EMPTY)) {
*all = false;
goto _return;
}
assert(info->unitNum > 1);
*all = filterExecuteBasedOnStatisImpl(info, numOfRows, p, statis, numOfCols);
goto _return;
}
}
return 1;
_return:
info->blkFlag = 0;
return TSDB_CODE_SUCCESS;
}
static FORCE_INLINE bool filterExecuteImplAll(void *info, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
return true;
}
static FORCE_INLINE bool filterExecuteImplEmpty(void *info, int32_t numOfRows, int8_t* p) {
static FORCE_INLINE bool filterExecuteImplEmpty(void *info, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
return false;
}
static FORCE_INLINE bool filterExecuteImplIsNull(void *pinfo, int32_t numOfRows, int8_t* p) {
static FORCE_INLINE bool filterExecuteImplIsNull(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
return all;
}
*p = calloc(numOfRows, sizeof(int8_t));
for (int32_t i = 0; i < numOfRows; ++i) {
uint16_t uidx = info->groups[0].unitIdxs[0];
void *colData = (char *)info->cunits[uidx].colData + info->cunits[uidx].dataSize * i;
p[i] = isNull(colData, info->cunits[uidx].dataType);
if (p[i] == 0) {
(*p)[i] = isNull(colData, info->cunits[uidx].dataType);
if ((*p)[i] == 0) {
all = false;
}
}
return all;
}
static FORCE_INLINE bool filterExecuteImplNotNull(void *pinfo, int32_t numOfRows, int8_t* p) {
static FORCE_INLINE bool filterExecuteImplNotNull(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
return all;
}
*p = calloc(numOfRows, sizeof(int8_t));
for (int32_t i = 0; i < numOfRows; ++i) {
uint16_t uidx = info->groups[0].unitIdxs[0];
void *colData = (char *)info->cunits[uidx].colData + info->cunits[uidx].dataSize * i;
p[i] = !isNull(colData, info->cunits[uidx].dataType);
if (p[i] == 0) {
(*p)[i] = !isNull(colData, info->cunits[uidx].dataType);
if ((*p)[i] == 0) {
all = false;
}
}
@ -2578,7 +2908,7 @@ static FORCE_INLINE bool filterExecuteImplNotNull(void *pinfo, int32_t numOfRows
return all;
}
bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t* p) {
bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
uint16_t dataSize = info->cunits[0].dataSize;
@ -2588,6 +2918,12 @@ bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t* p) {
void *valData2 = info->cunits[0].valData2;
__compar_fn_t func = gDataCompare[info->cunits[0].func];
if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
return all;
}
*p = calloc(numOfRows, sizeof(int8_t));
for (int32_t i = 0; i < numOfRows; ++i) {
if (isNull(colData, info->cunits[0].dataType)) {
all = false;
@ -2595,9 +2931,9 @@ bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t* p) {
continue;
}
p[i] = (*rfunc)(colData, colData, valData, valData2, func);
(*p)[i] = (*rfunc)(colData, colData, valData, valData2, func);
if (p[i] == 0) {
if ((*p)[i] == 0) {
all = false;
}
@ -2607,10 +2943,16 @@ bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t* p) {
return all;
}
bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t* p) {
bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
return all;
}
*p = calloc(numOfRows, sizeof(int8_t));
for (int32_t i = 0; i < numOfRows; ++i) {
uint16_t uidx = info->groups[0].unitIdxs[0];
void *colData = (char *)info->cunits[uidx].colData + info->cunits[uidx].dataSize * i;
@ -2619,9 +2961,9 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t* p) {
continue;
}
p[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, colData, info->cunits[uidx].valData);
(*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, colData, info->cunits[uidx].valData);
if (p[i] == 0) {
if ((*p)[i] == 0) {
all = false;
}
}
@ -2630,10 +2972,16 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t* p) {
}
bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t* p) {
bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
return all;
}
*p = calloc(numOfRows, sizeof(int8_t));
for (int32_t i = 0; i < numOfRows; ++i) {
//FILTER_UNIT_CLR_F(info);
@ -2650,33 +2998,33 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t* p) {
uint8_t optr = cunit->optr;
if (isNull(colData, cunit->dataType)) {
p[i] = optr == TSDB_RELATION_ISNULL ? true : false;
(*p)[i] = optr == TSDB_RELATION_ISNULL ? true : false;
} else {
if (optr == TSDB_RELATION_NOTNULL) {
p[i] = 1;
(*p)[i] = 1;
} else if (optr == TSDB_RELATION_ISNULL) {
p[i] = 0;
(*p)[i] = 0;
} else if (cunit->rfunc >= 0) {
p[i] = (*gRangeCompare[cunit->rfunc])(colData, colData, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
(*p)[i] = (*gRangeCompare[cunit->rfunc])(colData, colData, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
} else {
p[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData);
(*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData);
}
//FILTER_UNIT_SET_R(info, uidx, p[i]);
//FILTER_UNIT_SET_F(info, uidx);
}
if (p[i] == 0) {
if ((*p)[i] == 0) {
break;
}
}
if (p[i]) {
if ((*p)[i]) {
break;
}
}
if (p[i] == 0) {
if ((*p)[i] == 0) {
all = false;
}
}
@ -2684,8 +3032,9 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t* p) {
return all;
}
FORCE_INLINE bool filterExecute(SFilterInfo *info, int32_t numOfRows, int8_t* p) {
return (*info->func)(info, numOfRows, p);
FORCE_INLINE bool filterExecute(SFilterInfo *info, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
return (*info->func)(info, numOfRows, p, statis, numOfCols);
}
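The signature change also changes ownership: filterExecute now allocates the per-row result array into *p (via calloc in the impl functions), and block statistics may decide the whole block without *p ever being allocated. A hedged caller sketch (the variable names are assumed):
int8_t *rowRes = NULL;
bool all = filterExecute(pFilterInfo, numOfRows, &rowRes, pStatis, numOfCols);
if (!all && rowRes != NULL) {
  // keep only rows where rowRes[i] == 1
}
tfree(rowRes);  // caller owns the buffer; tfree is a no-op when it is still NULL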
int32_t filterSetExecFunc(SFilterInfo *info) {
@ -2767,7 +3116,7 @@ _return:
return TSDB_CODE_SUCCESS;
}
int32_t filterSetColFieldData(SFilterInfo *info, int16_t colId, void *data) {
int32_t filterSetColFieldData(SFilterInfo *info, int32_t numOfCols, SArray* pDataBlock) {
CHK_LRET(info == NULL, TSDB_CODE_QRY_APP_ERROR, "info NULL");
CHK_LRET(info->fields[FLD_TYPE_COLUMN].num <= 0, TSDB_CODE_QRY_APP_ERROR, "no column fileds");
@ -2778,10 +3127,14 @@ int32_t filterSetColFieldData(SFilterInfo *info, int16_t colId, void *data) {
for (uint16_t i = 0; i < info->fields[FLD_TYPE_COLUMN].num; ++i) {
SFilterField* fi = &info->fields[FLD_TYPE_COLUMN].fields[i];
SSchema* sch = fi->desc;
if (sch->colId == colId) {
fi->data = data;
break;
for (int32_t j = 0; j < numOfCols; ++j) {
SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, j);
if (sch->colId == pColInfo->info.colId) {
fi->data = pColInfo->pData;
break;
}
}
}
@ -2880,16 +3233,17 @@ bool filterRangeExecute(SFilterInfo *info, SDataStatis *pDataStatis, int32_t num
// no statistics data, load the true data block
if (index == -1) {
return true;
break;
}
// not support pre-filter operation on binary/nchar data type
if (FILTER_NO_MERGE_DATA_TYPE(ctx->type)) {
return true;
break;
}
if ((pDataStatis[index].numOfNull <= 0) && (ctx->isnull && !ctx->notnull && !ctx->isrange)) {
return false;
ret = false;
break;
}
// all data in current column are NULL, no need to check its boundary value
@ -2897,7 +3251,8 @@ bool filterRangeExecute(SFilterInfo *info, SDataStatis *pDataStatis, int32_t num
// if isNULL query exists, load the null data column
if ((ctx->notnull || ctx->isrange) && (!ctx->isnull)) {
return false;
ret = false;
break;
}
continue;

View File

@ -237,7 +237,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval,
}
pBucket->elemPerPage = (pBucket->bufPageSize - sizeof(tFilePage))/pBucket->bytes;
pBucket->comparFn = getKeyComparFunc(pBucket->type);
pBucket->comparFn = getKeyComparFunc(pBucket->type, TSDB_ORDER_ASC);
pBucket->hashFunc = getHashFunc(pBucket->type);
if (pBucket->hashFunc == NULL) {

View File

@ -557,20 +557,27 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
int32_t op = 0;
if (onlyQueryTags(pQueryAttr)) { // do nothing for tags query
if (onlyQueryTags(pQueryAttr)) {
op = OP_TagScan;
taosArrayPush(plan, &op);
}
op = OP_TagScan;
taosArrayPush(plan, &op);
if (pQueryAttr->distinct) {
op = OP_Distinct;
taosArrayPush(plan, &op);
}
} else if (pQueryAttr->interval.interval > 0) {
if (pQueryAttr->stableQuery) {
op = OP_MultiTableTimeInterval;
if (pQueryAttr->pointInterpQuery) {
op = OP_AllMultiTableTimeInterval;
} else {
op = OP_MultiTableTimeInterval;
}
taosArrayPush(plan, &op);
} else {
op = OP_TimeWindow;
if (pQueryAttr->pointInterpQuery) {
op = OP_AllTimeWindow;
} else {
op = OP_TimeWindow;
}
taosArrayPush(plan, &op);
if (pQueryAttr->pExpr2 != NULL) {
@ -578,7 +585,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
taosArrayPush(plan, &op);
}
if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) {
if (pQueryAttr->fillType != TSDB_FILL_NONE) {
op = OP_Fill;
taosArrayPush(plan, &op);
}
@ -643,8 +650,14 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
taosArrayPush(plan, &op);
}
}
}
// outer query order by support
int32_t orderColId = pQueryAttr->order.orderColId;
if (pQueryAttr->vgId == 0 && orderColId != PRIMARYKEY_TIMESTAMP_COL_INDEX && orderColId != INT32_MIN) {
op = OP_Order;
taosArrayPush(plan, &op);
}
}
if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) {
op = OP_Limit;

View File

@ -30,6 +30,18 @@ typedef struct SCompSupporter {
int32_t order;
} SCompSupporter;
int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, bool stable) {
if (pQueryAttr && (!stable)) {
for (int16_t i = 0; i < pQueryAttr->numOfOutput; ++i) {
if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_TOP || pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_BOTTOM) {
return (int32_t)pQueryAttr->pExpr1[i].base.param[0].i64;
}
}
}
return 1;
}
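A worked example of the helper above: for a hypothetical query "select top(c1, 3) from t", the expression list contains TSDB_FUNC_TOP with param[0].i64 == 3, so getRowNumForMultioutput returns 3 and downstream result buffers reserve three rows per group; stable queries and all other functions fall through to 1.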
int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr) {
int32_t size = 0;

View File

@ -215,7 +215,6 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
return code;
}
bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
SQInfo *pQInfo = (SQInfo *)qinfo;
assert(pQInfo && pQInfo->signature == pQInfo);
@ -256,7 +255,11 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
bool newgroup = false;
publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_BEFORE_OPERATOR_EXEC);
int64_t st = taosGetTimestampUs();
pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot, &newgroup);
pQInfo->summary.elapsedTime += (taosGetTimestampUs() - st);
publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_AFTER_OPERATOR_EXEC);
pRuntimeEnv->resultInfo.total += GET_NUM_OF_RESULTS(pRuntimeEnv);

View File

@ -0,0 +1,124 @@
#include <gtest/gtest.h>
#include <iostream>
#include "taos.h"
#include "tsdb.h"
#include "qExtbuffer.h"
#pragma GCC diagnostic ignored "-Wwrite-strings"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wsign-compare"
namespace {
int32_t comp(const void* p1, const void* p2) {
int32_t* x1 = (int32_t*) p1;
int32_t* x2 = (int32_t*) p2;
if (*x1 == *x2) {
return 0;
} else {
return (*x1 > *x2)? 1:-1;
}
}
int32_t comp1(const void* p1, const void* p2) {
int32_t ret = strncmp((char*) p1, (char*) p2, 20);
if (ret == 0) {
return 0;
} else {
return ret > 0 ? 1:-1;
}
}
}
TEST(testCase, colunmnwise_sort_test) {
// void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn)
void* pCols[2] = {0};
SSchema s[2] = {{0}};
s[0].type = TSDB_DATA_TYPE_INT;
s[0].bytes = 4;
s[0].colId = 0;
strcpy(s[0].name, "col1");
s[1].type = TSDB_DATA_TYPE_BINARY;
s[1].bytes = 20;
s[1].colId = 1;
strcpy(s[1].name, "col2");
int32_t* p = (int32_t*) calloc(5, sizeof(int32_t));
p[0] = 12;
p[1] = 8;
p[2] = 99;
p[3] = 7;
p[4] = 1;
char* t1 = (char*) calloc(5, 20);
strcpy(t1, "abc");
strcpy(t1 + 20, "def");
strcpy(t1 + 40, "xyz");
strcpy(t1 + 60, "klm");
strcpy(t1 + 80, "hij");
pCols[0] = (char*) p;
pCols[1] = (char*) t1;
taoscQSort(reinterpret_cast<void**>(pCols), s, 2, 5, 0, comp);
int32_t* px = (int32_t*) pCols[0];
ASSERT_EQ(px[0], 1);
ASSERT_EQ(px[1], 7);
ASSERT_EQ(px[2], 8);
ASSERT_EQ(px[3], 12);
ASSERT_EQ(px[4], 99);
char* px1 = (char*) pCols[1];
ASSERT_STRCASEEQ(px1 + 20 * 0, "hij");
ASSERT_STRCASEEQ(px1 + 20 * 1, "klm");
ASSERT_STRCASEEQ(px1 + 20 * 2, "def");
ASSERT_STRCASEEQ(px1 + 20 * 3, "abc");
ASSERT_STRCASEEQ(px1 + 20 * 4, "xyz");
taoscQSort(pCols, s, 2, 5, 1, comp1);
px = (int32_t*) pCols[0];
ASSERT_EQ(px[0], 12);
ASSERT_EQ(px[1], 8);
ASSERT_EQ(px[2], 1);
ASSERT_EQ(px[3], 7);
ASSERT_EQ(px[4], 99);
px1 = (char*) pCols[1];
ASSERT_STRCASEEQ(px1 + 20 * 0, "abc");
ASSERT_STRCASEEQ(px1 + 20 * 1, "def");
ASSERT_STRCASEEQ(px1 + 20 * 2, "hij");
ASSERT_STRCASEEQ(px1 + 20 * 3, "klm");
ASSERT_STRCASEEQ(px1 + 20 * 4, "xyz");
}
TEST(testCase, columnsort_test) {
SSchema field[1] = {
{TSDB_DATA_TYPE_INT, "k", sizeof(int32_t)},
};
const int32_t num = 2000;
int32_t *d = (int32_t *)malloc(sizeof(int32_t) * num);
for (int32_t i = 0; i < num; ++i) {
d[i] = i % 4;
}
const int32_t numOfOrderCols = 1;
int32_t orderColIdx = 0;
SColumnModel *pModel = createColumnModel(field, 1, 1000);
tOrderDescriptor *pDesc = tOrderDesCreate(&orderColIdx, numOfOrderCols, pModel, 1);
tColDataQSort(pDesc, num, 0, num - 1, (char *)d, 1);
for (int32_t i = 0; i < num; ++i) {
printf("%d\t", d[i]);
}
printf("\n");
destroyColumnModel(pModel);
}

View File

@ -330,7 +330,7 @@ void intDataTest() {
filterAddRange(h, ra + i, TSDB_RELATION_AND);
}
filterGetRangeNum(h, &num);
ASSERT_EQ(num, 0);
ASSERT_EQ(num, 1);
filterFreeRangeCtx(h);

View File

@ -1,6 +1,4 @@
#include "os.h"
#include <gtest/gtest.h>
#include <cassert>
#include <iostream>
#include "taos.h"

View File

@ -397,7 +397,11 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin
SThreadObj *pThreadObj = pClientObj->pThreadObj[index];
SOCKET fd = taosOpenTcpClientSocket(ip, port, pThreadObj->ip);
#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
if (fd == (SOCKET)-1) return NULL;
#else
if (fd <= 0) return NULL;
#endif
struct sockaddr_in sin;
uint16_t localPort = 0;

View File

@ -65,12 +65,7 @@ SDisk *tfsFreeDisk(SDisk *pDisk);
int tfsUpdateDiskInfo(SDisk *pDisk);
// ttier.c ======================================================
typedef struct {
int64_t size;
int64_t used;
int64_t free;
int16_t nAvailDisks; // # of Available disks
} STierMeta;
typedef struct STier {
pthread_spinlock_t lock;
int level;

View File

@ -101,7 +101,7 @@ int tfsInit(SDiskCfg *pDiskCfg, int ndisk) {
return -1;
}
tfsUpdateInfo(NULL);
tfsUpdateInfo(NULL, NULL, 0);
for (int level = 0; level < TFS_NLEVEL(); level++) {
tfsPosNextId(TFS_TIER_AT(level));
}
@ -119,7 +119,7 @@ void tfsDestroy() {
}
}
void tfsUpdateInfo(SFSMeta *pFSMeta) {
void tfsUpdateInfo(SFSMeta *pFSMeta, STierMeta *tierMetas, int8_t numTiers) {
SFSMeta fsMeta;
STierMeta tierMeta;
@ -130,11 +130,16 @@ void tfsUpdateInfo(SFSMeta *pFSMeta) {
memset(pFSMeta, 0, sizeof(*pFSMeta));
for (int level = 0; level < TFS_NLEVEL(); level++) {
STierMeta *pTierMeta = &tierMeta;
if (tierMetas && level < numTiers) {
pTierMeta = tierMetas + level;
}
STier *pTier = TFS_TIER_AT(level);
tfsUpdateTierInfo(pTier, &tierMeta);
pFSMeta->tsize += tierMeta.size;
pFSMeta->avail += tierMeta.free;
pFSMeta->used += tierMeta.used;
tfsUpdateTierInfo(pTier, pTierMeta);
pFSMeta->tsize += pTierMeta->size;
pFSMeta->avail += pTierMeta->free;
pFSMeta->used += pTierMeta->used;
}
tfsLock();
@ -595,7 +600,7 @@ void taosGetDisk() {
SFSMeta fsMeta;
if (tscEmbedded) {
tfsUpdateInfo(&fsMeta);
tfsUpdateInfo(&fsMeta, NULL, 0);
tsTotalDataDirGB = (float)(fsMeta.tsize / unit);
tsUsedDataDirGB = (float)(fsMeta.used / unit);
tsAvailDataDirGB = (float)(fsMeta.avail / unit);
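The extended tfsUpdateInfo stays backward compatible: passing NULL/0 keeps the old aggregate-only behavior, while callers that want per-tier numbers supply an STierMeta array, as httpMetricsHandle.c does above. A minimal sketch:
SFSMeta   fsMeta;
STierMeta tiers[3] = {{0}};
tfsUpdateInfo(&fsMeta, tiers, 3);  // fills the aggregate and up to 3 per-tier metas
tfsUpdateInfo(&fsMeta, NULL, 0);   // legacy call: aggregate totals only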

View File

@ -24,8 +24,7 @@ typedef struct STable {
tstr* name; // NOTE: there a flexible string here
uint64_t suid;
struct STable* pSuper; // super table pointer
uint8_t numOfSchemas;
STSchema* schema[TSDB_MAX_TABLE_SCHEMAS];
SArray* schema;
STSchema* tagSchema;
SKVRow tagVal;
SSkipList* pIndex; // For TSDB_SUPER_TABLE, it is the skiplist index
@ -107,10 +106,9 @@ static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock,
if (lock) TSDB_RLOCK_TABLE(pDTable);
if (_version < 0) { // get the latest version of schema
pTSchema = pDTable->schema[pDTable->numOfSchemas - 1];
pTSchema = *(STSchema **)taosArrayGetLast(pDTable->schema);
} else { // get the schema with version
void* ptr = taosbsearch(&_version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*),
tsdbCompareSchemaVersion, TD_EQ);
void* ptr = taosArraySearch(pDTable->schema, &_version, tsdbCompareSchemaVersion, TD_EQ);
if (ptr == NULL) {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
goto _exit;

View File

@ -37,6 +37,8 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired);
static int tsdbProcessExpiredFS(STsdbRepo *pRepo);
static int tsdbCreateMeta(STsdbRepo *pRepo);
// For backward compatibility
bool tsdbForceKeepFile = false;
// ================== CURRENT file header info
static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) {
int tlen = 0;
@ -1048,6 +1050,26 @@ static int tsdbRestoreMeta(STsdbRepo *pRepo) {
return -1;
}
if (tsdbForceKeepFile) {
struct stat tfstat;
// Get real file size
if (fstat(pfs->cstatus->pmf->fd, &tfstat) < 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
tsdbCloseMFile(pfs->cstatus->pmf);
tfsClosedir(tdir);
regfree(&regex);
return -1;
}
if (pfs->cstatus->pmf->info.size != tfstat.st_size) {
int64_t tfsize = pfs->cstatus->pmf->info.size;
pfs->cstatus->pmf->info.size = tfstat.st_size;
tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo),
TSDB_FILE_FULL_NAME(pfs->cstatus->pmf), tfsize, pfs->cstatus->pmf->info.size);
}
}
tsdbCloseMFile(pfs->cstatus->pmf);
}
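// editor's note: tsdbForceKeepFile (declared extern in tglobal.h later in this
// diff) appears to let a restore trust the real on-disk file size over the
// recorded header size, instead of treating the mismatch as corruption.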
} else if (code == REG_NOMATCH) {
@ -1212,6 +1234,24 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) {
return -1;
}
if (tsdbForceKeepFile) {
struct stat tfstat;
// Get real file size
if (fstat(pDFile->fd, &tfstat) < 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
taosArrayDestroy(fArray);
return -1;
}
if (pDFile->info.size != tfstat.st_size) {
int64_t tfsize = pDFile->info.size;
pDFile->info.size = tfstat.st_size;
tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo),
TSDB_FILE_FULL_NAME(pDFile), tfsize, pDFile->info.size);
}
}
tsdbCloseDFile(pDFile);
index++;
}

View File

@ -43,6 +43,8 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable);
static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable);
static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid);
static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema);
static int tsdbAddSchema(STable *pTable, STSchema *pSchema);
static void tsdbFreeTableSchema(STable *pTable);
// ------------------ OUTER FUNCTIONS ------------------
int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
@ -722,17 +724,10 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema,
STsdbMeta *pMeta = pRepo->tsdbMeta;
STable *pCTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
ASSERT(schemaVersion(pSchema) > schemaVersion(pCTable->schema[pCTable->numOfSchemas - 1]));
ASSERT(schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pCTable->schema)));
TSDB_WLOCK_TABLE(pCTable);
if (pCTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) {
pCTable->schema[pCTable->numOfSchemas++] = pSchema;
} else {
ASSERT(pCTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS);
tdFreeSchema(pCTable->schema[0]);
memmove(pCTable->schema, pCTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1));
pCTable->schema[pCTable->numOfSchemas - 1] = pSchema;
}
tsdbAddSchema(pCTable, pSchema);
if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema);
if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema);
@ -828,9 +823,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST
TABLE_TID(pTable) = -1;
TABLE_SUID(pTable) = -1;
pTable->pSuper = NULL;
pTable->numOfSchemas = 1;
pTable->schema[0] = tdDupSchema(pCfg->schema);
if (pTable->schema[0] == NULL) {
if (tsdbAddSchema(pTable, tdDupSchema(pCfg->schema)) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
}
@ -841,7 +834,8 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST
}
pTable->tagVal = NULL;
STColumn *pCol = schemaColAt(pTable->tagSchema, DEFAULT_TAG_INDEX_COLUMN);
pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, SL_ALLOW_DUP_KEY, getTagIndexKey);
pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL,
SL_ALLOW_DUP_KEY, getTagIndexKey);
if (pTable->pIndex == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
@ -870,9 +864,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST
}
} else {
TABLE_SUID(pTable) = -1;
pTable->numOfSchemas = 1;
pTable->schema[0] = tdDupSchema(pCfg->schema);
if (pTable->schema[0] == NULL) {
if (tsdbAddSchema(pTable, tdDupSchema(pCfg->schema)) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
}
@ -906,9 +898,7 @@ static void tsdbFreeTable(STable *pTable) {
TABLE_UID(pTable));
tfree(TABLE_NAME(pTable));
if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) {
tdFreeSchema(pTable->schema[i]);
}
tsdbFreeTableSchema(pTable);
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
tdFreeSchema(pTable->tagSchema);
@ -1260,9 +1250,10 @@ static int tsdbEncodeTable(void **buf, STable *pTable) {
tlen += taosEncodeFixedU64(buf, TABLE_SUID(pTable));
tlen += tdEncodeKVRow(buf, pTable->tagVal);
} else {
tlen += taosEncodeFixedU8(buf, pTable->numOfSchemas);
for (int i = 0; i < pTable->numOfSchemas; i++) {
tlen += tdEncodeSchema(buf, pTable->schema[i]);
tlen += taosEncodeFixedU8(buf, (uint8_t)taosArrayGetSize(pTable->schema));
for (int i = 0; i < taosArrayGetSize(pTable->schema); i++) {
STSchema *pSchema = taosArrayGetP(pTable->schema, i);
tlen += tdEncodeSchema(buf, pSchema);
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
@ -1293,9 +1284,12 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) {
buf = taosDecodeFixedU64(buf, &TABLE_SUID(pTable));
buf = tdDecodeKVRow(buf, &(pTable->tagVal));
} else {
buf = taosDecodeFixedU8(buf, &(pTable->numOfSchemas));
for (int i = 0; i < pTable->numOfSchemas; i++) {
buf = tdDecodeSchema(buf, &(pTable->schema[i]));
uint8_t nSchemas;
buf = taosDecodeFixedU8(buf, &nSchemas);
for (int i = 0; i < nSchemas; i++) {
STSchema *pSchema;
buf = tdDecodeSchema(buf, &pSchema);
tsdbAddSchema(pTable, pSchema);
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
@ -1457,3 +1451,38 @@ static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema) {
return 0;
}
static int tsdbAddSchema(STable *pTable, STSchema *pSchema) {
ASSERT(TABLE_TYPE(pTable) != TSDB_CHILD_TABLE);
if (pTable->schema == NULL) {
pTable->schema = taosArrayInit(TSDB_MAX_TABLE_SCHEMAS, sizeof(STSchema *));
if (pTable->schema == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
}
ASSERT(taosArrayGetSize(pTable->schema) == 0 ||
schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pTable->schema)));
if (taosArrayPush(pTable->schema, &pSchema) == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
return 0;
}
static void tsdbFreeTableSchema(STable *pTable) {
ASSERT(pTable != NULL);
if (pTable->schema) {
for (size_t i = 0; i < taosArrayGetSize(pTable->schema); i++) {
STSchema *pSchema = taosArrayGetP(pTable->schema, i);
tdFreeSchema(pSchema);
}
taosArrayDestroy(pTable->schema);
}
}
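Because tsdbAddSchema() keeps the array ordered by strictly increasing version (see the ASSERT above), a lookup by version stays a search over a sorted array; a minimal sketch mirroring tsdbGetTableSchemaImpl() earlier in this diff (the version value is illustrative):
int16_t version = 3;
void *ptr = taosArraySearch(pTable->schema, &version, tsdbCompareSchemaVersion, TD_EQ);
STSchema *pSchema = (ptr == NULL) ? NULL : *(STSchema **)ptr;  // NULL: no schema with that version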

View File

@ -691,6 +691,18 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pRef) {
STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList);
if (pNew->numOfTables == 0) {
tsdbDebug("update query time range to an invalid time window");
assert(taosArrayGetSize(pNew->pGroupList) == 0);
bool asc = ASCENDING_TRAVERSE(pCond->order);
if (asc) {
pCond->twindow.ekey = pCond->twindow.skey - 1;
} else {
pCond->twindow.skey = pCond->twindow.ekey - 1;
}
}
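// editor's note: e.g. an ascending query over [10, 20] becomes [10, 9]
// (skey > ekey), so no data block can match and only the external prev/next
// rows are produced; the descending branch degenerates the window the same way.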
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qId, pRef);
pQueryHandle->loadExternalRow = true;
pQueryHandle->currentLoadExternalRows = true;
@ -2693,7 +2705,7 @@ static void destroyHelper(void* param) {
free(param);
}
static bool loadBlockOfActiveTable(STsdbQueryHandle* pQueryHandle) {
static bool loadBlockOfActiveTable(STsdbQueryHandle* pQueryHandle) {
if (pQueryHandle->checkFiles) {
// check if the query range overlaps with the file data block
bool exists = true;

View File

@ -47,7 +47,7 @@ int WCSPatternMatch(const wchar_t *pattern, const wchar_t *str, size_t size, con
int32_t doCompare(const char* a, const char* b, int32_t type, size_t size);
__compar_fn_t getKeyComparFunc(int32_t keyType);
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order);
__compar_fn_t getComparFunc(int32_t type, int32_t optr);

View File

@ -81,6 +81,7 @@ typedef struct {
extern SGlobalCfg tsGlobalConfig[];
extern int32_t tsGlobalConfigNum;
extern char * tsCfgStatusStr[];
extern bool tsdbForceKeepFile;
void taosReadGlobalLogCfg();
bool taosReadGlobalCfg();

View File

@ -16,7 +16,6 @@
#include "os.h"
#include "ttype.h"
#include "tcompare.h"
#include "tarray.h"
#include "hash.h"
int32_t setCompareBytes1(const void *pLeft, const void *pRight) {
@ -35,18 +34,15 @@ int32_t setCompareBytes8(const void *pLeft, const void *pRight) {
return NULL != taosHashGet((SHashObj *)pRight, pLeft, 8) ? 1 : 0;
}
int32_t compareInt32Val(const void *pLeft, const void *pRight) {
int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight);
int32_t compareInt8Val(const void *pLeft, const void *pRight) {
int8_t left = GET_INT8_VAL(pLeft), right = GET_INT8_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
int32_t compareInt64Val(const void *pLeft, const void *pRight) {
int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
int32_t compareInt8ValDesc(const void *pLeft, const void *pRight) {
return compareInt8Val(pRight, pLeft);
}
int32_t compareInt16Val(const void *pLeft, const void *pRight) {
@ -56,13 +52,32 @@ int32_t compareInt16Val(const void *pLeft, const void *pRight) {
return 0;
}
int32_t compareInt8Val(const void *pLeft, const void *pRight) {
int8_t left = GET_INT8_VAL(pLeft), right = GET_INT8_VAL(pRight);
int32_t compareInt16ValDesc(const void* pLeft, const void* pRight) {
return compareInt16Val(pRight, pLeft);
}
int32_t compareInt32Val(const void *pLeft, const void *pRight) {
int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
int32_t compareInt32ValDesc(const void* pLeft, const void* pRight) {
return compareInt32Val(pRight, pLeft);
}
int32_t compareInt64Val(const void *pLeft, const void *pRight) {
int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
int32_t compareInt64ValDesc(const void* pLeft, const void* pRight) {
return compareInt64Val(pRight, pLeft);
}
int32_t compareUint32Val(const void *pLeft, const void *pRight) {
uint32_t left = GET_UINT32_VAL(pLeft), right = GET_UINT32_VAL(pRight);
if (left > right) return 1;
@ -70,6 +85,10 @@ int32_t compareUint32Val(const void *pLeft, const void *pRight) {
return 0;
}
int32_t compareUint32ValDesc(const void* pLeft, const void* pRight) {
return compareUint32Val(pRight, pLeft);
}
int32_t compareUint64Val(const void *pLeft, const void *pRight) {
uint64_t left = GET_UINT64_VAL(pLeft), right = GET_UINT64_VAL(pRight);
if (left > right) return 1;
@ -77,6 +96,10 @@ int32_t compareUint64Val(const void *pLeft, const void *pRight) {
return 0;
}
int32_t compareUint64ValDesc(const void* pLeft, const void* pRight) {
return compareUint64Val(pRight, pLeft);
}
int32_t compareUint16Val(const void *pLeft, const void *pRight) {
uint16_t left = GET_UINT16_VAL(pLeft), right = GET_UINT16_VAL(pRight);
if (left > right) return 1;
@ -84,6 +107,10 @@ int32_t compareUint16Val(const void *pLeft, const void *pRight) {
return 0;
}
int32_t compareUint16ValDesc(const void* pLeft, const void* pRight) {
return compareUint16Val(pRight, pLeft);
}
int32_t compareUint8Val(const void* pLeft, const void* pRight) {
uint8_t left = GET_UINT8_VAL(pLeft), right = GET_UINT8_VAL(pRight);
if (left > right) return 1;
@ -91,6 +118,10 @@ int32_t compareUint8Val(const void* pLeft, const void* pRight) {
return 0;
}
int32_t compareUint8ValDesc(const void* pLeft, const void* pRight) {
return compareUint8Val(pRight, pLeft);
}
int32_t compareFloatVal(const void *pLeft, const void *pRight) {
float p1 = GET_FLOAT_VAL(pLeft);
float p2 = GET_FLOAT_VAL(pRight);
@ -112,6 +143,10 @@ int32_t compareFloatVal(const void *pLeft, const void *pRight) {
return FLT_GREATER(p1, p2) ? 1: -1;
}
int32_t compareFloatValDesc(const void* pLeft, const void* pRight) {
return compareFloatVal(pRight, pLeft);
}
int32_t compareDoubleVal(const void *pLeft, const void *pRight) {
double p1 = GET_DOUBLE_VAL(pLeft);
double p2 = GET_DOUBLE_VAL(pRight);
@ -133,6 +168,10 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight) {
return FLT_GREATER(p1, p2) ? 1: -1;
}
int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) {
return compareDoubleVal(pRight, pLeft);
}
int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
@ -149,6 +188,10 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
}
}
int32_t compareLenPrefixedStrDesc(const void* pLeft, const void* pRight) {
return compareLenPrefixedStr(pRight, pLeft);
}
int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) {
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
@ -165,6 +208,10 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) {
}
}
int32_t compareLenPrefixedWStrDesc(const void* pLeft, const void* pRight) {
return compareLenPrefixedWStr(pRight, pLeft);
}
/*
* Compare two strings
* TSDB_MATCH: Match
@ -281,9 +328,9 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'};
char pattern[128] = {0};
assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN);
char *pattern = calloc(varDataLen(pRight) + 1, sizeof(char));
memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
assert(varDataLen(pRight) < 128);
size_t sz = varDataLen(pLeft);
char *buf = malloc(sz + 1);
@ -292,6 +339,7 @@ int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
int32_t ret = patternMatch(pattern, buf, sz, &pInfo);
free(buf);
free(pattern);
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}
@ -302,10 +350,6 @@ int32_t taosArrayCompareString(const void* a, const void* b) {
return compareLenPrefixedStr(x, y);
}
//static int32_t compareFindStrInArray(const void* pLeft, const void* pRight) {
// const SArray* arr = (const SArray*) pRight;
// return taosArraySearchString(arr, pLeft, taosArrayCompareString, TD_EQ) == NULL ? 0 : 1;
//}
int32_t compareFindItemInSet(const void *pLeft, const void* pRight) {
return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0;
}
@ -313,13 +357,13 @@ int32_t compareFindItemInSet(const void *pLeft, const void* pRight) {
int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'};
wchar_t pattern[128] = {0};
assert(TSDB_PATTERN_STRING_MAX_LEN < 128);
assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE);
wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
assert(varDataLen(pRight) < 128);
int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo);
free(pattern);
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}
@ -394,50 +438,50 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
return comparFn;
}
__compar_fn_t getKeyComparFunc(int32_t keyType) {
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) {
__compar_fn_t comparFn = NULL;
switch (keyType) {
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_BOOL:
comparFn = compareInt8Val;
comparFn = (order == TSDB_ORDER_ASC)? compareInt8Val:compareInt8ValDesc;
break;
case TSDB_DATA_TYPE_SMALLINT:
comparFn = compareInt16Val;
comparFn = (order == TSDB_ORDER_ASC)? compareInt16Val:compareInt16ValDesc;
break;
case TSDB_DATA_TYPE_INT:
comparFn = compareInt32Val;
comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc;
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP:
comparFn = compareInt64Val;
comparFn = (order == TSDB_ORDER_ASC)? compareInt64Val:compareInt64ValDesc;
break;
case TSDB_DATA_TYPE_FLOAT:
comparFn = compareFloatVal;
comparFn = (order == TSDB_ORDER_ASC)? compareFloatVal:compareFloatValDesc;
break;
case TSDB_DATA_TYPE_DOUBLE:
comparFn = compareDoubleVal;
comparFn = (order == TSDB_ORDER_ASC)? compareDoubleVal:compareDoubleValDesc;
break;
case TSDB_DATA_TYPE_UTINYINT:
comparFn = compareUint8Val;
comparFn = (order == TSDB_ORDER_ASC)? compareUint8Val:compareUint8ValDesc;
break;
case TSDB_DATA_TYPE_USMALLINT:
comparFn = compareUint16Val;
comparFn = (order == TSDB_ORDER_ASC)? compareUint16Val:compareUint16ValDesc;
break;
case TSDB_DATA_TYPE_UINT:
comparFn = compareUint32Val;
comparFn = (order == TSDB_ORDER_ASC)? compareUint32Val:compareUint32ValDesc;
break;
case TSDB_DATA_TYPE_UBIGINT:
comparFn = compareUint64Val;
comparFn = (order == TSDB_ORDER_ASC)? compareUint64Val:compareUint64ValDesc;
break;
case TSDB_DATA_TYPE_BINARY:
comparFn = compareLenPrefixedStr;
comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedStr:compareLenPrefixedStrDesc;
break;
case TSDB_DATA_TYPE_NCHAR:
comparFn = compareLenPrefixedWStr;
comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedWStr:compareLenPrefixedWStrDesc;
break;
default:
comparFn = compareInt32Val;
comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc;
break;
}
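A small usage sketch for the order-aware comparator (hypothetical driver code; TSDB_ORDER_DESC is assumed to be the counterpart of the TSDB_ORDER_ASC constant used above):
#include <stdlib.h>  // qsort
__compar_fn_t fn = getKeyComparFunc(TSDB_DATA_TYPE_INT, TSDB_ORDER_DESC);
int32_t keys[] = {3, 1, 2};
qsort(keys, 3, sizeof(int32_t), fn);  // yields 3, 2, 1: each Desc variant simply swaps its arguments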

View File

@ -54,7 +54,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
pSkipList->keyFn = fn;
pSkipList->seed = rand();
if (comparFn == NULL) {
pSkipList->comparFn = getKeyComparFunc(keyType);
pSkipList->comparFn = getKeyComparFunc(keyType, TSDB_ORDER_ASC);
} else {
pSkipList->comparFn = comparFn;
}

View File

@ -17,6 +17,7 @@
#define TAOS_RANDOM_FILE_FAIL_TEST
#include "os.h"
#include "taoserror.h"
#include "taosmsg.h"
#include "tchecksum.h"
#include "tfile.h"
#include "twal.h"
@ -114,7 +115,7 @@ void walRemoveAllOldFiles(void *handle) {
#if defined(WAL_CHECKSUM_WHOLE)
static void walUpdateChecksum(SWalHead *pHead) {
pHead->sver = 1;
pHead->sver = 2;
pHead->cksum = 0;
pHead->cksum = taosCalcChecksum(0, (uint8_t *)pHead, sizeof(*pHead) + pHead->len);
}
@ -122,7 +123,7 @@ static void walUpdateChecksum(SWalHead *pHead) {
static int walValidateChecksum(SWalHead *pHead) {
if (pHead->sver == 0) { // for compatible with wal before sver 1
return taosCheckChecksumWhole((uint8_t *)pHead, sizeof(*pHead));
} else if (pHead->sver == 1) {
} else if (pHead->sver >= 1) {
uint32_t cksum = pHead->cksum;
pHead->cksum = 0;
return taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, cksum);
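In short, sver 0 checksums only the fixed head, while sver >= 1 checksums head plus payload with the cksum field zeroed during computation. A write/read round trip, shown only for illustration with the names used above:
pHead->cksum = 0;
pHead->cksum = taosCalcChecksum(0, (uint8_t *)pHead, sizeof(*pHead) + pHead->len);  // write side
uint32_t stored = pHead->cksum;                                                     // read side
pHead->cksum = 0;
bool ok = taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, stored);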
@ -281,7 +282,7 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd,
return TSDB_CODE_SUCCESS;
}
if (pHead->sver == 1) {
if (pHead->sver >= 1) {
if (tfRead(tfd, pHead->cont, pHead->len) < pHead->len) {
wError("vgId:%d, read to end of corrupted wal file, offset:%" PRId64, pWal->vgId, pos);
return TSDB_CODE_WAL_FILE_CORRUPTED;
@ -306,7 +307,115 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd,
return TSDB_CODE_WAL_FILE_CORRUPTED;
}
// Add SMemRowType ahead of SDataRow
static void expandSubmitBlk(SSubmitBlk *pDest, SSubmitBlk *pSrc, int32_t *lenExpand) {
// copy the header firstly
memcpy(pDest, pSrc, sizeof(SSubmitBlk));
int32_t nRows = htons(pDest->numOfRows);
int32_t dataLen = htonl(pDest->dataLen);
if ((nRows <= 0) || (dataLen <= 0)) {
return;
}
char *pDestData = pDest->data;
char *pSrcData = pSrc->data;
for (int32_t i = 0; i < nRows; ++i) {
memRowSetType(pDestData, SMEM_ROW_DATA);
memcpy(memRowDataBody(pDestData), pSrcData, dataRowLen(pSrcData));
pDestData = POINTER_SHIFT(pDestData, memRowTLen(pDestData));
pSrcData = POINTER_SHIFT(pSrcData, dataRowLen(pSrcData));
++(*lenExpand);
}
pDest->dataLen = htonl(dataLen + nRows * sizeof(uint8_t));
}
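An illustrative view of the expansion (editor's sketch): each SDataRow gains a one-byte SMEM_ROW_DATA type tag, so the block grows by exactly nRows bytes, matching the dataLen adjustment above.
// before: | SSubmitBlk | SDataRow #0 | SDataRow #1 | ...   dataLen = N
// after:  | SSubmitBlk | 0x01+row #0 | 0x01+row #1 | ...   dataLen = N + nRows
// lenExpand advances by one per row; the caller later adds the total to both
// the submit message length and the WAL head length.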
// Check SDataRow by comparing the SDataRow len and SSubmitBlk dataLen
static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) {
if ((nRows <= 0) || (dataLen <= 0)) {
return true;
}
int32_t len = 0, kvLen = 0;
for (int i = 0; i < nRows; ++i) {
len += dataRowLen(pBlkData);
if (len > dataLen) {
return false;
}
/**
* For SDataRow between version [2.1.5.0 and 2.1.6.X], it would never conflict.
* For SKVRow between version [2.1.5.0 and 2.1.6.X], it may conflict in below scenario
* - with 1st type byte 0x01 and sversion 0x0101(257), thus do further check
*/
if (dataRowLen(pBlkData) == 257) {
SMemRow memRow = pBlkData;
SKVRow kvRow = memRowKvBody(memRow);
int nCols = kvRowNCols(kvRow);
uint16_t calcTsOffset = (uint16_t)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nCols);
uint16_t realTsOffset = (kvRowColIdx(kvRow))->offset;
if (calcTsOffset == realTsOffset) {
kvLen += memRowKvTLen(memRow);
}
}
pBlkData = POINTER_SHIFT(pBlkData, dataRowLen(pBlkData));
}
if (len != dataLen) {
return false;
}
if (kvLen == dataLen) {
return false;
}
return true;
}
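Editor's note on the cross-check above: under the KV layout the first column's offset is fully determined by the row header, so it can be recomputed and compared with the stored value.
// calcTsOffset = TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nCols   (derived)
// realTsOffset = kvRowColIdx(kvRow)->offset                      (stored)
// if every row matches (kvLen == dataLen), the block is treated as KV rows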
// for WAL SMemRow/SDataRow compatibility
static int walSMemRowCheck(SWalHead *pHead) {
if ((pHead->sver < 2) && (pHead->msgType == TSDB_MSG_TYPE_SUBMIT)) {
SSubmitMsg *pMsg = (SSubmitMsg *)pHead->cont;
int32_t numOfBlocks = htonl(pMsg->numOfBlocks);
if (numOfBlocks <= 0) {
return 0;
}
int32_t nTotalRows = 0;
SSubmitBlk *pBlk = (SSubmitBlk *)pMsg->blocks;
for (int32_t i = 0; i < numOfBlocks; ++i) {
int32_t dataLen = htonl(pBlk->dataLen);
int32_t nRows = htons(pBlk->numOfRows);
nTotalRows += nRows;
if (!walIsSDataRow(pBlk->data, nRows, dataLen)) {
return 0;
}
pBlk = (SSubmitBlk *)POINTER_SHIFT(pBlk, sizeof(SSubmitBlk) + dataLen);
}
ASSERT(nTotalRows >= 0);
SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1);
if (pWalHead == NULL) {
return -1;
}
memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg));
SSubmitMsg *pDestMsg = (SSubmitMsg *)pWalHead->cont;
SSubmitBlk *pDestBlks = (SSubmitBlk *)pDestMsg->blocks;
SSubmitBlk *pSrcBlks = (SSubmitBlk *)pMsg->blocks;
int32_t lenExpand = 0;
for (int32_t i = 0; i < numOfBlocks; ++i) {
expandSubmitBlk(pDestBlks, pSrcBlks, &lenExpand);
pDestBlks = POINTER_SHIFT(pDestBlks, htonl(pDestBlks->dataLen) + sizeof(SSubmitBlk));
pSrcBlks = POINTER_SHIFT(pSrcBlks, htonl(pSrcBlks->dataLen) + sizeof(SSubmitBlk));
}
if (lenExpand > 0) {
int32_t newLen = htonl(pDestMsg->length) + lenExpand;  // total length in host order
pDestMsg->length = htonl(newLen);
pDestMsg->header.contLen = pDestMsg->length;  // keep both wire fields in network order
pWalHead->len = pWalHead->len + lenExpand;
}
memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len);
tfree(pWalHead);
}
return 0;
}
static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, char *name, int64_t fileId) {
int32_t size = WAL_MAX_SIZE;
@ -346,7 +455,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
}
#if defined(WAL_CHECKSUM_WHOLE)
if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 1) {
if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 2) {
wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name,
pHead->version, pHead->len, offset);
code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset);
@ -379,7 +488,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
continue;
}
if (pHead->sver == 1 && !walValidateChecksum(pHead)) {
if ((pHead->sver >= 1) && !walValidateChecksum(pHead)) {
wError("vgId:%d, file:%s, wal whole cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name,
pHead->version, pHead->len, offset);
code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset);
@ -432,6 +541,13 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
pWal->version = pHead->version;
//wInfo("writeFp: %ld", offset);
if (0 != walSMemRowCheck(pHead)) {
wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64,
pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset);
tfClose(tfd);
tfree(buffer);
return TAOS_SYSTEM_ERROR(errno);
}
(*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL);
}

View File

@ -101,7 +101,14 @@ function runQueryPerfTest {
python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT
echo "=========== taosdemo performance: 4 int columns, 10000 tables, 100000 records per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT
echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 100 records per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT
echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 100 records per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT
}

View File

@ -0,0 +1,67 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import random
import string
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def genColList(self):
'''
generate column list
'''
col_list = list()
for i in range(1, 18):
col_list.append(f'c{i}')
return col_list
def genIncreaseValue(self, input_value):
'''
append ', 1' before the closing parenthesis of the value string on each loop
'''
value_list = list(input_value)
value_list.insert(-1, ", 1")
return ''.join(value_list)
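# illustrative: genIncreaseValue('(now, 1)') returns '(now, 1, 1)'; feeding the
# result back in yields '(now, 1, 1, 1)', one new int column per loop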
def insertAlter(self):
'''
after each alter and insert, executing 'select * from {tbname};' would make taosd core dump
'''
tbname = ''.join(random.choice(string.ascii_lowercase) for _ in range(7))
input_value = '(now, 1)'
tdSql.execute(f'create table {tbname} (ts timestamp, c0 int);')
tdSql.execute(f'insert into {tbname} values {input_value};')
for col in self.genColList():
input_value = self.genIncreaseValue(input_value)
tdSql.execute(f'alter table {tbname} add column {col} int;')
tdSql.execute(f'insert into {tbname} values {input_value};')
tdSql.query(f'select * from {tbname};')
tdSql.checkRows(18)
def run(self):
tdSql.prepare()
self.insertAlter()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -35,6 +35,8 @@ class TDTestCase:
ret = tdSql.query('select server_status() as result')
tdSql.checkData(0, 0, 1)
time.sleep(1)
ret = tdSql.query('show dnodes')
dnodeId = tdSql.getData(0, 0);
@ -43,6 +45,7 @@ class TDTestCase:
ret = tdSql.execute('alter dnode "%s" debugFlag 135' % dnodeId)
tdLog.info('alter dnode "%s" debugFlag 135 -> ret: %d' % (dnodeId, ret))
time.sleep(1)
ret = tdSql.query('show mnodes')
tdSql.checkRows(1)
@ -63,6 +66,9 @@ class TDTestCase:
tdSql.execute('create stable st (ts timestamp, f int) tags(t int)')
tdSql.execute('create table ct1 using st tags(1)');
tdSql.execute('create table ct2 using st tags(2)');
time.sleep(1)
ret = tdSql.query('show vnodes "{}"'.format(dnodeEndpoint))
tdSql.checkRows(1)
tdSql.checkData(0, 0, 2)

View File

@ -17743,3 +17743,369 @@
fun:taosCheckGlobalCfg
fun:taos_init_imp
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/bin/python3.8
fun:PyObject_GetItem
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:PyCode_NewWithPosOnlyArgs
fun:PyCode_New
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_New
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_New
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
obj:/usr/bin/python3.8
fun:PyObject_CallFunctionObjArgs
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_GetAttr
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/np_datetime.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/ccalendar.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
...
obj:/usr/local/lib/python3.8/dist-packages/pandas/*
...
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_New
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_New
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:PyObject_GetAttr
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_New
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
}

View File

@ -212,6 +212,7 @@ python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
#query
python3 test.py -f query/distinctOneColTb.py
python3 ./test.py -f query/filter.py
python3 ./test.py -f query/filterCombo.py
python3 ./test.py -f query/queryNormal.py
@ -343,6 +344,7 @@ python3 ./test.py -f functions/function_twa.py -r 1
python3 ./test.py -f functions/function_twa_test2.py
python3 ./test.py -f functions/function_stddev_td2555.py
python3 ./test.py -f functions/showOfflineThresholdIs864000.py
python3 ./test.py -f functions/function_interp.py
python3 ./test.py -f insert/metadataUpdate.py
python3 ./test.py -f query/last_cache.py
python3 ./test.py -f query/last_row_cache.py
@ -385,6 +387,7 @@ python3 ./test.py -f query/querySession.py
python3 test.py -f alter/alter_create_exception.py
python3 ./test.py -f insert/flushwhiledrop.py
python3 ./test.py -f insert/schemalessInsert.py
python3 ./test.py -f alter/alterColMultiTimes.py
#======================p4-end===============

View File

@ -0,0 +1,46 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.rowNum = 10
self.ts = 1537146000000
def run(self):
tdSql.prepare()
tdSql.execute("create table t(ts timestamp, k int)")
tdSql.execute("insert into t values('2021-1-1 1:1:1', 12);")
tdSql.query("select interp(*) from t where ts='2021-1-1 1:1:1'")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 12)
tdSql.error("select interp(*) from t where ts >'2021-1-1 1:1:1' and ts < now interval(1s) fill(next)")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -414,7 +414,7 @@ class TDTestCase:
tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary%s', 'nchar%s', %f, %f, %d)'''
% (self.ts + i, random.randint(-2147483647, 2147483647),
random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
random.randint(-127, 127), random.randint(-99, 99), random.randint(-9999, 9999),
random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
@ -422,7 +422,7 @@ class TDTestCase:
tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, false, 'binary%s', 'nchar%s', %f, %f, %d)'''
% (self.ts + i, random.randint(-2147483647, 2147483647),
random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
random.randint(-127, 127), random.randint(-99, 99), random.randint(-9999, 9999),
random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
tdLog.info("========== regular_table ==========")

View File

@ -414,7 +414,7 @@ class TDTestCase:
tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary%s', 'nchar%s', %f, %f, %d)'''
% (self.ts + i, random.randint(-2147483647, 2147483647),
random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
random.randint(-127, 127), random.randint(-99, 99), random.randint(-9999, 9999),
random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
@ -422,7 +422,7 @@ class TDTestCase:
tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, false, 'binary%s', 'nchar%s', %f, %f, %d)'''
% (self.ts + i, random.randint(-2147483647, 2147483647),
random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
random.randint(-127, 127), random.randint(-99, 99), random.randint(-9999, 9999),
random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
tdLog.info("========== regular_table ==========")

View File

@ -414,7 +414,7 @@ class TDTestCase:
tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary%s', 'nchar%s', %f, %f, %d)'''
% (self.ts + i, random.randint(-2147483647, 2147483647),
random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
random.randint(-127, 127), random.randint(-99, 99), random.randint(-9999, 9999),
random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
@ -422,7 +422,7 @@ class TDTestCase:
tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, false, 'binary%s', 'nchar%s', %f, %f, %d)'''
% (self.ts + i, random.randint(-2147483647, 2147483647),
random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
random.randint(-127, 127), random.randint(-99, 99), random.randint(-9999, 9999),
random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
tdLog.info("========== regular_table ==========")

View File

@ -0,0 +1,265 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def tb_all_query(self, num, sql="tb_all", where=""):
tdSql.query(
f"select distinct ts2 from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct cint from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct cbigint from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct csmallint from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct ctinyint from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct cfloat from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct cdouble from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct cbool from {sql} {where}")
if num < 2:
tdSql.checkRows(num)
else:
tdSql.checkRows(2)
tdSql.query(
f"select distinct cbinary from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct cnchar from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct ts2 as a from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct cint as a from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct cbigint as a from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct csmallint as a from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct ctinyint as a from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct cfloat as a from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct cdouble as a from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct cbool as a from {sql} {where}")
if num < 2:
tdSql.checkRows(num)
else:
tdSql.checkRows(2)
tdSql.query(
f"select distinct cbinary as a from {sql} {where}")
tdSql.checkRows(num)
tdSql.query(
f"select distinct cnchar as a from {sql} {where}")
tdSql.checkRows(num)
def tb_all_query_sub(self, num, sql="tb_all", where="",colName = "c1"):
tdSql.query(
f"select distinct {colName} from {sql} {where}")
tdSql.checkRows(num)
def errorCheck(self, sql='tb_all'):
tdSql.error(f"select distinct from {sql}")
tdSql.error(f"distinct ts2 from {sql}")
tdSql.error(f"distinct c1 from")
tdSql.error(f"select distinct ts2, avg(cint) from {sql}")
## the following line causes a core dump
#tdSql.error(f"select avg(cint), distinct ts2 from {sql}")
tdSql.error(f"select distinct ts2 from {sql} group by cint")
tdSql.error(f"select distinct ts2 from {sql} interval(1s)")
tdSql.error(f"select distinct ts2 from {sql} slimit 1 soffset 1")
tdSql.error(f"select distinct ts2 from {sql} slimit 1")
## order by is not supported but is not prohibited
#tdSql.error(f"select distinct ts2 from {sql} order by desc")
#tdSql.error(f"select distinct ts2 from {sql} order by asc")
## distinct should not be used on the first ts column, but it is currently allowed
#tdSql.error(f"select distinct ts from {sql}")
## distinct should not be used in an inner query, but no error occurs
# tdSql.error(f"select distinct ts2 from (select distinct ts2 from {sql})")
def query_all_tb(self, whereNum=0,inNum=0,maxNum=0,tbName="tb_all"):
self.tb_all_query(maxNum,sql=tbName)
self.tb_all_query(num=whereNum,where="where ts2 = '2021-08-06 18:01:50.550'",sql=tbName)
self.tb_all_query(num=whereNum,where="where cint = 1073741820",sql=tbName)
self.tb_all_query(num=whereNum,where="where cbigint = 1125899906842620",sql=tbName)
self.tb_all_query(num=whereNum,where="where csmallint = 150",sql=tbName)
self.tb_all_query(num=whereNum,where="where ctinyint = 10",sql=tbName)
self.tb_all_query(num=whereNum,where="where cfloat = 0.10",sql=tbName)
self.tb_all_query(num=whereNum,where="where cdouble = 0.130",sql=tbName)
self.tb_all_query(num=whereNum,where="where cbool = true",sql=tbName)
self.tb_all_query(num=whereNum,where="where cbinary = 'adc1'",sql=tbName)
self.tb_all_query(num=whereNum,where="where cnchar = '双精度浮'",sql=tbName)
self.tb_all_query(num=inNum,where="where ts2 in ( '2021-08-06 18:01:50.550' , '2021-08-06 18:01:50.551' )",sql=tbName)
self.tb_all_query(num=inNum,where="where cint in (1073741820,1073741821)",sql=tbName)
self.tb_all_query(num=inNum,where="where cbigint in (1125899906842620,1125899906842621)",sql=tbName)
self.tb_all_query(num=inNum,where="where csmallint in (150,151)",sql=tbName)
self.tb_all_query(num=inNum,where="where ctinyint in (10,11)",sql=tbName)
self.tb_all_query(num=inNum,where="where cfloat in (0.10,0.11)",sql=tbName)
self.tb_all_query(num=inNum,where="where cdouble in (0.130,0.131)",sql=tbName)
self.tb_all_query(num=inNum,where="where cbinary in ('adc0','adc1')",sql=tbName)
self.tb_all_query(num=inNum,where="where cnchar in ('','双精度浮')",sql=tbName)
self.tb_all_query(num=inNum,where="where cbool in (0, 1)",sql=tbName)
self.tb_all_query(num=whereNum,where="where cnchar like '双__浮'",sql=tbName)
self.tb_all_query(num=maxNum,where="where cnchar like '%'",sql=tbName)
self.tb_all_query(num=maxNum,where="where cbinary like 'adc_'",sql=tbName)
self.tb_all_query(num=maxNum,where="where cbinary like 'a%'",sql=tbName)
self.tb_all_query(num=whereNum,where="limit 1",sql=tbName)
self.tb_all_query(num=whereNum,where="limit 1 offset 1",sql=tbName)
#subquery
self.tb_all_query(num=maxNum,sql=f'(select * from {tbName})')
#subquery with where in outer query
self.tb_all_query(num=whereNum,where="where ts2 = '2021-08-06 18:01:50.550'",sql=f'(select * from {tbName})')
self.tb_all_query(num=whereNum,where="where cint = 1073741820",sql=f'(select * from {tbName})')
self.tb_all_query(num=whereNum,where="where cbigint = 1125899906842620",sql=f'(select * from {tbName})')
self.tb_all_query(num=whereNum,where="where csmallint = 150",sql=f'(select * from {tbName})')
self.tb_all_query(num=whereNum,where="where ctinyint = 10",sql=f'(select * from {tbName})')
self.tb_all_query(num=whereNum,where="where cfloat = 0.10",sql=f'(select * from {tbName})')
self.tb_all_query(num=whereNum,where="where cdouble = 0.130",sql=f'(select * from {tbName})')
self.tb_all_query(num=whereNum,where="where cbool = true",sql=f'(select * from {tbName})')
self.tb_all_query(num=whereNum,where="where cbinary = 'adc1'",sql=f'(select * from {tbName})')
self.tb_all_query(num=whereNum,where="where cnchar = '双精度浮'",sql=f'(select * from {tbName})')
self.tb_all_query(num=inNum,where="where ts2 in ( '2021-08-06 18:01:50.550' , '2021-08-06 18:01:50.551' )",sql=f'(select * from {tbName})')
self.tb_all_query(num=inNum,where="where cint in (1073741820,1073741821)",sql=f'(select * from {tbName})')
self.tb_all_query(num=inNum,where="where cbigint in (1125899906842620,1125899906842621)",sql=f'(select * from {tbName})')
self.tb_all_query(num=inNum,where="where csmallint in (150,151)",sql=f'(select * from {tbName})')
self.tb_all_query(num=inNum,where="where ctinyint in (10,11)",sql=f'(select * from {tbName})')
self.tb_all_query(num=inNum,where="where cfloat in (0.10,0.11)",sql=f'(select * from {tbName})')
self.tb_all_query(num=inNum,where="where cdouble in (0.130,0.131)",sql=f'(select * from {tbName})')
self.tb_all_query(num=inNum,where="where cbinary in ('adc0','adc1')",sql=f'(select * from {tbName})')
self.tb_all_query(num=inNum,where="where cnchar in ('','双精度浮')",sql=f'(select * from {tbName})')
self.tb_all_query(num=inNum,where="where cbool in (0, 1)",sql=f'(select * from {tbName})')
self.tb_all_query(num=whereNum,where="where cnchar like '双__浮'",sql=f'(select * from {tbName})')
self.tb_all_query(num=maxNum,where="where cnchar like '%'",sql=f'(select * from {tbName})')
self.tb_all_query(num=maxNum,where="where cbinary like 'adc_'",sql=f'(select * from {tbName})')
self.tb_all_query(num=maxNum,where="where cbinary like 'a%'",sql=f'(select * from {tbName})')
self.tb_all_query(num=whereNum,where="limit 1",sql=f'(select * from {tbName})')
self.tb_all_query(num=whereNum,where="limit 1 offset 1",sql=f'(select * from {tbName})')
#table query with inner query has error
tdSql.error('select distinct ts2 from (select )')
#table query with error option
self.errorCheck()
def run(self):
tdSql.prepare()
tdLog.notice(
"==============phase1 distinct col1 with no values==========")
tdSql.execute("create stable if not exists stb_all (ts timestamp, ts2 timestamp, cint int, cbigint bigint, csmallint smallint, ctinyint tinyint,cfloat float, cdouble double, cbool bool, cbinary binary(32), cnchar nchar(32)) tags(tint int)")
tdSql.execute(
"create table if not exists tb_all using stb_all tags(1)")
self.query_all_tb()
tdLog.notice(
"==============phase1 finished ==========\n\n\n")
tdLog.notice(
"==============phase2 distinct col1 all values are null==========")
tdSql.execute(
"insert into tb_all values(now,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)")
tdSql.execute(
"insert into tb_all values(now,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)")
tdSql.execute(
"insert into tb_all values(now,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)")
# normal query
self.query_all_tb()
tdLog.notice(
"==============phase2 finished ==========\n\n\n")
tdLog.notice(
"==============phase3 distinct with distinct values ==========\n\n\n")
tdSql.execute(
"insert into tb_all values(now+1s,'2021-08-06 18:01:50.550',1073741820,1125899906842620,150,10,0.10,0.130, true,'adc0','')")
tdSql.execute(
"insert into tb_all values(now+2s,'2021-08-06 18:01:50.551',1073741821,1125899906842621,151,11,0.11,0.131,false,'adc1','双精度浮')")
tdSql.execute(
"insert into tb_all values(now+3s,'2021-08-06 18:01:50.552',1073741822,1125899906842622,152,12,0.12,0.132,NULL,'adc2','双精度')")
tdSql.execute(
"insert into tb_all values(now+4s,'2021-08-06 18:01:50.553',1073741823,1125899906842623,153,13,0.13,0.133,NULL,'adc3','双精')")
tdSql.execute(
"insert into tb_all values(now+5s,'2021-08-06 18:01:50.554',1073741824,1125899906842624,154,14,0.14,0.134,NULL,'adc4','双精度浮点型')")
# normal query
self.query_all_tb(maxNum=5,inNum=2,whereNum=1)
tdLog.notice(
"==============phase3 finishes ==========\n\n\n")
tdLog.notice(
"==============phase4 distinct with some values the same values ==========\n\n\n")
tdSql.execute(
"insert into tb_all values(now+10s,'2021-08-06 18:01:50.550',1073741820,1125899906842620,150,10,0.10,0.130, true,'adc0','')")
tdSql.execute(
"insert into tb_all values(now+20s,'2021-08-06 18:01:50.551',1073741821,1125899906842621,151,11,0.11,0.131,false,'adc1','双精度浮')")
tdSql.execute(
"insert into tb_all values(now+30s,'2021-08-06 18:01:50.552',1073741822,1125899906842622,152,12,0.12,0.132,NULL,'adc2','双精度')")
tdSql.execute(
"insert into tb_all values(now+40s,'2021-08-06 18:01:50.553',1073741823,1125899906842623,153,13,0.13,0.133,NULL,'adc3','双精')")
tdSql.execute(
"insert into tb_all values(now+50s,'2021-08-06 18:01:50.554',1073741824,1125899906842624,154,14,0.14,0.134,NULL,'adc4','双精度浮点型')")
# normal query
self.query_all_tb(maxNum=5,inNum=2,whereNum=1)
tdLog.notice(
"==============phase4 finishes ==========\n\n\n")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -152,7 +152,13 @@ class TDTestCase:
#tdSql.query("select * from tb")
#tdSql.checkRows(1)
# For jira: https://jira.taosdata.com:18080/browse/TD-6038
tdSql.query('select count(ts) from db.tb where c1 = 0 and c3 in ("nchar0")')
tdSql.checkRows(1)
tdSql.checkData(0, 0, 1)
tdSql.query('select * from db.tb where c1 = 2 and c3 in ("nchar0")')
tdSql.checkRows(0)
def stop(self):
tdSql.close()

View File

@ -287,13 +287,9 @@ class TDTestCase:
tdLog.info(len(sql))
tdSql.error(sql)
endTime = time.time()
print("total time %ds" % (endTime - startTime))
os.system("rm -rf query/long_where_query.py.sql")
#os.system("rm -rf query/long_where_query.py.sql")
def stop(self):

View File

@ -157,19 +157,6 @@ class TDTestCase:
tdSql.execute(f'create table {table_name}_sub1 using {table_name} tags ("{table_name}", "{table_name}")')
tdSql.execute(f'insert into {table_name}_sub1 values (now, "{table_name}", "{table_name}");')
# TODO sc1 leave a bug ---> TD-5918
# sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"',
# f'select * from {table_name} where bi1 like "{lp_name}"',
# f'select * from {table_name} where bi1 like "{ul_name}"',
# f'select * from {table_name} where nc1 like "{hp_name}"',
# f'select * from {table_name} where nc1 like "{lp_name}"',
# f'select * from {table_name} where nc1 like "{ul_name}"',
# f'select * from {table_name} where si1 like "{hp_name}"',
# f'select * from {table_name} where si1 like "{lp_name}"',
# f'select * from {table_name} where si1 like "{ul_name}"',
# f'select * from {table_name} where sc1 like "{hp_name}"',
# f'select * from {table_name} where sc1 like "{lp_name}"',
# f'select * from {table_name} where sc1 like "{ul_name}"']
sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"',
f'select * from {table_name} where bi1 like "{lp_name}"',
f'select * from {table_name} where bi1 like "{ul_name}"',
@ -178,7 +165,11 @@ class TDTestCase:
f'select * from {table_name} where nc1 like "{ul_name}"',
f'select * from {table_name} where si1 like "{hp_name}"',
f'select * from {table_name} where si1 like "{lp_name}"',
f'select * from {table_name} where si1 like "{ul_name}"']
f'select * from {table_name} where si1 like "{ul_name}"',
f'select * from {table_name} where sc1 like "{hp_name}"',
f'select * from {table_name} where sc1 like "{lp_name}"',
f'select * from {table_name} where sc1 like "{ul_name}"']
for sql in sql_list:
tdSql.query(sql)
if len(table_name) >= 1:
@ -211,7 +202,6 @@ class TDTestCase:
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

Some files were not shown because too many files have changed in this diff.