handle conflict

This commit is contained in:
yihaoDeng 2021-01-22 16:40:13 +00:00
commit a1b81053e2
981 changed files with 30097 additions and 18410 deletions


@ -34,6 +34,7 @@ matrix:
- psmisc
- unixodbc
- unixodbc-dev
- mono-complete
before_script:
- export TZ=Asia/Harbin
@ -59,6 +60,18 @@ matrix:
pip3 install guppy3
pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
cd ${TRAVIS_BUILD_DIR}/tests/examples/C#/taosdemo
mcs -out:taosdemo *.cs || travis_terminate $?
pkill -TERM -x taosd
fuser -k -n tcp 6030
sleep 1
${TRAVIS_BUILD_DIR}/debug/build/bin/taosd -c ${TRAVIS_BUILD_DIR}/debug/test/cfg > /dev/null &
sleep 5
mono taosdemo -Q DEFAULT -y || travis_terminate $?
pkill -KILL -x taosd
fuser -k -n tcp 6030
sleep 1
cd ${TRAVIS_BUILD_DIR}/tests
./test-all.sh smoke || travis_terminate $?
sleep 1
@ -74,6 +87,7 @@ matrix:
./valgrind-test.sh 2>&1 > mem-error-out.log
sleep 1
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
@ -146,7 +160,7 @@ matrix:
branch_pattern: coverity_scan
- os: linux
dist: xenial
dist: trusty
language: c
git:
- depth: 1
@ -156,8 +170,9 @@ matrix:
packages:
- build-essential
- cmake
- binutils-2.26
env:
- DESC="xenial build"
- DESC="trusty/gcc-4.8/binutils-2.26 build"
before_script:
- export TZ=Asia/Harbin
@ -168,7 +183,7 @@ matrix:
script:
- cmake .. > /dev/null
- make
- export PATH=/usr/lib/binutils-2.26/bin:$PATH && make
- os: linux
dist: bionic
@ -200,7 +215,7 @@ matrix:
dist: bionic
language: c
compiler: clang
env: DESC="linux/clang build"
env: DESC="arm64 linux/clang build"
git:
- depth: 1
@ -238,7 +253,7 @@ matrix:
- build-essential
- cmake
env:
- DESC="xenial build"
- DESC="arm64 xenial build"
before_script:
- export TZ=Asia/Harbin
@ -255,19 +270,20 @@ matrix:
fi
- make > /dev/null
# - os: osx
# language: c
# compiler: clang
# env: DESC="mac/clang build"
# git:
# - depth: 1
# addons:
# homebrew:
# - cmake
#
# script:
# - cd ${TRAVIS_BUILD_DIR}
# - mkdir debug
# - cd debug
# - cmake .. > /dev/null
# - make > /dev/null
- os: osx
osx_image: xcode11.4
language: c
compiler: clang
env: DESC="mac/clang build"
git:
- depth: 1
addons:
homebrew:
- cmake
script:
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
- cmake .. > /dev/null
- make > /dev/null


@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
IF (CMAKE_VERSION VERSION_LESS 3.0)
PROJECT(TDengine CXX)
SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}")
@ -13,7 +13,7 @@ ENDIF ()
SET(TD_ACCOUNT FALSE)
SET(TD_ADMIN FALSE)
SET(TD_GRANT FALSE)
SET(TD_MQTT TRUE)
SET(TD_MQTT FALSE)
SET(TD_TSDB_PLUGINS FALSE)
SET(TD_COVER FALSE)
@ -29,6 +29,11 @@ MESSAGE(STATUS "Community directory: " ${TD_COMMUNITY_DIR})
INCLUDE(cmake/input.inc)
INCLUDE(cmake/platform.inc)
IF (TD_WINDOWS OR TD_DARWIN)
SET(TD_SOMODE_STATIC TRUE)
ENDIF ()
INCLUDE(cmake/define.inc)
INCLUDE(cmake/env.inc)
INCLUDE(cmake/version.inc)

Jenkinsfile vendored

@ -41,13 +41,10 @@ def pre_test(){
cd ${WKC}
git checkout develop
git reset --hard HEAD~10
git reset --hard HEAD~10 >/dev/null
git pull
git fetch
git checkout ${CHANGE_BRANCH}
git reset --hard HEAD~10
git pull
git merge develop
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
cd ${WK}
git reset --hard HEAD~10
git checkout develop
@ -87,11 +84,14 @@ pipeline {
steps {
pre_test()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p1
date'''
timeout(time: 90, unit: 'MINUTES'){
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p1
date'''
}
}
}
stage('python_2') {
@ -104,16 +104,22 @@ pipeline {
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p2
date'''
sh '''
cd ${WKC}/tests
./test-all.sh b4fq
'''
}
}
stage('test_b1') {
agent{label 'b1'}
steps {
pre_test()
sh '''
cd ${WKC}/tests
./test-all.sh b1fq
date'''
steps {
timeout(time: 90, unit: 'MINUTES'){
pre_test()
sh '''
cd ${WKC}/tests
./test-all.sh b1fq
date'''
}
}
}
@ -133,12 +139,14 @@ pipeline {
./handle_crash_gen_val_log.sh
'''
}
sh '''
date
cd ${WKC}/tests
./test-all.sh b2fq
date
'''
timeout(time: 90, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
./test-all.sh b2fq
date
'''
}
}
}
@ -153,12 +161,14 @@ pipeline {
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
'''
}
sh '''
date
cd ${WKC}/tests
./test-all.sh b3fq
date'''
}
timeout(time: 90, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
./test-all.sh b3fq
date'''
}
}
}
@ -166,5 +176,84 @@ pipeline {
}
}
}
post {
success {
emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>提交信息:${CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "${env.CHANGE_AUTHOR_EMAIL}",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:red"> Failure </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>提交信息:${CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "${env.CHANGE_AUTHOR_EMAIL}",
from: "support@taosdata.com"
)
}
}
}


@ -33,11 +33,17 @@ To build TDengine, use [CMake](https://cmake.org/) 3.5 or higher versions in the
## Install tools
### Ubuntu & Debian:
### Ubuntu 16.04 and above & Debian:
```bash
sudo apt-get install -y gcc cmake build-essential git
```
### Ubuntu 14.04:
```bash
sudo apt-get install -y gcc cmake3 build-essential git binutils-2.26
export PATH=/usr/lib/binutils-2.26/bin:$PATH
```
To compile and package the JDBC driver source code, you should have Java JDK 8 or higher and Apache Maven 2.7 or higher installed.
To install openjdk-8:
```bash
@ -120,44 +126,57 @@ cmake .. -DCPUTYPE=aarch32 && cmake --build .
If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
Please specify "x86_amd64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
```
```cmd
mkdir debug && cd debug
"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < x86_amd64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
If you use the Visual Studio 2019, please open a command window by executing "cmd.exe".
If you use Visual Studio 2019 or 2017:
please open a command window by executing "cmd.exe".
Please specify "x64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
```
```cmd
mkdir debug && cd debug
"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
Or, you can open a command window by clicking Visual Studio 2019 menu "Tools -> Command Line -> Developer Command Prompt" or "Tools -> Command Line -> Developer PowerShell" then execute commands as follows:
```
Or, you can simply open a command window by clicking Windows Start -> "Visual Studio < 2019 | 2017 >" folder -> "x64 Native Tools Command Prompt for VS < 2019 | 2017 >" or "x86 Native Tools Command Prompt for VS < 2019 | 2017 >" depending on the architecture of your Windows, then execute commands as follows:
```cmd
mkdir debug && cd debug
cmake .. -G "NMake Makefiles"
nmake
```
### On Mac OS X platform
Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
```shell
mkdir debug && cd debug
cmake .. && cmake --build .
```
# Quick Run
# Quick Run
To quickly start a TDengine server after building, run the command below in terminal:
```cmd
```bash
./build/bin/taosd -c test/cfg
```
In another terminal, use the TDengine shell to connect to the server:
```
```bash
./build/bin/taos -c test/cfg
```
The option "-c test/cfg" specifies the system configuration file directory.
# Installing
After building successfully, TDengine can be installed by:
```cmd
```bash
make install
```
Users can find more information about directories installed on the system in the [directory and files](https://www.taosdata.com/en/documentation/administrator/#Directory-and-Files) section. It should be noted that installing from source code does not configure service management for TDengine.


@ -128,6 +128,8 @@ IF (TD_DARWIN_64)
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
SET(RELEASE_FLAGS "-Og")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
ENDIF ()
IF (TD_WINDOWS)
@ -139,6 +141,9 @@ IF (TD_WINDOWS)
SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE)
IF (NOT TD_GODLL)
SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd2220 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()
SET(DEBUG_FLAGS "/Zi /W3 /GL")
SET(RELEASE_FLAGS "/W0 /O3 /GL")
ENDIF ()


@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.15-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.18-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")

cmake/version.inc Normal file → Executable file

@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.12.0")
SET(TD_VER_NUMBER "2.0.14.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
@ -16,13 +16,25 @@ ENDIF ()
IF (DEFINED GITINFO)
SET(TD_VER_GIT ${GITINFO})
ELSE ()
SET(TD_VER_GIT "community")
execute_process(
COMMAND git log -1 --format=%H
WORKING_DIRECTORY ${TD_COMMUNITY_DIR}
OUTPUT_VARIABLE GIT_COMMITID
)
string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
SET(TD_VER_GIT ${GIT_COMMITID})
ENDIF ()
IF (DEFINED GITINFOI)
SET(TD_VER_GIT_INTERNAL ${GITINFOI})
ELSE ()
SET(TD_VER_GIT_INTERNAL "internal")
execute_process(
COMMAND git log -1 --format=%H
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE GIT_COMMITID
)
string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
SET(TD_VER_GIT_INTERNAL ${GIT_COMMITID})
ENDIF ()
IF (DEFINED VERDATE)

deps/CMakeLists.txt vendored

@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
ADD_SUBDIRECTORY(zlib-1.2.11)
@ -12,4 +12,8 @@ ADD_SUBDIRECTORY(MsvcLibX)
IF (TD_LINUX AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()
ENDIF ()
IF (TD_DARWIN AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()


@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
# MQTT-C build options
option(MQTT_C_OpenSSL_SUPPORT "Build MQTT-C with OpenSSL support?" OFF)


@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
IF (TD_WINDOWS)


@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
IF (TD_WINDOWS)


@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
IF (TD_WINDOWS)


@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
IF (TD_WINDOWS)


@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
IF (TD_WINDOWS)


@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
IF (TD_WINDOWS)


@ -5,6 +5,10 @@
#include <stdint.h>
#include "gzguts.h"
#ifndef O_BINARY
#define O_BINARY 0
#endif
#if defined(_WIN32) && !defined(__BORLANDC__) && !defined(__MINGW32__)
# define LSEEK _lseeki64
#else
@ -240,9 +244,9 @@ local gzFile gz_open(path, fd, mode)
/* open the file with the appropriate flags (or just use fd) */
state->fd = fd > -1 ? fd : (
#ifdef WIDECHAR
fd == -2 ? _wopen(path, oflag, 0666) :
fd == -2 ? _wopen(path, oflag | O_BINARY, 0666) :
#endif
open((const char *)path, oflag, 0666));
open((const char *)path, oflag | O_BINARY, 0666));
if (state->fd == -1) {
free(state->path);
free(state);

(Binary image file added, 42 KiB, not shown.)


@ -179,7 +179,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
## **支持平台列表**
## 支持平台列表
### TDengine服务器支持的平台列表


@ -4,6 +4,8 @@
TDengine采用关系型数据模型需要建库、建表。因此对于一个具体的应用场景需要考虑库的设计超级表和普通表的设计。本节不讨论细致的语法规则只介绍概念。
关于数据建模请参考<a href="https://www.taosdata.com/blog/2020/11/11/1945.html">视频教程</a>
## 创建库
不同类型的数据采集点往往具有不同的数据特征包括数据采集频率的高低数据保留时间的长短副本的数目数据块的大小是否允许更新数据等等。为让各种场景下TDengine都能最大效率的工作TDengine建议将不同数据特征的表创建在不同的库里因为每个库可以配置不同的存储策略。创建一个库时除SQL标准的选项外应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如
@ -60,4 +62,3 @@ TDengine支持多列模型只要物理量是一个数据采集点同时采集
TDengine建议尽可能采用多列模型因为插入效率以及存储效率更高。但对于有些场景一个采集点的采集量的种类经常变化这个时候如果采用多列模型就需要频繁修改超级表的结构定义让应用变的复杂这个时候采用单列模型会显得简单。
关于数据建模请参考<a href="https://www.taosdata.com/blog/2020/11/11/1945.html">视频教程</a>


@ -9,7 +9,7 @@
TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, Go, Python 连接器发送 SQL 语句,用户可以通过 TDengine 提供的命令行Command Line Interface, CLI工具 TAOS Shell 手动执行 SQL 即席查询Ad-Hoc Query。TDengine 支持如下查询功能:
- 单列、多列数据查询
- 标签和数值的多种过滤条件:\>, \<, =, \<>, like 等
- 标签和数值的多种过滤条件:>, <, =, <>, like 等
- 聚合结果的分组Group by、排序Order by、约束输出Limit/Offset
- 数值列及聚合结果的四则运算
- 时间戳对齐的连接查询Join Query: 隐式连接)操作


@ -58,24 +58,26 @@ TDengine缺省的时间戳是毫秒精度但通过修改配置参数enableMic
- **创建数据库**
```mysql
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1];
```
说明:
1) KEEP是该数据库的数据保留多长天数缺省是3650天(10年),数据库会自动删除超过时限的数据;
2) UPDATE 标志数据库支持更新相同时间戳数据;
3) 数据库名最大长度为33
4) 一条SQL 语句的最大长度为65480个字符
5) 数据库还有更多与存储相关的配置参数,请参见系统管理。
```mysql
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1];
```
说明:
1) KEEP是该数据库的数据保留多长天数缺省是3650天(10年),数据库会自动删除超过时限的数据;
2) UPDATE 标志数据库支持更新相同时间戳数据;
3) 数据库名最大长度为33
4) 一条SQL 语句的最大长度为65480个字符
5) 数据库还有更多与存储相关的配置参数,请参见系统管理。
- **显示系统当前参数**
```mysql
SHOW VARIABLES;
```
```mysql
SHOW VARIABLES;
```
- **使用数据库**
@ -119,6 +121,7 @@ TDengine缺省的时间戳是毫秒精度但通过修改配置参数enableMic
**Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。
- **显示系统所有数据库**
```mysql
SHOW DATABASES;
```
@ -130,12 +133,37 @@ TDengine缺省的时间戳是毫秒精度但通过修改配置参数enableMic
CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]);
```
说明:
1) 表的第一个字段必须是TIMESTAMP并且系统自动将其设为主键
2) 表名最大长度为193
2) 表名最大长度为192
3) 表的每行长度不能超过16k个字符;
4) 子表名只能由字母、数字和下划线组成,且不能以数字开头
5) 使用数据类型binary或nchar需指定其最长的字节数如binary(20)表示20字节
- **以超级表为模板创建数据表**
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name TAGS (tag_value1, ...);
```
以指定的超级表为模板,指定 tags 的值来创建数据表。
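例如,以本文档后面示例中出现的超级表 meters 为模板创建子表 d1001示意性写法标签取值仅作演示
```mysql
CREATE TABLE IF NOT EXISTS d1001 USING meters TAGS ('Beijing.Chaoyang', 2);
```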
- **批量创建数据表**
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
```
以更快的速度批量创建大量数据表。(服务器端 2.0.14 及以上版本)
说明:
1批量建表方式要求数据表必须以超级表为模板。
2在不超出 SQL 语句长度限制的前提下,单条语句中的建表数量建议控制在 1000~3000 之间,将会获得比较理想的建表速度。
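下面是一个批量建表的示意性写法(其中表名 d1002 及其标签值为演示假设,并非文档原有示例):
```mysql
-- d1002 及其标签值仅为演示假设
CREATE TABLE IF NOT EXISTS d1001 USING meters TAGS ('Beijing.Chaoyang', 2) d1002 USING meters TAGS ('Beijing.Haidian', 3);
```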
- **删除数据表**
```mysql
@ -148,7 +176,11 @@ TDengine缺省的时间戳是毫秒精度但通过修改配置参数enableMic
SHOW TABLES [LIKE tb_name_wildcar];
```
显示当前数据库下的所有数据表信息。说明可在like中使用通配符进行名称的匹配。 通配符匹配1% (百分号)匹配0到任意个字符2_下划线匹配一个字符。
显示当前数据库下的所有数据表信息。
说明可在like中使用通配符进行名称的匹配这一通配符字符串最长不能超过24字节。
通配符匹配1% (百分号)匹配0到任意个字符2\_下划线匹配一个字符。
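例如,下面的语句(匹配模式 'd100_' 仅为示意)会列出名称以 d100 开头、后接任意单个字符的数据表:
```mysql
-- 匹配模式仅为演示假设
SHOW TABLES LIKE 'd100_';
```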
- **在线修改显示字符宽度**
@ -168,8 +200,10 @@ TDengine缺省的时间戳是毫秒精度但通过修改配置参数enableMic
ALTER TABLE tb_name ADD COLUMN field_name data_type;
```
说明:
1) 列的最大个数为1024最小个数为2
2) 列名最大长度为65
2) 列名最大长度为64
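例如,为后文 SQL 示例中出现的表 tb1 增加一列(新列名 col4 为演示假设):
```mysql
-- col4 仅为演示假设的列名
ALTER TABLE tb1 ADD COLUMN col4 BINARY(20);
```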
- **表删除列**
@ -187,10 +221,14 @@ TDengine缺省的时间戳是毫秒精度但通过修改配置参数enableMic
创建STable, 与创建表的SQL语法相似但需指定TAGS字段的名称和类型
说明:
1) TAGS 列的数据类型不能是timestamp类型
2) TAGS 列名不能与其他列名相同;
3) TAGS 列名不能为预留关键字;
4) TAGS 最多允许128个可以0个总长度不超过16k个字符
4) TAGS 最多允许128个至少1个总长度不超过16k个字符。
- **删除超级表**
@ -263,33 +301,33 @@ TDengine缺省的时间戳是毫秒精度但通过修改配置参数enableMic
- **插入一条记录,数据对应到指定的列**
```mysql
INSERT INTO tb_name (field1_name, ...) VALUES(field1_value, ...)
INSERT INTO tb_name (field1_name, ...) VALUES (field1_value, ...)
```
向表tb_name中插入一条记录数据对应到指定的列。SQL语句中没有出现的列数据库将自动填充为NULL。主键时间戳不能为NULL。
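例如,向后文示例中的子表 d1001 仅写入时间戳和电流两列(数值仅为示意),其余未出现的列将被自动填充为 NULL
```mysql
-- 数值仅为演示假设
INSERT INTO d1001 (ts, current) VALUES (NOW, 10.2);
```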
- **插入多条记录**
```mysql
INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...)...;
INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
向表tb_name中插入多条记录
- **按指定的列插入多条记录**
```mysql
INSERT INTO tb_name (field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)
INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
向表tb_name中按指定的列插入多条记录
- **向多个表插入多条记录**
```mysql
INSERT INTO tb1_name VALUES (field1_value1, ...)(field1_value2, ...)...
tb2_name VALUES (field1_value1, ...)(field1_value2, ...)...;
INSERT INTO tb1_name VALUES (field1_value1, ...) (field1_value2, ...) ...
tb2_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
同时向表tb1_name和tb2_name中分别插入多条记录
- **同时向多个表按列插入多条记录**
```mysql
INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...)
tb2_name (tb2_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...);
INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...
tb2_name (tb2_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
同时向表tb1_name和tb2_name中按列分别插入多条记录
@ -316,25 +354,27 @@ SELECT select_expr [, select_expr ...]
[LIMIT limit_val [, OFFSET offset_val]]
[>> export_file]
```
说明:针对 insert 类型的 SQL 语句我们采用的流式解析策略在发现后面的错误之前前面正确的部分SQL仍会执行。下面的sql中insert语句是无效的但是d1001仍会被创建。
```mysql
taos> create table meters(ts timestamp, current float, voltage int, phase float) tags(location binary(30), groupId int);
taos> CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);
Query OK, 0 row(s) affected (0.008245s)
taos> show stables;
taos> SHOW STABLES;
name | created_time | columns | tags | tables |
============================================================================================
meters | 2020-08-06 17:50:27.831 | 4 | 2 | 0 |
Query OK, 1 row(s) in set (0.001029s)
taos> show tables;
taos> SHOW TABLES;
Query OK, 0 row(s) in set (0.000946s)
taos> insert into d1001 using meters tags('Beijing.Chaoyang', 2);
taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2);
DB error: invalid SQL: keyword VALUES or FILE required
taos> show tables;
taos> SHOW TABLES;
table_name | created_time | columns | stable_name |
======================================================================================================
d1001 | 2020-08-06 17:52:02.097 | 4 | meters |
@ -397,27 +437,41 @@ Query OK, 1 row(s) in set (0.020443s)
在使用SQL函数来进行查询过程中部分SQL函数支持通配符操作。其中的区别在于
```count(\*)```函数只返回一列。```first```、```last```、```last_row```函数则是返回全部列。
```
taos> select count(*) from d1001;
```mysql
taos> SELECT COUNT(*) FROM d1001;
count(*) |
========================
3 |
Query OK, 1 row(s) in set (0.001035s)
```
```
taos> select first(*) from d1001;
```mysql
taos> SELECT FIRST(*) FROM d1001;
first(ts) | first(current) | first(voltage) | first(phase) |
=========================================================================================
2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 |
Query OK, 1 row(s) in set (0.000849s)
```
##### 标签列
从 2.0.14 版本开始,支持在普通表的查询中指定 _标签列_,且标签列的值会与普通列的数据一起返回。
```mysql
taos> SELECT location, groupid, current FROM d1001 LIMIT 2;
location | groupid | current |
======================================================================
Beijing.Chaoyang | 2 | 10.30000 |
Beijing.Chaoyang | 2 | 12.60000 |
Query OK, 2 row(s) in set (0.003112s)
```
注意:普通表的通配符 * 中并不包含 _标签列_
#### 结果集列名
```SELECT```子句中,如果不指定返回结果集合的列名,结果集列名称默认使用```SELECT```子句中的表达式名称作为列名称。此外,用户可使用```AS```来重命名返回结果集合中列的名称。例如:
```
taos> select ts, ts as primary_key_ts from d1001;
```mysql
taos> SELECT ts, ts AS primary_key_ts FROM d1001;
ts | primary_key_ts |
====================================================
2018-10-03 14:38:05.000 | 2018-10-03 14:38:05.000 |
@ -434,53 +488,53 @@ Query OK, 3 row(s) in set (0.001191s)
FROM关键字后面可以是若干个表超级表列表也可以是子查询的结果。
如果没有指定用户的当前数据库,可以在表名称之前使用数据库的名称来指定表所属的数据库。例如:```power.d1001``` 方式来跨库使用表。
```
```mysql
SELECT * FROM power.d1001;
------------------------------
use power;
USE power;
SELECT * FROM d1001;
```
#### 特殊功能
部分特殊的查询功能可以不使用FROM子句执行。获取当前所在的数据库 database()
```
taos> SELECT database();
```mysql
taos> SELECT DATABASE();
database() |
=================================
power |
Query OK, 1 row(s) in set (0.000079s)
```
如果登录的时候没有指定默认数据库,且没有使用```use```命令切换数据则返回NULL。
```
taos> SELECT database();
```mysql
taos> SELECT DATABASE();
database() |
=================================
NULL |
Query OK, 1 row(s) in set (0.000184s)
```
获取服务器和客户端版本号:
```
taos> SELECT client_version();
```mysql
taos> SELECT CLIENT_VERSION();
client_version() |
===================
2.0.0.0 |
Query OK, 1 row(s) in set (0.000070s)
taos> SELECT server_version();
taos> SELECT SERVER_VERSION();
server_version() |
===================
2.0.0.0 |
Query OK, 1 row(s) in set (0.000077s)
```
服务器状态检测语句。如果服务器正常,返回一个数字(例如 1。如果服务器异常返回error code。该SQL语法能兼容连接池对于TDengine状态的检查及第三方工具对于数据库服务器状态的检查。并可以避免出现使用了错误的心跳检测SQL语句导致的连接池连接丢失的问题。
```
taos> SELECT server_status();
```mysql
taos> SELECT SERVER_STATUS();
server_status() |
==================
1 |
Query OK, 1 row(s) in set (0.000074s)
taos> SELECT server_status() as status;
taos> SELECT SERVER_STATUS() AS status;
status |
==============
1 |
@ -493,15 +547,15 @@ Query OK, 1 row(s) in set (0.000081s)
#### 小技巧
获取一个超级表所有的子表名及相关的标签信息:
```
```mysql
SELECT TBNAME, location FROM meters;
```
统计超级表下辖子表数量:
```
```mysql
SELECT COUNT(TBNAME) FROM meters;
```
以上两个查询均只支持在Where条件子句中添加针对标签TAGS的过滤条件。例如
```
```mysql
taos> SELECT TBNAME, location FROM meters;
tbname | location |
==================================================================
@ -511,7 +565,7 @@ taos> SELECT TBNAME, location FROM meters;
d1001 | Beijing.Chaoyang |
Query OK, 4 row(s) in set (0.000881s)
taos> SELECT count(tbname) FROM meters WHERE groupId > 2;
taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
count(tbname) |
========================
2 |
@ -537,15 +591,15 @@ Query OK, 1 row(s) in set (0.001091s)
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
1. 同时进行多个字段的范围过滤需要使用关键词AND进行连接不同的查询条件暂不支持OR连接的不同列之间的查询过滤条件。
2. 针对某一字段的过滤只支持单一时间区间过滤条件。但是针对其他的(普通)列或标签列,可以使用``` OR``` 条件进行组合条件的查询过滤。例如:((value > 20 and value < 30) OR (value < 12))
1. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
2. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用``` OR``` 关键字进行组合条件的查询过滤。例如:((value > 20 and value < 30) OR (value < 12))
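以超级表 meters 的普通列 current 为例(数值区间仅为示意),这样的组合过滤条件可以写作:
```mysql
-- 数值区间仅为演示假设
SELECT * FROM meters WHERE (current > 20 AND current < 30) OR (current < 12);
```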
### SQL 示例
- 对于下面的例子表tb1用以下语句创建
```mysql
CREATE TABLE tb1 (ts timestamp, col1 int, col2 float, col3 binary(50));
CREATE TABLE tb1 (ts TIMESTAMP, col1 INT, col2 FLOAT, col3 BINARY(50));
```
- 查询tb1刚过去的一个小时的所有记录
@ -563,7 +617,7 @@ Query OK, 1 row(s) in set (0.001091s)
- 查询col1与col2的和并取名complex, 时间大于2018-06-01 08:00:00.000, col2大于1.2结果输出仅仅10条记录从第5条开始
```mysql
SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' and col2 > 1.2 LIMIT 10 OFFSET 5;
SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5;
```
- 查询过去10分钟的记录col2的值大于3.14,并且将结果输出到文件 `/home/testoutpu.csv`.
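该查询可按前文的 `>> export_file` 输出语法写作(示意写法):
```mysql
SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv;
```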
@ -583,20 +637,30 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
```
功能说明:统计表/超级表中记录行数或某列的非空值个数。
返回结果数据类型长整型INT64。
应用字段:应用全部字段。
适用于:表、超级表。
说明1可以使用星号*来替代具体的字段,使用星号(*)返回全部记录数量。2针对同一表的不包含NULL值字段查询结果均相同。3如果统计对象是具体的列则返回该列中非NULL值的记录数量。
说明:
1可以使用星号*来替代具体的字段,使用星号(*)返回全部记录数量。
2针对同一表的不包含NULL值字段查询结果均相同。
3如果统计对象是具体的列则返回该列中非NULL值的记录数量。
示例:
```mysql
taos> SELECT COUNT(*), COUNT(VOLTAGE) FROM meters;
taos> SELECT COUNT(*), COUNT(voltage) FROM meters;
count(*) | count(voltage) |
================================================
9 | 9 |
Query OK, 1 row(s) in set (0.004475s)
taos> SELECT COUNT(*), COUNT(VOLTAGE) FROM d1001;
taos> SELECT COUNT(*), COUNT(voltage) FROM d1001;
count(*) | count(voltage) |
================================================
3 | 3 |
@ -608,8 +672,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT AVG(field_name) FROM tb_name [WHERE clause];
```
功能说明:统计表/超级表中某列的平均值。
返回结果数据类型双精度浮点数Double。
应用字段不能应用在timestamp、binary、nchar、bool字段。
适用于:表、超级表。
示例:
@ -620,7 +687,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
11.466666751 | 220.444444444 | 0.293333333 |
Query OK, 1 row(s) in set (0.004135s)
taos> SELECT AVG(current), AVG(voltage), AVG(phase) from d1001;
taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM d1001;
avg(current) | avg(voltage) | avg(phase) |
====================================================================================
11.733333588 | 219.333333333 | 0.316666673 |
@ -631,30 +698,35 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
```mysql
SELECT TWA(field_name) FROM tb_name WHERE clause;
```
功能说明:时间加权平均函数。统计表/超级表中某列在一段时间内的时间加权平均。
功能说明:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。
返回结果数据类型双精度浮点数Double。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
说明:时间加权平均(time weighted average, TWA查询需要指定查询时间段的 _开始时间__结束时间_
适用于:表、超级表
适用于:表。
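例如,统计子表 d1001 在指定时间段内电流的时间加权平均值(起止时间仅为示意,实际使用时需按需给出):
```mysql
-- 起止时间仅为演示假设
SELECT TWA(current) FROM d1001 WHERE ts >= '2018-10-03 14:38:05.000' AND ts <= '2018-10-03 14:38:16.800';
```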
- **SUM**
```mysql
SELECT SUM(field_name) FROM tb_name [WHERE clause];
```
功能说明:统计表/超级表中某列的和。
返回结果数据类型双精度浮点数Double和长整型INT64。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
适用于:表、超级表。
示例:
```mysql
taos> SELECT SUM(current), SUM(voltage), SUM(phase) from meters;
taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM meters;
sum(current) | sum(voltage) | sum(phase) |
================================================================================
103.200000763 | 1984 | 2.640000001 |
Query OK, 1 row(s) in set (0.001702s)
taos> SELECT SUM(current), SUM(voltage), SUM(phase) from d1001;
taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM d1001;
sum(current) | sum(voltage) | sum(phase) |
================================================================================
35.200000763 | 658 | 0.950000018 |
@ -666,9 +738,12 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
```
功能说明:统计表中某列的均方差。
返回结果数据类型双精度浮点数Double。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
适用于:表。
适用于:表。(从 2.0.15 版本开始,本函数也支持超级表)
示例:
```mysql
@ -684,9 +759,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
```
功能说明统计表中某列的值是主键时间戳的拟合直线方程。start_val是自变量初始值step_val是自变量的步长值。
返回结果数据类型:字符串表达式(斜率, 截距)。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
说明:自变量是时间戳,因变量是该列的值。
适用于:表。
示例:
@ -705,7 +784,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
```
功能说明:统计表/超级表中某列的值最小值。
返回结果数据类型:同应用的字段。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
示例:
@ -728,7 +809,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列的值最大值。
返回结果数据类型:同应用的字段。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
示例:
@ -751,9 +834,18 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列的值最先写入的非NULL值。
返回结果数据类型:同应用的字段。
应用字段:所有字段。
说明1如果要返回各个列的首个时间戳最小非NULL值可以使用FIRST(*)2) 如果结果集中的某列全部为NULL值则该列的返回结果也是NULL3) 如果结果集中所有列全部为NULL值则不返回结果。
说明:
1如果要返回各个列的首个时间戳最小非NULL值可以使用FIRST(\*)
2) 如果结果集中的某列全部为NULL值则该列的返回结果也是NULL
3) 如果结果集中所有列全部为NULL值则不返回结果。
示例:
```mysql
@ -775,9 +867,16 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列的值最后写入的非NULL值。
返回结果数据类型:同应用的字段。
应用字段:所有字段。
说明1如果要返回各个列的最后时间戳最大一个非NULL值可以使用LAST(*)2如果结果集中的某列全部为NULL值则该列的返回结果也是NULL如果结果集中所有列全部为NULL值则不返回结果。
说明:
1如果要返回各个列的最后时间戳最大一个非NULL值可以使用LAST(\*)
2如果结果集中的某列全部为NULL值则该列的返回结果也是NULL如果结果集中所有列全部为NULL值则不返回结果。
示例:
```mysql
@ -799,9 +898,16 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明: 统计表/超级表中某列的值最大*k*个非NULL值。若多于k个列值并列最大则返回时间戳小的。
返回结果数据类型:同应用的字段。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
说明1*k*值取值范围1≤*k*≤1002系统同时返回该记录关联的时间戳列。
说明:
1*k*值取值范围1≤*k*≤100
2系统同时返回该记录关联的时间戳列。
示例:
```mysql
@ -826,9 +932,16 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列的值最小*k*个非NULL值。若多于k个列值并列最小则返回时间戳小的。
返回结果数据类型:同应用的字段。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
说明1*k*值取值范围1≤*k*≤1002系统同时返回该记录关联的时间戳列。
说明:
1*k*值取值范围1≤*k*≤100
2系统同时返回该记录关联的时间戳列。
示例:
```mysql
@ -852,8 +965,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
```
功能说明:统计表中某列的值百分比分位数。
返回结果数据类型: 双精度浮点数Double。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
说明:*P*值取值范围0≤*P*≤100为0的时候等同于MIN为100的时候等同于MAX。
示例:
@ -870,9 +986,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明统计表中某列的值百分比分位数与PERCENTILE函数相似但是返回近似结果。
返回结果数据类型: 双精度浮点数Double。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
说明:*P*值取值范围0≤*P*≤100为0的时候等同于MIN为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数
```mysql
taos> SELECT APERCENTILE(current, 20) FROM d1001;
apercentile(current, 20) |
@ -886,8 +1006,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
```
功能说明:返回表(超级表)的最后一条记录。
返回结果数据类型:同应用的字段。
应用字段:所有字段。
说明与last函数不同last_row不支持时间范围限制强制返回最后一条记录。
示例:
@ -911,8 +1034,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT DIFF(field_name) FROM tb_name [WHERE clause];
```
功能说明:统计表中某列的值与前一行对应值的差。
返回结果数据类型: 同应用字段。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
说明:输出结果行数是范围内总行数减一,第一行没有结果输出。
示例:
@ -930,8 +1056,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列的最大值和最小值之差。
返回结果数据类型: 双精度浮点数。
应用字段不能应用在binary、nchar、bool类型字段。
说明可用于TIMESTAMP字段此时表示记录的时间覆盖范围。
示例:
@ -955,9 +1084,16 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列或多列间的值加、减、乘、除、取余计算结果。
返回结果数据类型:双精度浮点数。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
说明1支持两列或多列之间进行计算可使用括号控制计算优先级;2NULL字段不参与计算如果参与计算的某行中包含NULL该行的计算结果为NULL。
说明:
1支持两列或多列之间进行计算可使用括号控制计算优先级
2NULL字段不参与计算如果参与计算的某行中包含NULL该行的计算结果为NULL。
```mysql
taos> SELECT current + voltage * phase FROM d1001;
@ -989,11 +1125,8 @@ SELECT function_list FROM stb_name
- WHERE语句可以指定查询的起止时间和其他过滤条件
- FILL语句指定某一时间区间数据缺失的情况下的填充模式。填充模式包括以下几种
1. 不进行填充NONE(默认填充模式)。
2. VALUE填充固定值填充此时需要指定填充的数值。例如fill(value, 1.23)。
3. NULL填充使用NULL填充数据。例如fill(null)。
4. PREV填充使用前一个非NULL值填充数据。例如fill(prev)。
说明:
@ -1004,15 +1137,15 @@ SELECT function_list FROM stb_name
**示例:** 智能电表的建表语句如下:
```mysql
CREATE TABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
针对智能电表采集的数据以10分钟为一个阶段计算过去24小时的电流数据的平均值、最大值、电流的中位数、以及随着时间变化的电流走势拟合直线。如果没有计算值用前一个非NULL值填充。
使用的查询语句如下:
```mysql
SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters
WHERE TS>=NOW-1d
SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters
WHERE ts>=NOW-1d
INTERVAL(10m)
FILL(PREV);
```
@ -1021,7 +1154,7 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER
- 数据库名最大长度为32
- 表名最大长度为192每行数据最大长度16k个字符
- 列名最大长度为64最多允许1024列最少需要2列第一列必须是时间戳
- 标签最多允许128个可以0个标签总长度不超过16k个字符
- 标签最多允许128个至少1个标签总长度不超过16k个字符
- SQL语句最大长度65480个字符但可通过系统配置参数maxSQLLength修改最长可配置为1M
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制


@ -105,7 +105,7 @@ taosd -C
- queryBufferSize: 为所有并发查询占用保留的内存大小。计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。单位为字节。
- ratioOfQueryCores: 设置查询线程的最大数量。最小值0 表示只有1个查询线程最大值2表示最大建立2倍CPU核数的查询线程。默认为1表示最大和CPU核数相等的查询线程。该值可以为小数即0.5表示最大建立CPU核数一半的查询线程。
**注意:**对于端口TDengine会使用从serverPort起13个连续的TCP和UDP端口号请务必在防火墙打开。因此如果是缺省配置需要打开从60306042共13个端口而且必须TCP和UDP都打开。
**注意:**对于端口TDengine会使用从serverPort起13个连续的TCP和UDP端口号请务必在防火墙打开。因此如果是缺省配置需要打开从6030到6042共13个端口而且必须TCP和UDP都打开。
不同应用场景的数据往往具有不同的数据特征比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率TDengine提供如下存储相关的系统配置参数
@ -124,10 +124,10 @@ taosd -C
对于一个应用场景可能有多种数据特征的数据并存最佳的设计是将具有相同数据特征的表放在一个库里这样一个应用有多个库而每个库可以配置不同的存储参数从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数如果指定该参数就将覆盖对应的系统配置参数。举例有下述SQL
```
create database demo days 10 cache 32 blocks 8 replica 3;
create database demo days 10 cache 32 blocks 8 replica 3 update 1;
```
该SQL创建了一个库demo, 每个数据文件存储10天数据内存块为32兆字节每个VNODE占用8个内存块副本数为3而其他参数与系统配置完全一致。
该SQL创建了一个库demo, 每个数据文件存储10天数据内存块为32兆字节每个VNODE占用8个内存块副本数为3允许更新,而其他参数与系统配置完全一致。
TDengine集群中加入一个新的dnode时涉及集群相关的一些参数必须与已有集群的配置相同否则不能成功加入到集群中。会进行校验的参数如下
@ -255,7 +255,7 @@ taos -C 或 taos --dump-config
CREATE USER <user_name> PASS <'password'>;
```
创建用户,并指定用户名和密码,密码需要用单引号引起来,单引号为英文半角
创建用户,并指定用户名和密码,密码需要用单引号引起来单引号为英文半角
```sql
DROP USER <user_name>;
@ -267,13 +267,15 @@ DROP USER <user_name>;
ALTER USER <user_name> PASS <'password'>;
```
修改用户密码, 为避免被转换为小写,密码需要用单引号引用,单引号为英文半角
修改用户密码,为避免被转换为小写,密码需要用单引号引用,单引号为英文半角
```sql
ALTER USER <user_name> PRIVILEGE <super|write|read>;
ALTER USER <user_name> PRIVILEGE <write|read>;
```
修改用户权限为super/write/read不需要添加单引号
修改用户权限为write 或 read不需要添加单引号
说明:系统内共有 super/write/read 三种权限级别,但目前不允许通过 alter 指令把 super 权限赋予用户。
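例如(其中用户名 user1 仅为演示假设):
```sql
ALTER USER user1 PRIVILEGE read;
```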
```mysql
SHOW USERS;
@ -432,11 +434,12 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
- 数据库名:不能包含“.”以及特殊字符不能超过32个字符
- 表名:不能包含“.”以及特殊字符与所属数据库名一起不能超过192个字符
- 表的列名不能包含特殊字符不能超过64个字符
- 数据库名、表名、列名,都不能以数字开头
- 表的列数不能超过1024列
- 记录的最大长度包括时间戳8 byte不能超过16KB
- 单条SQL语句默认最大字符串长度65480 byte
- 数据库副本数不能超过3
- 用户名不能超过20个byte
- 用户名不能超过23个byte
- 用户密码不能超过15个byte
- 标签(Tags)数量不能超过128个
- 标签的总长度不能超过16Kbyte


@ -197,7 +197,7 @@ select * from meters where ts > now - 1d and current > 10;
且`restart`是 **false****0**),用户程序就不会读到之前已经读取的数据了。
`taos_subscribe`的最后一个参数是以毫秒为单位的轮询周期。
在同步模式下,如前后两次调用`taos_consume`的时间间隔小于此时间,
在同步模式下,如前后两次调用`taos_consume`的时间间隔小于此时间,
`taos_consume`会阻塞,直到间隔超过此时间。
异步模式下,这个时间是两次调用回调函数的最小时间间隔。
@ -414,7 +414,7 @@ TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接
TDengine分配固定大小的内存空间作为缓存空间缓存空间可根据应用的需求和硬件资源配置。通过适当的设置缓存空间TDengine可以提供极高性能的写入和查询的支持。TDengine中每个虚拟节点virtual node创建时分配独立的缓存池。每个虚拟节点管理自己的缓存池不同虚拟节点间不共享缓存池。每个虚拟节点内部所属的全部表共享该虚拟节点的缓存池。
TDengine将内存池按块划分进行管理数据在内存块里按照列式存储。一个vnode的内存池是在vnode创建时按块分配好,而且每个内存块按照先进先出的原则进行管理。一张表所需要的内存块是从vnode的内存池中进行分配的块的大小由系统配置参数cache决定。每张表最大内存块的数目由配置参数tblocks决定每张表平均的内存块的个数由配置参数ablocks决定。因此对于一个vnode, 总的内存大小为: `cache * ablocks * tables`。内存块参数cache不宜过小一个cache block需要能存储至少几十条以上记录,才会有效率。参数ablocks最小为2保证每张表平均至少能分配两个内存块。
TDengine将内存池按块划分进行管理数据在内存块里是以行row的形式存储。一个vnode的内存池是在vnode创建时按块分配好而且每个内存块按照先进先出的原则进行管理。在创建内存池时块的大小由系统配置参数cache决定每个vnode中内存块的数目则由配置参数blocks决定。因此对于一个vnode总的内存大小为`cache * blocks`。一个cache block需要保证每张表能存储至少几十条以上记录,才会有效率。
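举例说明(参数取值仅为示意):若建库时指定 cache 为 16MB、blocks 为 6则按上述 `cache * blocks` 的计算方式,该库的每个 vnode 约占用 16MB × 6 = 96MB 的写入缓存:
```mysql
-- 参数取值仅为演示假设
CREATE DATABASE demo CACHE 16 BLOCKS 6;
```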
你可以通过函数last_row快速获取一张表或一张超级表的最后一条记录这样很便于在大屏显示各设备的实时状态或采集值。例如


@ -248,7 +248,7 @@ Master Vnode遵循下面的写入流程
1. Master vnode收到应用的数据插入请求验证OK进入下一步
2. 如果系统配置参数walLevel大于0vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2而且fsync设置为0TDengine还将WAL数据立即落盘以保证即使宕机也能从数据库日志文件中恢复数据避免数据的丢失
3. 如果有多个副本vnode将把数据包转发给同一虚拟节点组内slave vnodes, 该转发包带有数据的版本号(version)
4. 写入内存,并记录加入到skip list
4. 写入内存,并记录加入到skip list
5. Master vnode返回确认信息给应用表示写入成功。
6. 如果第234步中任何一步失败将直接返回错误给应用。
@ -372,7 +372,7 @@ select count(*) from d1001 interval(1h);
select count(*) from d1001 interval(1h) fill(prev);
```
针对d1001设备采集数据统计每小时记录数如果某一个小时不存在数据返回之前一个小时的统计数据。TDengine提供前向插值(prev)、线性插值(linear)、NULL值填充(NULL)、特定值填充(value)。
针对d1001设备采集数据统计每小时记录数如果某一个小时不存在数据返回之前一个小时的统计数据。TDengine提供前向插值(prev)、线性插值(linear)、NULL值填充(NULL)、特定值填充(value)。
### 多表聚合查询
TDengine对每个数据采集点单独建表但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作TDengine引入超级表STable的概念。超级表用来代表一特定类型的数据采集点它是包含多张表的表集合集合里每张表的模式schema完全一致但每张表都带有自己的静态标签标签可以多个可以随时增加、删除和修改。 应用可通过指定标签的过滤条件对一个STable下的全部或部分表进行聚合或统计操作这样大大简化应用的开发。其具体流程如下图所示


@ -6,6 +6,8 @@
TDengine的集群管理极其简单除添加和删除节点需要人工干预之外其他全部是自动完成最大程度的降低了运维的工作量。本章对集群管理的操作做详细的描述。
关于集群搭建请参考<a href="https://www.taosdata.com/blog/2020/11/11/1961.html">视频教程</a>
## 准备工作
**第零步**规划集群所有物理节点的FQDN将规划好的FQDN分别添加到每个物理节点的/etc/hostname修改每个物理节点的/etc/hosts将所有集群物理节点的IP与FQDN的对应添加好。【如部署了DNS请联系网络管理员在DNS上做好相关配置】
@ -216,15 +218,14 @@ SHOW MNODES;
如果一个数据节点离线TDengine集群将自动检测到。有如下两种情况
- 该数据节点离线超过一定时间taos.cfg里配置参数offlineThreshold控制时长),系统将自动把该数据节点删除,产生系统报警信息,触发负载均衡流程。如果该被删除的数据节点重上线时,它将无法加入集群,需要系统管理员重新将其添加进集群才会开始工作。
- 该数据节点离线超过一定时间taos.cfg里配置参数offlineThreshold控制时长),系统将自动把该数据节点删除,产生系统报警信息,触发负载均衡流程。如果该被删除的数据节点重上线时,它将无法加入集群,需要系统管理员重新将其添加进集群才会开始工作。
- 离线后在offlineThreshold的时长内重新上线系统将自动启动数据恢复流程等数据完全恢复后该节点将开始正常工作。
**注意:**如果一个虚拟节点组包括mnode组里所归属的每个数据节点都处于离线或unsynced状态必须等该虚拟节点组里的所有数据节点都上线、都能交换状态信息后才能选出Master该虚拟节点组才能对外提供服务。比如整个集群有3个数据节点副本数为3如果3个数据节点都宕机然后2个数据节点重启是无法工作的只有等3个数据节点都重启成功才能对外服务。
## Arbitrator的使用
如果副本数为偶数当一个vnode group里一半vnode不工作时是无法从中选出master的。同理一半mnode不工作时是无法选出mnode的master的因为存在“split brain”问题。为解决这个问题TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作但只简单的负责网络连接不处理任何数据插入或访问。只要包含arbitrator在内超过半数的vnode或mnode工作那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形如果一个节点A离线但另外一个节点B正常而且能连接到arbitrator, 那么节点B就能正常工作。
如果副本数为偶数当一个vnode group里一半vnode不工作时是无法从中选出master的。同理一半mnode不工作时是无法选出mnode的master的因为存在“split brain”问题。为解决这个问题TDengine引入了Arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作但只简单的负责网络连接不处理任何数据插入或访问。只要包含Arbitrator在内超过半数的vnode或mnode工作那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形如果一个节点A离线但另外一个节点B正常而且能连接到Arbitrator那么节点B就能正常工作。
TDengine提供一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。请点击[安装包下载](https://www.taosdata.com/cn/all-downloads/)在TDengine Arbitrator Linux一节中选择适合的版本下载并安装。该程序对系统资源几乎没有要求只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号缺省是6042。配置每个taosd实例时可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了当副本数为偶数数系统将自动连接配置的arbitrator。如果副本数为奇数即使配置了arbitrator, 系统也不会去建立连接。
TDengine提供一个执行程序,名为 tarbitrator找任何一台Linux服务器运行它即可。请点击[安装包下载](https://www.taosdata.com/cn/all-downloads/)在TDengine Arbitrator Linux一节中选择适合的版本下载并安装。该程序对系统资源几乎没有要求只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号缺省是6042。配置每个taosd实例时可以在配置文件taos.cfg里将参数arbitrator设置为Arbitrator的End Point。如果该参数配置了当副本数为偶数时系统将自动连接配置的Arbitrator。如果副本数为奇数即使配置了Arbitrator系统也不会去建立连接。
关于集群搭建请参考<a href="https://www.taosdata.com/blog/2020/11/11/1961.html">视频教程</a>


@ -1,142 +0,0 @@
#集群安装、管理
多个taosd的运行实例可以组成一个集群以保证TDengine的高可靠运行并提供水平扩展能力。要了解TDengine 2.0的集群管理需要对集群的基本概念有所了解请看TDengine 2.0整体架构一章。
集群的每个节点是由End Point来唯一标识的End Point是由FQDN(Fully Qualified Domain Name)外加Port组成比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname可通过Linux命令“hostname"获取。端口是这个节点对外服务的端口号缺省是6030但可以通过taos.cfg里配置参数serverPort进行修改。
TDengine的集群管理极其简单除添加和删除节点需要人工干预之外其他全部是自动完成最大程度的降低了运维的工作量。本章对集群管理的操作做详细的描述。
##安装、创建第一个节点
集群是由一个一个dnode组成的是从一个dnode的创建开始的。创建第一个节点很简单就按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法进行安装、启动即可。
启动后请执行taos, 启动taos shell从shell里执行命令"show dnodes;",如下所示:
```
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
taos> show dnodes;
id | end_point | vnodes | cores | status | role | create_time |
=====================================================================================
1 | h1.taos.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 |
Query OK, 1 row(s) in set (0.006385s)
taos>
```
上述命令里可以看到这个刚启动的这个节点的End Point是h1.taos.com:6030
## 安装、创建后续节点
将新的节点添加到现有集群,具体有以下几步:
1. 按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法进行安装但不要启动taosd
2. 如果是使用涛思数据的官方安装包进行安装在安装结束时会询问集群的End Port, 输入第一个节点的End Point即可。如果是源码安装请编辑配置文件taos.cfg(缺省是在/etc/taos/目录),增加一行:
```
firstEp h1.taos.com:6030
```
请注意将示例的“h1.taos.com:6030" 替换为你自己第一个节点的End Point
3. 按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法启动taosd
4. 在Linux shell里执行命令"hostname"找出本机的FQDN, 假设为h2.taos.com。如果无法找到可以查看taosd日志文件taosdlog.0里前面几行日志(一般在/var/log/taos目录)fqdn以及port都会打印出来。
5. 在第一个节点使用CLI程序taos, 登录进TDengine系统, 使用命令:
```
CREATE DNODE "h2.taos.com:6030";
```
将新节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**否则出错。请注意将示例的“h2.taos.com:6030" 替换为你自己第一个节点的End Point
6. 使用命令
```
SHOW DNODES;
```
查看新节点是否被成功加入。
按照上述步骤可以源源不断的将新的节点加入到集群。
**提示:**
- firstEp, secondEp这两个参数仅仅在该节点第一次加入集群时有作用加入集群后该节点会保存最新的mnode的End Point列表不再依赖这两个参数。
- 两个没有配置first, second参数的dnode启动后会独立运行起来。这个时候无法将其中一个节点加入到另外一个节点形成集群。**无法将两个独立的集群合并成为新的集群**。
##节点管理
###添加节点
执行CLI程序taos, 使用root账号登录进系统, 执行:
```
CREATE DNODE "fqdn:port";
```
将新节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**否则出错。一个节点对外服务的fqdn和port可以通过配置文件taos.cfg进行配置缺省是自动获取。
###删除节点
执行CLI程序taos, 使用root账号登录进TDengine系统执行
```
DROP DNODE "fqdn:port";
```
其中fqdn是被删除的节点的FQDNport是其对外服务器的端口号
###查看节点
执行CLI程序taos,使用root账号登录进TDengine系统执行
```
SHOW DNODES;
```
它将列出集群中所有的dnode,每个dnode的fqdn:port, 状态(ready, offline等vnode数目还未使用的vnode数目等信息。在添加或删除一个节点后可以使用该命令查看。
###查看虚拟节点组
为充分利用多核技术并提供scalability数据需要分片处理。因此TDengine会将一个DB的数据切分成多份存放在多个vnode里。这些vnode可能分布在多个dnode里这样就实现了水平扩展。一个vnode仅仅属于一个DB但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况自动进行分配的无需任何人工干预。
执行CLI程序taos,使用root账号登录进TDengine系统执行
```
SHOW VGROUPS;
```
##高可用性
TDengine通过多副本的机制来提供系统的高可用性。副本数是与DB关联的一个集群里可以有多个DB根据运营的需求每个DB可以配置不同的副本数。创建数据库时通过参数replica 指定副本数缺省为1。如果副本数为1系统的可靠性无法保证只要数据所在的节点宕机就将无法提供服务。集群的节点数必须大于等于副本数否则创建表时将返回错误“more dnodes are needed"。比如下面的命令将创建副本数为3的数据库demo
```
CREATE DATABASE demo replica 3;
```
一个DB里的数据会被切片分到多个vnode groupvnode group里的vnode数目就是DB的副本数同一个vnode group里各vnode的数据是完全一致的。为保证高可用性vnode group里的vnode一定要分布在不同的dnode里实际部署时需要在不同的物理机上只要一个vgroup里超过半数的vnode处于工作状态这个vgroup就能正常的对外服务。
一个dnode里可能有多个DB的数据因此一个dnode离线时可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作那么该vnode group就无法对外服务无法插入或读取数据这样会影响到它所属的DB的一部分表的d读写操作。
因为vnode的引入无法简单的给出结论“集群中过半dnode工作集群就应该工作”。但是对于简单的情形很好下结论。比如副本数为3只有三个dnode那如果仅有一个节点不工作整个集群还是可以正常工作的但如果有两个节点不工作那整个集群就无法正常工作了。
##Mnode的高可用
TDengine集群是由mnode (taosd的一个模块逻辑节点) 负责管理的为保证mnode的高可用可以配置多个mnode副本副本数由系统配置参数numOfMnodes决定有效范围为1-3。为保证元数据的强一致性mnode副本之间是通过同步的方式进行数据复制的。
一个集群有多个dnode, 但一个dnode至多运行一个mnode实例。多个dnode情况下哪个dnode可以作为mnode呢这是完全由系统根据整个系统资源情况自动指定的。用户可通过CLI程序taos在TDengine的console里执行如下命令
```
SHOW MNODES;
```
来查看mnode列表该列表将列出mnode所处的dnode的End Point和角色(master, slave, unsynced 或offline)。
当集群中第一个节点启动时该节点一定会运行一个mnode实例否则该dnode无法正常工作因为一个系统是必须有至少一个mnode的。如果numOfMnodes配置为2启动第二个dnode时该dnode也将运行一个mnode实例。
为保证mnode服务的高可用性numOfMnodes必须设置为2或更大。因为mnode保存的元数据必须是强一致的如果numOfMnodes大于2复制参数quorum自动设为2也就是说至少要保证有两个副本写入数据成功才通知客户端应用写入成功。
##负载均衡
有三种情况,将触发负载均衡,而且都无需人工干预。
- 当一个新节点添加进集群时,系统将自动触发负载均衡,一些节点上的数据将被自动转移到新节点上,无需任何人工干预。
- 当一个节点从集群中移除时,系统将自动把该节点上的数据转移到其他节点,无需任何人工干预。
- 如果一个节点过热数据量过大系统将自动进行负载均衡将该节点的一些vnode自动挪到其他节点。
当上述三种情况发生时,系统将启动一各个节点的负载计算,从而决定如何挪动。
##节点离线处理
如果一个节点离线TDengine集群将自动检测到。有如下两种情况
- 改节点离线超过一定时间taos.cfg里配置参数offlineThreshold控制时长),系统将自动把该节点删除,产生系统报警信息,触发负载均衡流程。如果该被删除的节点重现上线时,它将无法加入集群,需要系统管理员重新将其添加进集群才会开始工作。
- 离线后在offlineThreshold的时长内重新上线系统将自动启动数据恢复流程等数据完全恢复后该节点将开始正常工作。
##Arbitrator的使用
如果副本数为偶数当一个vnode group里一半vnode不工作时是无法从中选出master的。同理一半mnode不工作时是无法选出mnode的master的因为存在“split brain”问题。为解决这个问题TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作但只简单的负责网络连接不处理任何数据插入或访问。只要包含arbitrator在内超过半数的vnode或mnode工作那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形如果一个节点A离线但另外一个节点B正常而且能连接到arbitrator, 那么节点B就能正常工作。
TDengine安装包里带有一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。该程序对系统资源几乎没有要求只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号缺省是6030。配置每个taosd实例时可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了当副本数为偶数数系统将自动连接配置的arbitrator。


@ -178,11 +178,11 @@ C/C++的API类似于MySQL的C API。应用程序使用时需要包含TDengine
获取客户端版本信息。
- `TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, int port)`
- `TAOS *taos_connect(const char *host, const char *user, const char *pass, const char *db, int port)`
创建数据库连接,初始化连接上下文。其中需要用户提供的参数包含:
- ipTDengine管理主节点的IP地址
- hostTDengine管理主节点的FQDN
- user用户名
- pass密码
- db数据库名字如果用户没有提供也可以正常连接用户可以通过该连接创建新的数据库如果用户提供了数据库名字则说明该数据库用户已经创建好缺省使用该数据库
@ -252,7 +252,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时需要包含TDengine
- `void taos_free_result(TAOS_RES *res)`
释放查询结果集以及相关的资源。查询完成后务必调用该API释放资源否则可能导致应用内存泄露。
释放查询结果集以及相关的资源。查询完成后务必调用该API释放资源否则可能导致应用内存泄露。但也需注意,释放资源后,如果再调用`taos_consume`等获取查询结果的函数将导致应用Crash。
- `char *taos_errstr(TAOS_RES *res)`
@ -262,11 +262,11 @@ C/C++的API类似于MySQL的C API。应用程序使用时需要包含TDengine
获取最近一次API调用失败的原因返回值为错误代码。
**注意**对于每个数据库应用2.0及以上版本 TDengine 推荐只建立一个连接。同时在应用中将该连接 (TAOS*) 结构体传递到不同的线程共享使用。基于 TAOS 结构体发出的查询、写入等操作具有多线程安全性。C 语言的连接器可以按照需求动态建立面向数据库的新连接(该过程对用户不可见),同时建议只有在程序最后退出的时候才调用 taos_close 关闭连接。
**注意**2.0及以上版本 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池。而不推荐在应用中将该连接 (TAOS\*) 结构体传递到不同的线程共享使用。基于 TAOS 结构体发出的查询、写入等操作具有多线程安全性,但 “USE statement” 等状态量有可能在线程之间相互干扰此外,C 语言的连接器可以按照需求动态建立面向数据库的新连接(该过程对用户不可见),同时建议只有在程序最后退出的时候才调用 taos_close 关闭连接。
### 异步查询API
同步API之外TDengine还提供性能更高的异步调用API处理数据插入、查询操作。在软硬件环境相同的情况下异步API处理数据插入的速度比同步API快2~4倍。异步API采用非阻塞式的调用方式在系统真正完成某个具体数据库操作前立即返回。调用的线程可以去处理其他工作从而可以提升整个应用的性能。异步API在网络延迟严重的情况下优点尤为突出。
同步API之外TDengine还提供性能更高的异步调用API处理数据插入、查询操作。在软硬件环境相同的情况下异步API处理数据插入的速度比同步API快2\~4倍。异步API采用非阻塞式的调用方式在系统真正完成某个具体数据库操作前立即返回。调用的线程可以去处理其他工作从而可以提升整个应用的性能。异步API在网络延迟严重的情况下优点尤为突出。
异步API都需要应用提供相应的回调函数回调函数参数设置如下前两个参数都是一致的第三个参数依不同的API而定。第一个参数param是应用调用异步API时提供给系统的用于回调时应用能够找回具体操作的上下文依具体实现而定。第二个参数是SQL操作的结果集如果为空比如insert操作表示没有记录返回如果不为空比如select操作表示有记录返回。
@ -288,13 +288,6 @@ C/C++的API类似于MySQL的C API。应用程序使用时需要包含TDengine
* res`taos_query_a`回调时返回的结果集
* fp回调函数。其参数`param`是用户可定义的传递给回调函数的参数结构体;`numOfRows`是获取到的数据的行数(不是整个查询结果集的函数)。 在回调函数中,应用可以通过调用`taos_fetch_row`前向迭代获取批量记录中每一行记录。读完一块内的所有记录后,应用需要在回调函数中继续调用`taos_fetch_rows_a`获取下一批记录进行处理直到返回的记录数numOfRows为零结果返回完成或记录数为负值查询出错
- `void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);`
异步获取一条记录。其中:
* res`taos_query_a`回调时返回的结果集
* fp回调函数。其参数`param`是应用提供的一个用于回调的参数。回调时,第三个参数`row`指向一行记录。不同于`taos_fetch_rows_a`,应用无需调用`taos_fetch_row`来获取一行数据更加简单但数据提取性能不及批量获取的API。
TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线程同时打开多张表并可以同时对每张打开的表进行查询或者插入操作。需要指出的是**客户端应用必须确保对同一张表的操作完全串行化**,即对同一个表的插入或查询操作未完成时(未返回时),不能够执行第二个插入或查询操作。
### 参数绑定API
@ -425,7 +418,7 @@ cd C:\TDengine\connector\python\windows
python -m pip install python3\
```
*如果机器上没有pip命令用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。
* 如果机器上没有pip命令用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。
对于windows 客户端安装TDengine windows 客户端后将C:\TDengine\driver\taos.dll拷贝到C:\windows\system32目录下即可。
### 使用
@ -442,7 +435,7 @@ import taos
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
```
*<em>host</em> 是TDengine 服务端所有IP, <em>config</em> 为客户端配置文件所在目录
* <em>host</em> 是 TDengine 服务端所在的 IP, <em>config</em> 为客户端配置文件所在目录
* 写入数据
```python
@ -510,17 +503,17 @@ conn.close()
用户可通过python的帮助信息直接查看模块的使用信息或者参考tests/examples/python中的示例程序。以下为部分常用类和方法
- _TDengineConnection_类
- _TDengineConnection_
参考python中help(taos.TDengineConnection)。
这个类对应客户端和TDengine建立的一个连接。在客户端多线程的场景下这个连接实例可以是每个线程申请一个,也可以多线程共享一个连接。
这个类对应客户端和TDengine建立的一个连接。在客户端多线程的场景下推荐每个线程申请一个独立的连接实例,而不建议多线程共享一个连接。
- _TDengineCursor_类
- _TDengineCursor_
参考python中help(taos.TDengineCursor)。
这个类对应客户端进行的写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能夸线程共享使用,否则会导致返回结果出现错误。
- _connect_方法
- _connect_ 方法
用于生成taos.TDengineConnection的实例。
@ -800,7 +793,7 @@ go env -w GOPROXY=https://goproxy.io,direct
- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
该API用来打开DB返回一个类型为*DB的对象一般情况下DRIVER_NAME设置为字符串`taosSql`, dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`如果客户想要用多个goroutine并发访问TDengine, 那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine
该API用来打开DB返回一个类型为\*DB的对象一般情况下DRIVER_NAME设置为字符串`taosSql`, dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`如果客户想要用多个goroutine并发访问TDengine, 那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine
**注意** 该API成功创建的时候并没有做权限等检查只有在真正执行Query或者Exec的时候才能真正的去创建连接并同时检查user/password/host/port是不是合法。 另外由于整个驱动程序大部分实现都下沉到taosSql所依赖的libtaos中。所以sql.Open本身特别轻量。
@ -822,7 +815,7 @@ go env -w GOPROXY=https://goproxy.io,direct
- `func (s *Stmt) Query(args ...interface{}) (*Rows, error)`
sql.Open内置的方法Query executes a prepared query statement with the given arguments and returns the query results as a *Rows.
sql.Open内置的方法Query executes a prepared query statement with the given arguments and returns the query results as a \*Rows.
- `func (s *Stmt) Close() error`
@ -894,7 +887,7 @@ Node-example-raw.js
验证方法:
1. 新建安装验证目录,例如:~/tdengine-test拷贝github上nodejsChecker.js源程序。下载地址https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js
1. 新建安装验证目录,例如:\~/tdengine-test拷贝github上nodejsChecker.js源程序。下载地址https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js
2. 在命令中执行以下命令:


@ -1,62 +1,62 @@
# Java Connector
Java连接器支持的系统有
| **CPU类型** | x64(64bit) | | | ARM64 | ARM32 |
| ------------ | ------------ | -------- | -------- | -------- | -------- |
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** |
TDengine 提供了遵循 JDBC 标准3.0API 规范的 `taos-jdbcdriver` 实现,可在 maven 的中央仓库 [Sonatype Repository][1] 搜索下载。
Java连接器的使用请参见<a href=https://www.taosdata.com/blog/2020/11/11/1955.html>视频教程</a>
`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTfultaos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful。 JDBC-JNI 通过调用客户端 libtaos.so或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。
TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。
![tdengine-connector](../assets/tdengine-jdbc-connector.png)
由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
上图显示了 3 种 Java 应用使用连接器访问 TDengine 的方式:
* libtaos.so
在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so该目录包含在 Linux 自动扫描路径上,无需单独指定。
* JDBC-JNIJava 应用在物理节点1pnode1上使用 JDBC-JNI 的 API ,直接调用客户端 APIlibtaos.so 或 taos.dll将写入和查询请求发送到位于物理节点2pnode2上的 taosd 实例。
* RESTful应用将 SQL 发送给位于物理节点2pnode2上的 RESTful 连接器,再调用客户端 APIlibtaos.so
* JDBC-RESTfulJava 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求发送给物理节点2的 RESTful 连接器。
* taos.dll
在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点:
> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14]Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点:
* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。
* 由于不支持删除和修改,所以也不支持事务操作。
* TDengine 目前不支持针对单条数据记录的删除操作。
* 目前不支持事务操作。
* 目前不支持表间的 union 操作。
* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询TSDBJDBCDriver 则会自动关闭上一个 ResultSet。
* 目前不支持嵌套查询nested query
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询taos-jdbcdriver 会自动关闭上一个 ResultSet。
## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| --- | --- | --- |
| 2.0.12 及以上 | 2.0.8.0 及以上 | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
## JDBC-JNI和JDBC-RESTful的对比
## TDengine DataType 和 Java DataType
<table >
<tr align="center"><th>对比项</th><th>JDBC-JNI</th><th>JDBC-RESTful</th></tr>
<tr align="center">
<td>支持的操作系统</td>
<td>linux、windows</td>
<td>全平台</td>
</tr>
<tr align="center">
<td>是否需要安装 client</td>
<td>需要</td>
<td>不需要</td>
</tr>
<tr align="center">
<td>server 升级后是否需要升级 client</td>
<td>需要</td>
<td>不需要</td>
</tr>
<tr align="center">
<td>写入性能</td>
<td>JDBC-RESTful 是 JDBC-JNI 的 50%~90% </td>
</tr>
<tr align="center">
<td>查询性能</td>
<td colspan="2">JDBC-RESTful 与 JDBC-JNI 没有差别</td>
</tr>
</table>
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
| TDengine DataType | Java DataType |
| --- | --- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT |java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## 如何获取 TAOS-JDBCDriver
## 如何获取 taos-jdbcdriver
### maven 仓库
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
@ -67,30 +67,63 @@ maven 项目中使用如下 pom.xml 配置即可:
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.4</version>
<version>2.0.18</version>
</dependency>
```
### 源码编译打包
下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。
下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。
## 使用说明
## JDBC的使用说明
### 获取连接
#### 通过JdbcUrl获取连接
#### 指定URL获取连接
通过指定URL获取连接如下所示
```java
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
以上示例,使用 **JDBC-RESTful** 的 driver建立了到 hostname 为 taosdemo.com端口为 6041数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。
使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要:
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
3. 使用 6041 作为连接端口。
如果希望获得更好的写入和查询性能Java 应用可以使用 **JDBC-JNI** 的driver如下所示
通过指定的jdbcUrl获取连接如下所示
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
以上示例建立了到hostname为taosdemo.com端口为6030TDengine的默认端口数据库名为test的连接。这个url中指定用户名user为root密码password为taosdata。
以上示例,使用了 JDBC-JNI 的 driver建立了到 hostname 为 taosdemo.com端口为 6030TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。
**注意**:使用 JDBC-JNI 的 drivertaos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
* libtaos.so
在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so该目录包含在 Linux 自动扫描路径上,无需单独指定。
* taos.dll
在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
> 在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14]Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
JDBC-JNI 的使用请参见<a href=https://www.taosdata.com/blog/2020/11/11/1955.html>视频教程</a>
TDengine 的 JDBC URL 规范格式为:
`jdbc:TAOS://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
url中的配置参数如下
* user登录 TDengine 用户名,默认值 root。
* password用户登录密码默认值 taosdata。
@ -99,13 +132,17 @@ url中的配置参数如下
* locale客户端语言环境默认值系统当前 locale。
* timezone客户端使用的时区默认值为系统当前时区。
#### 使用JdbcUrl和Properties获取连接
除了通过指定的jdbcUrl获取连接还可以使用Properties指定建立连接时的参数如下所示
#### 指定URL和Properties获取连接
除了通过指定的 URL 获取连接,还可以使用 Properties 指定建立连接时的参数,如下所示:
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
// Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
// String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
@ -114,9 +151,10 @@ public Connection getConn() throws Exception{
return conn;
}
```
以上示例建立一个到hostname为taosdemo.com端口为6030数据库名为test的连接。这个连接在url中指定了用户名(user)为root密码password为taosdata并在connProps中指定了使用的字符集、语言环境、时区等信息。
properties中的配置参数如下
以上示例,建立一个到 hostname 为 taosdemo.com端口为 6030数据库名为 test 的连接。注释为使用 JDBC-RESTful 时的方法。这个连接在 url 中指定了用户名(user)为 root密码password为 taosdata并在 connProps 中指定了使用的字符集、语言环境、时区等信息。
properties 中的配置参数如下:
* TSDBDriver.PROPERTY_KEY_USER登录 TDengine 用户名,默认值 root。
* TSDBDriver.PROPERTY_KEY_PASSWORD用户登录密码默认值 taosdata。
* TSDBDriver.PROPERTY_KEY_CONFIG_DIR客户端配置文件目录路径Linux OS 上默认值 /etc/taos Windows OS 上默认值 C:/TDengine/cfg。
@ -124,10 +162,14 @@ properties中的配置参数如下
* TSDBDriver.PROPERTY_KEY_LOCALE: client locale, default value is the current system locale.
* TSDBDriver.PROPERTY_KEY_TIME_ZONE: timezone used by the client, default value is the current system timezone.
#### Establishing a connection with the client configuration file
When using JDBC to connect to a TDengine cluster, you can use the client configuration file and specify the cluster's firstEp and secondEp parameters in it.
When using JDBC-JNI to connect to a TDengine cluster, you can use the client configuration file and specify the cluster's firstEp and secondEp parameters in it.
As shown below:
1. Do not specify the hostname and port in Java
1. Do not specify the hostname and port in the Java application
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
@ -140,7 +182,7 @@ public Connection getConn() throws Exception{
return conn;
}
```
2. Specify firstEp and secondEp in the configuration file
```
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
@ -155,17 +197,19 @@ secondEp cluster_node2:6030
# locale en_US.UTF-8
```
In the example above, JDBC uses the client configuration file to establish a connection to the database named test on host cluster_node1, port 6030. When the firstEp node of the cluster becomes unavailable, JDBC tries to connect to the cluster using secondEp.
In TDengine, as long as one of firstEp and secondEp is valid, the connection to the cluster can be established.
> Note: the configuration file here is the configuration file on the machine running the application that calls the JDBC Connector, default /etc/taos/taos.cfg on Linux OS and C://TDengine/cfg/taos.cfg on Windows OS.
#### Priority of configuration parameters
When a connection is obtained in any of the three ways above and a configuration parameter is duplicated among the url, Properties, and client configuration file, the `priority from high to low` is:
1. JDBC URL parameters, as described above, specified in the parameters of the JDBC URL.
2. Properties connProps
3. client configuration file taos.cfg
For example: if password is specified as taosdata in the url and as taosdemo in Properties, JDBC uses the password from the url to establish the connection.
> For more detailed configuration, see [Client Configuration][13].
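A minimal sketch of the priority rule, reusing the host name and credentials from the earlier examples; the value in the URL wins over the one in Properties, so the connection is made with the password taosdata:
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdemo"); // lower priority than the URL parameter
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);   // connects with password taosdata
```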
@ -183,6 +227,7 @@ stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
> Note: if `use db` is not used to select a database, subsequent table operations need to prefix the table name with the database name, e.g. db.tb.
### Inserting data
@ -193,6 +238,7 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now
System.out.println("insert " + affectedRows + " rows.");
```
> now is an internal function whose default is the current time of the server.
> `now + 1s` means the current server time plus 1 second; the number is followed by a time unit: a (millisecond), s (second), m (minute), h (hour), d (day), w (week), n (month), y (year).
@ -214,6 +260,7 @@ while(resultSet.next()){
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
> Queries work the same way as with a relational database; when accessing returned fields by index, the index starts from 1. Accessing fields by column name is recommended.
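A short sketch of the recommendation above, assuming `conn` is a Connection obtained as in the earlier examples and `tb` is the table created above:
```java
Statement stmt = conn.createStatement();
ResultSet resultSet = stmt.executeQuery("select ts, temperature, humidity from tb");
while (resultSet.next()) {
    Timestamp ts = resultSet.getTimestamp("ts");     // by column name (recommended)
    int temperature = resultSet.getInt(2);           // by index, starting from 1
    float humidity = resultSet.getFloat("humidity");
    System.out.printf("%s, %d, %f%n", ts, temperature, humidity);
}
resultSet.close();
stmt.close();
```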
### Subscriptions
@ -248,7 +295,7 @@ while(true) {
}
```
The `consume` method returns a result set containing all new data since the previous `consume`. Be sure to call `consume` at a reasonable frequency as needed (such as `Thread.sleep(1000)` in the example); otherwise it puts unnecessary load on the server.
#### Closing a subscription
@ -265,8 +312,11 @@ resultSet.close();
stmt.close();
conn.close();
```
> `Note: be sure to close the connection`; otherwise connection leaks will occur.
## Using with connection pools
**HikariCP**
@ -284,16 +334,17 @@ conn.close();
```java
public static void main(String[] args) throws SQLException {
HikariConfig config = new HikariConfig();
// jdbc properties
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
config.setUsername("root");
config.setPassword("taosdata");
config.setMinimumIdle(3); //minimum number of idle connection
// connection pool configurations
config.setMinimumIdle(10); //minimum number of idle connection
config.setMaximumPoolSize(10); //maximum number of connection in the pool
config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
config.setIdleTimeout(60000); // max idle time for recycle idle connection
config.setConnectionTestQuery("describe log.dn"); //validation query
config.setValidationTimeout(3000); //validation query timeout
config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool
config.setMaxLifetime(0); // maximum life time for each connection
config.setIdleTimeout(0); // max idle time for recycle idle connection
config.setConnectionTestQuery("select server_status()"); //validation query
HikariDataSource ds = new HikariDataSource(config); //create datasource
@ -306,6 +357,7 @@ conn.close();
connection.close(); // put back to connection pool
}
```
> After obtaining a connection via HikariDataSource.getConnection(), call close() when you are done with it; this does not actually close the connection, it just returns it to the pool.
> For more questions about using HikariCP, see the [official documentation][5].
@ -324,38 +376,29 @@ conn.close();
* Usage example:
```java
public static void main(String[] args) throws Exception {
Properties properties = new Properties();
properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
properties.put("username","root");
properties.put("password","taosdata");
properties.put("maxActive","10"); //maximum number of connection in the pool
properties.put("initialSize","3");//initial number of connection
properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
properties.put("minIdle","3");//minimum number of connection in the pool
properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
properties.put("validationQuery","describe log.dn"); //validation query
properties.put("testWhileIdle","true"); // test connection while idle
properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
//create druid datasource
DataSource ds = DruidDataSourceFactory.createDataSource(properties);
Connection connection = ds.getConnection(); // get connection
DruidDataSource dataSource = new DruidDataSource();
// jdbc properties
dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
dataSource.setUrl(url);
dataSource.setUsername("root");
dataSource.setPassword("taosdata");
// pool configurations
dataSource.setInitialSize(10);
dataSource.setMinIdle(10);
dataSource.setMaxActive(10);
dataSource.setMaxWait(30000);
dataSource.setValidationQuery("select server_status()");
Connection connection = dataSource.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // put back to connection pool
}
```
> For more questions about using Druid, see the [official documentation][6].
**Notes**
@ -370,18 +413,51 @@ server_status()|
Query OK, 1 row(s) in set (0.000141s)
```
## Using with frameworks
* For using taos-jdbcdriver with Spring JdbcTemplate, see [SpringJdbcTemplate][11]; a brief sketch also follows below.
* For using it with Spring Boot + MyBatis, see [springbootdemo][12].
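The following is only a sketch of the Spring JdbcTemplate wiring, not the official demo; the URL is a placeholder for your environment, the `log.dn` table is the one already used as a validation query earlier in this document, and `DriverManagerDataSource`/`JdbcTemplate` are standard Spring JDBC classes:
```java
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.datasource.DriverManagerDataSource;

public class JdbcTemplateDemo {
    public static void main(String[] args) {
        DriverManagerDataSource dataSource = new DriverManagerDataSource();
        dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
        dataSource.setUrl("jdbc:TAOS://127.0.0.1:6030/log");
        dataSource.setUsername("root");
        dataSource.setPassword("taosdata");

        JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
        // query through the template; insert/update work the same way via jdbcTemplate.update(...)
        Integer rows = jdbcTemplate.queryForObject("select count(*) from log.dn", Integer.class);
        System.out.println("rows: " + rows);
    }
}
```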
## TAOS-JDBCDriver versions and the supported TDengine and JDK versions
| taos-jdbcdriver version | TDengine version  | JDK version |
| ----------------------- | ----------------- | ----------- |
| 2.0.12 and above        | 2.0.8.0 and above | 1.8.x       |
| 2.0.4 - 2.0.11          | 2.0.0.0 - 2.0.7.x | 1.8.x       |
| 1.0.3                   | 1.6.1.x and above | 1.8.x       |
| 1.0.2                   | 1.6.1.x and above | 1.8.x       |
| 1.0.1                   | 1.6.1.x and above | 1.8.x       |
## TDengine DataType and Java DataType
TDengine currently supports timestamp, numeric, string, and boolean types, which map to Java types as follows:
| TDengine DataType | Java DataType |
| ----------------- | ------------------ |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT | java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
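A minimal sketch of how these mappings surface through JDBC, assuming a hypothetical table `weather (ts timestamp, temperature float, humidity int, note binary(64), ok bool)` and a Statement `stmt` created from an existing connection:
```java
ResultSet rs = stmt.executeQuery("select ts, temperature, humidity, note, ok from weather");
while (rs.next()) {
    java.sql.Timestamp ts = rs.getTimestamp("ts");      // TIMESTAMP     -> java.sql.Timestamp
    float temperature     = rs.getFloat("temperature"); // FLOAT         -> java.lang.Float
    int humidity          = rs.getInt("humidity");      // INT           -> java.lang.Integer
    String note           = rs.getString("note");       // BINARY/NCHAR  -> java.lang.String
    boolean ok            = rs.getBoolean("ok");        // BOOL          -> java.lang.Boolean
}
rs.close();
```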
## FAQ
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
**Cause**: the program cannot find the dependent native library taos.
**Solution**: on Windows, copy C:\TDengine\driver\taos.dll to the C:\Windows\System32\ directory; on Linux, create the following soft link: `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
@ -406,3 +482,4 @@ Query OK, 1 row(s) in set (0.000141s)
[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B

View File

@ -1,5 +1,19 @@
# FAQ
## 0. How do I report a problem?
If the information in this FAQ does not help and you need technical support from the TDengine team, please package the contents of the following two directories:
1. /var/log/taos (if the default path has not been changed)
2. /etc/taos
Attach a description of the problem, including the TDengine version, the platform environment, the operations that triggered the problem, the symptoms and the approximate time it occurred, and open an issue on <a href='https://github.com/taosdata/TDengine'> GitHub</a>.
To make sure there is enough debug information, if the problem can be reproduced, append a line "debugFlag 135" (without the quotes) to /etc/taos/taos.cfg, restart taosd, reproduce the problem, and then submit it. You can also temporarily set the taosd log level with the following SQL statement:
```
alter dnode <dnode_id> debugFlag 135;
```
When the system is running normally, however, be sure to set debugFlag back to 131; otherwise a large amount of log output will reduce system performance.
## 1. What should I pay attention to when upgrading from a TDengine version earlier than 2.0 to 2.0 or later? ☆☆☆
Version 2.0 is a complete rewrite of the earlier versions, and the configuration files and data files are not compatible. Be sure to do the following before upgrading:
@ -71,7 +85,9 @@ TDengine还没有一组专用的validation queries。然而建议你使用系统
## 9. Can I delete or update a record?
No. TDengine is designed for data collected from connected devices and does not allow modification. However, TDengine provides a data retention policy: records older than the retention period are deleted automatically.
TDengine does not support deletion yet; it may be supported in the future based on user demand.
Starting from 2.0.8.0, TDengine supports updating data that has already been written. The update feature requires the database to be created with the UPDATE 1 parameter; the INSERT INTO command can then be used to update already-written data with the same timestamp. The UPDATE parameter cannot be changed with the ALTER DATABASE command. In a database created without UPDATE 1, writing data with an existing timestamp neither modifies the earlier data nor reports an error.
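A sketch of the behavior described above through the JDBC interface used elsewhere in this document; the database name `demo`, the table, and the timestamps are made up for illustration:
```java
Statement stmt = conn.createStatement();
stmt.executeUpdate("create database if not exists demo update 1"); // UPDATE 1 enables in-place updates
stmt.executeUpdate("use demo");
stmt.executeUpdate("create table if not exists tb (ts timestamp, val int)");
stmt.executeUpdate("insert into tb values('2021-01-22 10:00:00.000', 1)");
// inserting the same timestamp again overwrites the previous value: val becomes 2
stmt.executeUpdate("insert into tb values('2021-01-22 10:00:00.000', 2)");
stmt.close();
```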
## 10. How do I create a table with more than 1024 columns?
@ -118,16 +134,3 @@ TDengine是根据hostname唯一标志一台机器的在数据文件从机器A
- For 2.0.7.0 and later versions, go to /var/lib/taos/dnode, fix the FQDN corresponding to the dnodeId in dnodeEps.json, and restart. Make sure this file is identical on all machines of the cluster.
- The storage formats of the 1.x and 2.x versions are not compatible; use the migration tool or export and import the data with your own application.
## 17. How do I report a problem?
If the information in this FAQ does not help and you need technical support from the TDengine team, please package the contents of the following two directories:
1. /var/log/taos
2. /etc/taos
Attach a description of the problem, the operations that triggered it, the symptoms and the approximate time it occurred, and open an issue on <a href='https://github.com/taosdata/TDengine'> GitHub</a>.
To make sure there is enough debug information, if the problem can be reproduced, append a line "debugFlag 135" (without the quotes) to /etc/taos/taos.cfg, restart taosd, reproduce the problem, and then submit it. You can also temporarily set the taosd log level with the following SQL statement:
```
alter dnode <dnode_id> debugFlag 135;
```
When the system is running normally, however, be sure to set debugFlag back to 131; otherwise a large amount of log output will reduce system performance.

View File

@ -222,7 +222,7 @@ MQTT是一流行的物联网数据传输协议TDengine 可以很方便的接
## Writing directly from the EMQ Broker
<a href="https://github.com/emqx/emqx">EMQ</a> is an open-source MQTT broker. Without writing any code, you can write MQTT data directly into TDengine simply by configuring a "rule" in the EMQ Dashboard. EMQ X supports saving data to TDengine by sending it to a web service, and the enterprise edition also provides a native TDengine driver for direct saving. See the <a href="https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine">EMQ official documentation</a> for details.
## Writing directly from the HiveMQ Broker

View File

@ -36,6 +36,9 @@
# 0.0: only one core available.
# tsRatioOfQueryCores 1.0
# the last_row/first/last aggregator will not change the original column name in the result fields
# keepColumnName 0
# number of management nodes in the system
# numOfMnodes 3
@ -72,9 +75,6 @@
# time interval of heart beat from shell to dnode, seconds
# shellActivityTimer 3
# time of keeping table meta data in cache, seconds
# tableMetaKeepTimer 7200
# minimum sliding window time, milli-second
# minSlidingTime 10
@ -162,10 +162,10 @@
# stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 0.1
# stop writing temporary files when the disk size of the log folder is less than this value
# stop writing temporary files when the disk size of the tmp folder is less than this value
# minimalTmpDirGB 0.1
# stop writing data when the disk size of the log folder is less than this value
# if disk free space is less than this value, taosd service exit directly within startup process
# minimalDataDirGB 0.1
# One mnode is equal to the number of vnode consumed
@ -265,9 +265,6 @@
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable telemetry reporting
# telemetryReporting 1
# enable/disable stream (continuous query)
# stream 1

View File

@ -43,6 +43,7 @@ mkdir -p ${pkg_dir}${install_home_path}/include
mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script
echo "" > ${pkg_dir}${install_home_path}/email
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script

View File

@ -1,14 +1,16 @@
FROM ubuntu:20.04
FROM ubuntu:18.04
WORKDIR /root
ARG version
RUN echo $version
COPY tdengine.tar.gz /root/
RUN tar -zxf tdengine.tar.gz
WORKDIR /root/TDengine-server-$version/
RUN /bin/bash install.sh -e no
ARG pkgFile
ARG dirName
RUN echo ${pkgFile}
RUN echo ${dirName}
COPY ${pkgFile} /root/
RUN tar -zxf ${pkgFile}
WORKDIR /root/${dirName}/
RUN /bin/bash install.sh -e no
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib"
ENV LANG=en_US.UTF-8

View File

@ -0,0 +1,44 @@
#!/bin/bash
set -e
#set -x
# dockerbuild.sh
# -n [version number]
# -p [xxxx]
# set parameters by default value
verNumber=""
passWord=""
while getopts "hn:p:" arg
do
case $arg in
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
echo "verNumber=${verNumber}"
docker manifest create -a tdengine/tdengine:${verNumber} tdengine/tdengine-amd64:${verNumber} tdengine/tdengine-aarch64:${verNumber} tdengine/tdengine-aarch32:${verNumber}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine:${verNumber}
# how set latest version ???

View File

@ -1,5 +0,0 @@
#!/bin/bash
set -x
docker build --rm -f "Dockerfile" -t tdengine/tdengine-aarch64:$1 "." --build-arg version=$1
docker login -u tdengine -p $2 #replace the docker registry username and password
docker push tdengine/tdengine-aarch64:$1

View File

@ -1,5 +1,63 @@
#!/bin/bash
set -x
docker build --rm -f "Dockerfile" -t tdengine/tdengine:$1 "." --build-arg version=$1
docker login -u tdengine -p $2 #replace the docker registry username and password
docker push tdengine/tdengine:$1
set -e
#set -x
# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -f [pkg file]
# -n [version number]
# -p [password for docker hub]
# set parameters by default value
cpuType=amd64
verNumber=""
passWord=""
pkgFile=""
while getopts "hc:n:p:f:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
f)
#echo "pkgFile=$OPTARG"
pkgFile=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo " -f [pkg file] "
echo " -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
echo "$(pwd)"
echo "====NOTES: ${pkgFile} must be in the same directory as dockerbuild.sh===="
dirName=${pkgFile%-Linux*}
#echo "dirName=${dirName}"
docker build --rm -f "Dockerfile" -t tdengine/tdengine-${cpuType}:${verNumber} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker push tdengine/tdengine-${cpuType}:${verNumber}
# set this version to latest version
docker tag tdengine/tdengine-${cpuType}:${verNumber} tdengine/tdengine-${cpuType}:latest
docker push tdengine/tdengine-${cpuType}:latest

View File

@ -0,0 +1,56 @@
#!/bin/bash
#
set -e
#set -x
# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -n [version number]
# -p [password for docker hub]
# set parameters by default value
cpuType=aarch64
verNumber=""
passWord=""
while getopts "hc:n:p:f:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo " -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
pkgFile=TDengine-server-${verNumber}-Linux-${cpuType}.tar.gz
echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
scriptDir=`pwd`
pkgDir=$scriptDir/../../release/
cp -f ${pkgDir}/${pkgFile} .
./dockerbuild.sh -c ${cpuType} -f ${pkgFile} -n ${verNumber} -p ${passWord}
rm -f ${pkgFile}

View File

@ -51,6 +51,7 @@ mkdir -p %{buildroot}%{homepath}/include
mkdir -p %{buildroot}%{homepath}/init.d
mkdir -p %{buildroot}%{homepath}/script
echo "" > %{buildroot}%{homepath}/email
cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg
cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script

View File

@ -348,7 +348,7 @@ function set_ipAsFqdn() {
}
function local_fqdn_check() {
#serverFqdn=$(hostname -f)
#serverFqdn=$(hostname)
echo
echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
echo
@ -911,7 +911,7 @@ function install_TDengine() {
## ==============================Main program starts from here============================
serverFqdn=$(hostname -f)
serverFqdn=$(hostname)
if [ "$verType" == "server" ]; then
# Install server and client
if [ -x ${bin_dir}/taosd ]; then

View File

@ -21,7 +21,7 @@ else
cd ${script_dir}
script_dir="$(pwd)"
data_dir="/var/lib/taos"
log_dir="~/TDengineLog"
log_dir=~/TDengine/log
fi
log_link_dir="/usr/local/taos/log"

View File

@ -345,7 +345,7 @@ function set_ipAsFqdn() {
}
function local_fqdn_check() {
#serverFqdn=$(hostname -f)
#serverFqdn=$(hostname)
echo
echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
echo
@ -881,7 +881,7 @@ function install_PowerDB() {
## ==============================Main program starts from here============================
serverFqdn=$(hostname -f)
serverFqdn=$(hostname)
if [ "$verType" == "server" ]; then
# Install server and client
if [ -x ${bin_dir}/powerd ]; then

View File

@ -24,7 +24,7 @@ data_dir="/var/lib/taos"
if [ "$osType" != "Darwin" ]; then
log_dir="/var/log/taos"
else
log_dir="~/TDengineLog"
log_dir=~/TDengine/log
fi
data_link_dir="/usr/local/taos/data"
@ -178,7 +178,9 @@ function install_bin() {
function install_lib() {
# Remove links
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
fi
if [ "$osType" != "Darwin" ]; then
${csudo} cp ${binary_dir}/build/lib/libtaos.so.${verNumber} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
@ -190,12 +192,14 @@ function install_lib() {
${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
fi
else
${csudo} cp ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -sf ${install_main_dir}/driver/libtaos.1.dylib ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
${csudo} ldconfig
if [ "$osType" != "Darwin" ]; then
${csudo} ldconfig
fi
}
function install_header() {

View File

@ -228,7 +228,7 @@ function set_ipAsFqdn() {
}
function local_fqdn_check() {
#serverFqdn=$(hostname -f)
#serverFqdn=$(hostname)
echo
echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
echo
@ -492,5 +492,5 @@ function install_TDengine() {
## ==============================Main program starts from here============================
serverFqdn=$(hostname -f)
serverFqdn=$(hostname)
install_TDengine

View File

@ -0,0 +1,144 @@
# Usage:
# sudo gdb -x ./taosd-dump-cfg.gdb
define attach_pidof
if $argc != 1
help attach_pidof
else
shell echo -e "\
set \$PID = "$(echo $(pidof $arg0) 0 | cut -d " " -f 1)"\n\
if \$PID > 0\n\
attach "$(pidof -s $arg0)"\n\
else\n\
print \"Process '"$arg0"' not found\"\n\
end" > /tmp/gdb.pidof
source /tmp/gdb.pidof
end
end
document attach_pidof
Attach to process by name
Usage: attach_pidof PROG_NAME
end
set $TAOS_CFG_VTYPE_INT8 = 0
set $TAOS_CFG_VTYPE_INT16 = 1
set $TAOS_CFG_VTYPE_INT32 = 2
set $TAOS_CFG_VTYPE_FLOAT = 3
set $TAOS_CFG_VTYPE_STRING = 4
set $TAOS_CFG_VTYPE_IPSTR = 5
set $TAOS_CFG_VTYPE_DIRECTORY = 6
set $TSDB_CFG_CTYPE_B_CONFIG = 1U
set $TSDB_CFG_CTYPE_B_SHOW = 2U
set $TSDB_CFG_CTYPE_B_LOG = 4U
set $TSDB_CFG_CTYPE_B_CLIENT = 8U
set $TSDB_CFG_CTYPE_B_OPTION = 16U
set $TSDB_CFG_CTYPE_B_NOT_PRINT = 32U
set $TSDB_CFG_PRINT_LEN = 53
define print_blank
if $argc == 1
set $blank_len = $arg0
while $blank_len > 0
printf "%s", " "
set $blank_len = $blank_len - 1
end
end
end
define dump_cfg
if $argc != 1
help dump_cfg
else
set $blen = $TSDB_CFG_PRINT_LEN - (int)strlen($arg0.option)
if $blen < 0
set $blen = 0
end
#printf "%s: %d\n", "******blen: ", $blen
printf "%s: ", $arg0.option
print_blank $blen
if $arg0.valType == $TAOS_CFG_VTYPE_INT8
printf "%d\n", *((int8_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_INT16
printf "%d\n", *((int16_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_INT32
printf "%d\n", *((int32_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_FLOAT
printf "%f\n", *((float *) $arg0.ptr)
else
printf "%s\n", $arg0.ptr
end
end
end
end
end
end
document dump_cfg
Dump a cfg entry
Usage: dump_cfg cfg
end
set pagination off
attach_pidof taosd
set $idx=0
#print tsGlobalConfigNum
#set $end=$1
set $end=tsGlobalConfigNum
p "*=*=*=*=*=*=*=*=*= taos global config:"
#while ($idx .lt. $end)
while ($idx < $end)
# print tsGlobalConfig[$idx].option
set $cfg = tsGlobalConfig[$idx]
set $tsce = tscEmbedded
# p "1"
if ($tsce == 0)
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
end
else
if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
else
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
else
dump_cfg $cfg
end
end
end
set $idx=$idx+1
end
set $idx=0
p "*=*=*=*=*=*=*=*=*= taos local config:"
while ($idx < $end)
set $cfg = tsGlobalConfig[$idx]
set $tsce = tscEmbedded
if ($tsce == 0)
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
end
else
if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
else
if ($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
else
dump_cfg $cfg
end
end
end
set $idx=$idx+1
end
detach
quit

View File

@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.0.12.0'
version: '2.0.14.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.12.0
- usr/lib/libtaos.so.2.0.14.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so

View File

@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
# Base compile

View File

@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/mnode/inc)
@ -8,6 +8,4 @@ INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc)
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX)
ADD_LIBRARY(balance ${SRC})
ENDIF ()
ADD_LIBRARY(balance ${SRC})

View File

@ -24,7 +24,7 @@ extern "C" {
int32_t bnInitThread();
void bnCleanupThread();
void bnNotify();
void bnStartTimer(int64_t mseconds);
void bnStartTimer(int32_t mseconds);
#ifdef __cplusplus
}

View File

@ -30,7 +30,7 @@
#include "mnodeVgroup.h"
extern int64_t tsDnodeRid;
extern int64_t tsSdbRid;
extern int32_t tsSdbRid;
static SBnMgmt tsBnMgmt;
static void bnMonitorDnodeModule();

View File

@ -271,23 +271,23 @@ static int32_t bnRetrieveScores(SShowObj *pShow, char *data, int32_t rows, void
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = systemScore;
*(float *)pWrite = (float)systemScore;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = pDnode->customScore;
*(float *)pWrite = (float)pDnode->customScore;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = (int32_t)moduleScore;
*(float *)pWrite = (float)moduleScore;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = (int32_t)vnodeScore;
*(float *)pWrite = (float)vnodeScore;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = (int32_t)(vnodeScore + moduleScore + pDnode->customScore + systemScore);
*(float *)pWrite = (float)(vnodeScore + moduleScore + pDnode->customScore + systemScore);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;

View File

@ -56,7 +56,7 @@ int32_t bnInitThread() {
pthread_attr_destroy(&thattr);
if (ret != 0) {
mError("failed to create balance thread since %s", strerror(errno));
mError("failed to create balance thread since %s", strerror(ret));
return -1;
}
@ -119,13 +119,13 @@ static void bnProcessTimer(void *handle, void *tmrId) {
}
}
void bnStartTimer(int64_t mseconds) {
void bnStartTimer(int32_t mseconds) {
if (tsBnThread.stop) return;
bool updateSoon = (mseconds != -1);
if (updateSoon) {
mTrace("balance function will be called after %" PRId64 " ms", mseconds);
taosTmrReset(bnProcessTimer, mseconds, (void *)mseconds, tsMnodeTmr, &tsBnThread.timer);
mTrace("balance function will be called after %d ms", mseconds);
taosTmrReset(bnProcessTimer, mseconds, (void *)(int64_t)mseconds, tsMnodeTmr, &tsBnThread.timer);
} else {
taosTmrReset(bnProcessTimer, tsStatusInterval * 1000, NULL, tsMnodeTmr, &tsBnThread.timer);
}

View File

@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
@ -28,6 +28,28 @@ IF (TD_LINUX)
ADD_SUBDIRECTORY(tests)
ELSEIF (TD_DARWIN)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)
# set the static lib name
ADD_LIBRARY(taos_static STATIC ${SRC})
TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
#set version of .dylib
#VERSION dylib version
#SOVERSION dylib version
#MESSAGE(STATUS "build version ${TD_VER_NUMBER}")
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1)
ADD_SUBDIRECTORY(tests)
ELSEIF (TD_WINDOWS)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows/win32)
@ -49,12 +71,12 @@ ELSEIF (TD_DARWIN)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)
ADD_LIBRARY(taos_static STATIC ${SRC})
TARGET_LINK_LIBRARIES(taos_static trpc tutil pthread m)
TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
TARGET_LINK_LIBRARIES(taos trpc tutil pthread m)
TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)

View File

@ -38,7 +38,7 @@ typedef struct SLocalDataSource {
tFilePage filePage;
} SLocalDataSource;
typedef struct SLocalReducer {
typedef struct SLocalMerger {
SLocalDataSource ** pLocalDataSrc;
int32_t numOfBuffer;
int32_t numOfCompleted;
@ -62,7 +62,7 @@ typedef struct SLocalReducer {
bool discard;
int32_t offset; // limit offset value
bool orderPrjOnSTable; // projection query on stable
} SLocalReducer;
} SLocalMerger;
typedef struct SRetrieveSupport {
tExtMemBuffer ** pExtMemBuffer; // for build loser tree
@ -89,10 +89,10 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
/*
* create local reducer to launch the second-stage reduce process at client site
*/
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalModel, SColumnModel *pFFModel, SSqlObj* pSql);
void tscDestroyLocalReducer(SSqlObj *pSql);
void tscDestroyLocalMerger(SSqlObj *pSql);
int32_t tscDoLocalMerge(SSqlObj *pSql);

View File

@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_TSC_LOG_H
#define TDENGINE_TSC_LOG_H
#ifndef TDENGINE_TSCLOG_H
#define TDENGINE_TSCLOG_H
#ifdef __cplusplus
extern "C" {
@ -22,7 +22,7 @@ extern "C" {
#include "tlog.h"
extern int32_t cDebugFlag;
extern uint32_t cDebugFlag;
extern int8_t tscEmbedded;
#define tscFatal(...) do { if (cDebugFlag & DEBUG_FATAL) { taosPrintLog("TSC FATAL ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} while(0)

View File

@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_TSCJOINPROCESS_H
#define TDENGINE_TSCJOINPROCESS_H
#ifndef TDENGINE_TSCSUBQUERY_H
#define TDENGINE_TSCSUBQUERY_H
#ifdef __cplusplus
extern "C" {
@ -43,8 +43,12 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql);
char *getArithmeticInputSrc(void *param, const char *name, int32_t colId);
void tscLockByThread(int64_t *lockedBy);
void tscUnlockByThread(int64_t *lockedBy);
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_TSCJOINPROCESS_H
#endif // TDENGINE_TSCSUBQUERY_H

View File

@ -98,20 +98,19 @@ static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t sub
return pCmd->pQueryInfo[subClauseIndex];
}
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta);
void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
uint32_t offset);
void* tscDestroyBlockArrayList(SArray* pDataBlockList);
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable);
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
int32_t tscMergeTableDataBlocks(SSqlObj* pSql);
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta,
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap);
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, SName* pName, STableMeta* pTableMeta,
STableDataBlocks** dataBlocks, SArray* pBlockList);
/**
@ -142,10 +141,6 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo);
bool tscIsInsertData(char* sqlstr);
/* use for keep current db info temporarily, for handle table with db prefix */
// todo remove it
void tscGetDBInfoFromTableFullName(char* tableId, char* db);
int tscAllocPayload(SSqlCmd* pCmd, int size);
TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes);
@ -215,8 +210,8 @@ SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex);
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo);
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo);
int32_t tscAddSubqueryInfo(SSqlCmd *pCmd);
@ -225,7 +220,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo);
void tscClearSubqueryInfo(SSqlCmd* pCmd);
void tscFreeVgroupTableInfo(SArray* pVgroupTables);
SArray* tscVgroupTableInfoClone(SArray* pVgroupTables);
SArray* tscVgroupTableInfoDup(SArray* pVgroupTables);
void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index);
void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo);
@ -256,11 +251,11 @@ void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src);
* @param pPrevSql
* @return
*/
SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cmd);
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd);
void registerSqlObj(SSqlObj* pSql);
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, int32_t cmd, SSqlObj* pPrevSql);
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t fp, void* param, int32_t cmd, SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex);
void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex);
@ -292,7 +287,7 @@ uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
STableMeta* tscTableMetaClone(STableMeta* pTableMeta);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
void* malloc_throw(size_t size);

View File

@ -24,10 +24,6 @@ extern "C" {
#include "tstoken.h"
#include "tsclient.h"
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
//struct SSchema;
/**
* get the number of tags of this table
* @param pTableMeta
@ -79,26 +75,6 @@ SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
*/
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
/**
* check if the schema is valid or not, including following aspects:
* 1. number of columns
* 2. column types
* 3. column length
* 4. column names
* 5. total length
*
* @param pSchema
* @param numOfCols
* @return
*/
bool isValidSchema(struct SSchema *pSchema, int32_t numOfCols);
/**
* get the schema for the "tbname" column. it is a built column
* @return
*/
SSchema tscGetTbnameColumnSchema();
/**
* create the table meta from the msg
* @param pTableMetaMsg

View File

@ -22,15 +22,15 @@ extern "C" {
#include "os.h"
#include "qAggMain.h"
#include "taos.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "tarray.h"
#include "tglobal.h"
#include "tsqlfunction.h"
#include "tutil.h"
#include "tcache.h"
#include "tglobal.h"
#include "tref.h"
#include "tutil.h"
#include "qExecutor.h"
#include "qSqlparser.h"
@ -39,7 +39,7 @@ extern "C" {
// forward declaration
struct SSqlInfo;
struct SLocalReducer;
struct SLocalMerger;
// data source from sql string or from file
enum {
@ -67,7 +67,7 @@ typedef struct CChildTableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN];
char sTableName[TSDB_TABLE_FNAME_LEN]; //super table name, not full name
} CChildTableMeta;
typedef struct STableMeta {
@ -91,7 +91,7 @@ typedef struct STableMetaInfo {
* 2. keep the vgroup index for multi-vnode insertion
*/
int32_t vgroupIndex;
char name[TSDB_TABLE_FNAME_LEN]; // (super) table name
SName name;
char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql
SArray *tagColList; // SArray<SColumn*>, involved tag columns
} STableMetaInfo;
@ -142,7 +142,7 @@ typedef struct SCond {
} SCond;
typedef struct SJoinNode {
char tableId[TSDB_TABLE_FNAME_LEN];
char tableName[TSDB_TABLE_FNAME_LEN];
uint64_t uid;
int16_t tagColId;
} SJoinNode;
@ -176,7 +176,7 @@ typedef struct SParamInfo {
} SParamInfo;
typedef struct STableDataBlocks {
char tableName[TSDB_TABLE_FNAME_LEN];
SName tableName;
int8_t tsSource; // where does the UNIX timestamp come from, server or client
bool ordered; // if current rows are ordered or not
int64_t vgId; // virtual group id
@ -223,6 +223,8 @@ typedef struct SQueryInfo {
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
int16_t resColumnId; // result column id
bool distinctTag; // distinct tag or not
} SQueryInfo;
typedef struct {
@ -235,7 +237,7 @@ typedef struct {
int32_t numOfTablesInSubmit;
};
uint32_t insertType;
uint32_t insertType; // TODO remove it
int32_t clauseIndex; // index of multiple subclause query
char * curSql; // current sql, resume position of sql after parsing paused
@ -254,7 +256,7 @@ typedef struct {
int8_t submitSchema; // submit block is built with table schema
STagData tagData; // NOTE: pTagData->data is used as a variant length array
char **pTableNameList; // all involved tableMeta list of current insert sql statement.
SName **pTableNameList; // all involved tableMeta list of current insert sql statement.
int32_t numOfTables;
SHashObj *pTableBlockHashList; // data block for each table
@ -292,7 +294,7 @@ typedef struct {
SColumnIndex* pColumnIndex;
SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions
struct SLocalReducer *pLocalReducer;
struct SLocalMerger *pLocalMerger;
} SSqlRes;
typedef struct {
@ -306,8 +308,8 @@ typedef struct STscObj {
void * pTimer;
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
char acctId[TSDB_ACCT_LEN];
char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
char acctId[TSDB_ACCT_ID_LEN];
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
char sversion[TSDB_VERSION_LEN];
char writeAuth : 1;
char superAuth : 1;
@ -322,7 +324,8 @@ typedef struct STscObj {
} STscObj;
typedef struct SSubqueryState {
int32_t numOfRemain; // the number of remain unfinished subquery
pthread_mutex_t mutex;
int8_t *states;
int32_t numOfSub; // the number of total sub-queries
uint64_t numOfRetrievedRows; // total number of points in this query
} SSubqueryState;
@ -332,8 +335,8 @@ typedef struct SSqlObj {
pthread_t owner; // owner of sql object, by which it is executed
STscObj *pTscObj;
int64_t rpcRid;
void (*fp)();
void (*fetchFp)();
__async_cb_func_t fp;
__async_cb_func_t fetchFp;
void *param;
int64_t stime;
uint32_t queryId;
@ -352,6 +355,11 @@ typedef struct SSqlObj {
SSubqueryState subState;
struct SSqlObj **pSubs;
int64_t metaRid;
int64_t svgroupRid;
int64_t squeryLock;
struct SSqlObj *prev, *next;
int64_t self;
} SSqlObj;
@ -412,7 +420,7 @@ void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
void tscResetSqlCmdObj(SSqlCmd *pCmd);
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
/**
* free query result of the sql object
@ -437,7 +445,7 @@ void waitForQueryRsp(void *param, TAOS_RES *tres, int code);
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen);
void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql);
void tscImportDataFromFile(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
bool tscIsUpdateQuery(SSqlObj* pSql);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
@ -465,9 +473,9 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
pRes->tsrow[columnIndex] = (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) ? NULL : (unsigned char*)pData;
} else {
assert(bytes == tDataTypeDesc[type].nSize);
assert(bytes == tDataTypes[type].bytes);
pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)&pInfo->pSqlExpr->param[1].i64Key;
pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)&pInfo->pSqlExpr->param[1].i64;
pRes->length[columnIndex] = bytes;
}
} else {
@ -482,7 +490,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
pRes->length[columnIndex] = realLen;
} else {
assert(bytes == tDataTypeDesc[type].nSize);
assert(bytes == tDataTypes[type].bytes);
pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)pData;
pRes->length[columnIndex] = bytes;

View File

@ -32,7 +32,6 @@ taos_errstr
taos_errno
taos_query_a
taos_fetch_rows_a
taos_fetch_row_a
taos_subscribe
taos_consume
taos_unsubscribe

View File

@ -20,24 +20,19 @@
#include "trpc.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tscLocalMerge.h"
#include "tscUtil.h"
#include "tsched.h"
#include "tschemautil.h"
#include "tsclient.h"
static void tscProcessFetchRow(SSchedMsg *pMsg);
static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, void (*fp)());
/*
* Proxy function to perform sequentially query&retrieve operation.
* If sql queries upon a super table and two-stage merge procedure is not involved (when employ the projection
* query), it will sequentially query&retrieve data for all vnodes
*/
static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows);
static void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows);
void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* param, const char* sqlstr, size_t sqlLen) {
SSqlCmd* pCmd = &pSql->cmd;
@ -95,7 +90,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
return;
}
nPrintTsc(sqlstr);
nPrintTsc("%s", sqlstr);
SSqlObj *pSql = (SSqlObj *)calloc(1, sizeof(SSqlObj));
if (pSql == NULL) {
@ -148,7 +143,7 @@ static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) {
}
// actual continue retrieve function with user-specified callback function
static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, void (*fp)()) {
static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, __async_cb_func_t fp) {
SSqlObj *pSql = (SSqlObj *)tres;
if (pSql == NULL) { // error
tscError("sql object is NULL");
@ -191,11 +186,6 @@ static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOf
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchRowsProxy);
}
void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows) {
// query completed, continue to retrieve
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchSingleRowProxy);
}
void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
SSqlObj *pSql = (SSqlObj *)taosa;
if (pSql == NULL || pSql->signature != pSql) {
@ -263,116 +253,19 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
}
}
void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW), void *param) {
SSqlObj *pSql = (SSqlObj *)taosa;
if (pSql == NULL || pSql->signature != pSql) {
tscError("sql object is NULL");
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
return;
}
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
if (pRes->qhandle == 0) {
tscError("qhandle is NULL");
pSql->param = param;
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
tscAsyncResultOnError(pSql);
return;
}
pSql->fetchFp = fp;
pSql->param = param;
if (pRes->row >= pRes->numOfRows) {
tscResetForNextRetrieve(pRes);
pSql->fp = tscAsyncFetchSingleRowProxy;
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
tscProcessSql(pSql);
} else {
SSchedMsg schedMsg = { 0 };
schedMsg.fp = tscProcessFetchRow;
schedMsg.ahandle = pSql;
schedMsg.thandle = pRes->tsrow;
schedMsg.msg = NULL;
taosScheduleTask(tscQhandle, &schedMsg);
}
}
void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows) {
SSqlObj *pSql = (SSqlObj *)tres;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (numOfRows == 0) {
if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
tscTryQueryNextVnode(pSql, tscAsyncQuerySingleRowForNextVnode);
} else {
/*
* 1. has reach the limitation
* 2. no remain virtual nodes to be retrieved anymore
*/
(*pSql->fetchFp)(pSql->param, pSql, NULL);
}
return;
}
for (int i = 0; i < pCmd->numOfCols; ++i){
SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
if (pSup->pSqlExpr != NULL) {
// pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i) + pSup->pSqlExpr->resBytes * pRes->row;
} else {
//todo add
}
}
pRes->row++;
(*pSql->fetchFp)(pSql->param, pSql, pSql->res.tsrow);
}
void tscProcessFetchRow(SSchedMsg *pMsg) {
SSqlObj *pSql = (SSqlObj *)pMsg->ahandle;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
for (int i = 0; i < pCmd->numOfCols; ++i) {
SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
if (pSup->pSqlExpr != NULL) {
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, 0);
} else {
// todo add
}
}
pRes->row++;
(*pSql->fetchFp)(pSql->param, pSql, pRes->tsrow);
}
// this function will be executed by queue task threads, so the terrno is not valid
static void tscProcessAsyncError(SSchedMsg *pMsg) {
void (*fp)() = pMsg->ahandle;
terrno = *(int32_t*) pMsg->msg;
tfree(pMsg->msg);
(*fp)(pMsg->thandle, NULL, *(int32_t*)pMsg->msg);
(*fp)(pMsg->thandle, NULL, terrno);
}
void tscQueueAsyncError(void(*fp), void *param, int32_t code) {
int32_t* c = malloc(sizeof(int32_t));
*c = code;
SSchedMsg schedMsg = { 0 };
SSchedMsg schedMsg = {0};
schedMsg.fp = tscProcessAsyncError;
schedMsg.ahandle = fp;
schedMsg.thandle = param;
@ -380,7 +273,6 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) {
taosScheduleTask(tscQhandle, &schedMsg);
}
void tscAsyncResultOnError(SSqlObj *pSql) {
if (pSql == NULL || pSql->signature != pSql) {
tscDebug("%p SqlObj is freed, not add into queue async res", pSql);
@ -388,7 +280,7 @@ void tscAsyncResultOnError(SSqlObj *pSql) {
}
assert(pSql->res.code != TSDB_CODE_SUCCESS);
tscError("%p add into queued async res, code:%s", pSql, tstrerror(pSql->res.code));
tscError("%p invoke user specified function due to error occured, code:%s", pSql, tstrerror(pSql->res.code));
SSqlRes *pRes = &pSql->res;
if (pSql->fp == NULL || pSql->fetchFp == NULL){
@ -402,8 +294,10 @@ void tscAsyncResultOnError(SSqlObj *pSql) {
int tscSendMsgToServer(SSqlObj *pSql);
void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlObj *pSql = (SSqlObj *)param;
if (pSql == NULL || pSql->signature != pSql) return;
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)param);
if (pSql == NULL) return;
assert(pSql->signature == pSql && (int64_t)param == pSql->self);
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
@ -421,14 +315,15 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// check if it is a sub-query of super table query first, if true, enter another routine
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
tscDebug("%p update local table meta, continue to process sql and send the corresponding query", pSql);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
}
@ -436,6 +331,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
// tscProcessSql can add error into async res
tscProcessSql(pSql);
taosReleaseRef(tscObjRef, pSql->self);
return;
} else { // continue to process normal async query
if (pCmd->parseFinished) {
@ -446,6 +342,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
}
@ -454,10 +351,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (pCmd->command == TSDB_SQL_SELECT) {
tscDebug("%p redo parse sql string and proceed", pSql);
pCmd->parseFinished = false;
tscResetSqlCmdObj(pCmd);
tscResetSqlCmd(pCmd, true);
code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
@ -468,12 +366,14 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
tscProcessSql(pSql);
}
taosReleaseRef(tscObjRef, pSql->self);
return;
} else {
tscDebug("%p continue parse sql after get table meta", pSql);
code = tsParseSql(pSql, false);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
@ -483,12 +383,14 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
} else {
assert(code == TSDB_CODE_SUCCESS);
}
(*pSql->fp)(pSql->param, pSql, code);
taosReleaseRef(tscObjRef, pSql->self);
return;
}
@ -501,6 +403,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
@ -509,6 +412,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
code = tscGetSTableVgroupInfo(pSql, pCmd->clauseIndex);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
@ -521,11 +425,16 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
}
(*pSql->fp)(pSql->param, pSql, code);
taosReleaseRef(tscObjRef, pSql->self);
return;
}
tscDoQuery(pSql);
taosReleaseRef(tscObjRef, pSql->self);
return;
_error:
@ -533,4 +442,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
pSql->res.code = code;
tscAsyncResultOnError(pSql);
}
taosReleaseRef(tscObjRef, pSql->self);
}

View File

@ -79,7 +79,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
char* dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i;
STR_WITH_MAXSIZE_TO_VARSTR(dst, pSchema[i].name, pField->bytes);
char *type = tDataTypeDesc[pSchema[i].type].aName;
char *type = tDataTypes[pSchema[i].type].name;
pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 1);
dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i;
@ -119,7 +119,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
// type name
pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 1);
char *type = tDataTypeDesc[pSchema[i].type].aName;
char *type = tDataTypes[pSchema[i].type].name;
output = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i;
STR_WITH_MAXSIZE_TO_VARSTR(output, type, pField->bytes);
@ -204,7 +204,7 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
assert(tscGetMetaInfo(pQueryInfo, 0)->pTableMeta != NULL);
const int32_t NUM_OF_DESC_TABLE_COLUMNS = 4;
const int32_t TYPE_COLUMN_LENGTH = 16;
const int32_t TYPE_COLUMN_LENGTH = 20;
const int32_t NOTE_COLUMN_MIN_LENGTH = 8;
int32_t noteFieldLen = NOTE_COLUMN_MIN_LENGTH;
@ -569,10 +569,12 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch
}
char fullName[TSDB_TABLE_FNAME_LEN * 2] = {0};
extractDBName(pTableMetaInfo->name, fullName);
tNameGetDbName(&pTableMetaInfo->name, fullName);
extractTableName(pMeta->sTableName, param->sTableName);
snprintf(fullName + strlen(fullName), TSDB_TABLE_FNAME_LEN - strlen(fullName), ".%s", param->sTableName);
extractTableName(pTableMetaInfo->name, param->buf);
strncpy(param->buf, tNameGetTableName(&pTableMetaInfo->name), TSDB_TABLE_NAME_LEN);
param->pParentSql = pSql;
param->pInterSql = pInterSql;
@ -602,6 +604,7 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch
return TSDB_CODE_TSC_ACTION_IN_PROGRESS;
}
static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, char *ddl) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@ -619,9 +622,9 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName,
if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE;
}
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName, bytes);
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypes[pSchema[i].type].name, bytes);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName);
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[pSchema[i].type].name);
}
}
sprintf(result + strlen(result) - 1, "%s", ")");
@ -646,9 +649,9 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE;
}
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[type].name);
}
}
snprintf(result + strlen(result) - 1, TSDB_MAX_BINARY_LEN - strlen(result), "%s %s", ")", "TAGS (");
@ -660,9 +663,9 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE;
}
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[type].name);
}
}
sprintf(result + strlen(result) - 1, "%s", ")");
@ -675,8 +678,7 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
assert(pTableMetaInfo->pTableMeta != NULL);
char tableName[TSDB_TABLE_NAME_LEN] = {0};
extractTableName(pTableMetaInfo->name, tableName);
const char* tableName = tNameGetTableName(&pTableMetaInfo->name);
char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
int32_t code = TSDB_CODE_SUCCESS;
@ -712,7 +714,9 @@ static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) {
free(pInterSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
extractTableName(pTableMetaInfo->name, param->buf);
strncpy(param->buf, tNameGetTableName(&pTableMetaInfo->name), TSDB_TABLE_NAME_LEN);
param->pParentSql = pSql;
param->pInterSql = pInterSql;
param->fp = tscRebuildCreateDBStatement;
@ -745,7 +749,10 @@ static int32_t tscProcessCurrentUser(SSqlObj *pSql) {
static int32_t tscProcessCurrentDB(SSqlObj *pSql) {
char db[TSDB_DB_NAME_LEN] = {0};
pthread_mutex_lock(&pSql->pTscObj->mutex);
extractDBName(pSql->pTscObj->db, db);
pthread_mutex_unlock(&pSql->pTscObj->mutex);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);

View File

@ -16,7 +16,7 @@
#include "tscLocalMerge.h"
#include "tscSubquery.h"
#include "os.h"
#include "qAst.h"
#include "texpr.h"
#include "tlosertree.h"
#include "tscLog.h"
#include "tscUtil.h"
@ -56,7 +56,7 @@ int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
}
}
static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDescriptor *pDesc) {
static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescriptor *pDesc) {
/*
* the fields and offset attributes in pCmd and pModel may be different due to
* merge requirement. So, the final result in pRes structure is formatted in accordance with the pCmd object.
@ -95,11 +95,11 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
int32_t functionId = pExpr->functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pCtx->ptsOutputBuf = pReducer->pCtx[0].aOutputBuf;
pCtx->param[2].i64Key = pQueryInfo->order.order;
pCtx->param[2].i64 = pQueryInfo->order.order;
pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
pCtx->param[1].i64Key = pQueryInfo->order.orderColId;
pCtx->param[1].i64 = pQueryInfo->order.orderColId;
} else if (functionId == TSDB_FUNC_APERCT) {
pCtx->param[0].i64Key = pExpr->param[0].i64Key;
pCtx->param[0].i64 = pExpr->param[0].i64;
pCtx->param[0].nType = pExpr->param[0].nType;
}
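Here the variant parameters switch from the i64Key member to i64. A variant of this kind is usually a small tagged union; the sketch below is a generic illustration of that shape, and the field names and type codes are assumptions rather than the real tVariant definition.

#include <stdint.h>
#include <stdio.h>

/* Generic tagged value; field names and type codes are illustrative only. */
typedef struct {
  int8_t nType;        /* discriminator, e.g. a BIGINT type code */
  union {
    int64_t i64;       /* the member renamed from i64Key in this change */
    double  d;
  };
} Variant;

int main(void) {
  Variant order;
  order.nType = 5;     /* stand-in type code */
  order.i64   = 1;     /* e.g. pQueryInfo->order.order */
  printf("type=%d value=%lld\n", order.nType, (long long)order.i64);
  return 0;
}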
@ -166,7 +166,7 @@ static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) {
return pFillCol;
}
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
@ -212,9 +212,9 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
return;
}
size_t size = sizeof(SLocalReducer) + POINTER_BYTES * numOfFlush;
size_t size = sizeof(SLocalMerger) + POINTER_BYTES * numOfFlush;
SLocalReducer *pReducer = (SLocalReducer *) calloc(1, size);
SLocalMerger *pReducer = (SLocalMerger *) calloc(1, size);
if (pReducer == NULL) {
tscError("%p failed to create local merge structure, out of memory", pSql);
@ -372,7 +372,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->offset = (int32_t)pQueryInfo->limit.offset;
pRes->pLocalReducer = pReducer;
pRes->pLocalMerger = pReducer;
pRes->numOfGroups = 0;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
@ -477,13 +477,13 @@ int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePa
return 0;
}
void tscDestroyLocalReducer(SSqlObj *pSql) {
void tscDestroyLocalMerger(SSqlObj *pSql) {
if (pSql == NULL) {
return;
}
SSqlRes *pRes = &(pSql->res);
if (pRes->pLocalReducer == NULL) {
if (pRes->pLocalMerger == NULL) {
return;
}
@ -491,14 +491,14 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// there is no more result, so we release all allocated resource
SLocalReducer *pLocalReducer = (SLocalReducer *)atomic_exchange_ptr(&pRes->pLocalReducer, NULL);
if (pLocalReducer != NULL) {
pLocalReducer->pFillInfo = taosDestroyFillInfo(pLocalReducer->pFillInfo);
SLocalMerger *pLocalMerge = (SLocalMerger *)atomic_exchange_ptr(&pRes->pLocalMerger, NULL);
if (pLocalMerge != NULL) {
pLocalMerge->pFillInfo = taosDestroyFillInfo(pLocalMerge->pFillInfo);
if (pLocalReducer->pCtx != NULL) {
if (pLocalMerge->pCtx != NULL) {
int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i];
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[i];
tVariantDestroy(&pCtx->tag);
tfree(pCtx->resultInfo);
@ -508,31 +508,31 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
}
}
tfree(pLocalReducer->pCtx);
tfree(pLocalMerge->pCtx);
}
tfree(pLocalReducer->prevRowOfInput);
tfree(pLocalMerge->prevRowOfInput);
tfree(pLocalReducer->pTempBuffer);
tfree(pLocalReducer->pResultBuf);
tfree(pLocalMerge->pTempBuffer);
tfree(pLocalMerge->pResultBuf);
if (pLocalReducer->pLoserTree) {
tfree(pLocalReducer->pLoserTree->param);
tfree(pLocalReducer->pLoserTree);
if (pLocalMerge->pLoserTree) {
tfree(pLocalMerge->pLoserTree->param);
tfree(pLocalMerge->pLoserTree);
}
tfree(pLocalReducer->pFinalRes);
tfree(pLocalReducer->discardData);
tfree(pLocalMerge->pFinalRes);
tfree(pLocalMerge->discardData);
tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel, pLocalReducer->finalModel,
pLocalReducer->numOfVnode);
for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) {
tfree(pLocalReducer->pLocalDataSrc[i]);
tscLocalReducerEnvDestroy(pLocalMerge->pExtMemBuffer, pLocalMerge->pDesc, pLocalMerge->resColModel, pLocalMerge->finalModel,
pLocalMerge->numOfVnode);
for (int32_t i = 0; i < pLocalMerge->numOfBuffer; ++i) {
tfree(pLocalMerge->pLocalDataSrc[i]);
}
pLocalReducer->numOfBuffer = 0;
pLocalReducer->numOfCompleted = 0;
free(pLocalReducer);
pLocalMerge->numOfBuffer = 0;
pLocalMerge->numOfCompleted = 0;
free(pLocalMerge);
} else {
tscDebug("%p already freed or another free function is invoked", pSql);
}
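The renamed destroy path keeps the same idempotency trick: the merger pointer is claimed with an atomic exchange, so a second or concurrent call observes NULL and returns without double-freeing. A self-contained sketch of that pattern using C11 atomics, assuming the repository's atomic_exchange_ptr behaves like atomic_exchange:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Merger { int dummy; } Merger;

static _Atomic(Merger *) gMerger;   /* shared result object's merger pointer */

static void destroyMerger(void) {
  /* whoever swaps the pointer to NULL first owns the free(); later callers bail out */
  Merger *m = atomic_exchange(&gMerger, NULL);
  if (m == NULL) {
    printf("already freed or another free is in progress\n");
    return;
  }
  free(m);
  printf("freed\n");
}

int main(void) {
  atomic_store(&gMerger, (Merger *)calloc(1, sizeof(Merger)));
  destroyMerger();   /* frees the merger */
  destroyMerger();   /* second call is a harmless no-op */
  return 0;
}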
@ -604,7 +604,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
}
bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage *tmpBuffer) {
bool isSameGroup(SSqlCmd *pCmd, SLocalMerger *pReducer, char *pPrev, tFilePage *tmpBuffer) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// disable merge procedure for column projection query
@ -795,12 +795,12 @@ void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDe
/**
*
* @param pLocalReducer
* @param pLocalMerge
* @param pOneInterDataSrc
* @param treeList
* @return the number of remaining input sources; if ret == 0, all data has been handled
*/
int32_t loadNewDataFromDiskFor(SLocalReducer *pLocalReducer, SLocalDataSource *pOneInterDataSrc,
int32_t loadNewDataFromDiskFor(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
bool *needAdjustLoserTree) {
pOneInterDataSrc->rowIdx = 0;
pOneInterDataSrc->pageId += 1;
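The comment above describes refilling an exhausted data source and re-adjusting the loser tree. As a simplified stand-in for the loser tree, the sketch below runs a k-way merge with a plain linear scan over in-memory pages; it shows the same control flow (pick the smallest head, refill a source when its page is exhausted, count completed sources, stop when all are done). None of the identifiers are taken from the repository.

#include <stdio.h>

#define NUM_SRC   3
#define PAGE_ROWS 2

/* Each source yields sorted values two rows per "page"; pages are consumed in order. */
typedef struct {
  int pages[2][PAGE_ROWS];
  int pageId;
  int rowIdx;
  int numPages;
  int done;
} Source;

/* analogue of loadNewDataFromDiskFor: advance to the next page, or mark the source done */
static int loadNextPage(Source *s) {
  s->pageId++;
  s->rowIdx = 0;
  if (s->pageId >= s->numPages) { s->done = 1; return 0; }
  return 1;
}

int main(void) {
  Source src[NUM_SRC] = {
    {{{1, 4}, {7, 9}},   0, 0, 2, 0},
    {{{2, 3}, {8, 10}},  0, 0, 2, 0},
    {{{5, 6}, {11, 12}}, 0, 0, 2, 0},
  };
  int completed = 0;                       /* analogue of numOfCompleted */
  while (completed < NUM_SRC) {
    int best = -1;
    for (int i = 0; i < NUM_SRC; ++i) {    /* linear scan instead of a loser tree */
      if (src[i].done) continue;
      if (best < 0 || src[i].pages[src[i].pageId][src[i].rowIdx] <
                      src[best].pages[src[best].pageId][src[best].rowIdx]) {
        best = i;
      }
    }
    printf("%d ", src[best].pages[src[best].pageId][src[best].rowIdx]);
    if (++src[best].rowIdx == PAGE_ROWS && !loadNextPage(&src[best])) {
      completed++;                         /* this source is exhausted */
    }
  }
  printf("\n");                            /* prints 1 2 3 ... 12 */
  return 0;
}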
@ -817,17 +817,17 @@ int32_t loadNewDataFromDiskFor(SLocalReducer *pLocalReducer, SLocalDataSource *p
#endif
*needAdjustLoserTree = true;
} else {
pLocalReducer->numOfCompleted += 1;
pLocalMerge->numOfCompleted += 1;
pOneInterDataSrc->rowIdx = -1;
pOneInterDataSrc->pageId = -1;
*needAdjustLoserTree = true;
}
return pLocalReducer->numOfBuffer;
return pLocalMerge->numOfBuffer;
}
void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *pOneInterDataSrc,
void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
SLoserTreeInfo *pTree) {
/*
* load a new data page into memory for intermediate dataset source,
@ -835,7 +835,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
*/
bool needToAdjust = true;
if (pOneInterDataSrc->filePage.num <= pOneInterDataSrc->rowIdx) {
loadNewDataFromDiskFor(pLocalReducer, pOneInterDataSrc, &needToAdjust);
loadNewDataFromDiskFor(pLocalMerge, pOneInterDataSrc, &needToAdjust);
}
/*
@ -843,7 +843,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
* if the loser tree is rebuild completed, we do not need to adjust
*/
if (needToAdjust) {
int32_t leafNodeIdx = pTree->pNode[0].index + pLocalReducer->numOfBuffer;
int32_t leafNodeIdx = pTree->pNode[0].index + pLocalMerge->numOfBuffer;
#ifdef _DEBUG_VIEW
printf("before adjust:\t");
@ -860,7 +860,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
}
}
void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) {
void savePrevRecordAndSetupFillInfo(SLocalMerger *pLocalMerge, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) {
// discard following dataset in the same group and reset the interpolation information
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@ -873,28 +873,28 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
taosResetFillInfo(pFillInfo, revisedSTime);
}
pLocalReducer->discard = true;
pLocalReducer->discardData->num = 0;
pLocalMerge->discard = true;
pLocalMerge->discardData->num = 0;
SColumnModel *pModel = pLocalReducer->pDesc->pColumnModel;
tColModelAppend(pModel, pLocalReducer->discardData, pLocalReducer->prevRowOfInput, 0, 1, 1);
SColumnModel *pModel = pLocalMerge->pDesc->pColumnModel;
tColModelAppend(pModel, pLocalMerge->discardData, pLocalMerge->prevRowOfInput, 0, 1, 1);
}
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) {
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalMerger *pLocalMerge, SQueryInfo* pQueryInfo) {
assert(pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
tFilePage * pBeforeFillData = pLocalReducer->pResultBuf;
tFilePage * pBeforeFillData = pLocalMerge->pResultBuf;
pRes->data = pLocalReducer->pFinalRes;
pRes->data = pLocalMerge->pFinalRes;
pRes->numOfRows = (int32_t) pBeforeFillData->num;
if (pQueryInfo->limit.offset > 0) {
if (pQueryInfo->limit.offset < pRes->numOfRows) {
int32_t prevSize = (int32_t) pBeforeFillData->num;
tColModelErase(pLocalReducer->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
tColModelErase(pLocalMerge->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
/* remove the hole in column model */
tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);
tColModelCompact(pLocalMerge->finalModel, pBeforeFillData, prevSize);
pRes->numOfRows -= (int32_t) pQueryInfo->limit.offset;
pQueryInfo->limit.offset = 0;
@ -907,7 +907,7 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
if (pRes->numOfRowsGroup >= pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) {
pRes->numOfRows = 0;
pBeforeFillData->num = 0;
pLocalReducer->discard = true;
pLocalMerge->discard = true;
return;
}
@ -923,29 +923,29 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
pRes->numOfRows -= overflow;
pBeforeFillData->num -= overflow;
tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);
tColModelCompact(pLocalMerge->finalModel, pBeforeFillData, prevSize);
// set remain data to be discarded, and reset the interpolation information
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo);
savePrevRecordAndSetupFillInfo(pLocalMerge, pQueryInfo, pLocalMerge->pFillInfo);
}
memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalReducer->finalModel->rowSize));
memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalMerge->finalModel->rowSize));
pRes->numOfClauseTotal += pRes->numOfRows;
pBeforeFillData->num = 0;
}
/*
* Note: pRes->pLocalReducer may be null, due to the fact that "tscDestroyLocalReducer" is called
* Note: pRes->pLocalMerge may be null, due to the fact that "tscDestroyLocalMerger" is called
* by "interuptHandler" function in shell
*/
static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) {
static void doFillResult(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool doneOutput) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
tFilePage *pBeforeFillData = pLocalReducer->pResultBuf;
tFilePage *pBeforeFillData = pLocalMerge->pResultBuf;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
// todo extract function
int64_t actualETime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
@ -953,11 +953,11 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput);
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalReducer->resColModel->capacity);
pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalMerge->resColModel->capacity);
}
while (1) {
int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity);
int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalMerge->resColModel->capacity);
if (pQueryInfo->limit.offset < newRows) {
newRows -= pQueryInfo->limit.offset;
@ -970,7 +970,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
}
pRes->data = pLocalReducer->pFinalRes;
pRes->data = pLocalMerge->pFinalRes;
pRes->numOfRows = (int32_t) newRows;
pQueryInfo->limit.offset = 0;
@ -979,14 +979,13 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
pQueryInfo->limit.offset -= newRows;
pRes->numOfRows = 0;
int32_t rpoints = taosNumOfRemainRows(pFillInfo);
if (rpoints <= 0) {
if (!taosFillHasMoreResults(pFillInfo)) {
if (!doneOutput) { // reduce procedure has not completed yet, but current results for fill are exhausted
break;
}
// all output in current group are completed
int32_t totalRemainRows = (int32_t)getNumOfResWithFill(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
int32_t totalRemainRows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, actualETime, pLocalMerge->resColModel->capacity);
if (totalRemainRows <= 0) {
break;
}
@ -1004,7 +1003,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
assert(pRes->numOfRows >= 0);
/* set remain data to be discarded, and reset the interpolation information */
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo);
savePrevRecordAndSetupFillInfo(pLocalMerge, pQueryInfo, pFillInfo);
}
int32_t offset = 0;
@ -1026,8 +1025,8 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
tfree(pResPages);
}
static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
SColumnModel *pColumnModel = pLocalReducer->pDesc->pColumnModel;
static void savePreviousRow(SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
SColumnModel *pColumnModel = pLocalMerge->pDesc->pColumnModel;
assert(pColumnModel->capacity == 1 && tmpBuffer->num == 1);
// copy to previous temp buffer
@ -1035,20 +1034,20 @@ static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer)
SSchema *pSchema = getColumnModelSchema(pColumnModel, i);
int16_t offset = getColumnModelOffset(pColumnModel, i);
memcpy(pLocalReducer->prevRowOfInput + offset, tmpBuffer->data + offset, pSchema->bytes);
memcpy(pLocalMerge->prevRowOfInput + offset, tmpBuffer->data + offset, pSchema->bytes);
}
tmpBuffer->num = 0;
pLocalReducer->hasPrevRow = true;
pLocalMerge->hasPrevRow = true;
}
static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, bool needInit) {
static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bool needInit) {
// the tag columns need to be set before all functions execution
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t j = 0; j < size; ++j) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[j];
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[j];
// tags/tags_dummy function, the tag field of SQLFunctionCtx is from the input buffer
int32_t functionId = pCtx->functionId;
@ -1064,7 +1063,7 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer,
}
} else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, j);
pCtx->param[0].i64Key = pExpr->param[0].i64Key;
pCtx->param[0].i64 = pExpr->param[0].i64;
}
pCtx->currentStage = MERGE_STAGE;
@ -1075,20 +1074,20 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer,
}
for (int32_t j = 0; j < size; ++j) {
int32_t functionId = pLocalReducer->pCtx[j].functionId;
int32_t functionId = pLocalMerge->pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
aAggs[functionId].mergeFunc(&pLocalReducer->pCtx[j]);
aAggs[functionId].mergeFunc(&pLocalMerge->pCtx[j]);
}
}
static void handleUnprocessedRow(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
if (pLocalReducer->hasUnprocessedRow) {
pLocalReducer->hasUnprocessedRow = false;
doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
savePreviousRow(pLocalReducer, tmpBuffer);
static void handleUnprocessedRow(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
if (pLocalMerge->hasUnprocessedRow) {
pLocalMerge->hasUnprocessedRow = false;
doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
savePreviousRow(pLocalMerge, tmpBuffer);
}
}
@ -1102,7 +1101,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
* the number of output result is decided by main output
*/
int32_t functionId = pCtx[j].functionId;
if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) {
if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG) {
continue;
}
@ -1121,7 +1120,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
* filled with the same result, which is the tags, specified in group by clause
*
*/
static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLocalReducer *pLocalReducer) {
static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLocalMerger *pLocalMerge) {
int32_t maxBufSize = 0; // find the max tags column length to prepare the buffer
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
@ -1136,7 +1135,7 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
char *buf = malloc((size_t)maxBufSize);
for (int32_t k = 0; k < size; ++k) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k];
if (pCtx->functionId != TSDB_FUNC_TAG) {
continue;
}
@ -1154,20 +1153,20 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
free(buf);
}
int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
SQLFunctionCtx* pCtx = &pLocalReducer->pCtx[k];
SQLFunctionCtx* pCtx = &pLocalMerge->pCtx[k];
aAggs[pCtx->functionId].xFinalize(pCtx);
}
pLocalReducer->hasPrevRow = false;
pLocalMerge->hasPrevRow = false;
int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalReducer->pCtx);
pLocalReducer->pResultBuf->num += numOfRes;
int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalMerge->pCtx);
pLocalMerge->pResultBuf->num += numOfRes;
fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalReducer);
fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalMerge);
return numOfRes;
}
@ -1178,22 +1177,22 @@ int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
* results generated by simple aggregation functions are merged into one point
* *Exception*: column projection queries require no merge procedure
*/
bool needToMerge(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
bool needToMerge(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
int32_t ret = 0; // merge all result by default
int16_t functionId = pLocalReducer->pCtx[0].functionId;
int16_t functionId = pLocalMerge->pCtx[0].functionId;
// todo opt performance
if ((/*functionId == TSDB_FUNC_PRJ || */functionId == TSDB_FUNC_ARITHM) || (tscIsProjectionQueryOnSTable(pQueryInfo, 0))) { // column projection query
if ((/*functionId == TSDB_FUNC_PRJ || */functionId == TSDB_FUNC_ARITHM) || (tscIsProjectionQueryOnSTable(pQueryInfo, 0) && pQueryInfo->distinctTag == false)) { // column projection query
ret = 1; // disable merge procedure
} else {
tOrderDescriptor *pDesc = pLocalReducer->pDesc;
tOrderDescriptor *pDesc = pLocalMerge->pDesc;
if (pDesc->orderInfo.numOfCols > 0) {
if (pDesc->tsOrder == TSDB_ORDER_ASC) { // asc
// todo refactor comparator
ret = compare_a(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpBuffer->data);
ret = compare_a(pLocalMerge->pDesc, 1, 0, pLocalMerge->prevRowOfInput, 1, 0, tmpBuffer->data);
} else { // desc
ret = compare_d(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpBuffer->data);
ret = compare_d(pLocalMerge->pDesc, 1, 0, pLocalMerge->prevRowOfInput, 1, 0, tmpBuffer->data);
}
}
}
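needToMerge() decides whether the incoming row still belongs to the group of the previously saved row; if it does, the aggregation contexts keep merging, otherwise the previous group is finalized and a new one starts. A toy version of that group-boundary loop over pre-sorted rows, with all names illustrative:

#include <stdio.h>

/* Rows arrive sorted by the group column; while the key matches the previously
 * saved row the measure keeps accumulating, otherwise the group is finalized. */
typedef struct { int groupKey; int value; } Row;

int main(void) {
  Row rows[] = {{1, 10}, {1, 5}, {2, 7}, {2, 1}, {3, 4}};
  int n = (int)(sizeof(rows) / sizeof(rows[0]));

  int hasPrev = 0, prevKey = 0, sum = 0;
  for (int i = 0; i < n; ++i) {
    if (hasPrev && rows[i].groupKey == prevKey) {
      sum += rows[i].value;                                      /* same group: keep merging */
    } else {
      if (hasPrev) printf("group %d -> %d\n", prevKey, sum);     /* finalize previous group */
      prevKey = rows[i].groupKey;                                /* save the new "previous" row */
      sum     = rows[i].value;
      hasPrev = 1;
    }
  }
  if (hasPrev) printf("group %d -> %d\n", prevKey, sum);         /* flush the last group */
  return 0;
}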
@ -1231,17 +1230,17 @@ static bool saveGroupResultInfo(SSqlObj *pSql) {
/**
*
* @param pSql
* @param pLocalReducer
* @param pLocalMerge
* @param noMoreCurrentGroupRes
* @return if current group is skipped, return false, and do NOT record it into pRes->numOfGroups
*/
bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCurrentGroupRes) {
bool genFinalResults(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool noMoreCurrentGroupRes) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tFilePage * pResBuf = pLocalReducer->pResultBuf;
SColumnModel *pModel = pLocalReducer->resColModel;
tFilePage * pResBuf = pLocalMerge->pResultBuf;
SColumnModel *pModel = pLocalMerge->resColModel;
pRes->code = TSDB_CODE_SUCCESS;
@ -1252,11 +1251,11 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
if (pQueryInfo->slimit.offset > 0) {
pRes->numOfRows = 0;
pQueryInfo->slimit.offset -= 1;
pLocalReducer->discard = !noMoreCurrentGroupRes;
pLocalMerge->discard = !noMoreCurrentGroupRes;
if (pLocalReducer->discard) {
SColumnModel *pInternModel = pLocalReducer->pDesc->pColumnModel;
tColModelAppend(pInternModel, pLocalReducer->discardData, pLocalReducer->pTempBuffer->data, 0, 1, 1);
if (pLocalMerge->discard) {
SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel;
tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1);
}
return false;
@ -1265,19 +1264,14 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
tColModelCompact(pModel, pResBuf, pModel->capacity);
if (tscIsSecondStageQuery(pQueryInfo)) {
doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalReducer->finalModel->rowSize);
doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalMerge->finalModel->rowSize);
}
#ifdef _DEBUG_VIEW
printf("final result before interpo:\n");
// tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num);
#endif
// no interval query, no fill operation
if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
genFinalResWithoutFill(pRes, pLocalReducer, pQueryInfo);
genFinalResWithoutFill(pRes, pLocalMerge, pQueryInfo);
} else {
SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
SFillInfo* pFillInfo = pLocalMerge->pFillInfo;
if (pFillInfo != NULL) {
TSKEY ekey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
@ -1285,34 +1279,34 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf);
}
doFillResult(pSql, pLocalReducer, noMoreCurrentGroupRes);
doFillResult(pSql, pLocalMerge, noMoreCurrentGroupRes);
}
return true;
}
void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {// reset output buffer to the beginning
void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {// reset output buffer to the beginning
size_t t = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < t; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
pLocalReducer->pCtx[i].aOutputBuf = pLocalReducer->pResultBuf->data + pExpr->offset * pLocalReducer->resColModel->capacity;
pLocalMerge->pCtx[i].aOutputBuf = pLocalMerge->pResultBuf->data + pExpr->offset * pLocalMerge->resColModel->capacity;
if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM || pExpr->functionId == TSDB_FUNC_DIFF) {
pLocalReducer->pCtx[i].ptsOutputBuf = pLocalReducer->pCtx[0].aOutputBuf;
pLocalMerge->pCtx[i].ptsOutputBuf = pLocalMerge->pCtx[0].aOutputBuf;
}
}
memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage));
memset(pLocalMerge->pResultBuf, 0, pLocalMerge->nResultBufSize + sizeof(tFilePage));
}
static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer *pLocalReducer) {
static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalMerger *pLocalMerge) {
// In handling data in other groups, we need to reset the interpolation information for a new group data
pRes->numOfRows = 0;
pRes->numOfRowsGroup = 0;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pQueryInfo->limit.offset = pLocalReducer->offset;
pQueryInfo->limit.offset = pLocalMerge->offset;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
@ -1321,12 +1315,12 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t newTime = taosTimeTruncate(skey, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
taosResetFillInfo(pLocalMerge->pFillInfo, newTime);
}
}
static bool isAllSourcesCompleted(SLocalReducer *pLocalReducer) {
return (pLocalReducer->numOfBuffer == pLocalReducer->numOfCompleted);
static bool isAllSourcesCompleted(SLocalMerger *pLocalMerge) {
return (pLocalMerge->numOfBuffer == pLocalMerge->numOfCompleted);
}
static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
@ -1334,19 +1328,19 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
if (pFillInfo != NULL && taosNumOfRemainRows(pFillInfo) > 0) {
if (pFillInfo != NULL && taosFillHasMoreResults(pFillInfo)) {
assert(pQueryInfo->fillType != TSDB_FILL_NONE);
tFilePage *pFinalDataBuf = pLocalReducer->pResultBuf;
tFilePage *pFinalDataBuf = pLocalMerge->pResultBuf;
int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1));
// the first column must be the timestamp column
int32_t rows = (int32_t) getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity);
int32_t rows = (int32_t) getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalMerge->resColModel->capacity);
if (rows > 0) { // do fill gap
doFillResult(pSql, pLocalReducer, false);
doFillResult(pSql, pLocalMerge, false);
}
return true;
@ -1359,23 +1353,23 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
bool prevGroupCompleted = (!pLocalReducer->discard) && pLocalReducer->hasUnprocessedRow;
bool prevGroupCompleted = (!pLocalMerge->discard) && pLocalMerge->hasUnprocessedRow;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if ((isAllSourcesCompleted(pLocalReducer) && !pLocalReducer->hasPrevRow) || pLocalReducer->pLocalDataSrc[0] == NULL ||
if ((isAllSourcesCompleted(pLocalMerge) && !pLocalMerge->hasPrevRow) || pLocalMerge->pLocalDataSrc[0] == NULL ||
prevGroupCompleted) {
// if fillType == TSDB_FILL_NONE, return directly
if (pQueryInfo->fillType != TSDB_FILL_NONE &&
((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) {
int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey;
int32_t rows = (int32_t)getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity);
int32_t rows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalMerge->resColModel->capacity);
if (rows > 0) {
doFillResult(pSql, pLocalReducer, true);
doFillResult(pSql, pLocalMerge, true);
}
}
@ -1385,7 +1379,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
*
* No results will be generated and query completed.
*/
if (pRes->numOfRows > 0 || (isAllSourcesCompleted(pLocalReducer) && (!pLocalReducer->hasUnprocessedRow))) {
if (pRes->numOfRows > 0 || (isAllSourcesCompleted(pLocalMerge) && (!pLocalMerge->hasUnprocessedRow))) {
return true;
}
@ -1394,7 +1388,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
return true;
}
resetEnvForNewResultset(pRes, pCmd, pLocalReducer);
resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
}
return false;
@ -1404,12 +1398,12 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k];
pCtx->aOutputBuf += pCtx->outputBytes * numOfRes;
// set the correct output timestamp column position
@ -1418,7 +1412,7 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
}
}
doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
}
int32_t tscDoLocalMerge(SSqlObj *pSql) {
@ -1427,14 +1421,18 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
tscResetForNextRetrieve(pRes);
if (pSql->signature != pSql || pRes == NULL || pRes->pLocalReducer == NULL) { // all data has been processed
if (pSql->signature != pSql || pRes == NULL || pRes->pLocalMerger == NULL) { // all data has been processed
if (pRes->code == TSDB_CODE_SUCCESS) {
return pRes->code;
}
tscError("%p local merge abort due to error occurs, code:%s", pSql, tstrerror(pRes->code));
return pRes->code;
}
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tFilePage *tmpBuffer = pLocalReducer->pTempBuffer;
tFilePage *tmpBuffer = pLocalMerge->pTempBuffer;
if (doHandleLastRemainData(pSql)) {
return TSDB_CODE_SUCCESS;
@ -1444,24 +1442,24 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
SLoserTreeInfo *pTree = pLocalReducer->pLoserTree;
SLoserTreeInfo *pTree = pLocalMerge->pLoserTree;
// clear buffer
handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
SColumnModel *pModel = pLocalReducer->pDesc->pColumnModel;
handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
SColumnModel *pModel = pLocalMerge->pDesc->pColumnModel;
while (1) {
if (isAllSourcesCompleted(pLocalReducer)) {
if (isAllSourcesCompleted(pLocalMerge)) {
break;
}
#ifdef _DEBUG_VIEW
printf("chosen data in pTree[0] = %d\n", pTree->pNode[0].index);
#endif
assert((pTree->pNode[0].index < pLocalReducer->numOfBuffer) && (pTree->pNode[0].index >= 0) && tmpBuffer->num == 0);
assert((pTree->pNode[0].index < pLocalMerge->numOfBuffer) && (pTree->pNode[0].index >= 0) && tmpBuffer->num == 0);
// chosen from loser tree
SLocalDataSource *pOneDataSrc = pLocalReducer->pLocalDataSrc[pTree->pNode[0].index];
SLocalDataSource *pOneDataSrc = pLocalMerge->pLocalDataSrc[pTree->pNode[0].index];
tColModelAppend(pModel, tmpBuffer, pOneDataSrc->filePage.data, pOneDataSrc->rowIdx, 1,
pOneDataSrc->pMemBuffer->pColumnModel->capacity);
@ -1474,76 +1472,76 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
tColModelDisplayEx(pModel, tmpBuffer->data, tmpBuffer->num, pModel->capacity, colInfo);
#endif
if (pLocalReducer->discard) {
assert(pLocalReducer->hasUnprocessedRow == false);
if (pLocalMerge->discard) {
assert(pLocalMerge->hasUnprocessedRow == false);
/* current record belongs to the same group of previous record, need to discard it */
if (isSameGroup(pCmd, pLocalReducer, pLocalReducer->discardData->data, tmpBuffer)) {
if (isSameGroup(pCmd, pLocalMerge, pLocalMerge->discardData->data, tmpBuffer)) {
tmpBuffer->num = 0;
pOneDataSrc->rowIdx += 1;
adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
// all inputs are exhausted, abort current process
if (isAllSourcesCompleted(pLocalReducer)) {
if (isAllSourcesCompleted(pLocalMerge)) {
break;
}
// data belongs to the same group needs to be discarded
continue;
} else {
pLocalReducer->discard = false;
pLocalReducer->discardData->num = 0;
pLocalMerge->discard = false;
pLocalMerge->discardData->num = 0;
if (saveGroupResultInfo(pSql)) {
return TSDB_CODE_SUCCESS;
}
resetEnvForNewResultset(pRes, pCmd, pLocalReducer);
resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
}
}
if (pLocalReducer->hasPrevRow) {
if (needToMerge(pQueryInfo, pLocalReducer, tmpBuffer)) {
if (pLocalMerge->hasPrevRow) {
if (needToMerge(pQueryInfo, pLocalMerge, tmpBuffer)) {
// belong to the group of the previous row, continue process it
doExecuteSecondaryMerge(pCmd, pLocalReducer, false);
doExecuteSecondaryMerge(pCmd, pLocalMerge, false);
// copy to buffer
savePreviousRow(pLocalReducer, tmpBuffer);
savePreviousRow(pLocalMerge, tmpBuffer);
} else {
/*
* current row does not belong to the group of previous row.
* so the processing of previous group is completed.
*/
int32_t numOfRes = finalizeRes(pQueryInfo, pLocalReducer);
bool sameGroup = isSameGroup(pCmd, pLocalReducer, pLocalReducer->prevRowOfInput, tmpBuffer);
int32_t numOfRes = finalizeRes(pQueryInfo, pLocalMerge);
bool sameGroup = isSameGroup(pCmd, pLocalMerge, pLocalMerge->prevRowOfInput, tmpBuffer);
tFilePage *pResBuf = pLocalReducer->pResultBuf;
tFilePage *pResBuf = pLocalMerge->pResultBuf;
/*
* if the previous group does NOT generate any result (pResBuf->num == 0),
* continue to process results instead of return results.
*/
if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num == pLocalReducer->resColModel->capacity)) {
if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num == pLocalMerge->resColModel->capacity)) {
// does not belong to the same group
bool notSkipped = genFinalResults(pSql, pLocalReducer, !sameGroup);
bool notSkipped = genFinalResults(pSql, pLocalMerge, !sameGroup);
// this row needs to be discarded, since it belongs to the previous group
if (pLocalReducer->discard && sameGroup) {
pLocalReducer->hasUnprocessedRow = false;
if (pLocalMerge->discard && sameGroup) {
pLocalMerge->hasUnprocessedRow = false;
tmpBuffer->num = 0;
} else { // current row does not belong to the previous group, so it has not been handled yet.
pLocalReducer->hasUnprocessedRow = true;
pLocalMerge->hasUnprocessedRow = true;
}
resetOutputBuf(pQueryInfo, pLocalReducer);
resetOutputBuf(pQueryInfo, pLocalMerge);
pOneDataSrc->rowIdx += 1;
// here we do not check the return value
adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
if (pRes->numOfRows == 0) {
handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
if (!sameGroup) {
/*
@ -1554,7 +1552,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
resetEnvForNewResultset(pRes, pCmd, pLocalReducer);
resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
}
} else {
/*
@ -1562,7 +1560,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
* We start the process in a new round.
*/
if (sameGroup) {
handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
}
}
@ -1574,24 +1572,24 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
}
} else { // result buffer is not full
doProcessResultInNextWindow(pSql, numOfRes);
savePreviousRow(pLocalReducer, tmpBuffer);
savePreviousRow(pLocalMerge, tmpBuffer);
}
}
} else {
doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
savePreviousRow(pLocalReducer, tmpBuffer); // copy the processed row to buffer
doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
savePreviousRow(pLocalMerge, tmpBuffer); // copy the processed row to buffer
}
pOneDataSrc->rowIdx += 1;
adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
}
if (pLocalReducer->hasPrevRow) {
finalizeRes(pQueryInfo, pLocalReducer);
if (pLocalMerge->hasPrevRow) {
finalizeRes(pQueryInfo, pLocalMerge);
}
if (pLocalReducer->pResultBuf->num) {
genFinalResults(pSql, pLocalReducer, true);
if (pLocalMerge->pResultBuf->num) {
genFinalResults(pSql, pLocalMerge, true);
}
return TSDB_CODE_SUCCESS;
@ -1599,8 +1597,8 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen) {
SSqlRes *pRes = &pObj->res;
if (pRes->pLocalReducer != NULL) {
tscDestroyLocalReducer(pObj);
if (pRes->pLocalMerger != NULL) {
tscDestroyLocalMerger(pObj);
}
pRes->qhandle = 1; // hack to pass the safety check in fetch_row function
@ -1608,17 +1606,17 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
pRes->row = 0;
pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet
pRes->pLocalReducer = (SLocalReducer *)calloc(1, sizeof(SLocalReducer));
pRes->pLocalMerger = (SLocalMerger *)calloc(1, sizeof(SLocalMerger));
/*
* we need one additional byte space
* the sprintf function needs one additional space to put '\0' at the end of string
*/
size_t allocSize = numOfRes * rowLen + sizeof(tFilePage) + 1;
pRes->pLocalReducer->pResultBuf = (tFilePage *)calloc(1, allocSize);
pRes->pLocalMerger->pResultBuf = (tFilePage *)calloc(1, allocSize);
pRes->pLocalReducer->pResultBuf->num = numOfRes;
pRes->data = pRes->pLocalReducer->pResultBuf->data;
pRes->pLocalMerger->pResultBuf->num = numOfRes;
pRes->data = pRes->pLocalMerger->pResultBuf->data;
}
int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {

View File

@ -20,6 +20,7 @@
#include "os.h"
#include "ttype.h"
#include "hash.h"
#include "tscUtil.h"
#include "tschemautil.h"
@ -40,46 +41,9 @@ enum {
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
static int32_t tscToInteger(SStrToken *pToken, int64_t *value, char **endPtr) {
if (pToken->n == 0) {
return TK_ILLEGAL;
}
int32_t radix = 10;
if (pToken->type == TK_HEX) {
radix = 16;
} else if (pToken->type == TK_BIN) {
radix = 2;
}
errno = 0;
*value = strtoll(pToken->z, endPtr, radix);
if (**endPtr == 'e' || **endPtr == 'E' || **endPtr == '.') {
errno = 0;
double v = round(strtod(pToken->z, endPtr));
if (v > INT64_MAX || v <= INT64_MIN) {
errno = ERANGE;
} else {
*value = (int64_t)v;
}
}
// not a valid integer number, return error
if (*endPtr - pToken->z != pToken->n) {
return TK_ILLEGAL;
}
return pToken->type;
}
static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
if (pToken->n == 0) {
return TK_ILLEGAL;
}
errno = 0;
*value = strtod(pToken->z, endPtr);
*value = strtold(pToken->z, endPtr);
// not a valid integer number, return error
if ((*endPtr - pToken->z) != pToken->n) {
@ -163,86 +127,119 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
return TSDB_CODE_SUCCESS;
}
// todo extract the null value check
static bool isNullStr(SStrToken* pToken) {
return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) &&
(strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0));
}
int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
int16_t timePrec) {
int64_t iv;
int32_t numType;
char * endptr = NULL;
errno = 0; // clear the previous existed error information
int32_t ret;
char *endptr = NULL;
if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
return tscInvalidSQLErrMsg(msg, "invalid numeric data", pToken->z);
}
switch (pSchema->type) {
case TSDB_DATA_TYPE_BOOL: { // bool
if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) {
if (strncmp(pToken->z, "true", pToken->n) == 0) {
*(uint8_t *)payload = TSDB_TRUE;
} else if (strncmp(pToken->z, "false", pToken->n) == 0) {
*(uint8_t *)payload = TSDB_FALSE;
} else if (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0) {
*(uint8_t *)payload = TSDB_DATA_BOOL_NULL;
} else {
return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z);
}
} else if (pToken->type == TK_INTEGER) {
iv = strtoll(pToken->z, NULL, 10);
*(uint8_t *)payload = (int8_t)((iv == 0) ? TSDB_FALSE : TSDB_TRUE);
} else if (pToken->type == TK_FLOAT) {
double dv = strtod(pToken->z, NULL);
*(uint8_t *)payload = (int8_t)((dv == 0) ? TSDB_FALSE : TSDB_TRUE);
} else if (pToken->type == TK_NULL) {
*(uint8_t *)payload = TSDB_DATA_BOOL_NULL;
if (isNullStr(pToken)) {
*((uint8_t *)payload) = TSDB_DATA_BOOL_NULL;
} else {
return tscInvalidSQLErrMsg(msg, "invalid bool data", pToken->z);
if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) {
if (strncmp(pToken->z, "true", pToken->n) == 0) {
*(uint8_t *)payload = TSDB_TRUE;
} else if (strncmp(pToken->z, "false", pToken->n) == 0) {
*(uint8_t *)payload = TSDB_FALSE;
} else {
return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z);
}
} else if (pToken->type == TK_INTEGER) {
iv = strtoll(pToken->z, NULL, 10);
*(uint8_t *)payload = (int8_t)((iv == 0) ? TSDB_FALSE : TSDB_TRUE);
} else if (pToken->type == TK_FLOAT) {
double dv = strtod(pToken->z, NULL);
*(uint8_t *)payload = (int8_t)((dv == 0) ? TSDB_FALSE : TSDB_TRUE);
} else {
return tscInvalidSQLErrMsg(msg, "invalid bool data", pToken->z);
}
}
break;
}
case TSDB_DATA_TYPE_TINYINT:
if (pToken->type == TK_NULL) {
*((int8_t *)payload) = TSDB_DATA_TINYINT_NULL;
} else if ((pToken->type == TK_STRING) && (pToken->n != 0) &&
(strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)) {
*((int8_t *)payload) = TSDB_DATA_TINYINT_NULL;
if (isNullStr(pToken)) {
*((uint8_t *)payload) = TSDB_DATA_TINYINT_NULL;
} else {
numType = tscToInteger(pToken, &iv, &endptr);
if (TK_ILLEGAL == numType) {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid tinyint data", pToken->z);
} else if (errno == ERANGE || iv > INT8_MAX || iv <= INT8_MIN) {
return tscInvalidSQLErrMsg(msg, "tinyint data overflow", pToken->z);
} else if (!IS_VALID_TINYINT(iv)) {
return tscInvalidSQLErrMsg(msg, "data overflow", pToken->z);
}
*((int8_t *)payload) = (int8_t)iv;
*((uint8_t *)payload) = (uint8_t)iv;
}
break;
case TSDB_DATA_TYPE_UTINYINT:
if (isNullStr(pToken)) {
*((uint8_t *)payload) = TSDB_DATA_UTINYINT_NULL;
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid unsigned tinyint data", pToken->z);
} else if (!IS_VALID_UTINYINT(iv)) {
return tscInvalidSQLErrMsg(msg, "unsigned tinyint data overflow", pToken->z);
}
*((uint8_t *)payload) = (uint8_t)iv;
}
break;
case TSDB_DATA_TYPE_SMALLINT:
if (pToken->type == TK_NULL) {
*((int16_t *)payload) = TSDB_DATA_SMALLINT_NULL;
} else if ((pToken->type == TK_STRING) && (pToken->n != 0) &&
(strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)) {
if (isNullStr(pToken)) {
*((int16_t *)payload) = TSDB_DATA_SMALLINT_NULL;
} else {
numType = tscToInteger(pToken, &iv, &endptr);
if (TK_ILLEGAL == numType) {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid smallint data", pToken->z);
} else if (errno == ERANGE || iv > INT16_MAX || iv <= INT16_MIN) {
} else if (!IS_VALID_SMALLINT(iv)) {
return tscInvalidSQLErrMsg(msg, "smallint data overflow", pToken->z);
}
*((int16_t *)payload) = (int16_t)iv;
}
break;
case TSDB_DATA_TYPE_USMALLINT:
if (isNullStr(pToken)) {
*((uint16_t *)payload) = TSDB_DATA_USMALLINT_NULL;
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid unsigned smallint data", pToken->z);
} else if (!IS_VALID_USMALLINT(iv)) {
return tscInvalidSQLErrMsg(msg, "unsigned smallint data overflow", pToken->z);
}
*((uint16_t *)payload) = (uint16_t)iv;
}
break;
case TSDB_DATA_TYPE_INT:
if (pToken->type == TK_NULL) {
*((int32_t *)payload) = TSDB_DATA_INT_NULL;
} else if ((pToken->type == TK_STRING) && (pToken->n != 0) &&
(strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)) {
if (isNullStr(pToken)) {
*((int32_t *)payload) = TSDB_DATA_INT_NULL;
} else {
numType = tscToInteger(pToken, &iv, &endptr);
if (TK_ILLEGAL == numType) {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid int data", pToken->z);
} else if (errno == ERANGE || iv > INT32_MAX || iv <= INT32_MIN) {
} else if (!IS_VALID_INT(iv)) {
return tscInvalidSQLErrMsg(msg, "int data overflow", pToken->z);
}
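The integer cases above now go through tStrToInteger plus IS_VALID_TINYINT/SMALLINT/INT/... range checks instead of the removed tscToInteger. Presumably those macros are simple range tests that also exclude the per-type NULL sentinel; the sketch below shows a strtoll-based parser and a range macro in that spirit. parseInteger and MY_VALID_TINYINT are made-up names, not the repository's functions.

#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Presumed shape of a range-check macro: stay inside int8_t and keep INT8_MIN
 * free for the NULL sentinel. The real IS_VALID_TINYINT lives in the repo headers. */
#define MY_VALID_TINYINT(v) ((v) >= INT8_MIN + 1 && (v) <= INT8_MAX)

/* Parse a whole token as a signed integer or fail; a made-up stand-in for tStrToInteger. */
static int parseInteger(const char *z, size_t n, int64_t *out) {
  char buf[64];
  if (n == 0 || n >= sizeof(buf)) return -1;
  memcpy(buf, z, n);
  buf[n] = '\0';
  errno = 0;
  char *end = NULL;
  int64_t v = strtoll(buf, &end, 0);       /* base 0 also accepts 0x.. hex tokens */
  if (errno == ERANGE || end != buf + n) return -1;
  *out = v;
  return 0;
}

int main(void) {
  int64_t v;
  const char *tok = "127";
  if (parseInteger(tok, strlen(tok), &v) == 0 && MY_VALID_TINYINT(v))
    printf("tinyint ok: %" PRId64 "\n", v);

  tok = "300";
  if (parseInteger(tok, strlen(tok), &v) == 0 && !MY_VALID_TINYINT(v))
    printf("tinyint overflow: %" PRId64 "\n", v);
  return 0;
}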
@ -251,17 +248,30 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload,
break;
case TSDB_DATA_TYPE_UINT:
if (isNullStr(pToken)) {
*((uint32_t *)payload) = TSDB_DATA_UINT_NULL;
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid unsigned int data", pToken->z);
} else if (!IS_VALID_UINT(iv)) {
return tscInvalidSQLErrMsg(msg, "unsigned int data overflow", pToken->z);
}
*((uint32_t *)payload) = (uint32_t)iv;
}
break;
case TSDB_DATA_TYPE_BIGINT:
if (pToken->type == TK_NULL) {
*((int64_t *)payload) = TSDB_DATA_BIGINT_NULL;
} else if ((pToken->type == TK_STRING) && (pToken->n != 0) &&
(strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)) {
if (isNullStr(pToken)) {
*((int64_t *)payload) = TSDB_DATA_BIGINT_NULL;
} else {
numType = tscToInteger(pToken, &iv, &endptr);
if (TK_ILLEGAL == numType) {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid bigint data", pToken->z);
} else if (errno == ERANGE || iv == INT64_MIN) {
} else if (!IS_VALID_BIGINT(iv)) {
return tscInvalidSQLErrMsg(msg, "bigint data overflow", pToken->z);
}
@ -269,11 +279,23 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload,
}
break;
case TSDB_DATA_TYPE_UBIGINT:
if (isNullStr(pToken)) {
*((uint64_t *)payload) = TSDB_DATA_UBIGINT_NULL;
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid unsigned bigint data", pToken->z);
} else if (!IS_VALID_UBIGINT((uint64_t)iv)) {
return tscInvalidSQLErrMsg(msg, "unsigned bigint data overflow", pToken->z);
}
*((uint64_t *)payload) = iv;
}
break;
case TSDB_DATA_TYPE_FLOAT:
if (pToken->type == TK_NULL) {
*((int32_t *)payload) = TSDB_DATA_FLOAT_NULL;
} else if ((pToken->type == TK_STRING) && (pToken->n != 0) &&
(strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)) {
if (isNullStr(pToken)) {
*((int32_t *)payload) = TSDB_DATA_FLOAT_NULL;
} else {
double dv;
@ -281,24 +303,16 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload,
return tscInvalidSQLErrMsg(msg, "illegal float data", pToken->z);
}
float fv = (float)dv;
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || (fv > FLT_MAX || fv < -FLT_MAX)) {
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || isnan(dv)) {
return tscInvalidSQLErrMsg(msg, "illegal float data", pToken->z);
}
if (isinf(fv) || isnan(fv)) {
*((int32_t *)payload) = TSDB_DATA_FLOAT_NULL;
}
*((float *)payload) = fv;
*((float *)payload) = (float)dv;
}
break;
case TSDB_DATA_TYPE_DOUBLE:
if (pToken->type == TK_NULL) {
*((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL;
} else if ((pToken->type == TK_STRING) && (pToken->n != 0) &&
(strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)) {
if (isNullStr(pToken)) {
*((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL;
} else {
double dv;
@ -306,15 +320,11 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload,
return tscInvalidSQLErrMsg(msg, "illegal double data", pToken->z);
}
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || (dv > DBL_MAX || dv < -DBL_MAX)) {
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) {
return tscInvalidSQLErrMsg(msg, "illegal double data", pToken->z);
}
if (isinf(dv) || isnan(dv)) {
*((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL;
} else {
*((double *)payload) = dv;
}
*((double *)payload) = dv;
}
break;
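The float and double paths are tightened so that out-of-range values, infinities and NaNs are rejected with an error instead of being silently stored as a NULL sentinel. A small strtod-based check in the same spirit; the helper name is hypothetical.

#include <errno.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper mirroring the tightened check: reject overflow, inf and nan
 * outright instead of mapping them to a NULL sentinel. */
static int parseFloatStrict(const char *s, float *out) {
  errno = 0;
  char *end = NULL;
  double dv = strtod(s, &end);
  if (end == s || *end != '\0') return -1;                       /* token is not fully numeric */
  if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) ||
      dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || isnan(dv)) {
    return -1;                                                   /* illegal float data */
  }
  *out = (float)dv;
  return 0;
}

int main(void) {
  float f;
  printf("3.14  -> %d\n", parseFloatStrict("3.14", &f));   /* 0: accepted */
  printf("1e200 -> %d\n", parseFloatStrict("1e200", &f));  /* -1: overflows float range */
  printf("nan   -> %d\n", parseFloatStrict("nan", &f));    /* -1: rejected */
  return 0;
}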
@ -337,7 +347,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload,
setVardataNull(payload, TSDB_DATA_TYPE_NCHAR);
} else {
// if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long'
size_t output = 0;
int32_t output = 0;
if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(payload), pSchema->bytes - VARSTR_HEADER_SIZE, &output)) {
char buf[512] = {0};
snprintf(buf, tListLen(buf), "%s", strerror(errno));
@ -693,7 +703,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColI
STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@ -752,6 +762,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
char *sql = *sqlstr;
pSql->cmd.autoCreated = false;
// get the token of specified table
index = 0;
tableToken = tStrGetToken(sql, &index, false, 0, NULL);
@ -801,26 +813,26 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
tscAddEmptyMetaInfo(pQueryInfo);
}
STableMetaInfo *pSTableMeterMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX);
code = tscSetTableFullName(pSTableMeterMetaInfo, &sToken, pSql);
STableMetaInfo *pSTableMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX);
code = tscSetTableFullName(pSTableMetaInfo, &sToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
tstrncpy(pCmd->tagData.name, pSTableMeterMetaInfo->name, sizeof(pCmd->tagData.name));
tNameExtractFullName(&pSTableMetaInfo->name, pCmd->tagData.name);
pCmd->tagData.dataLen = 0;
code = tscGetTableMeta(pSql, pSTableMeterMetaInfo);
code = tscGetTableMeta(pSql, pSTableMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMeterMetaInfo)) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMetaInfo)) {
return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z);
}
SSchema *pTagSchema = tscGetTableTagSchema(pSTableMeterMetaInfo->pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pSTableMeterMetaInfo->pTableMeta);
SSchema *pTagSchema = tscGetTableTagSchema(pSTableMetaInfo->pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pSTableMetaInfo->pTableMeta);
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
@ -828,7 +840,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
SParsedDataColInfo spd = {0};
uint8_t numOfTags = tscGetNumOfTags(pSTableMeterMetaInfo->pTableMeta);
uint8_t numOfTags = tscGetNumOfTags(pSTableMetaInfo->pTableMeta);
spd.numOfCols = numOfTags;
// if specify some tags column
@ -893,6 +905,13 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
}
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
if (sToken.type != TK_LP) {
return tscInvalidSQLErrMsg(pCmd->payload, NULL, sToken.z);
}
SKVRowBuilder kvRowBuilder = {0};
if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -935,11 +954,15 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
tdDestroyKVRowBuilder(&kvRowBuilder);
if (row == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
return tscInvalidSQLErrMsg(pCmd->payload, "tag value expected", NULL);
}
tdSortKVRowByColIdx(row);
pCmd->tagData.dataLen = kvRowLen(row);
if (pCmd->tagData.dataLen <= 0){
return tscInvalidSQLErrMsg(pCmd->payload, "tag value expected", NULL);
}
char* pTag = realloc(pCmd->tagData.data, pCmd->tagData.dataLen);
if (pTag == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -1020,11 +1043,7 @@ static int32_t validateDataSource(SSqlCmd *pCmd, int8_t type, const char *sql) {
}
/**
* usage: insert into table1 values() () table2 values()()
*
* @param str
* @param acct
* @param db
* parse insert sql
* @param pSql
* @return
*/
@ -1265,7 +1284,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
}
if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
if ((code = tscMergeTableDataBlocks(pSql)) != TSDB_CODE_SUCCESS) {
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
goto _clean;
}
}
@ -1320,15 +1339,6 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
}
if (tscIsInsertData(pSql->sqlstr)) {
/*
* Set the fp before parse the sql string, in case of getTableMeta failed, in which
* the error handle callback function can rightfully restore the user-defined callback function (fp).
*/
if (initial && (pSql->cmd.insertType != TSDB_QUERY_TYPE_STMT_INSERT)) {
pSql->fetchFp = pSql->fp;
pSql->fp = (void(*)())tscHandleMultivnodeInsert;
}
if (initial && ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS)) {
return ret;
}
@ -1336,10 +1346,11 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
// make a backup as tsParseInsertSql may modify the string
char* sqlstr = strdup(pSql->sqlstr);
ret = tsParseInsertSql(pSql);
if (sqlstr == NULL || pSql->parseRetry >= 1 || ret != TSDB_CODE_TSC_INVALID_SQL) {
if ((sqlstr == NULL) || (pSql->parseRetry >= 1) ||
(ret != TSDB_CODE_TSC_SQL_SYNTAX_ERROR && ret != TSDB_CODE_TSC_INVALID_SQL)) {
free(sqlstr);
} else {
tscResetSqlCmdObj(pCmd);
tscResetSqlCmd(pCmd, true);
free(pSql->sqlstr);
pSql->sqlstr = sqlstr;
pSql->parseRetry++;
@ -1351,7 +1362,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
SSqlInfo SQLInfo = qSQLParse(pSql->sqlstr);
ret = tscToSQLCmd(pSql, &SQLInfo);
if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0 && SQLInfo.type == TSDB_SQL_NULL) {
tscResetSqlCmdObj(pCmd);
tscResetSqlCmd(pCmd, true);
pSql->parseRetry++;
ret = tscToSQLCmd(pSql, &SQLInfo);
}
@ -1382,7 +1393,7 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock
return tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", NULL);
}
if ((code = tscMergeTableDataBlocks(pSql)) != TSDB_CODE_SUCCESS) {
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
return code;
}
@ -1399,39 +1410,38 @@ typedef struct SImportFileSupport {
FILE *fp;
} SImportFileSupport;
static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRows) {
assert(param != NULL && tres != NULL);
char * tokenBuf = NULL;
size_t n = 0;
ssize_t readLen = 0;
char * line = NULL;
int32_t count = 0;
int32_t maxRows = 0;
FILE * fp = NULL;
SSqlObj *pSql = tres;
SSqlCmd *pCmd = &pSql->cmd;
SImportFileSupport *pSupporter = (SImportFileSupport *) param;
SImportFileSupport *pSupporter = (SImportFileSupport *)param;
SSqlObj *pParentSql = pSupporter->pSql;
FILE *fp = pSupporter->fp;
fp = pSupporter->fp;
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { // handle error
assert(taos_errno(pSql) == code);
int32_t code = pSql->res.code;
do {
if (code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
assert(pSql->res.numOfRows == 0);
int32_t errc = fseek(fp, 0, SEEK_SET);
if (errc < 0) {
tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
} else {
break;
}
}
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql);
return;
} while (0);
// retry parsing the data from the file and import it from the beginning again
if (code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
assert(pSql->res.numOfRows == 0);
int32_t ret = fseek(fp, 0, SEEK_SET);
if (ret < 0) {
tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
code = TAOS_SYSTEM_ERROR(errno);
goto _error;
}
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
// accumulate the total submit records
@ -1445,28 +1455,32 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
SParsedDataColInfo spd = {.numOfCols = tinfo.numOfColumns};
tscSetAssignedColumnInfo(&spd, pSchema, tinfo.numOfColumns);
size_t n = 0;
ssize_t readLen = 0;
char * line = NULL;
int32_t count = 0;
int32_t maxRows = 0;
tfree(pCmd->pTableNameList);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
if (pCmd->pTableBlockHashList == NULL) {
pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
if (pCmd->pTableBlockHashList == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
}
STableDataBlocks *pTableDataBlock = NULL;
int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
int32_t ret =
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
if (ret != TSDB_CODE_SUCCESS) {
// return ret;
pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
tscAllocateMemIfNeed(pTableDataBlock, tinfo.rowSize, &maxRows);
char *tokenBuf = calloc(1, 4096);
tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW);
if (tokenBuf == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
@ -1494,30 +1508,42 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
}
tfree(tokenBuf);
free(line);
tfree(line);
if (count > 0) {
code = doPackSendDataBlock(pSql, count, pTableDataBlock);
if (code != TSDB_CODE_SUCCESS) {
pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql);
pParentSql->res.code = code;
if (code == TSDB_CODE_SUCCESS) {
if (count > 0) {
code = doPackSendDataBlock(pSql, count, pTableDataBlock);
if (code == TSDB_CODE_SUCCESS) {
return;
} else {
goto _error;
}
} else {
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
pParentSql->fp = pParentSql->fetchFp;
// all data has been sent to vnode, call user function
int32_t v = (code != TSDB_CODE_SUCCESS) ? code : (int32_t)pParentSql->res.numOfRows;
(*pParentSql->fp)(pParentSql->param, pParentSql, v);
return;
}
} else {
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
pParentSql->fp = pParentSql->fetchFp;
// all data has been sent to vnode, call user function
int32_t v = (pParentSql->res.code != TSDB_CODE_SUCCESS) ? pParentSql->res.code : (int32_t)pParentSql->res.numOfRows;
(*pParentSql->fp)(pParentSql->param, pParentSql, v);
}
_error:
tfree(tokenBuf);
tfree(line);
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
tscAsyncResultOnError(pParentSql);
}
void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
void tscImportDataFromFile(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
if (pCmd->command != TSDB_SQL_INSERT) {
return;
@ -1529,19 +1555,19 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
SSqlObj *pNew = createSubqueryObj(pSql, 0, parseFileSendDataBlock, pSupporter, TSDB_SQL_INSERT, NULL);
pCmd->count = 1;
FILE *fp = fopen(pCmd->payload, "r");
FILE *fp = fopen(pCmd->payload, "rb");
if (fp == NULL) {
pSql->res.code = TAOS_SYSTEM_ERROR(errno);
tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
tfree(pSupporter);
taos_free_result(pNew);
tscAsyncResultOnError(pSql);
return;
}
pSupporter->pSql = pSql;
pSupporter->fp = fp;
pSupporter->fp = fp;
parseFileSendDataBlock(pSupporter, pNew, 0);
parseFileSendDataBlock(pSupporter, pNew, TSDB_CODE_SUCCESS);
}
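For reference, the tscImportDataFromFile() path reworked above is normally reached through an insert-from-file statement issued over the regular query API; a minimal sketch (host, credentials, table name and CSV path are illustrative assumptions) could look like this:

// sketch only: assumes a reachable taosd and an existing table db.tb whose
// schema matches the rows stored in /tmp/data.csv
#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "db", 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect\n");
    return 1;
  }

  // an insert ... file statement is what drives the client-side import path
  TAOS_RES *res = taos_query(conn, "insert into tb file '/tmp/data.csv'");
  if (res == NULL || taos_errno(res) != 0) {
    fprintf(stderr, "import failed: %s\n", taos_errstr(res));
  } else {
    printf("imported %d rows\n", taos_affected_rows(res));
  }

  taos_free_result(res);
  taos_close(conn);
  return 0;
}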

View File

@ -84,35 +84,35 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
var->nLen = 0;
if (tb->is_null != NULL && *(tb->is_null)) {
var->nType = TSDB_DATA_TYPE_NULL;
var->i64Key = 0;
var->i64 = 0;
continue;
}
var->nType = tb->buffer_type;
switch (tb->buffer_type) {
case TSDB_DATA_TYPE_NULL:
var->i64Key = 0;
var->i64 = 0;
break;
case TSDB_DATA_TYPE_BOOL:
var->i64Key = (*(int8_t*)tb->buffer) ? 1 : 0;
var->i64 = (*(int8_t*)tb->buffer) ? 1 : 0;
break;
case TSDB_DATA_TYPE_TINYINT:
var->i64Key = *(int8_t*)tb->buffer;
var->i64 = *(int8_t*)tb->buffer;
break;
case TSDB_DATA_TYPE_SMALLINT:
var->i64Key = *(int16_t*)tb->buffer;
var->i64 = *(int16_t*)tb->buffer;
break;
case TSDB_DATA_TYPE_INT:
var->i64Key = *(int32_t*)tb->buffer;
var->i64 = *(int32_t*)tb->buffer;
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP:
var->i64Key = *(int64_t*)tb->buffer;
var->i64 = *(int64_t*)tb->buffer;
break;
case TSDB_DATA_TYPE_FLOAT:
@ -219,7 +219,7 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_BIGINT:
taosStringBuilderAppendInteger(&sb, var->i64Key);
taosStringBuilderAppendInteger(&sb, var->i64);
break;
case TSDB_DATA_TYPE_FLOAT:
@ -255,7 +255,6 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
////////////////////////////////////////////////////////////////////////////////
// functions for insertion statement preparation
static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
if (bind->is_null != NULL && *(bind->is_null)) {
setNull(data + param->offset, param->type, param->bytes);
@ -616,7 +615,7 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
case TSDB_DATA_TYPE_NCHAR: {
switch (bind->buffer_type) {
case TSDB_DATA_TYPE_NCHAR: {
size_t output = 0;
int32_t output = 0;
if (!taosMbsToUcs4(bind->buffer, *bind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
@ -678,7 +677,7 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
return TSDB_CODE_SUCCESS;
case TSDB_DATA_TYPE_NCHAR: {
size_t output = 0;
int32_t output = 0;
if (!taosMbsToUcs4(bind->buffer, *bind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
@ -697,71 +696,52 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
SSqlCmd* pCmd = &stmt->pSql->cmd;
int32_t alloced = 1, binded = 0;
if (pCmd->batchSize > 0) {
alloced = (pCmd->batchSize + 1) / 2;
binded = pCmd->batchSize / 2;
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pCmd->pTableBlockHashList == NULL) {
pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
}
size_t size = taosArrayGetSize(pCmd->pDataBlocks);
for (int32_t i = 0; i < size; ++i) {
STableDataBlocks* pBlock = taosArrayGetP(pCmd->pDataBlocks, i);
uint32_t totalDataSize = pBlock->size - sizeof(SSubmitBlk);
uint32_t dataSize = totalDataSize / alloced;
assert(dataSize * alloced == totalDataSize);
STableDataBlocks* pBlock = NULL;
if (alloced == binded) {
totalDataSize += dataSize + sizeof(SSubmitBlk);
if (totalDataSize > pBlock->nAllocSize) {
const double factor = 1.5;
void* tmp = realloc(pBlock->pData, (uint32_t)(totalDataSize * factor));
if (tmp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pBlock->pData = (char*)tmp;
pBlock->nAllocSize = (uint32_t)(totalDataSize * factor);
}
int32_t ret =
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
if (ret != 0) {
// todo handle error
}
uint32_t totalDataSize = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
if (totalDataSize > pBlock->nAllocSize) {
const double factor = 1.5;
void* tmp = realloc(pBlock->pData, (uint32_t)(totalDataSize * factor));
if (tmp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
char* data = pBlock->pData + sizeof(SSubmitBlk) + dataSize * binded;
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
SParamInfo* param = pBlock->params + j;
int code = doBindParam(data, param, bind + param->idx);
if (code != TSDB_CODE_SUCCESS) {
tscDebug("param %d: type mismatch or invalid", param->idx);
return code;
}
pBlock->pData = (char*)tmp;
pBlock->nAllocSize = (uint32_t)(totalDataSize * factor);
}
char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * pCmd->batchSize;
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
SParamInfo* param = &pBlock->params[j];
int code = doBindParam(data, param, &bind[param->idx]);
if (code != TSDB_CODE_SUCCESS) {
tscDebug("param %d: type mismatch or invalid", param->idx);
return code;
}
}
// actual work of all data blocks is done, update block size and numOfRows.
// note we don't do this block by block during the binding process, because
// we cannot recover if something goes wrong.
pCmd->batchSize = binded * 2 + 1;
if (binded < alloced) {
return TSDB_CODE_SUCCESS;
}
size_t total = taosArrayGetSize(pCmd->pDataBlocks);
for (int32_t i = 0; i < total; ++i) {
STableDataBlocks* pBlock = taosArrayGetP(pCmd->pDataBlocks, i);
uint32_t totalDataSize = pBlock->size - sizeof(SSubmitBlk);
pBlock->size += totalDataSize / alloced;
SSubmitBlk* pSubmit = (SSubmitBlk*)pBlock->pData;
pSubmit->numOfRows += pSubmit->numOfRows / alloced;
}
return TSDB_CODE_SUCCESS;
}
static int insertStmtAddBatch(STscStmt* stmt) {
SSqlCmd* pCmd = &stmt->pSql->cmd;
if ((pCmd->batchSize % 2) == 1) {
++pCmd->batchSize;
}
++pCmd->batchSize;
return TSDB_CODE_SUCCESS;
}
@ -793,50 +773,66 @@ static int insertStmtExecute(STscStmt* stmt) {
if (pCmd->batchSize == 0) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
if ((pCmd->batchSize % 2) == 1) {
++pCmd->batchSize;
}
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
assert(pCmd->numOfClause == 1);
if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) {
// merge according to vgid
int code = tscMergeTableDataBlocks(stmt->pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
STableDataBlocks *pDataBlock = taosArrayGetP(pCmd->pDataBlocks, 0);
code = tscCopyDataBlockToPayload(stmt->pSql, pDataBlock);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// set the next sent data vnode index in data block arraylist
pTableMetaInfo->vgroupIndex = 1;
} else {
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
if (taosHashGetSize(pCmd->pTableBlockHashList) == 0) {
return TSDB_CODE_SUCCESS;
}
SSqlObj *pSql = stmt->pSql;
SSqlRes *pRes = &pSql->res;
pRes->numOfRows = 0;
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pCmd->pTableBlockHashList == NULL) {
pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
}
STableDataBlocks* pBlock = NULL;
int32_t ret =
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
assert(ret == 0);
pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
SSubmitBlk* pBlk = (SSubmitBlk*) pBlock->pData;
pBlk->numOfRows = pCmd->batchSize;
pBlk->dataLen = 0;
pBlk->uid = pTableMeta->id.uid;
pBlk->tid = pTableMeta->id.tid;
int code = tscMergeTableDataBlocks(stmt->pSql, false);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
STableDataBlocks* pDataBlock = taosArrayGetP(pCmd->pDataBlocks, 0);
code = tscCopyDataBlockToPayload(stmt->pSql, pDataBlock);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
SSqlObj* pSql = stmt->pSql;
SSqlRes* pRes = &pSql->res;
pRes->numOfRows = 0;
pRes->numOfTotal = 0;
pRes->numOfClauseTotal = 0;
pRes->qhandle = 0;
pSql->cmd.insertType = 0;
pSql->fetchFp = waitForQueryRsp;
pSql->fp = (void(*)())tscHandleMultivnodeInsert;
tscDoQuery(pSql);
tscProcessSql(pSql);
// wait for the callback function to post the semaphore
tsem_wait(&pSql->rspSem);
return pSql->res.code;
// data block reset
pCmd->batchSize = 0;
for(int32_t i = 0; i < pCmd->numOfTables; ++i) {
if (pCmd->pTableNameList && pCmd->pTableNameList[i]) {
tfree(pCmd->pTableNameList[i]);
}
}
pCmd->numOfTables = 0;
tfree(pCmd->pTableNameList);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
return pSql->res.code;
}
////////////////////////////////////////////////////////////////////////////////
@ -867,11 +863,11 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
}
tsem_init(&pSql->rspSem, 0, 0);
pSql->signature = pSql;
pSql->pTscObj = pObj;
pSql->maxRetry = TSDB_MAX_REPLICA;
pSql->signature = pSql;
pSql->pTscObj = pObj;
pSql->maxRetry = TSDB_MAX_REPLICA;
pStmt->pSql = pSql;
pStmt->pSql = pSql;
return pStmt;
}
@ -890,7 +886,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
SSqlRes *pRes = &pSql->res;
pSql->param = (void*) pSql;
pSql->fp = waitForQueryRsp;
pSql->cmd.insertType = TSDB_QUERY_TYPE_STMT_INSERT;
pSql->fetchFp = waitForQueryRsp;
pCmd->insertType = TSDB_QUERY_TYPE_STMT_INSERT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
tscError("%p failed to malloc payload buffer", pSql);
@ -956,8 +954,9 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
STscStmt* pStmt = (STscStmt*)stmt;
if (pStmt->isInsert) {
return insertStmtBindParam(pStmt, bind);
} else {
return normalStmtBindParam(pStmt, bind);
}
return normalStmtBindParam(pStmt, bind);
}
int taos_stmt_add_batch(TAOS_STMT* stmt) {
@ -981,7 +980,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
if (pStmt->isInsert) {
ret = insertStmtExecute(pStmt);
} else {
} else { // normal stmt query
char* sql = normalStmtBuildSql(pStmt);
if (sql == NULL) {
ret = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -995,6 +994,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) {
free(sql);
}
}
return ret;
}
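Taken together, the reworked insertStmtBindParam()/insertStmtAddBatch()/insertStmtExecute() path is exercised through the public prepared-statement calls shown in this file; a minimal sketch (the table db.m1 and its two-column schema are assumptions) might be:

// sketch only: assumes a table "db.m1 (ts timestamp, v int)" already exists
#include <string.h>
#include <taos.h>

static int insert_one_row(TAOS *conn, int64_t ts, int32_t v) {
  TAOS_STMT *stmt = taos_stmt_init(conn);
  if (stmt == NULL) return -1;

  if (taos_stmt_prepare(stmt, "insert into db.m1 values(?, ?)", 0) != 0) {
    taos_stmt_close(stmt);
    return -1;
  }

  TAOS_BIND params[2];
  memset(params, 0, sizeof(params));

  params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer        = &ts;
  params[0].buffer_length = sizeof(ts);
  params[0].length        = &params[0].buffer_length;

  params[1].buffer_type   = TSDB_DATA_TYPE_INT;
  params[1].buffer        = &v;
  params[1].buffer_length = sizeof(v);
  params[1].length        = &params[1].buffer_length;

  // bind one row, queue it, then submit everything queued so far in one block
  int code = taos_stmt_bind_param(stmt, params);
  if (code == 0) code = taos_stmt_add_batch(stmt);
  if (code == 0) code = taos_stmt_execute(stmt);

  taos_stmt_close(stmt);
  return code;
}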

File diff suppressed because it is too large

View File

@ -66,50 +66,6 @@ STableComInfo tscGetTableInfo(const STableMeta* pTableMeta) {
return pTableMeta->tableInfo;
}
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols) {
if (!VALIDNUMOFCOLS(numOfCols)) {
return false;
}
/* first column must be the timestamp, which is a primary key */
if (pSchema[0].type != TSDB_DATA_TYPE_TIMESTAMP) {
return false;
}
/* type is valid, length is valid */
int32_t rowLen = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
// 1. valid types
if (pSchema[i].type > TSDB_DATA_TYPE_TIMESTAMP || pSchema[i].type < TSDB_DATA_TYPE_BOOL) {
return false;
}
// 2. valid length for each type
if (pSchema[i].type == TSDB_DATA_TYPE_TIMESTAMP) {
if (pSchema[i].bytes > TSDB_MAX_BINARY_LEN) {
return false;
}
} else {
if (pSchema[i].bytes != tDataTypeDesc[pSchema[i].type].nSize) {
return false;
}
}
// 3. valid column names
for (int32_t j = i + 1; j < numOfCols; ++j) {
if (strncasecmp(pSchema[i].name, pSchema[j].name, sizeof(pSchema[i].name) - 1) == 0) {
return false;
}
}
rowLen += pSchema[i].bytes;
}
// valid total length
return (rowLen <= TSDB_MAX_BYTES_PER_ROW);
}
SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex) {
assert(pTableMeta != NULL);
@ -150,6 +106,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
pTableMeta->sversion = pTableMetaMsg->sversion;
pTableMeta->tversion = pTableMetaMsg->tversion;
tstrncpy(pTableMeta->sTableName, pTableMetaMsg->sTableName, TSDB_TABLE_FNAME_LEN);
memcpy(pTableMeta->schema, pTableMetaMsg->schema, schemaSize);

View File

@ -25,8 +25,6 @@
#include "ttimer.h"
#include "tlockfree.h"
///SRpcCorEpSet tscMgmtEpSet;
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
int (*tscProcessMsgRsp[TSDB_SQL_MAX])(SSqlObj *pSql);
@ -315,7 +313,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
return;
}
if (pEpSet) { // todo update this
if (pEpSet) {
if (!tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
if (pCmd->command < TSDB_SQL_MGMT) {
tscUpdateVgroupInfo(pSql, pEpSet);
@ -345,7 +343,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
if (pSql->retry > pSql->maxRetry) {
tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
} else {
// wait for a little bit moment and then retry, todo do not sleep in rpc callback thread
// wait for a little bit moment and then retry
// todo do not sleep in rpc callback thread, add this process into a queue to process
if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
int32_t duration = getWaitingTimeInterval(pSql->retry);
taosMsleep(duration);
@ -427,7 +426,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
if (shouldFree) { // in case of table-meta/vgrouplist query, automatically free it
taosRemoveRef(tscObjRef, pSql->self);
tscDebug("%p sqlObj is automatically freed", pSql);
tscDebug("%p sqlObj is automatically freed", pSql);
}
rpcFreeCont(rpcMsg->pCont);
@ -466,16 +465,20 @@ int doProcessSql(SSqlObj *pSql) {
}
int tscProcessSql(SSqlObj *pSql) {
char *name = NULL;
char name[TSDB_TABLE_FNAME_LEN] = {0};
SSqlCmd *pCmd = &pSql->cmd;
uint32_t type = 0;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
STableMetaInfo *pTableMetaInfo = NULL;
uint32_t type = 0;
if (pQueryInfo != NULL) {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
name = (pTableMetaInfo != NULL)? pTableMetaInfo->name:NULL;
if (pTableMetaInfo != NULL) {
tNameExtractFullName(&pTableMetaInfo->name, name);
}
type = pQueryInfo->type;
// while numOfTables equals to 0, it must be Heartbeat
@ -488,9 +491,9 @@ int tscProcessSql(SSqlObj *pSql) {
pSql->res.code = TSDB_CODE_TSC_APP_ERROR;
return pSql->res.code;
}
} else if (pCmd->command < TSDB_SQL_LOCAL) {
} else if (pCmd->command >= TSDB_SQL_LOCAL) {
//pSql->epSet = tscMgmtEpSet;
} else { // local handler
// } else { // local handler
return (*tscProcessMsgRsp[pCmd->command])(pSql);
}
@ -674,10 +677,11 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
pMsg += sizeof(STableIdInfo);
}
}
tscDebug("%p vgId:%d, query on table:%s, tid:%d, uid:%" PRIu64, pSql, htonl(pQueryMsg->head.vgId), pTableMetaInfo->name,
pTableMeta->id.tid, pTableMeta->id.uid);
char n[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, n);
tscDebug("%p vgId:%d, query on table:%s, tid:%d, uid:%" PRIu64, pSql, htonl(pQueryMsg->head.vgId), n, pTableMeta->id.tid, pTableMeta->id.uid);
return pMsg;
}
@ -759,12 +763,13 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i);
SSchema *pColSchema = &pSchema[pCol->colIndex.columnIndex];
if (pCol->colIndex.columnIndex >= tscGetNumOfColumns(pTableMeta) || pColSchema->type < TSDB_DATA_TYPE_BOOL ||
pColSchema->type > TSDB_DATA_TYPE_NCHAR) {
tscError("%p tid:%d uid:%" PRIu64" id:%s, column index out of range, numOfColumns:%d, index:%d, column name:%s",
pSql, pTableMeta->id.tid, pTableMeta->id.uid, pTableMetaInfo->name, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex,
pColSchema->name);
if (pCol->colIndex.columnIndex >= tscGetNumOfColumns(pTableMeta) || !isValidDataType(pColSchema->type)) {
char n[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, n);
tscError("%p tid:%d uid:%" PRIu64" id:%s, column index out of range, numOfColumns:%d, index:%d, column name:%s",
pSql, pTableMeta->id.tid, pTableMeta->id.uid, n, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex,
pColSchema->name);
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -830,7 +835,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
memcpy(pMsg, pExpr->param[j].pz, pExpr->param[j].nLen);
pMsg += pExpr->param[j].nLen;
} else {
pSqlFuncExpr->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64Key);
pSqlFuncExpr->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64);
}
}
@ -870,7 +875,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
memcpy(pMsg, pExpr->param[j].pz, pExpr->param[j].nLen);
pMsg += pExpr->param[j].nLen;
} else {
pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64Key);
pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64);
}
}
@ -948,10 +953,12 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSchema *pColSchema = &pSchema[pCol->colIndex.columnIndex];
if ((pCol->colIndex.columnIndex >= numOfTagColumns || pCol->colIndex.columnIndex < -1) ||
(pColSchema->type < TSDB_DATA_TYPE_BOOL || pColSchema->type > TSDB_DATA_TYPE_NCHAR)) {
(!isValidDataType(pColSchema->type))) {
char n[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, n);
tscError("%p tid:%d uid:%" PRIu64 " id:%s, tag index out of range, totalCols:%d, numOfTags:%d, index:%d, column name:%s",
pSql, pTableMeta->id.tid, pTableMeta->id.uid, pTableMetaInfo->name, total, numOfTagColumns,
pCol->colIndex.columnIndex, pColSchema->name);
pSql, pTableMeta->id.tid, pTableMeta->id.uid, n, total, numOfTagColumns, pCol->colIndex.columnIndex, pColSchema->name);
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -1028,7 +1035,8 @@ int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
assert(pCmd->numOfClause == 1);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pCreateDbMsg->db, pTableMetaInfo->name, sizeof(pCreateDbMsg->db));
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pCreateDbMsg->db);
assert(code == TSDB_CODE_SUCCESS);
return TSDB_CODE_SUCCESS;
}
@ -1042,7 +1050,9 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SCreateDnodeMsg *pCreate = (SCreateDnodeMsg *)pCmd->payload;
strncpy(pCreate->ep, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n);
SStrToken* t0 = taosArrayGet(pInfo->pMiscInfo->a, 0);
strncpy(pCreate->ep, t0->z, t0->n);
pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_DNODE;
@ -1059,13 +1069,13 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCreateAcctMsg *pAlterMsg = (SCreateAcctMsg *)pCmd->payload;
SStrToken *pName = &pInfo->pDCLInfo->user.user;
SStrToken *pPwd = &pInfo->pDCLInfo->user.passwd;
SStrToken *pName = &pInfo->pMiscInfo->user.user;
SStrToken *pPwd = &pInfo->pMiscInfo->user.passwd;
strncpy(pAlterMsg->user, pName->z, pName->n);
strncpy(pAlterMsg->pass, pPwd->z, pPwd->n);
SCreateAcctSQL *pAcctOpt = &pInfo->pDCLInfo->acctOpt;
SCreateAcctInfo *pAcctOpt = &pInfo->pMiscInfo->acctOpt;
pAlterMsg->cfg.maxUsers = htonl(pAcctOpt->maxUsers);
pAlterMsg->cfg.maxDbs = htonl(pAcctOpt->maxDbs);
@ -1105,7 +1115,7 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCreateUserMsg *pAlterMsg = (SCreateUserMsg *)pCmd->payload;
SUserInfo *pUser = &pInfo->pDCLInfo->user;
SUserInfo *pUser = &pInfo->pMiscInfo->user;
strncpy(pAlterMsg->user, pUser->user.z, pUser->user.n);
pAlterMsg->flag = (int8_t)pUser->type;
@ -1145,8 +1155,11 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SDropDbMsg *pDropDbMsg = (SDropDbMsg*)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDropDbMsg->db, pTableMetaInfo->name, sizeof(pDropDbMsg->db));
pDropDbMsg->ignoreNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0;
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pDropDbMsg->db);
assert(code == TSDB_CODE_SUCCESS && pTableMetaInfo->name.type == TSDB_DB_NAME_T);
pDropDbMsg->ignoreNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_DB;
return TSDB_CODE_SUCCESS;
@ -1163,15 +1176,19 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCMDropTableMsg *pDropTableMsg = (SCMDropTableMsg*)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
strcpy(pDropTableMsg->tableId, pTableMetaInfo->name);
pDropTableMsg->igNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0;
tNameExtractFullName(&pTableMetaInfo->name, pDropTableMsg->name);
pDropTableMsg->igNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_TABLE;
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
char dnodeEp[TSDB_EP_LEN] = {0};
tstrncpy(dnodeEp, pCmd->payload, TSDB_EP_LEN);
pCmd->payloadLen = sizeof(SDropDnodeMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
@ -1179,43 +1196,28 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SDropDnodeMsg * pDrop = (SDropDnodeMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDrop->ep, pTableMetaInfo->name, sizeof(pDrop->ep));
tstrncpy(pDrop->ep, dnodeEp, tListLen(pDrop->ep));
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_DNODE;
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t tscBuildDropUserAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
char user[TSDB_USER_LEN] = {0};
tstrncpy(user, pCmd->payload, TSDB_USER_LEN);
pCmd->payloadLen = sizeof(SDropUserMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_USER;
pCmd->msgType = (pInfo->type == TSDB_SQL_DROP_USER)? TSDB_MSG_TYPE_CM_DROP_USER:TSDB_MSG_TYPE_CM_DROP_ACCT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SDropUserMsg * pDropMsg = (SDropUserMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user));
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SDropUserMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_ACCT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SDropUserMsg * pDropMsg = (SDropUserMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user));
SDropUserMsg *pDropMsg = (SDropUserMsg *)pCmd->payload;
tstrncpy(pDropMsg->user, user, tListLen(user));
return TSDB_CODE_SUCCESS;
}
@ -1231,7 +1233,7 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SUseDbMsg *pUseDbMsg = (SUseDbMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
strcpy(pUseDbMsg->db, pTableMetaInfo->name);
tNameExtractFullName(&pTableMetaInfo->name, pUseDbMsg->db);
pCmd->msgType = TSDB_MSG_TYPE_CM_USE_DB;
return TSDB_CODE_SUCCESS;
@ -1251,14 +1253,16 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SShowMsg *pShowMsg = (SShowMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
size_t nameLen = strlen(pTableMetaInfo->name);
if (nameLen > 0) {
tstrncpy(pShowMsg->db, pTableMetaInfo->name, sizeof(pShowMsg->db)); // prefix is set here
if (tNameIsEmpty(&pTableMetaInfo->name)) {
pthread_mutex_lock(&pObj->mutex);
tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
pthread_mutex_unlock(&pObj->mutex);
} else {
tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
tNameGetFullDbName(&pTableMetaInfo->name, pShowMsg->db);
}
SShowInfo *pShowInfo = &pInfo->pDCLInfo->showOpt;
SShowInfo *pShowInfo = &pInfo->pMiscInfo->showOpt;
pShowMsg->type = pShowInfo->showType;
if (pShowInfo->showType != TSDB_MGMT_TABLE_VNODES) {
@ -1317,12 +1321,12 @@ int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) {
}
int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int msgLen = 0;
SSchema * pSchema;
int size = 0;
int msgLen = 0;
int size = 0;
SSchema *pSchema;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
// Reallocate the payload size
@ -1353,11 +1357,10 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += sizeof(SCreateTableMsg);
SCreatedTableInfo* p = taosArrayGet(list, i);
strcpy(pCreate->tableId, p->fullname);
strcpy(pCreate->tableName, p->fullname);
pCreate->igExists = (p->igExist)? 1 : 0;
// use dbinfo from table id without modifying current db info
tscGetDBInfoFromTableFullName(p->fullname, pCreate->db);
pMsg = serializeTagData(&p->tagdata, pMsg);
int32_t len = (int32_t)(pMsg - (char*) pCreate);
@ -1366,10 +1369,8 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
} else { // create (super) table
pCreateTableMsg->numOfTables = htonl(1); // only one table will be created
strcpy(pCreateMsg->tableId, pTableMetaInfo->name);
// use dbinfo from table id without modifying current db info
tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pCreateMsg->db);
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pCreateMsg->tableName);
assert(code == 0);
SCreateTableSQL *pCreateTable = pInfo->pCreateTableInfo;
@ -1427,7 +1428,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SAlterTableSQL *pAlterInfo = pInfo->pAlterInfo;
SAlterTableInfo *pAlterInfo = pInfo->pAlterInfo;
int size = tscEstimateAlterTableMsgLength(pCmd);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
tscError("%p failed to malloc for alter table msg", pSql);
@ -1435,9 +1436,8 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SAlterTableMsg *pAlterTableMsg = (SAlterTableMsg *)pCmd->payload;
tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pAlterTableMsg->db);
strcpy(pAlterTableMsg->tableId, pTableMetaInfo->name);
tNameExtractFullName(&pTableMetaInfo->name, pAlterTableMsg->tableFname);
pAlterTableMsg->type = htons(pAlterInfo->type);
pAlterTableMsg->numOfCols = htons(tscNumOfFields(pQueryInfo));
@ -1492,7 +1492,7 @@ int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SAlterDbMsg *pAlterDbMsg = (SAlterDbMsg* )pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pAlterDbMsg->db, pTableMetaInfo->name, sizeof(pAlterDbMsg->db));
tNameExtractFullName(&pTableMetaInfo->name, pAlterDbMsg->db);
return TSDB_CODE_SUCCESS;
}
@ -1617,9 +1617,14 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
// TODO refactor full_name
char *db; // ugly code to move the space
pthread_mutex_lock(&pObj->mutex);
db = strstr(pObj->db, TS_PATH_DELIMITER);
db = (db == NULL) ? pObj->db : db + 1;
tstrncpy(pConnect->db, db, sizeof(pConnect->db));
pthread_mutex_unlock(&pObj->mutex);
tstrncpy(pConnect->clientVersion, version, sizeof(pConnect->clientVersion));
tstrncpy(pConnect->msgVersion, "", sizeof(pConnect->msgVersion));
@ -1630,13 +1635,17 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd * pCmd = &pSql->cmd;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pInfoMsg->tableFname);
if (code != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
strcpy(pInfoMsg->tableId, pTableMetaInfo->name);
pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0);
char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
@ -1693,33 +1702,6 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return 0;
}
//static UNUSED_FUNC int32_t tscEstimateMetricMetaMsgSize(SSqlCmd *pCmd) {
//// const int32_t defaultSize =
//// minMsgSize() + sizeof(SSuperTableMetaMsg) + sizeof(SMgmtHead) + sizeof(int16_t) * TSDB_MAX_TAGS;
//// SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
////
//// int32_t n = 0;
//// size_t size = taosArrayGetSize(pQueryInfo->tagCond.pCond);
//// for (int32_t i = 0; i < size; ++i) {
//// assert(0);
////// n += strlen(pQueryInfo->tagCond.cond[i].cond);
//// }
////
//// int32_t tagLen = n * TSDB_NCHAR_SIZE;
//// if (pQueryInfo->tagCond.tbnameCond.cond != NULL) {
//// tagLen += strlen(pQueryInfo->tagCond.tbnameCond.cond) * TSDB_NCHAR_SIZE;
//// }
////
//// int32_t joinCondLen = (TSDB_TABLE_FNAME_LEN + sizeof(int16_t)) * 2;
//// int32_t elemSize = sizeof(SSuperTableMetaElemMsg) * pQueryInfo->numOfTables;
////
//// int32_t colSize = pQueryInfo->groupbyExpr.numOfGroupCols*sizeof(SColIndex);
////
//// int32_t len = tagLen + joinCondLen + elemSize + colSize + defaultSize;
////
//// return MAX(len, TSDB_DEFAULT_PAYLOAD_SIZE);
//}
int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
@ -1732,9 +1714,10 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, i);
size_t size = sizeof(pTableMetaInfo->name);
tstrncpy(pMsg, pTableMetaInfo->name, size);
pMsg += size;
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pMsg);
assert(code == TSDB_CODE_SUCCESS);
pMsg += TSDB_TABLE_FNAME_LEN;
}
pCmd->msgType = TSDB_MSG_TYPE_CM_STABLE_VGROUP;
@ -1805,7 +1788,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) &&
(pMetaMsg->tid <= 0 || pMetaMsg->vgroup.vgId < 2 || pMetaMsg->vgroup.numOfEps <= 0)) {
tscError("invalid value in table numOfEps:%d, vgId:%d tid:%d, name:%s", pMetaMsg->vgroup.numOfEps, pMetaMsg->vgroup.vgId,
pMetaMsg->tid, pMetaMsg->tableId);
pMetaMsg->tid, pMetaMsg->tableFname);
return TSDB_CODE_TSC_INVALID_VALUE;
}
@ -1834,15 +1817,18 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
assert(i == 0);
}
assert(pSchema->type >= TSDB_DATA_TYPE_BOOL && pSchema->type <= TSDB_DATA_TYPE_NCHAR);
pSchema++;
}
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
assert(pTableMetaInfo->pTableMeta == NULL);
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
tscError("%p invalid table meta from mnode, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
return TSDB_CODE_TSC_INVALID_VALUE;
}
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
// check if super table hashmap or not
int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
@ -1857,11 +1843,19 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
tfree(pSupTableMeta);
CChildTableMeta* cMeta = tscCreateChildMeta(pTableMeta);
taosHashPut(tscTableMetaInfo, pTableMetaInfo->name, strlen(pTableMetaInfo->name), cMeta, sizeof(CChildTableMeta));
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashPut(tscTableMetaInfo, name, strlen(name), cMeta, sizeof(CChildTableMeta));
tfree(cMeta);
} else {
uint32_t s = tscGetTableMetaSize(pTableMeta);
taosHashPut(tscTableMetaInfo, pTableMetaInfo->name, strlen(pTableMetaInfo->name), pTableMeta, s);
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashPut(tscTableMetaInfo, name, strlen(name), pTableMeta, s);
}
// update the vgroupInfo if needed
@ -1880,9 +1874,10 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
}
}
tscDebug("%p recv table meta, uid:%"PRId64 ", tid:%d, name:%s", pSql, pTableMeta->id.uid, pTableMeta->id.tid, pTableMetaInfo->name);
tscDebug("%p recv table meta, uid:%" PRIu64 ", tid:%d, name:%s", pSql, pTableMeta->id.uid, pTableMeta->id.tid,
tNameGetTableName(&pTableMetaInfo->name));
free(pTableMeta);
return TSDB_CODE_SUCCESS;
}
@ -1981,9 +1976,6 @@ int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) {
//
// rsp += tagLen;
// int32_t size = (int32_t)(rsp - ((char *)pMeta)); // Consistent with STableMeta in cache
//
// pMeta->index = 0;
// (void)taosCachePut(tscMetaCache, pMeta->tableId, (char *)pMeta, size, tsTableMetaKeepTimer);
// }
}
@ -1996,16 +1988,20 @@ int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) {
}
int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
// master sqlObj locates in param
SSqlObj* parent = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)pSql->param);
if(parent == NULL) {
return pSql->res.code;
}
assert(parent->signature == parent && (int64_t)pSql->param == parent->self);
SSqlRes* pRes = &pSql->res;
// NOTE: the order of the tables must be preserved.
SSTableVgroupRspMsg *pStableVgroup = (SSTableVgroupRspMsg *)pRes->pRsp;
pStableVgroup->numOfTables = htonl(pStableVgroup->numOfTables);
char *pMsg = pRes->pRsp + sizeof(SSTableVgroupRspMsg);
// master sqlObj locates in param
SSqlObj* parent = pSql->param;
assert(parent != NULL);
SSqlCmd* pCmd = &parent->cmd;
for(int32_t i = 0; i < pStableVgroup->numOfTables; ++i) {
@ -2039,6 +2035,8 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
pMsg += size;
}
taosReleaseRef(tscObjRef, parent->self);
return pSql->res.code;
}
@ -2100,7 +2098,7 @@ int tscProcessShowRsp(SSqlObj *pSql) {
return 0;
}
static void createHBObj(STscObj* pObj) {
static void createHbObj(STscObj* pObj) {
if (pObj->hbrid != 0) {
return;
}
@ -2144,10 +2142,13 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
SConnectRsp *pConnect = (SConnectRsp *)pRes->pRsp;
tstrncpy(pObj->acctId, pConnect->acctId, sizeof(pObj->acctId)); // copy acctId from response
pthread_mutex_lock(&pObj->mutex);
int32_t len = sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db);
assert(len <= sizeof(pObj->db));
tstrncpy(pObj->db, temp, sizeof(pObj->db));
pthread_mutex_unlock(&pObj->mutex);
if (pConnect->epSet.numOfEps > 0) {
tscEpSetHtons(&pConnect->epSet);
@ -2163,7 +2164,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
pObj->superAuth = pConnect->superAuth;
pObj->connId = htonl(pConnect->connId);
createHBObj(pObj);
createHbObj(pObj);
//launch a timer to send heartbeat to maintain the connection and send status to mnode
taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, (void *)pObj->rid, tscTmr, &pObj->pTimer);
@ -2174,13 +2175,18 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
int tscProcessUseDbRsp(SSqlObj *pSql) {
STscObj * pObj = pSql->pTscObj;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
tstrncpy(pObj->db, pTableMetaInfo->name, sizeof(pObj->db));
return 0;
pthread_mutex_lock(&pObj->mutex);
int ret = tNameExtractFullName(&pTableMetaInfo->name, pObj->db);
pthread_mutex_unlock(&pObj->mutex);
return ret;
}
int tscProcessDropDbRsp(SSqlObj *pSql) {
pSql->pTscObj->db[0] = 0;
//TODO LOCK DB WHEN MODIFY IT
//pSql->pTscObj->db[0] = 0;
taosHashEmpty(tscTableMetaInfo);
return 0;
}
@ -2189,18 +2195,22 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
//The cached tableMeta is expired in this case, so clean it in hash table
taosHashRemove(tscTableMetaInfo, pTableMetaInfo->name, strnlen(pTableMetaInfo->name, TSDB_TABLE_FNAME_LEN));
tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, pTableMetaInfo->name,
(int32_t) taosHashGetSize(tscTableMetaInfo));
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
assert(pTableMetaInfo->pTableMeta == NULL);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, name, (int32_t) taosHashGetSize(tscTableMetaInfo));
pTableMetaInfo->pTableMeta = NULL;
return 0;
}
int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
char* name = pTableMetaInfo->name;
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
tscDebug("%p remove tableMeta in hashMap after alter-table: %s", pSql, name);
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
@ -2239,6 +2249,8 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
assert(pRes->rspLen >= sizeof(SRetrieveTableRsp));
SRetrieveTableRsp *pRetrieve = (SRetrieveTableRsp *)pRes->pRsp;
if (pRetrieve == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -2319,7 +2331,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
STableMetaInfo *pNewMeterMetaInfo = tscAddEmptyMetaInfo(pNewQueryInfo);
assert(pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1);
tstrncpy(pNewMeterMetaInfo->name, pTableMetaInfo->name, sizeof(pNewMeterMetaInfo->name));
tNameAssign(&pNewMeterMetaInfo->name, &pTableMetaInfo->name);
if (pSql->cmd.autoCreated) {
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
@ -2332,11 +2344,15 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
tscDebug("%p new pSqlObj:%p to get tableMeta, auto create:%d", pSql, pNew, pNew->cmd.autoCreated);
pNew->fp = tscTableMetaCallBack;
pNew->param = pSql;
registerSqlObj(pNew);
pNew->fp = tscTableMetaCallBack;
pNew->param = (void *)pSql->self;
tscDebug("%p metaRid from %" PRId64 " to %" PRId64 , pSql, pSql->metaRid, pNew->self);
pSql->metaRid = pNew->self;
int32_t code = tscProcessSql(pNew);
if (code == TSDB_CODE_SUCCESS) {
code = TSDB_CODE_TSC_ACTION_IN_PROGRESS; // notify application that current process needs to be terminated
@ -2346,22 +2362,27 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
}
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
assert(strlen(pTableMetaInfo->name) != 0);
assert(tIsValidName(&pTableMetaInfo->name));
tfree(pTableMetaInfo->pTableMeta);
uint32_t size = tscGetTableMetaMaxSize();
pTableMetaInfo->pTableMeta = calloc(1, size);
pTableMetaInfo->pTableMeta->tableType = -1;
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
int32_t len = (int32_t) strlen(pTableMetaInfo->name);
taosHashGetClone(tscTableMetaInfo, pTableMetaInfo->name, len, NULL, pTableMetaInfo->pTableMeta, -1);
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
size_t len = strlen(name);
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
// TODO resize the tableMeta
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
if (pMeta->tableType == TSDB_CHILD_TABLE) {
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, pTableMetaInfo->name);
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@ -2389,16 +2410,24 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex);
const char* name = pTableMetaInfo->name;
char name[TSDB_TABLE_FNAME_LEN] = {0};
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, name);
if (code != TSDB_CODE_SUCCESS) {
tscError("%p failed to generate the table full name", pSql);
return TSDB_CODE_TSC_INVALID_SQL;
}
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pTableMeta) {
tscDebug("%p update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64, pSql, name,
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
}
// remove stored tableMeta info in hash table
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
size_t len = strlen(name);
taosHashRemove(tscTableMetaInfo, name, len);
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@ -2439,8 +2468,8 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i);
STableMeta* pTableMeta = tscTableMetaClone(pMInfo->pTableMeta);
tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList, pMInfo->pVgroupTables);
STableMeta* pTableMeta = tscTableMetaDup(pMInfo->pTableMeta);
tscAddTableMetaInfo(pNewQueryInfo, &pMInfo->name, pTableMeta, NULL, pMInfo->tagColList, pMInfo->pVgroupTables);
}
if ((code = tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) {
@ -2451,10 +2480,15 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
pNewQueryInfo->numOfTables = pQueryInfo->numOfTables;
registerSqlObj(pNew);
tscDebug("%p svgroupRid from %" PRId64 " to %" PRId64 , pSql, pSql->svgroupRid, pNew->self);
pSql->svgroupRid = pNew->self;
tscDebug("%p new sqlObj:%p to get vgroupInfo, numOfTables:%d", pSql, pNew, pNewQueryInfo->numOfTables);
pNew->fp = tscTableMetaCallBack;
pNew->param = pSql;
pNew->param = (void *)pSql->self;
code = tscProcessSql(pNew);
if (code == TSDB_CODE_SUCCESS) {
code = TSDB_CODE_TSC_ACTION_IN_PROGRESS;
@ -2475,8 +2509,8 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_ALTER_ACCT] = tscBuildAcctMsg;
tscBuildMsg[TSDB_SQL_CREATE_TABLE] = tscBuildCreateTableMsg;
tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserMsg;
tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg;
tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg;
tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildUserMsg;

View File

@ -15,7 +15,7 @@
#include "hash.h"
#include "os.h"
#include "qAst.h"
#include "texpr.h"
#include "tkey.h"
#include "tcache.h"
#include "tnote.h"
@ -47,7 +47,7 @@ static bool validUserName(const char* user) {
}
static bool validPassword(const char* passwd) {
return validImpl(passwd, TSDB_PASSWORD_LEN - 1);
return validImpl(passwd, TSDB_KEY_LEN - 1);
}
static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, const char *auth, const char *db,
@ -226,11 +226,11 @@ TAOS *taos_connect_c(const char *ip, uint8_t ipLen, const char *user, uint8_t us
uint8_t passLen, const char *db, uint8_t dbLen, uint16_t port) {
char ipBuf[TSDB_EP_LEN] = {0};
char userBuf[TSDB_USER_LEN] = {0};
char passBuf[TSDB_PASSWORD_LEN] = {0};
char passBuf[TSDB_KEY_LEN] = {0};
char dbBuf[TSDB_DB_NAME_LEN] = {0};
strncpy(ipBuf, ip, MIN(TSDB_EP_LEN - 1, ipLen));
strncpy(userBuf, user, MIN(TSDB_USER_LEN - 1, userLen));
strncpy(passBuf, pass, MIN(TSDB_PASSWORD_LEN - 1, passLen));
strncpy(passBuf, pass, MIN(TSDB_KEY_LEN - 1, passLen));
strncpy(dbBuf, db, MIN(TSDB_DB_NAME_LEN - 1, dbLen));
return taos_connect(ipBuf, userBuf, passBuf, dbBuf, port);
}
@ -282,6 +282,10 @@ void taos_close(TAOS *taos) {
tscDebug("%p HB is freed", pHb);
taosReleaseRef(tscObjRef, pHb->self);
#ifdef __APPLE__
// to satisfy later tsem_destroy in taos_free_result
tsem_init(&pHb->rspSem, 0, 0);
#endif // __APPLE__
taos_free_result(pHb);
}
}
@ -315,7 +319,7 @@ TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen, int64_t*
return NULL;
}
nPrintTsc(sqlstr);
nPrintTsc("%s", sqlstr);
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
if (pSql == NULL) {
@ -682,6 +686,8 @@ static void tscKillSTableQuery(SSqlObj *pSql) {
// set the master sqlObj flag to cancel query
pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
tscLockByThread(&pSql->squeryLock);
for (int i = 0; i < pSql->subState.numOfSub; ++i) {
// NOTE: pSub may have been released already here
SSqlObj *pSub = pSql->pSubs[i];
@ -701,6 +707,12 @@ static void tscKillSTableQuery(SSqlObj *pSql) {
taosReleaseRef(tscObjRef, pSubObj->self);
}
if (pSql->subState.numOfSub <= 0) {
tscAsyncResultOnError(pSql);
}
tscUnlockByThread(&pSql->squeryLock);
tscDebug("%p super table query cancelled", pSql);
}
@ -761,6 +773,7 @@ bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
int len = 0;
for (int i = 0; i < num_fields; ++i) {
if (i > 0) {
str[len++] = ' ';
@ -776,18 +789,34 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
len += sprintf(str + len, "%d", *((int8_t *)row[i]));
break;
case TSDB_DATA_TYPE_UTINYINT:
len += sprintf(str + len, "%u", *((uint8_t *)row[i]));
break;
case TSDB_DATA_TYPE_SMALLINT:
len += sprintf(str + len, "%d", *((int16_t *)row[i]));
break;
case TSDB_DATA_TYPE_USMALLINT:
len += sprintf(str + len, "%u", *((uint16_t *)row[i]));
break;
case TSDB_DATA_TYPE_INT:
len += sprintf(str + len, "%d", *((int32_t *)row[i]));
break;
case TSDB_DATA_TYPE_UINT:
len += sprintf(str + len, "%u", *((uint32_t *)row[i]));
break;
case TSDB_DATA_TYPE_BIGINT:
len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_UBIGINT:
len += sprintf(str + len, "%" PRIu64, *((uint64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(row[i]);
@ -802,13 +831,15 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
size_t xlen = 0;
for (xlen = 0; xlen < fields[i].bytes - VARSTR_HEADER_SIZE; xlen++) {
char c = ((char *)row[i])[xlen];
if (c == 0) break;
str[len++] = c;
int32_t charLen = varDataLen((char*)row[i] - VARSTR_HEADER_SIZE);
if (fields[i].type == TSDB_DATA_TYPE_BINARY) {
assert(charLen <= fields[i].bytes);
} else {
assert(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE);
}
str[len] = 0;
memcpy(str + len, row[i], charLen);
len += charLen;
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
@ -897,7 +928,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) {
// must before clean the sqlcmd object
tscResetSqlCmdObj(&pSql->cmd);
tscResetSqlCmd(&pSql->cmd, false);
SSqlCmd *pCmd = &pSql->cmd;
@ -950,13 +981,14 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
return code;
}
if (++pCmd->count > TSDB_MULTI_METERMETA_MAX_NUM) {
if (++pCmd->count > TSDB_MULTI_TABLEMETA_MAX_NUM) {
code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
sprintf(pCmd->payload, "tables over the max number");
return code;
}
if (payloadLen + strlen(pTableMetaInfo->name) + 128 >= pCmd->allocSize) {
int32_t xlen = tNameLen(&pTableMetaInfo->name);
if (payloadLen + xlen + 128 >= pCmd->allocSize) {
char *pNewMem = realloc(pCmd->payload, pCmd->allocSize + tblListLen);
if (pNewMem == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -969,7 +1001,9 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
pMsg = pCmd->payload;
}
payloadLen += sprintf(pMsg + payloadLen, "%s,", pTableMetaInfo->name);
char n[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, n);
payloadLen += sprintf(pMsg + payloadLen, "%s,", n);
}
*(pMsg + payloadLen) = '\0';
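The extended taos_print_row() above (unsigned integer types, length-prefixed binary/nchar copies) is normally used from a plain fetch loop; a small sketch of that call pattern (database and table names are assumptions) could be:

// sketch only: assumes an open connection and an existing table db.tb
#include <stdio.h>
#include <taos.h>

int dump_table(TAOS *conn) {
  TAOS_RES *res = taos_query(conn, "select * from db.tb");
  if (res == NULL || taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    taos_free_result(res);
    return -1;
  }

  int         num_fields = taos_num_fields(res);
  TAOS_FIELD *fields     = taos_fetch_fields(res);
  char        line[4096];

  TAOS_ROW row;
  while ((row = taos_fetch_row(res)) != NULL) {
    taos_print_row(line, row, fields, num_fields);  // one space-separated line per row
    puts(line);
  }

  taos_free_result(res);
  return 0;
}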

View File

@ -65,44 +65,67 @@ static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, in
return retryDelta;
}
static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
SSqlStream *pStream = (SSqlStream *)pMsg->ahandle;
SSqlObj * pSql = pStream->pSql;
static void setRetryInfo(SSqlStream* pStream, int32_t code) {
SSqlObj* pSql = pStream->pSql;
pSql->fp = tscProcessStreamQueryCallback;
pSql->fetchFp = tscProcessStreamQueryCallback;
pSql->param = pStream;
pSql->res.code = code;
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscDebug("%p stream:%p, get table Meta failed, retry in %" PRId64 "ms", pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
}
static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
SSqlStream *pStream = (SSqlStream *)param;
assert(pStream->pSql == tres);
SSqlObj* pSql = (SSqlObj*) tres;
pSql->fp = doLaunchQuery;
pSql->fetchFp = doLaunchQuery;
pSql->res.completed = false;
if (code != TSDB_CODE_SUCCESS) {
setRetryInfo(pStream, code);
return;
}
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
int code = tscGetTableMeta(pSql, pTableMetaInfo);
pSql->res.code = code;
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == 0 && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
code = tscGetSTableVgroupInfo(pSql, 0);
pSql->res.code = code;
}
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
}
} else {
// failed to get table Meta or vgroup list, retry in 10sec.
if (code == TSDB_CODE_SUCCESS) {
tscTansformSQLFuncForSTableQuery(pQueryInfo);
tscDebug("%p stream:%p start stream query on:%s", pSql, pStream, pTableMetaInfo->name);
tscDoQuery(pStream->pSql);
tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, tNameGetTableName(&pTableMetaInfo->name));
pSql->fp = tscProcessStreamQueryCallback;
pSql->fetchFp = tscProcessStreamQueryCallback;
tscDoQuery(pSql);
tscIncStreamExecutionCount(pStream);
} else {
setRetryInfo(pStream, code);
}
}
static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
SSqlStream *pStream = (SSqlStream *)pMsg->ahandle;
doLaunchQuery(pStream, pStream->pSql, 0);
}
static void tscProcessStreamTimer(void *handle, void *tmrId) {
SSqlStream *pStream = (SSqlStream *)handle;
if (pStream == NULL) return;
if (pStream->pTimer != tmrId) return;
if (pStream == NULL || pStream->pTimer != tmrId) {
return;
}
pStream->pTimer = NULL;
pStream->numOfRes = 0; // reset the numOfRes.
@ -168,7 +191,9 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
char* name = pTableMetaInfo->name;
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
@ -268,7 +293,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
pStream->stime += 1;
}
tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, pTableMetaInfo->name,
tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, tNameGetTableName(&pTableMetaInfo->name),
pStream->numOfRes);
tfree(pTableMetaInfo->pTableMeta);
@ -392,11 +417,16 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
tscSetRetryTimer(pStream, pSql, timer);
}
static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
int64_t minIntervalTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (!pStream->isProject && pQueryInfo->interval.interval == 0) {
sprintf(pSql->cmd.payload, "the interval value is 0");
return -1;
}
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
@ -436,6 +466,8 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
pQueryInfo->interval.interval = 0; // clear the interval value to avoid the force time window split by query processor
pQueryInfo->interval.sliding = 0;
}
return TSDB_CODE_SUCCESS;
}
static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) {
@ -485,34 +517,19 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) {
return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer;
}
static void setErrorInfo(SSqlObj* pSql, int32_t code, char* info) {
if (pSql == NULL) {
return;
}
SSqlCmd* pCmd = &pSql->cmd;
pSql->res.code = code;
if (info != NULL) {
strncpy(pCmd->payload, info, pCmd->payloadLen);
}
}
static void tscCreateStream(void *param, TAOS_RES *res, int code) {
SSqlStream* pStream = (SSqlStream*)param;
SSqlObj* pSql = pStream->pSql;
SSqlCmd* pCmd = &pSql->cmd;
if (code != TSDB_CODE_SUCCESS) {
setErrorInfo(pSql, code, pCmd->payload);
tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, pSql->sqlstr, pCmd->payload, code);
pSql->res.code = code;
tscError("%p open stream failed, sql:%s, reason:%s, code:%s", pSql, pSql->sqlstr, pCmd->payload, tstrerror(code));
pStream->fp(pStream->param, NULL, NULL);
return;
}
registerSqlObj(pSql);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
@ -523,17 +540,25 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
pStream->ctime = taosGetTimestamp(pStream->precision);
pStream->etime = pQueryInfo->window.ekey;
tscAddIntoStreamList(pStream);
if (tscSetSlidingWindowInfo(pSql, pStream) != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
tscError("%p stream %p open failed, since the interval value is incorrect", pSql, pStream);
pStream->fp(pStream->param, NULL, NULL);
return;
}
tscSetSlidingWindowInfo(pSql, pStream);
pStream->stime = tscGetStreamStartTimestamp(pSql, pStream, pStream->stime);
int64_t starttime = tscGetLaunchTimestamp(pStream);
pCmd->command = TSDB_SQL_SELECT;
tscAddIntoStreamList(pStream);
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
pStream, pTableMetaInfo->name, pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
pStream, tNameGetTableName(&pTableMetaInfo->name), pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
}
void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable) {
@ -586,12 +611,15 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
pSql->fp = tscCreateStream;
pSql->fetchFp = tscCreateStream;
registerSqlObj(pSql);
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_SUCCESS) {
tscCreateStream(pStream, pSql, code);
} else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
tscError("%p open stream failed, sql:%s, code:%s", pSql, sqlstr, tstrerror(pRes->code));
tscFreeSqlObj(pSql);
taosReleaseRef(tscObjRef, pSql->self);
free(pStream);
return NULL;
}

View File

@ -313,7 +313,7 @@ static int tscLoadSubscriptionProgress(SSub* pSub) {
char buf[TSDB_MAX_SQL_LEN];
sprintf(buf, "%s/subscribe/%s", tsDataDir, pSub->topic);
FILE* fp = fopen(buf, "r");
FILE* fp = fopen(buf, "rb");
if (fp == NULL) {
tscDebug("subscription progress file does not exist: %s", pSub->topic);
return 1;
@ -368,7 +368,7 @@ void tscSaveSubscriptionProgress(void* sub) {
}
sprintf(path, "%s/subscribe/%s", tsDataDir, pSub->topic);
FILE* fp = fopen(path, "w+");
FILE* fp = fopen(path, "wb+");
if (fp == NULL) {
tscError("failed to create progress file for subscription: %s", pSub->topic);
return;

View File

@ -16,7 +16,7 @@
#include "os.h"
#include "qAst.h"
#include "texpr.h"
#include "qTsbuf.h"
#include "tcompare.h"
#include "tscLog.h"
@ -55,6 +55,58 @@ static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) {
}
}
static void subquerySetState(SSqlObj *pSql, SSubqueryState *subState, int idx, int8_t state) {
assert(idx < subState->numOfSub);
assert(subState->states);
pthread_mutex_lock(&subState->mutex);
tscDebug("subquery:%p,%d state set to %d", pSql, idx, state);
subState->states[idx] = state;
pthread_mutex_unlock(&subState->mutex);
}
static bool allSubqueryDone(SSqlObj *pParentSql) {
bool done = true;
SSubqueryState *subState = &pParentSql->subState;
//caller must hold subState->mutex
for (int i = 0; i < subState->numOfSub; i++) {
if (0 == subState->states[i]) {
tscDebug("%p subquery:%p,%d is NOT finished, total:%d", pParentSql, pParentSql->pSubs[i], i, subState->numOfSub);
done = false;
break;
} else {
tscDebug("%p subquery:%p,%d is finished, total:%d", pParentSql, pParentSql->pSubs[i], i, subState->numOfSub);
}
}
return done;
}
static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
SSubqueryState *subState = &pParentSql->subState;
assert(idx < subState->numOfSub);
pthread_mutex_lock(&subState->mutex);
tscDebug("%p subquery:%p,%d state set to 1", pParentSql, pSql, idx);
subState->states[idx] = 1;
bool done = allSubqueryDone(pParentSql);
pthread_mutex_unlock(&subState->mutex);
return done;
}
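/* Editor's note: illustrative sketch, not part of this change. It shows the call
 * pattern the rest of this file follows for subAndCheckDone(): a subquery
 * completion callback marks its own slot finished under the shared mutex, and
 * only the last finisher proceeds to the merge step. The callback name below is
 * hypothetical. */
static void exampleSubqueryDoneCallback(void *param, TAOS_RES *tres, int numOfRows) {
  SJoinSupporter *pSupporter = (SJoinSupporter *)param;
  SSqlObj        *pSub       = (SSqlObj *)tres;
  SSqlObj        *pParentSql = pSupporter->pObj;
  (void)numOfRows;

  if (!subAndCheckDone(pSub, pParentSql, pSupporter->subqueryIndex)) {
    return;  // other subqueries are still running; the last one will carry on
  }

  // all subqueries have finished; it is now safe to merge results on pParentSql
}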
static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJoinSupporter* pSupporter2, STimeWindow * win) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
@ -188,8 +240,10 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
tsBufFlush(output2);
tsBufDestroy(pSupporter1->pTSBuf);
pSupporter1->pTSBuf = NULL;
tsBufDestroy(pSupporter2->pTSBuf);
pSupporter2->pTSBuf = NULL;
TSKEY et = taosGetTimestampUs();
tscDebug("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
"intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us",
@ -219,12 +273,9 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) {
assert (pSupporter->uid != 0);
taosGetTmpfilePath("join-", pSupporter->path);
pSupporter->f = fopen(pSupporter->path, "w");
// todo handle error
if (pSupporter->f == NULL) {
tscError("%p failed to create tmp file:%s, reason:%s", pSql, pSupporter->path, strerror(errno));
}
// do NOT create the file here, to avoid leaving stale temp files behind if the process crashes
pSupporter->f = NULL;
return pSupporter;
}
@ -244,12 +295,19 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) {
tscFieldInfoClear(&pSupporter->fieldsInfo);
if (pSupporter->pTSBuf != NULL) {
tsBufDestroy(pSupporter->pTSBuf);
pSupporter->pTSBuf = NULL;
}
unlink(pSupporter->path);
if (pSupporter->f != NULL) {
fclose(pSupporter->f);
unlink(pSupporter->path);
pSupporter->f = NULL;
}
if (pSupporter->pVgroupTables != NULL) {
taosArrayDestroy(pSupporter->pVgroupTables);
pSupporter->pVgroupTables = NULL;
@ -361,10 +419,6 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
// scan all subqueries; if a subquery has only the ts column, ignore it
tscDebug("%p start to launch secondary subqueries, %d out of %d needs to query", pSql, numOfSub, pSql->subState.numOfSub);
//the subqueries that do not actually launch the secondary query to virtual node is set as completed.
SSubqueryState* pState = &pSql->subState;
pState->numOfRemain = numOfSub;
bool success = true;
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
@ -397,6 +451,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
success = false;
break;
}
tscClearSubqueryInfo(&pNew->cmd);
pSql->pSubs[i] = pNew;
@ -459,7 +514,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
// set the tag column id for executor to extract correct tag value
pExpr->param[0] = (tVariant) {.i64Key = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)};
pExpr->param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)};
pExpr->numOfParams = 1;
}
@ -474,10 +529,12 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
}
}
subquerySetState(pPrevSub, &pSql->subState, i, 0);
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
tscDebug("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s",
pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pQueryInfo->type, taosArrayGetSize(pQueryInfo->exprList),
numOfCols, pQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name);
numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
}
//prepare the subqueries object failed, abort
@ -511,21 +568,29 @@ void freeJoinSubqueryObj(SSqlObj* pSql) {
SJoinSupporter* p = pSub->param;
tscDestroyJoinSupporter(p);
if (pSub->res.code == TSDB_CODE_SUCCESS) {
taos_free_result(pSub);
}
taos_free_result(pSub);
pSql->pSubs[i] = NULL;
}
if (pSql->subState.states) {
pthread_mutex_destroy(&pSql->subState.mutex);
}
tfree(pSql->subState.states);
pSql->subState.numOfSub = 0;
}
static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
assert(pSqlObj->subState.numOfRemain > 0);
if (atomic_sub_fetch_32(&pSqlObj->subState.numOfRemain, 1) <= 0) {
tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
static int32_t quitAllSubquery(SSqlObj* pSqlSub, SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
if (subAndCheckDone(pSqlSub, pSqlObj, pSupporter->subqueryIndex)) {
tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
freeJoinSubqueryObj(pSqlObj);
return 0;
}
return 1;
//tscDestroyJoinSupporter(pSupporter);
}
// update the query time range according to the join results on timestamp
@ -642,7 +707,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, 0);
int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid);
pExpr->param->i64Key = tagColId;
pExpr->param->i64 = tagColId;
pExpr->numOfParams = 1;
}
@ -666,7 +731,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
"numOfExpr:%" PRIzu ", colList:%" PRIzu ", numOfOutputFields:%d, name:%s",
pParent, pSql, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name);
tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
tscProcessSql(pSql);
}
@ -769,6 +834,17 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY));
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
}
// check for the error code firstly
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
// todo retry if other subqueries are not failed
@ -777,7 +853,9 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
pParentSql->res.code = numOfRows;
quitAllSubquery(pParentSql, pSupporter);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
@ -794,7 +872,9 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p failed to malloc memory", pSql);
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
quitAllSubquery(pParentSql, pSupporter);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
@ -836,9 +916,10 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// no data exists in next vnode, mark the <tid, tags> query completed
// only when no subquery remains do we proceed to get the intersection of the <tid, tags> tuple sets.
if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
tscDebug("%p tagRetrieve:%p,%d completed, total:%d", pParentSql, tres, pSupporter->subqueryIndex, pParentSql->subState.numOfSub);
return;
}
}
SArray *s1 = NULL, *s2 = NULL;
int32_t code = getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2);
@ -877,14 +958,16 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo2, s2);
SSqlObj* psub1 = pParentSql->pSubs[0];
((SJoinSupporter*)psub1->param)->pVgroupTables = tscVgroupTableInfoClone(pTableMetaInfo1->pVgroupTables);
((SJoinSupporter*)psub1->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo1->pVgroupTables);
SSqlObj* psub2 = pParentSql->pSubs[1];
((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoClone(pTableMetaInfo2->pVgroupTables);
((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo2->pVgroupTables);
pParentSql->subState.numOfSub = 2;
pParentSql->subState.numOfRemain = pParentSql->subState.numOfSub;
memset(pParentSql->subState.states, 0, sizeof(pParentSql->subState.states[0]) * pParentSql->subState.numOfSub);
tscDebug("%p reset all sub states to 0", pParentSql);
for (int32_t m = 0; m < pParentSql->subState.numOfSub; ++m) {
SSqlObj* sub = pParentSql->pSubs[m];
issueTSCompQuery(sub, sub->param, pParentSql);
@ -907,6 +990,17 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
assert(!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE));
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
return;
}
tscAsyncResultOnError(pParentSql);
return;
}
// check for the error code firstly
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
// todo retry if other subqueries are not failed yet
@ -914,13 +1008,33 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
pParentSql->res.code = numOfRows;
quitAllSubquery(pParentSql, pSupporter);
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
return;
}
tscAsyncResultOnError(pParentSql);
return;
}
if (numOfRows > 0) { // write the compressed timestamp to disk file
if(pSupporter->f == NULL) {
pSupporter->f = fopen(pSupporter->path, "wb");
if (pSupporter->f == NULL) {
tscError("%p failed to create tmp file:%s, reason:%s", pSql, pSupporter->path, strerror(errno));
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
}
}
fwrite(pRes->data, (size_t)pRes->numOfRows, 1, pSupporter->f);
fclose(pSupporter->f);
pSupporter->f = NULL;
@ -930,6 +1044,11 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p invalid ts comp file from vnode, abort subquery, file size:%d", pSql, numOfRows);
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
return;
}
tscAsyncResultOnError(pParentSql);
return;
@ -947,7 +1066,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// continue to retrieve ts-comp data from vnode
if (!pRes->completed) {
taosGetTmpfilePath("ts-join", pSupporter->path);
pSupporter->f = fopen(pSupporter->path, "w");
pSupporter->f = fopen(pSupporter->path, "wb");
pRes->row = pRes->numOfRows;
taos_fetch_rows_a(tres, tsCompRetrieveCallback, param);
@ -973,7 +1092,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
taosGetTmpfilePath("ts-join", pSupporter->path);
// TODO check for failure
pSupporter->f = fopen(pSupporter->path, "w");
pSupporter->f = fopen(pSupporter->path, "wb");
pRes->row = pRes->numOfRows;
// set the callback function
@ -982,9 +1101,9 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
return;
}
if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
return;
}
}
tscDebug("%p all subquery retrieve ts complete, do ts block intersect", pParentSql);
@ -1022,6 +1141,19 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
SSqlRes* pRes = &pSql->res;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
}
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
assert(numOfRows == taos_errno(pSql));
@ -1061,9 +1193,8 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
}
}
assert(pState->numOfRemain > 0);
if (atomic_sub_fetch_32(&pState->numOfRemain, 1) > 0) {
tscDebug("%p sub:%p completed, remain:%d, total:%d", pParentSql, tres, pState->numOfRemain, pState->numOfSub);
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
tscDebug("%p sub:%p,%d completed, total:%d", pParentSql, tres, pSupporter->subqueryIndex, pState->numOfSub);
return;
}
@ -1178,15 +1309,16 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
}
}
// get the number of subqueries that need to retrieve data from the next vnode.
if (orderedPrjQuery) {
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
if (pSub != NULL && pSub->res.row >= pSub->res.numOfRows && pSub->res.completed) {
pSql->subState.numOfRemain++;
subquerySetState(pSub, &pSql->subState, i, 0);
}
}
}
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
@ -1243,7 +1375,19 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
// retrieve data from current vnode.
tscDebug("%p retrieve data from %d subqueries", pSql, numOfFetch);
SJoinSupporter* pSupporter = NULL;
pSql->subState.numOfRemain = numOfFetch;
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSql1 = pSql->pSubs[i];
if (pSql1 == NULL) {
continue;
}
SSqlRes* pRes1 = &pSql1->res;
if (pRes1->row >= pRes1->numOfRows) {
subquerySetState(pSql1, &pSql->subState, i, 0);
}
}
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSql1 = pSql->pSubs[i];
@ -1335,15 +1479,20 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
SJoinSupporter* pSupporter = (SJoinSupporter*)param;
SSqlObj* pParentSql = pSupporter->pObj;
// There is only one subquery and table for each subquery.
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
assert(pQueryInfo->numOfTables == 1 && pSql->cmd.numOfClause == 1);
// retrieve actual query results from vnode during the second stage join subquery
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, code, pParentSql->res.code);
quitAllSubquery(pParentSql, pSupporter);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
@ -1356,7 +1505,10 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
tscError("%p abort query, code:%s, global code:%s", pSql, tstrerror(code), tstrerror(pParentSql->res.code));
pParentSql->res.code = code;
quitAllSubquery(pParentSql, pSupporter);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
@ -1378,14 +1530,11 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
return;
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
// In case of a subsequent query from another vnode, do not wait for the other query responses here.
if (!(pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0))) {
if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
return;
}
}
}
tscSetupOutputColumnIndex(pParentSql);
@ -1397,6 +1546,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
if (pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
pSql->fp = joinRetrieveFinalResCallback; // continue retrieve data
pSql->cmd.command = TSDB_SQL_FETCH;
tscProcessSql(pSql);
} else { // first retrieve from vnode during the secondary stage sub-query
// setting the command flag must happen after the semaphore has been correctly set.
@ -1432,8 +1582,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pSql->pSubs[pSql->subState.numOfRemain++] = pNew;
assert(pSql->subState.numOfRemain <= pSql->subState.numOfSub);
pSql->pSubs[tableIndex] = pNew;
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
addGroupInfoForSubquery(pSql, pNew, 0, tableIndex);
@ -1509,7 +1658,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, transfer to tid_tag query to retrieve (tableId, tags), "
"exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, tagIndex:%d, name:%s",
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, colIndex.columnIndex, pNewQueryInfo->pTableMetaInfo[0]->name);
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, colIndex.columnIndex, tNameGetTableName(&pNewQueryInfo->pTableMetaInfo[0]->name));
} else {
SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1};
SColumnIndex colIndex = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
@ -1520,7 +1669,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid);
pExpr->param->i64Key = tagColId;
pExpr->param->i64 = tagColId;
pExpr->numOfParams = 1;
}
@ -1544,7 +1693,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%u, transfer to ts_comp query to retrieve timestamps, "
"exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s",
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, pNewQueryInfo->pTableMetaInfo[0]->name);
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pNewQueryInfo->pTableMetaInfo[0]->name));
}
} else {
assert(0);
@ -1565,6 +1714,19 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
int32_t code = TSDB_CODE_SUCCESS;
pSql->subState.numOfSub = pQueryInfo->numOfTables;
if (pSql->subState.states == NULL) {
pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(*pSql->subState.states));
if (pSql->subState.states == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
pthread_mutex_init(&pSql->subState.mutex, NULL);
}
memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub);
tscDebug("%p reset all sub states to 0", pSql);
bool hasEmptySub = false;
tscDebug("%p start subquery, total:%d", pSql, pQueryInfo->numOfTables);
@ -1597,12 +1759,23 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
(*pSql->fp)(pSql->param, pSql, 0);
} else {
int fail = 0;
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
if ((code = tscProcessSql(pSub)) != TSDB_CODE_SUCCESS) {
pSql->subState.numOfRemain = i - 1; // the already sent request will continue and do not go to the error process routine
break;
if (fail) {
(*pSub->fp)(pSub->param, pSub, 0);
continue;
}
if ((code = tscProcessSql(pSub)) != TSDB_CODE_SUCCESS) {
pRes->code = code;
(*pSub->fp)(pSub->param, pSub, 0);
fail = 1;
}
}
if(fail) {
return;
}
pSql->cmd.command = TSDB_SQL_TABLE_JOIN_RETRIEVE;
@ -1631,6 +1804,25 @@ static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
}
}
void tscLockByThread(int64_t *lockedBy) {
int64_t tid = taosGetSelfPthreadId();
int i = 0;
while (atomic_val_compare_exchange_64(lockedBy, 0, tid) != 0) {
if (++i % 100 == 0) {
sched_yield();
}
}
}
void tscUnlockByThread(int64_t *lockedBy) {
int64_t tid = taosGetSelfPthreadId();
if (atomic_val_compare_exchange_64(lockedBy, tid, 0) != tid) {
assert(false);
}
}
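/* Editor's note: illustrative sketch, not part of this change. tscLockByThread()
 * spins on an atomic compare-and-swap until the calling thread owns the lock,
 * yielding every 100 iterations, and tscUnlockByThread() asserts that the releasing
 * thread is the owner. tscDoQuery() below uses the pair to serialize super-table
 * query handling on pSql->squeryLock, roughly as follows. */
int32_t tscHandleMasterSTableQuery(SSqlObj *pSql);  // declared below

static void exampleGuardedSTableQuery(SSqlObj *pSql) {
  tscLockByThread(&pSql->squeryLock);    // spin until this thread owns the lock
  tscHandleMasterSTableQuery(pSql);      // critical section: one thread at a time
  tscUnlockByThread(&pSql->squeryLock);  // release; asserts on owner mismatch
}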
int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
@ -1684,7 +1876,21 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
return ret;
}
pState->numOfRemain = pState->numOfSub;
if (pState->states == NULL) {
pState->states = calloc(pState->numOfSub, sizeof(*pState->states));
if (pState->states == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscAsyncResultOnError(pSql);
tfree(pMemoryBuf);
return ret;
}
pthread_mutex_init(&pState->mutex, NULL);
}
memset(pState->states, 0, sizeof(*pState->states) * pState->numOfSub);
tscDebug("%p reset all sub states to 0", pSql);
pRes->code = TSDB_CODE_SUCCESS;
int32_t i = 0;
@ -1785,7 +1991,22 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES
* current query failed, and the retry count is less than the available
* count, retry query clear previous retrieved data, then launch a new sub query
*/
static int32_t tscReissueSubquery(SRetrieveSupport *trsupport, SSqlObj *pSql, int32_t code) {
static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32_t code) {
SRetrieveSupport *trsupport = malloc(sizeof(SRetrieveSupport));
if (trsupport == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
memcpy(trsupport, oriTrs, sizeof(*trsupport));
const uint32_t nBufferSize = (1u << 16u); // 64KB
trsupport->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
if (trsupport->localBuffer == NULL) {
tscError("%p failed to malloc buffer for local buffer, reason:%s", pSql, strerror(errno));
tfree(trsupport);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SSqlObj *pParentSql = trsupport->pParentSql;
int32_t subqueryIndex = trsupport->subqueryIndex;
@ -1833,7 +2054,6 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
assert(pSql != NULL);
SSubqueryState* pState = &pParentSql->subState;
assert(pState->numOfRemain <= pState->numOfSub && pState->numOfRemain >= 0);
// retrieved in subquery failed. OR query cancelled in retrieve phase.
if (taos_errno(pSql) == TSDB_CODE_SUCCESS && pParentSql->res.code != TSDB_CODE_SUCCESS) {
@ -1864,14 +2084,12 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
}
}
int32_t remain = -1;
if ((remain = atomic_sub_fetch_32(&pState->numOfRemain, 1)) > 0) {
tscDebug("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pParentSql, pSql, trsupport->subqueryIndex,
pState->numOfSub - remain);
if (!subAndCheckDone(pSql, pParentSql, subqueryIndex)) {
tscDebug("%p sub:%p,%d freed, not finished, total:%d", pParentSql, pSql, trsupport->subqueryIndex, pState->numOfSub);
tscFreeRetrieveSup(pSql);
return;
}
}
// all subqueries are failed
tscError("%p retrieve from %d vnode(s) completed,code:%s.FAILED.", pParentSql, pState->numOfSub,
@ -1936,14 +2154,12 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
return;
}
int32_t remain = -1;
if ((remain = atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1)) > 0) {
tscDebug("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pParentSql, pSql, trsupport->subqueryIndex,
pState->numOfSub - remain);
if (!subAndCheckDone(pSql, pParentSql, idx)) {
tscDebug("%p sub:%p orderOfSub:%d freed, not finished", pParentSql, pSql, trsupport->subqueryIndex);
tscFreeRetrieveSup(pSql);
return;
}
}
// all sub-queries are returned, start to local merge process
pDesc->pColumnModel->capacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage;
@ -1954,7 +2170,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0);
tscClearInterpInfo(pPQueryInfo);
tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql);
tscCreateLocalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql);
tscDebug("%p build loser tree completed", pParentSql);
pParentSql->res.precision = pSql->res.precision;
@ -1989,7 +2205,6 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
SSqlObj * pParentSql = trsupport->pParentSql;
SSubqueryState* pState = &pParentSql->subState;
assert(pState->numOfRemain <= pState->numOfSub && pState->numOfRemain >= 0);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
@ -2093,7 +2308,15 @@ static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsuppo
return pNew;
}
// todo: there is a race condition in this function when cancel is called by the user.
void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
// the param may be NULL, since the work may already have been done by other query threads, and the async error path
// may enter this function while a user is killing the query.
if (param == NULL) {
assert(code != TSDB_CODE_SUCCESS);
return;
}
SRetrieveSupport *trsupport = (SRetrieveSupport *) param;
SSqlObj* pParentSql = trsupport->pParentSql;
@ -2202,7 +2425,8 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
}
}
if (atomic_sub_fetch_32(&pParentObj->subState.numOfRemain, 1) > 0) {
if (!subAndCheckDone(tres, pParentObj, pSupporter->index)) {
tscDebug("%p insert:%p,%d completed, total:%d", pParentObj, tres, pSupporter->index, pParentObj->subState.numOfSub);
return;
}
@ -2234,7 +2458,9 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
tscFreeQueryInfo(&pSql->cmd);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, pSql->cmd.clauseIndex, 0);
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
subquerySetState(pSql, &pParentObj->subState, i, 0);
tscDebug("%p, failed sub:%d, %p", pParentObj, i, pSql);
}
@ -2245,14 +2471,14 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
tscDebug("%p cleanup %d tableMeta in hashTable", pParentObj, pParentObj->cmd.numOfTables);
for(int32_t i = 0; i < pParentObj->cmd.numOfTables; ++i) {
char* name = pParentObj->cmd.pTableNameList[i];
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(pParentObj->cmd.pTableNameList[i], name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
pParentObj->cmd.parseFinished = false;
pParentObj->subState.numOfRemain = numOfFailed;
tscResetSqlCmdObj(&pParentObj->cmd);
tscResetSqlCmd(&pParentObj->cmd, false);
// in case of insert, redo parsing the sql string and build new submit data block for two reasons:
// 1. the table Id (tid & uid) may have been updated, so the submit block needs to be updated accordingly.
@ -2326,7 +2552,19 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
// the number of already initialized subqueries
int32_t numOfSub = 0;
pSql->subState.numOfRemain = pSql->subState.numOfSub;
if (pSql->subState.states == NULL) {
pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(*pSql->subState.states));
if (pSql->subState.states == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
pthread_mutex_init(&pSql->subState.mutex, NULL);
}
memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub);
tscDebug("%p reset all sub states to 0", pSql);
pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES);
if (pSql->pSubs == NULL) {
goto _error;

View File

@ -16,7 +16,7 @@
#include "tscUtil.h"
#include "hash.h"
#include "os.h"
#include "qAst.h"
#include "texpr.h"
#include "taosmsg.h"
#include "tkey.h"
#include "tmd5.h"
@ -71,6 +71,7 @@ void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw) {
bool tscQueryTags(SQueryInfo* pQueryInfo) {
int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
int32_t functId = pExpr->functionId;
@ -88,21 +89,6 @@ bool tscQueryTags(SQueryInfo* pQueryInfo) {
return true;
}
// todo refactor, extract methods and move the common module
void tscGetDBInfoFromTableFullName(char* tableId, char* db) {
char* st = strstr(tableId, TS_PATH_DELIMITER);
if (st != NULL) {
char* end = strstr(st + 1, TS_PATH_DELIMITER);
if (end != NULL) {
memcpy(db, tableId, (end - tableId));
db[end - tableId] = 0;
return;
}
}
db[0] = 0;
}
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
if (pQueryInfo == NULL) {
return false;
@ -314,7 +300,7 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
} else {
for (int32_t k = 0; k < pRes->numOfRows; ++k) {
char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes;
memcpy(p, &pInfo->pSqlExpr->param[1].i64Key, pInfo->field.bytes);
memcpy(p, &pInfo->pSqlExpr->param[1].i64, pInfo->field.bytes);
}
}
}
@ -395,7 +381,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd) {
tfree(pCmd->pQueryInfo);
}
void tscResetSqlCmdObj(SSqlCmd* pCmd) {
void tscResetSqlCmd(SSqlCmd* pCmd, bool removeMeta) {
pCmd->command = 0;
pCmd->numOfCols = 0;
pCmd->count = 0;
@ -413,13 +399,13 @@ void tscResetSqlCmdObj(SSqlCmd* pCmd) {
pCmd->numOfTables = 0;
tfree(pCmd->pTableNameList);
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList);
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, removeMeta);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
tscFreeQueryInfo(pCmd);
}
void tscFreeSqlResult(SSqlObj* pSql) {
tscDestroyLocalReducer(pSql);
tscDestroyLocalMerger(pSql);
SSqlRes* pRes = &pSql->res;
tscDestroyResPointerInfo(pRes);
@ -440,6 +426,12 @@ static void tscFreeSubobj(SSqlObj* pSql) {
pSql->pSubs[i] = NULL;
}
if (pSql->subState.states) {
pthread_mutex_destroy(&pSql->subState.mutex);
}
tfree(pSql->subState.states);
pSql->subState.numOfSub = 0;
}
@ -455,17 +447,29 @@ void tscFreeRegisteredSqlObj(void *pSql) {
SSqlObj* p = *(SSqlObj**)pSql;
STscObj* pTscObj = p->pTscObj;
assert(RID_VALID(p->self));
int32_t num = atomic_sub_fetch_32(&pTscObj->numOfObj, 1);
int32_t total = atomic_sub_fetch_32(&tscNumOfObj, 1);
tscDebug("%p free SqlObj, total in tscObj:%d, total:%d", pSql, num, total);
assert(RID_VALID(p->self));
tscDebug("%p free SqlObj, total in tscObj:%d, total:%d", pSql, num, total);
tscFreeSqlObj(p);
taosReleaseRef(tscRefId, pTscObj->rid);
}
void tscFreeMetaSqlObj(int64_t *rid){
if (RID_VALID(*rid)) {
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, *rid);
if (pSql) {
taosRemoveRef(tscObjRef, *rid);
taosReleaseRef(tscObjRef, *rid);
}
*rid = 0;
}
}
void tscFreeSqlObj(SSqlObj* pSql) {
if (pSql == NULL || pSql->signature != pSql) {
return;
@ -475,6 +479,9 @@ void tscFreeSqlObj(SSqlObj* pSql) {
pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
tscFreeMetaSqlObj(&pSql->metaRid);
tscFreeMetaSqlObj(&pSql->svgroupRid);
tscFreeSubobj(pSql);
SSqlCmd* pCmd = &pSql->cmd;
@ -493,7 +500,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
pSql->self = 0;
tscFreeSqlResult(pSql);
tscResetSqlCmdObj(pCmd);
tscResetSqlCmd(pCmd, false);
tfree(pCmd->tagData.data);
pCmd->tagData.dataLen = 0;
@ -503,10 +510,11 @@ void tscFreeSqlObj(SSqlObj* pSql) {
pCmd->allocSize = 0;
tsem_destroy(&pSql->rspSem);
memset(pSql, 0, sizeof(*pSql));
free(pSql);
}
void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
if (pDataBlock == NULL) {
return;
}
@ -519,6 +527,13 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
tfree(pDataBlock->pTableMeta);
}
if (removeMeta) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pDataBlock->tableName, name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
tfree(pDataBlock);
}
@ -554,21 +569,21 @@ void* tscDestroyBlockArrayList(SArray* pDataBlockList) {
size_t size = taosArrayGetSize(pDataBlockList);
for (int32_t i = 0; i < size; i++) {
void* d = taosArrayGetP(pDataBlockList, i);
tscDestroyDataBlock(d);
tscDestroyDataBlock(d, false);
}
taosArrayDestroy(pDataBlockList);
return NULL;
}
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable) {
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta) {
if (pBlockHashTable == NULL) {
return NULL;
}
STableDataBlocks** p = taosHashIterate(pBlockHashTable, NULL);
while(p) {
tscDestroyDataBlock(*p);
tscDestroyDataBlock(*p, removeMeta);
p = taosHashIterate(pBlockHashTable, p);
}
@ -585,17 +600,16 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
assert(pCmd->numOfClause == 1);
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
// todo refactor
// set the correct table meta object, the table meta has been locked in pDataBlocks, so it must be in the cache
if (pTableMetaInfo->pTableMeta != pDataBlock->pTableMeta) {
tstrncpy(pTableMetaInfo->name, pDataBlock->tableName, sizeof(pTableMetaInfo->name));
tNameAssign(&pTableMetaInfo->name, &pDataBlock->tableName);
if (pTableMetaInfo->pTableMeta != NULL) {
tfree(pTableMetaInfo->pTableMeta);
}
pTableMetaInfo->pTableMeta = tscTableMetaClone(pDataBlock->pTableMeta);
} else {
assert(strncmp(pTableMetaInfo->name, pDataBlock->tableName, tListLen(pDataBlock->tableName)) == 0);
pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
}
/*
@ -630,7 +644,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
* @param dataBlocks
* @return
*/
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name,
STableMeta* pTableMeta, STableDataBlocks** dataBlocks) {
STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks));
if (dataBuf == NULL) {
@ -658,27 +672,26 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
dataBuf->size = startOffset;
dataBuf->tsSource = -1;
tstrncpy(dataBuf->tableName, name, sizeof(dataBuf->tableName));
tNameAssign(&dataBuf->tableName, name);
//Here we keep the tableMeta to avoid it being removed by other threads.
dataBuf->pTableMeta = tscTableMetaClone(pTableMeta);
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
assert(initialSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
return TSDB_CODE_SUCCESS;
}
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta,
STableDataBlocks** dataBlocks, SArray* pBlockList) {
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize,
SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks, SArray* pBlockList) {
*dataBlocks = NULL;
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pHashList, (const char*)&id, sizeof(id));
if (t1 != NULL) {
*dataBlocks = *t1;
}
if (*dataBlocks == NULL) {
int32_t ret = tscCreateDataBlock((size_t)size, rowSize, startOffset, tableId, pTableMeta, dataBlocks);
int32_t ret = tscCreateDataBlock((size_t)size, rowSize, startOffset, name, pTableMeta, dataBlocks);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@ -767,22 +780,28 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) {
return result;
}
static void extractTableNameList(SSqlCmd* pCmd) {
static void extractTableNameList(SSqlCmd* pCmd, bool freeBlockMap) {
pCmd->numOfTables = (int32_t) taosHashGetSize(pCmd->pTableBlockHashList);
pCmd->pTableNameList = calloc(pCmd->numOfTables, POINTER_BYTES);
if (pCmd->pTableNameList == NULL) {
pCmd->pTableNameList = calloc(pCmd->numOfTables, POINTER_BYTES);
} else {
memset(pCmd->pTableNameList, 0, pCmd->numOfTables * POINTER_BYTES);
}
STableDataBlocks **p1 = taosHashIterate(pCmd->pTableBlockHashList, NULL);
int32_t i = 0;
while(p1) {
STableDataBlocks* pBlocks = *p1;
pCmd->pTableNameList[i++] = strndup(pBlocks->tableName, TSDB_TABLE_FNAME_LEN);
pCmd->pTableNameList[i++] = tNameDup(&pBlocks->tableName);
p1 = taosHashIterate(pCmd->pTableBlockHashList, p1);
}
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList);
if (freeBlockMap) {
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, false);
}
}
int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
SSqlCmd* pCmd = &pSql->cmd;
@ -798,7 +817,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
STableDataBlocks* dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
INSERT_HEAD_SIZE, 0, pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
if (ret != TSDB_CODE_SUCCESS) {
tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret);
taosHashCleanup(pVnodeDataBlockHashList);
@ -831,8 +850,8 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
tscDebug("%p name:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->tableName,
tscDebug("%p name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, tNameGetTableName(&pOneTableBlock->tableName),
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
@ -862,7 +881,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
pOneTableBlock = *p;
}
extractTableNameList(pCmd);
extractTableNameList(pCmd, freeBlockMap);
// free the table data blocks;
pCmd->pDataBlocks = pVnodeDataBlockList;
@ -1027,7 +1046,7 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
SInternalField* pInfo = taosArrayGet(pFieldInfo->internalField, i);
if (pInfo->pArithExprInfo != NULL) {
tExprTreeDestroy(&pInfo->pArithExprInfo->pExpr, NULL);
tExprTreeDestroy(pInfo->pArithExprInfo->pExpr, NULL);
SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base;
for(int32_t j = 0; j < pFuncMsg->numOfParams; ++j) {
@ -1060,6 +1079,8 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol
// set the correct column index
if (pColIndex->columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
pExpr->colInfo.colId = TSDB_TBNAME_COLUMN_INDEX;
} else if (pColIndex->columnIndex == TSDB_BLOCK_DIST_COLUMN_INDEX) {
pExpr->colInfo.colId = TSDB_BLOCK_DIST_COLUMN_INDEX;
} else if (pColIndex->columnIndex <= TSDB_UD_COLUMN_INDEX) {
pExpr->colInfo.colId = pColIndex->columnIndex;
} else {
@ -1273,7 +1294,7 @@ SColumn* tscColumnClone(const SColumn* src) {
dst->colIndex = src->colIndex;
dst->numOfFilters = src->numOfFilters;
dst->filterInfo = tscFilterInfoClone(src->filterInfo, src->numOfFilters);
dst->filterInfo = tFilterInfoDup(src->filterInfo, src->numOfFilters);
return dst;
}
@ -1476,7 +1497,7 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t
return false;
}
if (colId == TSDB_TBNAME_COLUMN_INDEX || (colId <= TSDB_UD_COLUMN_INDEX && numOfParams == 2)) {
if (colId == TSDB_TBNAME_COLUMN_INDEX || colId == TSDB_BLOCK_DIST_COLUMN_INDEX || (colId <= TSDB_UD_COLUMN_INDEX && numOfParams == 2)) {
return true;
}
@ -1779,10 +1800,10 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) {
info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn);
}
info->itemList = taosArrayClone(pInfo->itemList);
info->itemList = taosArrayDup(pInfo->itemList);
}
SArray* tscVgroupTableInfoClone(SArray* pVgroupTables) {
SArray* tscVgroupTableInfoDup(SArray* pVgroupTables) {
if (pVgroupTables == NULL) {
return NULL;
}
@ -1813,7 +1834,7 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo) {
tfree(pQueryInfo->pTableMetaInfo);
}
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables) {
void* pAlloc = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES);
if (pAlloc == NULL) {
@ -1831,7 +1852,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
pQueryInfo->pTableMetaInfo[pQueryInfo->numOfTables] = pTableMetaInfo;
if (name != NULL) {
tstrncpy(pTableMetaInfo->name, name, sizeof(pTableMetaInfo->name));
tNameAssign(&pTableMetaInfo->name, name);
}
pTableMetaInfo->pTableMeta = pTableMeta;
@ -1850,7 +1871,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1);
}
pTableMetaInfo->pVgroupTables = tscVgroupTableInfoClone(pVgroupTables);
pTableMetaInfo->pVgroupTables = tscVgroupTableInfoDup(pVgroupTables);
pQueryInfo->numOfTables += 1;
return pTableMetaInfo;
@ -1890,7 +1911,7 @@ void registerSqlObj(SSqlObj* pSql) {
tscDebug("%p new SqlObj from %p, total in tscObj:%d, total:%d", pSql, pSql->pTscObj, num, total);
}
SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cmd) {
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd) {
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
if (pNew == NULL) {
tscError("%p new subquery failed, tableIndex:%d", pSql, 0);
@ -1913,6 +1934,10 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
}
if (tscAddSubqueryInfo(pCmd) != TSDB_CODE_SUCCESS) {
#ifdef __APPLE__
// to satisfy later tsem_destroy in taos_free_result
tsem_init(&pNew->rspSem, 0, 0);
#endif // __APPLE__
tscFreeSqlObj(pNew);
return NULL;
}
@ -1928,7 +1953,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
assert(pSql->cmd.clauseIndex == 0);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
registerSqlObj(pNew);
return pNew;
@ -1975,7 +2000,7 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pNewQueryInfo, int64_t ui
tscFieldInfoUpdateOffset(pNewQueryInfo);
}
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, int32_t cmd, SSqlObj* pPrevSql) {
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t fp, void* param, int32_t cmd, SSqlObj* pPrevSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
@ -2033,7 +2058,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr;
if (pQueryInfo->groupbyExpr.columnInfo != NULL) {
pNewQueryInfo->groupbyExpr.columnInfo = taosArrayClone(pQueryInfo->groupbyExpr.columnInfo);
pNewQueryInfo->groupbyExpr.columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo);
if (pNewQueryInfo->groupbyExpr.columnInfo == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
@ -2084,27 +2109,26 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
pNew->param = param;
pNew->maxRetry = TSDB_MAX_REPLICA;
char* name = pTableMetaInfo->name;
STableMetaInfo* pFinalInfo = NULL;
if (pPrevSql == NULL) {
STableMeta* pTableMeta = tscTableMetaClone(pTableMetaInfo->pTableMeta);
STableMeta* pTableMeta = tscTableMetaDup(pTableMetaInfo->pTableMeta);
assert(pTableMeta != NULL);
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList,
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
} else { // transfer the ownership of pTableMeta to the newly created sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
STableMeta* pPrevTableMeta = tscTableMetaClone(pPrevInfo->pTableMeta);
STableMeta* pPrevTableMeta = tscTableMetaDup(pPrevInfo->pTableMeta);
SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
pTableMetaInfo->pVgroupTables);
}
// this case cannot happen
if (pFinalInfo->pTableMeta == NULL) {
tscError("%p new subquery failed since no tableMeta, name:%s", pSql, name);
tscError("%p new subquery failed since no tableMeta, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
if (pPrevSql != NULL) { // pass the previous error to client
assert(pPrevSql->res.code != TSDB_CODE_SUCCESS);
@ -2129,7 +2153,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
"%p new subquery:%p, tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ","
"fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64,
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
size, pNewQueryInfo->fieldsInfo.numOfOutput, pFinalInfo->name, pNewQueryInfo->window.skey,
size, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pFinalInfo->name), pNewQueryInfo->window.skey,
pNewQueryInfo->window.ekey, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit);
tscPrintSelectClause(pNew, 0);
@ -2166,12 +2190,12 @@ void tscDoQuery(SSqlObj* pSql) {
}
if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) {
tscProcessMultiVnodesImportFromFile(pSql);
tscImportDataFromFile(pSql);
} else {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
uint16_t type = pQueryInfo->type;
if (pSql->fp == (void(*)())tscHandleMultivnodeInsert) { // multi-vnodes insertion
if (TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_INSERT)) { // multi-vnodes insertion
tscHandleMultivnodeInsert(pSql);
return;
}
@ -2184,7 +2208,9 @@ void tscDoQuery(SSqlObj* pSql) {
tscProcessSql(pSql);
} else { // secondary stage join query.
if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { // super table query
tscLockByThread(&pSql->squeryLock);
tscHandleMasterSTableQuery(pSql);
tscUnlockByThread(&pSql->squeryLock);
} else {
tscProcessSql(pSql);
}
@ -2193,7 +2219,9 @@ void tscDoQuery(SSqlObj* pSql) {
return;
} else if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { // super table query
tscLockByThread(&pSql->squeryLock);
tscHandleMasterSTableQuery(pSql);
tscUnlockByThread(&pSql->squeryLock);
return;
}
@ -2262,7 +2290,6 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
}
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
@ -2477,7 +2504,11 @@ bool tscSetSqlOwner(SSqlObj* pSql) {
SSqlRes* pRes = &pSql->res;
// set the sql object owner
#ifdef __APPLE__
pthread_t threadId = (pthread_t)taosGetSelfPthreadId();
#else // __APPLE__
uint64_t threadId = taosGetSelfPthreadId();
#endif // __APPLE__
if (atomic_val_compare_exchange_64(&pSql->owner, 0, threadId) != 0) {
pRes->code = TSDB_CODE_QRY_IN_EXEC;
return false;
@ -2660,7 +2691,7 @@ uint32_t tscGetTableMetaMaxSize() {
return sizeof(STableMeta) + TSDB_MAX_COLUMNS * sizeof(SSchema);
}
STableMeta* tscTableMetaClone(STableMeta* pTableMeta) {
STableMeta* tscTableMetaDup(STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
uint32_t size = tscGetTableMetaSize(pTableMeta);
STableMeta* p = calloc(1, size);

View File

@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)

View File

@ -2,15 +2,117 @@
#include <iostream>
#include "taos.h"
#include "tglobal.h"
namespace {
static int64_t start_ts = 1433955661000;
}
/* test parse time function */
TEST(testCase, result_field_test) {
taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
taos_init();
void stmtInsertTest() {
TAOS* conn = taos_connect("ubuntu", "root", "taosdata", 0, 0);
if (conn == NULL) {
printf("Failed to connect to DB, reason:%s", taos_errstr(conn));
exit(-1);
}
TAOS_RES* res = taos_query(conn, "use test");
taos_free_result(res);
const char* sql = "insert into t1 values(?, ?, ?, ?)";
TAOS_STMT* stmt = taos_stmt_init(conn);
int32_t ret = taos_stmt_prepare(stmt, sql, 0);
ASSERT_EQ(ret, 0);
//ts timestamp, k int, a binary(11), b nchar(4)
struct {
int64_t ts;
int k;
char* a;
char* b;
} v = {0};
TAOS_BIND params[4];
params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
params[0].buffer_length = sizeof(v.ts);
params[0].buffer = &v.ts;
params[0].length = &params[0].buffer_length;
params[0].is_null = NULL;
params[1].buffer_type = TSDB_DATA_TYPE_INT;
params[1].buffer_length = sizeof(v.k);
params[1].buffer = &v.k;
params[1].length = &params[1].buffer_length;
params[1].is_null = NULL;
params[2].buffer_type = TSDB_DATA_TYPE_BINARY;
params[2].buffer_length = sizeof(v.a);
params[2].buffer = &v.a;
params[2].is_null = NULL;
params[3].buffer_type = TSDB_DATA_TYPE_NCHAR;
params[3].buffer_length = sizeof(v.b);
params[3].buffer = &v.b;
params[3].is_null = NULL;
v.ts = start_ts + 20;
v.k = 123;
char* str = "abc";
uintptr_t len = strlen(str);
v.a = str;
params[2].length = &len;
params[2].buffer_length = len;
params[2].buffer = str;
char* nstr = "999";
uintptr_t len1 = strlen(nstr);
v.b = nstr;
params[3].buffer_length = len1;
params[3].buffer = nstr;
params[3].length = &len1;
taos_stmt_bind_param(stmt, params);
taos_stmt_add_batch(stmt);
if (taos_stmt_execute(stmt) != 0) {
printf("\033[31mfailed to execute insert statement.\033[0m\n");
return;
}
v.ts = start_ts + 30;
v.k = 911;
str = "92";
len = strlen(str);
params[2].length = &len;
params[2].buffer_length = len;
params[2].buffer = str;
nstr = "1920";
len1 = strlen(nstr);
params[3].buffer_length = len1;
params[3].buffer = nstr;
params[3].length = &len1;
taos_stmt_bind_param(stmt, params);
taos_stmt_add_batch(stmt);
ret = taos_stmt_execute(stmt);
if (ret != 0) {
printf("%p\n", ret);
printf("\033[31mfailed to execute insert statement.\033[0m\n");
return;
}
taos_stmt_close(stmt);
taos_close(conn);
}
void validateResultFields() {
TAOS* conn = taos_connect("ubuntu", "root", "taosdata", 0, 0);
if (conn == NULL) {
printf("Failed to connect to DB, reason:%s", taos_errstr(conn));
@ -134,5 +236,31 @@ TEST(testCase, result_field_test) {
ASSERT_STREQ(fields[6].name, "first(ts)");
taos_free_result(res);
// update the config parameter; the result field names will be changed accordingly
tsKeepOriginalColumnName = 1;
res = taos_query(conn, "select first(ts, a, k, k, b, b, ts) from t1");
ASSERT_EQ(taos_num_fields(res), 7);
fields = taos_fetch_fields(res);
ASSERT_EQ(fields[0].bytes, 8);
ASSERT_EQ(fields[0].type, TSDB_DATA_TYPE_TIMESTAMP);
ASSERT_STREQ(fields[0].name, "ts");
ASSERT_EQ(fields[2].bytes, 4);
ASSERT_EQ(fields[2].type, TSDB_DATA_TYPE_INT);
ASSERT_STREQ(fields[2].name, "k");
taos_free_result(res);
taos_close(conn);
}
}
/* test parse time function */
TEST(testCase, result_field_test) {
taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
taos_init();
validateResultFields();
stmtInsertTest();
}

View File

@ -162,6 +162,10 @@ TEST(testCase, parse_time) {
taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, -28800 * MILLISECOND_PER_SECOND);
char* t = "2021-01-08T02:11:40.000+00:00";
taosParseTime(t, &time, strlen(t), TSDB_TIME_PRECISION_MILLI, 0);
printf("%ld\n", time);
}

View File

@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)

View File

@ -13,17 +13,17 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_TSYNTAXTREEFUNCTION_H
#define TDENGINE_TSYNTAXTREEFUNCTION_H
#ifndef TDENGINE_QARITHMETICOPERATOR_H
#define TDENGINE_QARITHMETICOPERATOR_H
#ifdef __cplusplus
extern "C" {
#endif
typedef void (*_bi_consumer_fn_t)(void *left, void *right, int32_t numOfLeft, int32_t numOfRight, void *output,
int32_t order);
typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight,
int32_t rightType, void *output, int32_t order);
_bi_consumer_fn_t getArithmeticOperatorFn(int32_t leftType, int32_t rightType, int32_t optr);
_arithmetic_operator_fn_t getArithmeticOperatorFn(int32_t arithmeticOptr);
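/* Editor's note: illustrative sketch, not part of this change. With the new
 * signature the operator is looked up once and then applied to whole column
 * buffers; the operator constant TSDB_BINARY_OP_ADD used here is an assumption
 * for illustration and should be checked against the actual operator enum. */
static inline void exampleColumnAdd(double *left, double *right, double *out, int32_t num) {
  _arithmetic_operator_fn_t add = getArithmeticOperatorFn(TSDB_BINARY_OP_ADD);
  add(left, num, TSDB_DATA_TYPE_DOUBLE, right, num, TSDB_DATA_TYPE_DOUBLE, out, TSDB_ORDER_ASC);
}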
#ifdef __cplusplus
}

View File

@ -20,30 +20,30 @@
#include <string.h>
#include "talgo.h"
#include "taosdef.h"
#include "ttype.h"
#include "tutil.h"
#ifdef __cplusplus
extern "C" {
#endif
#define STR_TO_VARSTR(x, str) \
do { \
VarDataLenT __len = strlen(str); \
*(VarDataLenT *)(x) = __len; \
memcpy(varDataVal(x), (str), __len); \
#define STR_TO_VARSTR(x, str) \
do { \
VarDataLenT __len = (VarDataLenT)strlen(str); \
*(VarDataLenT *)(x) = __len; \
memcpy(varDataVal(x), (str), __len); \
} while (0);
#define STR_WITH_MAXSIZE_TO_VARSTR(x, str, _maxs) \
do { \
#define STR_WITH_MAXSIZE_TO_VARSTR(x, str, _maxs) \
do { \
char *_e = stpncpy(varDataVal(x), (str), (_maxs)-VARSTR_HEADER_SIZE); \
varDataSetLen(x, (_e - (x)-VARSTR_HEADER_SIZE)); \
varDataSetLen(x, (_e - (x)-VARSTR_HEADER_SIZE)); \
} while (0)
#define STR_WITH_SIZE_TO_VARSTR(x, str, _size) \
do { \
*(VarDataLenT *)(x) = (_size); \
memcpy(varDataVal(x), (str), (_size)); \
#define STR_WITH_SIZE_TO_VARSTR(x, str, _size) \
do { \
*(VarDataLenT *)(x) = (VarDataLenT)(_size); \
memcpy(varDataVal(x), (str), (_size)); \
} while (0);
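A minimal usage sketch for these macros; the 64-byte buffer and the example string are illustrative only, and it assumes VARSTR_HEADER_SIZE and varDataVal() behave as in this header:
// write a C string into a var-length cell; the VarDataLenT cast matches the macro above
char cell[64 + VARSTR_HEADER_SIZE];
const char *s = "hello";
STR_WITH_SIZE_TO_VARSTR(cell, s, strlen(s));
// the first VARSTR_HEADER_SIZE bytes now hold the length (5), followed by the payload bytes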
// ----------------- TSDB COLUMN DEFINITION
@ -156,7 +156,7 @@ static FORCE_INLINE int tkeyComparFn(const void *tkey1, const void *tkey2) {
* +----------+----------+---------------------------------+---------------------------------+
* | len | sversion | First part | Second part |
* +----------+----------+---------------------------------+---------------------------------+
*
*
* NOTE: timestamp in this row structure is TKEY instead of TSKEY
*/
typedef void *SDataRow;

View File

@ -31,6 +31,15 @@ extern "C" {
struct tExprNode;
struct SSchema;
#define QUERY_COND_REL_PREFIX_IN "IN|"
#define QUERY_COND_REL_PREFIX_LIKE "LIKE|"
#define QUERY_COND_REL_PREFIX_IN_LEN 3
#define QUERY_COND_REL_PREFIX_LIKE_LEN 5
typedef bool (*__result_filter_fn_t)(const void *, void *);
typedef void (*__do_filter_suppl_fn_t)(void *, void *);
enum {
TSQL_NODE_DUMMY = 0x0,
TSQL_NODE_EXPR = 0x1,
@ -38,9 +47,6 @@ enum {
TSQL_NODE_VALUE = 0x4,
};
typedef bool (*__result_filter_fn_t)(const void *, void *);
typedef void (*__do_filter_suppl_fn_t)(void *, void *);
/**
* this structure is used to filter data in tags, so the offset of filtered tag column in tagdata string is required
*/
@ -52,12 +58,6 @@ typedef struct tQueryInfo {
bool indexed; // indexed columns
} tQueryInfo;
typedef struct SExprTraverseSupp {
__result_filter_fn_t nodeFilterFn;
__do_filter_suppl_fn_t setupInfoFn;
void * pExtInfo;
} SExprTraverseSupp;
typedef struct tExprNode {
uint8_t nodeType;
union {
@ -65,7 +65,7 @@ typedef struct tExprNode {
uint8_t optr; // filter operator
uint8_t hasPK; // 0: do not contain primary filter, 1: contain
void * info; // support filter operation on this expression only available for leaf node
struct tExprNode *pLeft; // left child pointer
struct tExprNode *pRight; // right child pointer
} _node;
@ -74,19 +74,27 @@ typedef struct tExprNode {
};
} tExprNode;
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
char *(*cb)(void *, const char*, int32_t));
typedef struct SExprTraverseSupp {
__result_filter_fn_t nodeFilterFn;
__do_filter_suppl_fn_t setupInfoFn;
void * pExtInfo;
} SExprTraverseSupp;
void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *));
tExprNode* exprTreeFromBinary(const void* data, size_t size);
tExprNode* exprTreeFromTableName(const char* tbnameCond);
void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *));
void tExprTreeDestroy(tExprNode **pExprs, void (*fp)(void*));
bool exprTreeApplayFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight,
int32_t rightType, void *output, int32_t order);
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
char *(*cb)(void *, const char*, int32_t));
#ifdef __cplusplus
}
#endif
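A hedged sketch of how the refactored callback might be invoked; TSDB_BINARY_OP_ADD is assumed to be the operator code the parser uses for '+', and is not taken from this diff:
// element-wise addition of two double columns through the new single-argument lookup
double left[3] = {1.0, 2.0, 3.0}, right[3] = {10.0, 20.0, 30.0}, out[3];
_arithmetic_operator_fn_t addFn = getArithmeticOperatorFn(TSDB_BINARY_OP_ADD);  // assumed constant
addFn(left, 3, TSDB_DATA_TYPE_DOUBLE, right, 3, TSDB_DATA_TYPE_DOUBLE, out, TSDB_ORDER_ASC);
// out is expected to hold {11.0, 22.0, 33.0}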

View File

@ -36,6 +36,7 @@ extern int8_t tsEnableVnodeBak;
extern int8_t tsEnableTelemetryReporting;
extern char tsEmail[];
extern char tsArbitrator[];
extern int8_t tsArbOnline;
// common
extern int tsRpcTimer;
@ -62,7 +63,6 @@ extern int32_t tsRetrieveBlockingModel;// retrieve threads will be blocked
extern int8_t tsKeepOriginalColumnName;
// client
extern int32_t tsTableMetaKeepTimer;
extern int32_t tsMaxSQLStringLen;
extern int8_t tsTscEnableRecordSql;
extern int32_t tsMaxNumOfOrderedResults;
@ -88,8 +88,8 @@ extern int32_t tsMinRowsInFileBlock;
extern int32_t tsMaxRowsInFileBlock;
extern int16_t tsCommitTime; // seconds
extern int32_t tsTimePrecision;
extern int16_t tsCompression;
extern int16_t tsWAL;
extern int8_t tsCompression;
extern int8_t tsWAL;
extern int32_t tsFsyncPeriod;
extern int32_t tsReplications;
extern int32_t tsQuorum;
@ -180,7 +180,7 @@ extern int32_t tsLogKeepDays;
extern int32_t dDebugFlag;
extern int32_t vDebugFlag;
extern int32_t mDebugFlag;
extern int32_t cDebugFlag;
extern uint32_t cDebugFlag;
extern int32_t jniDebugFlag;
extern int32_t tmrDebugFlag;
extern int32_t sdbDebugFlag;
@ -190,7 +190,7 @@ extern int32_t monDebugFlag;
extern int32_t uDebugFlag;
extern int32_t rpcDebugFlag;
extern int32_t odbcDebugFlag;
extern int32_t qDebugFlag;
extern uint32_t qDebugFlag;
extern int32_t wDebugFlag;
extern int32_t cqDebugFlag;
extern int32_t debugFlag;

View File

@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_NAME_H
#define TDENGINE_NAME_H
@ -21,6 +36,20 @@ typedef struct SColumnInfoData {
void* pData; // the corresponding block data in memory
} SColumnInfoData;
#define TSDB_DB_NAME_T 1
#define TSDB_TABLE_NAME_T 2
#define T_NAME_ACCT 0x1u
#define T_NAME_DB 0x2u
#define T_NAME_TABLE 0x4u
typedef struct SName {
uint8_t type; //db_name_t, table_name_t
char acctId[TSDB_ACCT_ID_LEN];
char dbname[TSDB_DB_NAME_LEN];
char tname[TSDB_TABLE_NAME_LEN];
} SName;
void extractTableName(const char *tableId, char *name);
char* extractDBName(const char *tableId, char *name);
@ -31,12 +60,50 @@ void extractTableNameFromToken(SStrToken *pToken, SStrToken* pTable);
SSchema tGetTableNameColumnSchema();
SSchema tGetBlockDistColumnSchema();
SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const char* name);
bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters);
SSchema tscGetTbnameColumnSchema();
SSchema tGetTbnameColumnSchema();
/**
* check if the schema is valid or not, including following aspects:
* 1. number of columns
* 2. column types
* 3. column length
* 4. column names
* 5. total length
*
* @param pSchema
* @param numOfCols
* @return
*/
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
int32_t tNameExtractFullName(const SName* name, char* dst);
int32_t tNameLen(const SName* name);
SName* tNameDup(const SName* name);
bool tIsValidName(const SName* name);
const char* tNameGetTableName(const SName* name);
int32_t tNameGetDbName(const SName* name, char* dst);
int32_t tNameGetFullDbName(const SName* name, char* dst);
bool tNameIsEmpty(const SName* name);
void tNameAssign(SName* dst, const SName* src);
int32_t tNameFromString(SName* dst, const char* str, uint32_t type);
int32_t tNameSetAcctId(SName* dst, const char* acct);
int32_t tNameSetDbName(SName* dst, const char* acct, SStrToken* dbToken);
#endif // TDENGINE_NAME_H
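A minimal sketch of the SName round trip declared above, assuming TS_PATH_DELIMITER is "." and using an illustrative name; the buffer sizing is an assumption:
SName name = {0};
if (tNameFromString(&name, "acct1.db1.tbl1", T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE) == 0) {
  char full[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_TABLE_NAME_LEN + 3] = {0};
  tNameExtractFullName(&name, full);            // expected to rebuild "acct1.db1.tbl1"
  const char *tbl = tNameGetTableName(&name);   // expected to point at "tbl1"
  (void)tbl;
}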

View File

@ -28,7 +28,8 @@ typedef struct tVariant {
uint32_t nType;
int32_t nLen; // only used for string, for number, it is useless
union {
int64_t i64Key;
int64_t i64;
uint64_t u64;
double dKey;
char * pz;
wchar_t *wpz;
@ -36,9 +37,9 @@ typedef struct tVariant {
};
} tVariant;
void tVariantCreate(tVariant *pVar, SStrToken *token);
bool tVariantIsValid(tVariant *pVar);
void tVariantCreateFromString(tVariant *pVar, char *pz, uint32_t len, uint32_t type);
void tVariantCreate(tVariant *pVar, SStrToken *token);
void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32_t type);

File diff suppressed because it is too large

View File

@ -16,18 +16,15 @@
#include "os.h"
#include "exception.h"
#include "qArithmeticOperator.h"
#include "qAst.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "tarray.h"
#include "tbuffer.h"
#include "tcompare.h"
#include "tname.h"
#include "tschemautil.h"
#include "tsdb.h"
#include "tskiplist.h"
#include "tsqlfunction.h"
#include "texpr.h"
#include "tarithoperator.h"
static uint8_t UNUSED_FUNC isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLeft, const tExprNode *pRight) {
if (pLeft->nodeType == TSQL_NODE_COL) {
@ -102,13 +99,15 @@ static void reverseCopy(char* dest, const char* src, int16_t type, int32_t numOf
}
}
void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) {
static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *));
void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)) {
if (pNode == NULL) {
return;
}
if (pNode->nodeType == TSQL_NODE_EXPR) {
tExprTreeDestroy(&pNode, fp);
doExprTreeDestroy(&pNode, fp);
} else if (pNode->nodeType == TSQL_NODE_VALUE) {
tVariantDestroy(pNode->pVal);
} else if (pNode->nodeType == TSQL_NODE_COL) {
@ -118,14 +117,14 @@ void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) {
free(pNode);
}
void tExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) {
static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) {
if (*pExpr == NULL) {
return;
}
if ((*pExpr)->nodeType == TSQL_NODE_EXPR) {
tExprTreeDestroy(&(*pExpr)->_node.pLeft, fp);
tExprTreeDestroy(&(*pExpr)->_node.pRight, fp);
doExprTreeDestroy(&(*pExpr)->_node.pLeft, fp);
doExprTreeDestroy(&(*pExpr)->_node.pRight, fp);
if (fp != NULL) {
fp((*pExpr)->_node.info);
@ -197,81 +196,82 @@ void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput,
* exprLeft + exprRight
* the type of returned value of one expression is always double float precious
*/
_bi_consumer_fn_t fp = getArithmeticOperatorFn(TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_DOUBLE, pExprs->_node.optr);
fp(pLeftOutput, pRightOutput, numOfRows, numOfRows, pOutput, TSDB_ORDER_ASC);
_arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr);
OperatorFn(pLeftOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pRightOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pOutput, TSDB_ORDER_ASC);
} else if (pRight->nodeType == TSQL_NODE_COL) { // exprLeft + columnRight
_bi_consumer_fn_t fp = getArithmeticOperatorFn(TSDB_DATA_TYPE_DOUBLE, pRight->pSchema->type, pExprs->_node.optr);
_arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr);
// set input buffer
char *pInputData = getSourceDataBlock(param, pRight->pSchema->name, pRight->pSchema->colId);
if (order == TSDB_ORDER_DESC) {
reverseCopy(pdata, pInputData, pRight->pSchema->type, numOfRows);
fp(pLeftOutput, pdata, numOfRows, numOfRows, pOutput, TSDB_ORDER_ASC);
OperatorFn(pLeftOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pdata, numOfRows, pRight->pSchema->type, pOutput, TSDB_ORDER_ASC);
} else {
fp(pLeftOutput, pInputData, numOfRows, numOfRows, pOutput, TSDB_ORDER_ASC);
OperatorFn(pLeftOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pInputData, numOfRows, pRight->pSchema->type, pOutput, TSDB_ORDER_ASC);
}
} else if (pRight->nodeType == TSQL_NODE_VALUE) { // exprLeft + 12
_bi_consumer_fn_t fp = getArithmeticOperatorFn(TSDB_DATA_TYPE_DOUBLE, pRight->pVal->nType, pExprs->_node.optr);
fp(pLeftOutput, &pRight->pVal->i64Key, numOfRows, 1, pOutput, TSDB_ORDER_ASC);
_arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr);
OperatorFn(pLeftOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, &pRight->pVal->i64, 1, pRight->pVal->nType, pOutput, TSDB_ORDER_ASC);
}
} else if (pLeft->nodeType == TSQL_NODE_COL) {
// column data specified on left-hand-side
char *pLeftInputData = getSourceDataBlock(param, pLeft->pSchema->name, pLeft->pSchema->colId);
if (pRight->nodeType == TSQL_NODE_EXPR) { // columnLeft + expr2
_bi_consumer_fn_t fp = getArithmeticOperatorFn(pLeft->pSchema->type, TSDB_DATA_TYPE_DOUBLE, pExprs->_node.optr);
_arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr);
if (order == TSDB_ORDER_DESC) {
reverseCopy(pdata, pLeftInputData, pLeft->pSchema->type, numOfRows);
fp(pdata, pRightOutput, numOfRows, numOfRows, pOutput, TSDB_ORDER_ASC);
OperatorFn(pdata, numOfRows, pLeft->pSchema->type, pRightOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pOutput, TSDB_ORDER_ASC);
} else {
fp(pLeftInputData, pRightOutput, numOfRows, numOfRows, pOutput, TSDB_ORDER_ASC);
OperatorFn(pLeftInputData, numOfRows, pLeft->pSchema->type, pRightOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pOutput, TSDB_ORDER_ASC);
}
} else if (pRight->nodeType == TSQL_NODE_COL) { // columnLeft + columnRight
// column data specified on right-hand-side
char *pRightInputData = getSourceDataBlock(param, pRight->pSchema->name, pRight->pSchema->colId);
_bi_consumer_fn_t fp = getArithmeticOperatorFn(pLeft->pSchema->type, pRight->pSchema->type, pExprs->_node.optr);
_arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr);
// both columns are descending order, do not reverse the source data
fp(pLeftInputData, pRightInputData, numOfRows, numOfRows, pOutput, order);
OperatorFn(pLeftInputData, numOfRows, pLeft->pSchema->type, pRightInputData, numOfRows, pRight->pSchema->type, pOutput, order);
} else if (pRight->nodeType == TSQL_NODE_VALUE) { // columnLeft + 12
_bi_consumer_fn_t fp = getArithmeticOperatorFn(pLeft->pSchema->type, pRight->pVal->nType, pExprs->_node.optr);
_arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr);
if (order == TSDB_ORDER_DESC) {
reverseCopy(pdata, pLeftInputData, pLeft->pSchema->type, numOfRows);
fp(pdata, &pRight->pVal->i64Key, numOfRows, 1, pOutput, TSDB_ORDER_ASC);
OperatorFn(pdata, numOfRows, pLeft->pSchema->type, &pRight->pVal->i64, 1, pRight->pVal->nType, pOutput, TSDB_ORDER_ASC);
} else {
fp(pLeftInputData, &pRight->pVal->i64Key, numOfRows, 1, pOutput, TSDB_ORDER_ASC);
OperatorFn(pLeftInputData, numOfRows, pLeft->pSchema->type, &pRight->pVal->i64, 1, pRight->pVal->nType, pOutput, TSDB_ORDER_ASC);
}
}
} else {
// column data specified on left-hand-side
if (pRight->nodeType == TSQL_NODE_EXPR) { // 12 + expr2
_bi_consumer_fn_t fp = getArithmeticOperatorFn(pLeft->pVal->nType, TSDB_DATA_TYPE_DOUBLE, pExprs->_node.optr);
fp(&pLeft->pVal->i64Key, pRightOutput, 1, numOfRows, pOutput, TSDB_ORDER_ASC);
_arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr);
OperatorFn(&pLeft->pVal->i64, 1, pLeft->pVal->nType, pRightOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pOutput, TSDB_ORDER_ASC);
} else if (pRight->nodeType == TSQL_NODE_COL) { // 12 + columnRight
// column data specified on right-hand-side
char *pRightInputData = getSourceDataBlock(param, pRight->pSchema->name, pRight->pSchema->colId);
_bi_consumer_fn_t fp = getArithmeticOperatorFn(pLeft->pVal->nType, pRight->pSchema->type, pExprs->_node.optr);
_arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr);
if (order == TSDB_ORDER_DESC) {
reverseCopy(pdata, pRightInputData, pRight->pSchema->type, numOfRows);
fp(&pLeft->pVal->i64Key, pdata, numOfRows, 1, pOutput, TSDB_ORDER_ASC);
OperatorFn(&pLeft->pVal->i64, 1, pLeft->pVal->nType, pdata, numOfRows, pRight->pSchema->type, pOutput, TSDB_ORDER_ASC);
} else {
fp(&pLeft->pVal->i64Key, pRightInputData, 1, numOfRows, pOutput, TSDB_ORDER_ASC);
OperatorFn(&pLeft->pVal->i64, 1, pLeft->pVal->nType, pRightInputData, numOfRows, pRight->pSchema->type, pOutput, TSDB_ORDER_ASC);
}
} else if (pRight->nodeType == TSQL_NODE_VALUE) { // 12 + 12
_bi_consumer_fn_t fp = getArithmeticOperatorFn(pLeft->pVal->nType, pRight->pVal->nType, pExprs->_node.optr);
fp(&pLeft->pVal->i64Key, &pRight->pVal->i64Key, 1, 1, pOutput, TSDB_ORDER_ASC);
_arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr);
OperatorFn(&pLeft->pVal->i64, 1, pLeft->pVal->nType, &pRight->pVal->i64, 1, pRight->pVal->nType, pOutput, TSDB_ORDER_ASC);
}
}
free(pLeftOutput);
free(pRightOutput);
tfree(pdata);
tfree(pLeftOutput);
tfree(pRightOutput);
}
static void exprTreeToBinaryImpl(SBufferWriter* bw, tExprNode* expr) {
@ -285,7 +285,7 @@ static void exprTreeToBinaryImpl(SBufferWriter* bw, tExprNode* expr) {
tbufWriteInt32(bw, pVal->nLen);
tbufWrite(bw, pVal->pz, pVal->nLen);
} else {
tbufWriteInt64(bw, pVal->i64Key);
tbufWriteInt64(bw, pVal->i64);
}
} else if (expr->nodeType == TSQL_NODE_COL) {
@ -342,7 +342,7 @@ static tExprNode* exprTreeFromBinaryImpl(SBufferReader* br) {
}
tExprNode* pExpr = exception_calloc(1, sizeof(tExprNode));
CLEANUP_PUSH_VOID_PTR_PTR(true, tExprNodeDestroy, pExpr, NULL);
CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, pExpr, NULL);
pExpr->nodeType = tbufReadUint8(br);
if (pExpr->nodeType == TSQL_NODE_VALUE) {
@ -355,7 +355,7 @@ static tExprNode* exprTreeFromBinaryImpl(SBufferReader* br) {
pVal->pz = calloc(1, pVal->nLen + 1);
tbufReadToBuffer(br, pVal->pz, pVal->nLen);
} else {
pVal->i64Key = tbufReadInt64(br);
pVal->i64 = tbufReadInt64(br);
}
} else if (pExpr->nodeType == TSQL_NODE_COL) {
@ -396,7 +396,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
int32_t anchor = CLEANUP_GET_ANCHOR();
tExprNode* expr = exception_calloc(1, sizeof(tExprNode));
CLEANUP_PUSH_VOID_PTR_PTR(true, tExprNodeDestroy, expr, NULL);
CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, expr, NULL);
expr->nodeType = TSQL_NODE_EXPR;
@ -407,7 +407,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
SSchema* pSchema = exception_calloc(1, sizeof(SSchema));
left->pSchema = pSchema;
*pSchema = tscGetTbnameColumnSchema();
*pSchema = tGetTbnameColumnSchema();
tExprNode* right = exception_calloc(1, sizeof(tExprNode));
expr->_node.pRight = right;

View File

@ -41,6 +41,7 @@ int32_t tsStatusInterval = 1; // second
int32_t tsNumOfMnodes = 3;
int8_t tsEnableVnodeBak = 1;
int8_t tsEnableTelemetryReporting = 1;
int8_t tsArbOnline = 0;
char tsEmail[TSDB_FQDN_LEN] = {0};
// common
@ -71,7 +72,6 @@ char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/";
int32_t tsCompressMsgSize = -1;
// client
int32_t tsTableMetaKeepTimer = 7200; // second
int32_t tsMaxSQLStringLen = TSDB_MAX_SQL_LEN;
int8_t tsTscEnableRecordSql = 0;
@ -121,8 +121,8 @@ int32_t tsMinRowsInFileBlock = TSDB_DEFAULT_MIN_ROW_FBLOCK;
int32_t tsMaxRowsInFileBlock = TSDB_DEFAULT_MAX_ROW_FBLOCK;
int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds
int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION;
int16_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
@ -137,7 +137,7 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int8_t tsEnableBalance = 1;
int8_t tsAlternativeRole = 0;
int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineThreshold = 86400 * 100; // seconds 10days
int32_t tsOfflineThreshold = 86400 * 100; // seconds 100 days
int32_t tsMnodeEqualVnodeNum = 4;
int8_t tsEnableFlowCtrl = 1;
int8_t tsEnableSlaveQuery = 1;
@ -212,13 +212,13 @@ int32_t mDebugFlag = 131;
int32_t sdbDebugFlag = 131;
int32_t dDebugFlag = 135;
int32_t vDebugFlag = 135;
int32_t cDebugFlag = 131;
uint32_t cDebugFlag = 131;
int32_t jniDebugFlag = 131;
int32_t odbcDebugFlag = 131;
int32_t httpDebugFlag = 131;
int32_t mqttDebugFlag = 131;
int32_t monDebugFlag = 131;
int32_t qDebugFlag = 131;
uint32_t qDebugFlag = 131;
int32_t rpcDebugFlag = 131;
int32_t uDebugFlag = 131;
int32_t debugFlag = 0;
@ -550,7 +550,7 @@ static void doInitGlobalConfig(void) {
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 3;
cfg.maxValue = 7200000;
cfg.maxValue = 86400 * 365;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_SECOND;
taosInitConfigOption(cfg);
@ -595,16 +595,6 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_SECOND;
taosInitConfigOption(cfg);
cfg.option = "tableMetaKeepTimer";
cfg.ptr = &tsTableMetaKeepTimer;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 1;
cfg.maxValue = 8640000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_SECOND;
taosInitConfigOption(cfg);
cfg.option = "minSlidingTime";
cfg.ptr = &tsMinSlidingTime;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@ -768,7 +758,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "comp";
cfg.ptr = &tsCompression;
cfg.valType = TAOS_CFG_VTYPE_INT16;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_COMP_LEVEL;
cfg.maxValue = TSDB_MAX_COMP_LEVEL;
@ -778,7 +768,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "walLevel";
cfg.ptr = &tsWAL;
cfg.valType = TAOS_CFG_VTYPE_INT16;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_WAL_LEVEL;
cfg.maxValue = TSDB_MAX_WAL_LEVEL;
@ -810,8 +800,8 @@ static void doInitGlobalConfig(void) {
cfg.ptr = &tsQuorum;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_DB_REPLICA_OPTION;
cfg.maxValue = TSDB_MAX_DB_REPLICA_OPTION;
cfg.minValue = TSDB_MIN_DB_QUORUM_OPTION;
cfg.maxValue = TSDB_MAX_DB_QUORUM_OPTION;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);

View File

@ -3,27 +3,12 @@
#include "tname.h"
#include "tstoken.h"
#include "ttokendef.h"
#include "tvariant.h"
// todo refactor
UNUSED_FUNC static FORCE_INLINE const char* skipSegments(const char* input, char delim, int32_t num) {
for (int32_t i = 0; i < num; ++i) {
while (*input != 0 && *input++ != delim) {
};
}
return input;
}
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
#define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS)
UNUSED_FUNC static FORCE_INLINE size_t copy(char* dst, const char* src, char delimiter) {
size_t len = 0;
while (*src != delimiter && *src != 0) {
*dst++ = *src++;
len++;
}
return len;
}
#define VALID_NAME_TYPE(x) ((x) == TSDB_DB_NAME_T || (x) == TSDB_TABLE_NAME_T)
void extractTableName(const char* tableId, char* name) {
size_t s1 = strcspn(tableId, &TS_PATH_DELIMITER[0]);
@ -54,6 +39,14 @@ SSchema tGetTableNameColumnSchema() {
tstrncpy(s.name, TSQL_TBNAME_L, TSDB_COL_NAME_LEN);
return s;
}
SSchema tGetBlockDistColumnSchema() {
SSchema s = {0};
s.bytes = TSDB_MAX_BINARY_LEN;
s.type = TSDB_DATA_TYPE_BINARY;
s.colId = TSDB_BLOCK_DIST_COLUMN_INDEX;
tstrncpy(s.name, TSQL_BLOCK_DIST_L, TSDB_COL_NAME_LEN);
return s;
}
SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const char* name) {
SSchema s = {0};
@ -62,7 +55,7 @@ SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const
if (s.type == TSDB_DATA_TYPE_BINARY || s.type == TSDB_DATA_TYPE_NCHAR) {
s.bytes = (int16_t)(pVal->nLen + VARSTR_HEADER_SIZE);
} else {
s.bytes = tDataTypeDesc[pVal->nType].nSize;
s.bytes = tDataTypes[pVal->nType].bytes;
}
s.colId = TSDB_UD_COLUMN_INDEX;
@ -81,7 +74,7 @@ bool tscValidateTableNameLength(size_t len) {
return len < TSDB_TABLE_NAME_LEN;
}
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters) {
SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters) {
if (numOfFilters == 0) {
assert(src == NULL);
return NULL;
@ -196,7 +189,7 @@ void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) {
}
}
SSchema tscGetTbnameColumnSchema() {
SSchema tGetTbnameColumnSchema() {
struct SSchema s = {
.colId = TSDB_TBNAME_COLUMN_INDEX,
.type = TSDB_DATA_TYPE_BINARY,
@ -206,3 +199,246 @@ SSchema tscGetTbnameColumnSchema() {
strcpy(s.name, TSQL_TBNAME_L);
return s;
}
static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen) {
int32_t rowLen = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
// 1. valid types
if (!isValidDataType(pSchema[i].type)) {
return false;
}
// 2. valid length for each type
if (pSchema[i].type == TSDB_DATA_TYPE_BINARY) {
if (pSchema[i].bytes > TSDB_MAX_BINARY_LEN) {
return false;
}
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
if (pSchema[i].bytes > TSDB_MAX_NCHAR_LEN) {
return false;
}
} else {
if (pSchema[i].bytes != tDataTypes[pSchema[i].type].bytes) {
return false;
}
}
// 3. valid column names
for (int32_t j = i + 1; j < numOfCols; ++j) {
if (strncasecmp(pSchema[i].name, pSchema[j].name, sizeof(pSchema[i].name) - 1) == 0) {
return false;
}
}
rowLen += pSchema[i].bytes;
}
return rowLen <= maxLen;
}
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
if (!VALIDNUMOFCOLS(numOfCols)) {
return false;
}
if (!VALIDNUMOFTAGS(numOfTags)) {
return false;
}
/* first column must be the timestamp, which is a primary key */
if (pSchema[0].type != TSDB_DATA_TYPE_TIMESTAMP) {
return false;
}
if (!doValidateSchema(pSchema, numOfCols, TSDB_MAX_BYTES_PER_ROW)) {
return false;
}
if (!doValidateSchema(&pSchema[numOfCols], numOfTags, TSDB_MAX_TAGS_LEN)) {
return false;
}
return true;
}
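A hedged sketch of a schema that should satisfy the checks above; the SSchema field names follow their usage elsewhere in this diff, and the byte sizes match tDataTypes:
SSchema cols[2] = {{0}};
cols[0].type = TSDB_DATA_TYPE_TIMESTAMP; cols[0].bytes = 8;   // first column must be the timestamp
strcpy(cols[0].name, "ts");
cols[1].type = TSDB_DATA_TYPE_INT;       cols[1].bytes = 4;
strcpy(cols[1].name, "val");
bool ok = tIsValidSchema(cols, 2, 0);                         // expected: true (two columns, no tags)
(void)ok;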
int32_t tNameExtractFullName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
// invalid full name format, abort
if (!tIsValidName(name)) {
return -1;
}
int32_t len = snprintf(dst, TSDB_ACCT_ID_LEN + 1 + TSDB_DB_NAME_LEN, "%s.%s", name->acctId, name->dbname);
size_t tnameLen = strlen(name->tname);
if (tnameLen > 0) {
assert(name->type == TSDB_TABLE_NAME_T);
dst[len] = TS_PATH_DELIMITER[0];
memcpy(dst + len + 1, name->tname, tnameLen);
dst[len + tnameLen + 1] = 0;
}
return 0;
}
int32_t tNameLen(const SName* name) {
assert(name != NULL);
int32_t len = (int32_t) strlen(name->acctId);
int32_t len1 = (int32_t) strlen(name->dbname);
int32_t len2 = (int32_t) strlen(name->tname);
if (name->type == TSDB_DB_NAME_T) {
assert(len2 == 0);
return len + len1 + TS_PATH_DELIMITER_LEN;
} else {
assert(len2 > 0);
return len + len1 + len2 + TS_PATH_DELIMITER_LEN * 2;
}
}
bool tIsValidName(const SName* name) {
assert(name != NULL);
if (!VALID_NAME_TYPE(name->type)) {
return false;
}
if (strlen(name->acctId) <= 0) {
return false;
}
if (name->type == TSDB_DB_NAME_T) {
return strlen(name->dbname) > 0;
} else {
return strlen(name->dbname) > 0 && strlen(name->tname) > 0;
}
}
SName* tNameDup(const SName* name) {
assert(name != NULL);
SName* p = calloc(1, sizeof(SName));
memcpy(p, name, sizeof(SName));
return p;
}
int32_t tNameGetDbName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
strncpy(dst, name->dbname, tListLen(name->dbname));
return 0;
}
int32_t tNameGetFullDbName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
snprintf(dst, TSDB_ACCT_ID_LEN + TS_PATH_DELIMITER_LEN + TSDB_DB_NAME_LEN,
"%s.%s", name->acctId, name->dbname);
return 0;
}
bool tNameIsEmpty(const SName* name) {
assert(name != NULL);
return name->type == 0 || strlen(name->acctId) <= 0;
}
const char* tNameGetTableName(const SName* name) {
assert(name != NULL && name->type == TSDB_TABLE_NAME_T);
return &name->tname[0];
}
void tNameAssign(SName* dst, const SName* src) {
memcpy(dst, src, sizeof(SName));
}
int32_t tNameSetDbName(SName* dst, const char* acct, SStrToken* dbToken) {
assert(dst != NULL && dbToken != NULL && acct != NULL);
// too long account id or too long db name
if (strlen(acct) >= tListLen(dst->acctId) || dbToken->n >= tListLen(dst->dbname)) {
return -1;
}
dst->type = TSDB_DB_NAME_T;
tstrncpy(dst->acctId, acct, tListLen(dst->acctId));
tstrncpy(dst->dbname, dbToken->z, dbToken->n + 1);
return 0;
}
int32_t tNameSetAcctId(SName* dst, const char* acct) {
assert(dst != NULL && acct != NULL);
// too long account id
if (strlen(acct) >= tListLen(dst->acctId)) {
return -1;
}
tstrncpy(dst->acctId, acct, tListLen(dst->acctId));
assert(strlen(dst->acctId) > 0);
return 0;
}
int32_t tNameFromString(SName* dst, const char* str, uint32_t type) {
assert(dst != NULL && str != NULL && strlen(str) > 0);
char* p = NULL;
if ((type & T_NAME_ACCT) == T_NAME_ACCT) {
p = strstr(str, TS_PATH_DELIMITER);
if (p == NULL) {
return -1;
}
int32_t len = (int32_t)(p - str);
// too long or empty account id
if ((len >= tListLen(dst->acctId)) || (len <= 0)) {
return -1;
}
memcpy (dst->acctId, str, len);
dst->acctId[len] = 0;
assert(strlen(dst->acctId) > 0);
}
if ((type & T_NAME_DB) == T_NAME_DB) {
dst->type = TSDB_DB_NAME_T;
char* start = (char*)((p == NULL)? str:(p+1));
int32_t len = 0;
p = strstr(start, TS_PATH_DELIMITER);
if (p == NULL) {
len = (int32_t) strlen(start);
} else {
len = (int32_t) (p - start);
}
// too long or empty db name
if ((len >= tListLen(dst->dbname)) || (len <= 0)) {
return -1;
}
memcpy (dst->dbname, start, len);
dst->dbname[len] = 0;
}
if ((type & T_NAME_TABLE) == T_NAME_TABLE) {
dst->type = TSDB_TABLE_NAME_T;
char* start = (char*) ((p == NULL)? str: (p+1));
int32_t len = (int32_t) strlen(start);
// too long or empty table name
if ((len >= tListLen(dst->tname)) || (len <= 0)) {
return -1;
}
memcpy (dst->tname, start, len);
dst->tname[len] = 0;
}
return 0;
}

View File

@ -14,11 +14,11 @@
*/
#include "os.h"
#include "taosdef.h"
#include "ttype.h"
#include "ttokendef.h"
#include "tscompression.h"
const int32_t TYPE_BYTES[11] = {
const int32_t TYPE_BYTES[15] = {
-1, // TSDB_DATA_TYPE_NULL
sizeof(int8_t), // TSDB_DATA_TYPE_BOOL
sizeof(int8_t), // TSDB_DATA_TYPE_TINYINT
@ -29,10 +29,28 @@ const int32_t TYPE_BYTES[11] = {
sizeof(double), // TSDB_DATA_TYPE_DOUBLE
sizeof(VarDataOffsetT), // TSDB_DATA_TYPE_BINARY
sizeof(TSKEY), // TSDB_DATA_TYPE_TIMESTAMP
sizeof(VarDataOffsetT) // TSDB_DATA_TYPE_NCHAR
sizeof(VarDataOffsetT), // TSDB_DATA_TYPE_NCHAR
sizeof(uint8_t), // TSDB_DATA_TYPE_UTINYINT
sizeof(uint16_t), // TSDB_DATA_TYPE_USMALLINT
sizeof(uint32_t), // TSDB_DATA_TYPE_UINT
sizeof(uint64_t), // TSDB_DATA_TYPE_UBIGINT
};
static void getStatics_bool(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
#define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \
do { \
(__sum) += (_list)[(_index)]; \
if ((__min) > (_list)[(_index)]) { \
(__min) = (_list)[(_index)]; \
(__minIndex) = (_index); \
} \
\
if ((__max) < (_list)[(_index)]) { \
(__max) = (_list)[(_index)]; \
(__maxIndex) = (_index); \
} \
} while (0)
static void getStatics_bool(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
int8_t *data = (int8_t *)pData;
*min = INT64_MAX;
@ -43,26 +61,17 @@ static void getStatics_bool(const TSKEY *primaryKey, const void *pData, int32_t
ASSERT(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull((char *)&data[i], TSDB_DATA_TYPE_BOOL)) {
if (data[i] == TSDB_DATA_BOOL_NULL) {
(*numOfNull) += 1;
continue;
}
*sum += data[i];
if (*min > data[i]) {
*min = data[i];
*minIndex = i;
}
if (*max < data[i]) {
*max = data[i];
*maxIndex = i;
}
DO_STATICS(*sum, *min, *max, *minIndex, *maxIndex, data, i);
}
}
static void getStatics_i8(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
static void getStatics_i8(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, int64_t *sum,
int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
int8_t *data = (int8_t *)pData;
*min = INT64_MAX;
*max = INT64_MIN;
@ -72,26 +81,43 @@ static void getStatics_i8(const TSKEY *primaryKey, const void *pData, int32_t nu
ASSERT(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull((char *)&data[i], TSDB_DATA_TYPE_TINYINT)) {
if (((uint8_t)data[i]) == TSDB_DATA_TINYINT_NULL) {
(*numOfNull) += 1;
continue;
}
*sum += data[i];
if (*min > data[i]) {
*min = data[i];
*minIndex = i;
}
if (*max < data[i]) {
*max = data[i];
*maxIndex = i;
}
DO_STATICS(*sum, *min, *max, *minIndex, *maxIndex, data, i);
}
}
static void getStatics_i16(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
static void getStatics_u8(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, int64_t *sum,
int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
uint8_t *data = (uint8_t *)pData;
uint64_t _min = UINT64_MAX;
uint64_t _max = 0;
uint64_t _sum = 0;
*minIndex = 0;
*maxIndex = 0;
ASSERT(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (((uint8_t)data[i]) == TSDB_DATA_UTINYINT_NULL) {
(*numOfNull) += 1;
continue;
}
DO_STATICS(_sum, _min, _max, *minIndex, *maxIndex, data, i);
}
*min = _min;
*max = _max;
*sum = _sum;
}
static void getStatics_i16(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, int64_t *sum,
int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
int16_t *data = (int16_t *)pData;
*min = INT64_MAX;
*max = INT64_MIN;
@ -100,39 +126,45 @@ static void getStatics_i16(const TSKEY *primaryKey, const void *pData, int32_t n
ASSERT(numOfRow <= INT16_MAX);
// int64_t lastKey = 0;
// int16_t lastVal = TSDB_DATA_SMALLINT_NULL;
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull((const char*) &data[i], TSDB_DATA_TYPE_SMALLINT)) {
if (((uint16_t)data[i]) == TSDB_DATA_SMALLINT_NULL) {
(*numOfNull) += 1;
continue;
}
*sum += data[i];
if (*min > data[i]) {
*min = data[i];
*minIndex = i;
}
if (*max < data[i]) {
*max = data[i];
*maxIndex = i;
}
// if (isNull(&lastVal, TSDB_DATA_TYPE_SMALLINT)) {
// lastKey = primaryKey[i];
// lastVal = data[i];
// } else {
// *wsum = lastVal * (primaryKey[i] - lastKey);
// lastKey = primaryKey[i];
// lastVal = data[i];
// }
DO_STATICS(*sum, *min, *max, *minIndex, *maxIndex, data, i);
}
}
static void getStatics_i32(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
static void getStatics_u16(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, int64_t *sum,
int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
uint16_t *data = (uint16_t *)pData;
uint64_t _min = UINT64_MAX;
uint64_t _max = 0;
uint64_t _sum = 0;
*minIndex = 0;
*maxIndex = 0;
ASSERT(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (((uint16_t)data[i]) == TSDB_DATA_USMALLINT_NULL) {
(*numOfNull) += 1;
continue;
}
DO_STATICS(_sum, _min, _max, *minIndex, *maxIndex, data, i);
}
*min = _min;
*max = _max;
*sum = _sum;
}
static void getStatics_i32(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, int64_t *sum,
int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
int32_t *data = (int32_t *)pData;
*min = INT64_MAX;
*max = INT64_MIN;
@ -141,29 +173,43 @@ static void getStatics_i32(const TSKEY *primaryKey, const void *pData, int32_t n
ASSERT(numOfRow <= INT16_MAX);
// int64_t lastKey = 0;
// int32_t lastVal = TSDB_DATA_INT_NULL;
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull((const char*) &data[i], TSDB_DATA_TYPE_INT)) {
if (((uint32_t)data[i]) == TSDB_DATA_INT_NULL) {
(*numOfNull) += 1;
continue;
}
*sum += data[i];
if (*min > data[i]) {
*min = data[i];
*minIndex = i;
}
if (*max < data[i]) {
*max = data[i];
*maxIndex = i;
}
DO_STATICS(*sum, *min, *max, *minIndex, *maxIndex, data, i);
}
}
static void getStatics_i64(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
static void getStatics_u32(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
uint32_t *data = (uint32_t *)pData;
uint64_t _min = UINT64_MAX;
uint64_t _max = 0;
uint64_t _sum = 0;
*minIndex = 0;
*maxIndex = 0;
ASSERT(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (((uint32_t)data[i]) == TSDB_DATA_UINT_NULL) {
(*numOfNull) += 1;
continue;
}
DO_STATICS(_sum, _min, _max, *minIndex, *maxIndex, data, i);
}
*min = _min;
*max = _max;
*sum = _sum;
}
static void getStatics_i64(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
int64_t *data = (int64_t *)pData;
*min = INT64_MAX;
@ -174,52 +220,60 @@ static void getStatics_i64(const TSKEY *primaryKey, const void *pData, int32_t n
ASSERT(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull((const char*) &data[i], TSDB_DATA_TYPE_BIGINT)) {
if (((uint64_t)data[i]) == TSDB_DATA_BIGINT_NULL) {
(*numOfNull) += 1;
continue;
}
*sum += data[i];
if (*min > data[i]) {
*min = data[i];
*minIndex = i;
}
if (*max < data[i]) {
*max = data[i];
*maxIndex = i;
}
// if (isNull(&lastVal, TSDB_DATA_TYPE_BIGINT)) {
// lastKey = primaryKey[i];
// lastVal = data[i];
// } else {
// *wsum = lastVal * (primaryKey[i] - lastKey);
// lastKey = primaryKey[i];
// lastVal = data[i];
// }
DO_STATICS(*sum, *min, *max, *minIndex, *maxIndex, data, i);
}
}
static void getStatics_f(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
static void getStatics_u64(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
uint64_t *data = (uint64_t *)pData;
uint64_t _min = UINT64_MAX;
uint64_t _max = 0;
uint64_t _sum = 0;
*minIndex = 0;
*maxIndex = 0;
ASSERT(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (((uint64_t)data[i]) == TSDB_DATA_UBIGINT_NULL) {
(*numOfNull) += 1;
continue;
}
DO_STATICS(_sum, _min, _max, *minIndex, *maxIndex, data, i);
}
*min = _min;
*max = _max;
*sum = _sum;
}
static void getStatics_f(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
float *data = (float *)pData;
float fmin = FLT_MAX;
float fmax = -FLT_MAX;
double dsum = 0;
*minIndex = 0;
*maxIndex = 0;
float *data = (float *)pData;
float fmin = FLT_MAX;
float fmax = -FLT_MAX;
double dsum = 0;
*minIndex = 0;
*maxIndex = 0;
ASSERT(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull((const char*) &data[i], TSDB_DATA_TYPE_FLOAT)) {
if ((*(uint32_t*)&(data[i])) == TSDB_DATA_FLOAT_NULL) {
(*numOfNull) += 1;
continue;
}
float fv = 0;
fv = GET_FLOAT_VAL((const char*)&(data[i]));
float fv = GET_FLOAT_VAL((const char*)&(data[i]));
dsum += fv;
if (fmin > fv) {
fmin = fv;
@ -232,28 +286,24 @@ static void getStatics_f(const TSKEY *primaryKey, const void *pData, int32_t num
}
}
double csum = 0;
csum = GET_DOUBLE_VAL((const char *)sum);
csum += dsum;
SET_DOUBLE_VAL(sum, csum);
SET_DOUBLE_VAL(sum, dsum);
SET_DOUBLE_VAL(max, fmax);
SET_DOUBLE_VAL(min, fmin);
}
static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
static void getStatics_d(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
double *data = (double *)pData;
double dmin = DBL_MAX;
double dmax = -DBL_MAX;
double dsum = 0;
*minIndex = 0;
*maxIndex = 0;
double dmin = DBL_MAX;
double dmax = -DBL_MAX;
double dsum = 0;
*minIndex = 0;
*maxIndex = 0;
ASSERT(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull((const char*) &data[i], TSDB_DATA_TYPE_DOUBLE)) {
if ((*(uint64_t*)&(data[i])) == TSDB_DATA_DOUBLE_NULL) {
(*numOfNull) += 1;
continue;
}
@ -272,16 +322,12 @@ static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t num
}
}
double csum = 0;
csum = GET_DOUBLE_VAL((const char *)sum);
csum += dsum;
SET_DOUBLE_PTR(sum, &csum);
SET_DOUBLE_PTR(sum, &dsum);
SET_DOUBLE_PTR(max, &dmax);
SET_DOUBLE_PTR(min, &dmin);
}
static void getStatics_bin(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
static void getStatics_bin(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
const char* data = pData;
ASSERT(numOfRow <= INT16_MAX);
@ -301,7 +347,7 @@ static void getStatics_bin(const TSKEY *primaryKey, const void *pData, int32_t n
*maxIndex = 0;
}
static void getStatics_nchr(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
static void getStatics_nchr(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
const char* data = pData;
ASSERT(numOfRow <= INT16_MAX);
@ -321,18 +367,22 @@ static void getStatics_nchr(const TSKEY *primaryKey, const void *pData, int32_t
*maxIndex = 0;
}
tDataTypeDescriptor tDataTypeDesc[11] = {
{TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", NULL, NULL, NULL},
{TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", tsCompressBool, tsDecompressBool, getStatics_bool},
{TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", tsCompressTinyint, tsDecompressTinyint, getStatics_i8},
{TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT", tsCompressSmallint, tsDecompressSmallint, getStatics_i16},
{TSDB_DATA_TYPE_INT, 3, INT_BYTES, "INT", tsCompressInt, tsDecompressInt, getStatics_i32},
{TSDB_DATA_TYPE_BIGINT, 6, LONG_BYTES, "BIGINT", tsCompressBigint, tsDecompressBigint, getStatics_i64},
{TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", tsCompressFloat, tsDecompressFloat, getStatics_f},
{TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", tsCompressDouble, tsDecompressDouble, getStatics_d},
{TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", tsCompressString, tsDecompressString, getStatics_bin},
{TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", tsCompressTimestamp, tsDecompressTimestamp, getStatics_i64},
{TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", tsCompressString, tsDecompressString, getStatics_nchr},
tDataTypeDescriptor tDataTypes[15] = {
{TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", NULL, NULL, NULL},
{TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", tsCompressBool, tsDecompressBool, getStatics_bool},
{TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", tsCompressTinyint, tsDecompressTinyint, getStatics_i8},
{TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT", tsCompressSmallint, tsDecompressSmallint, getStatics_i16},
{TSDB_DATA_TYPE_INT, 3, INT_BYTES, "INT", tsCompressInt, tsDecompressInt, getStatics_i32},
{TSDB_DATA_TYPE_BIGINT, 6, LONG_BYTES, "BIGINT", tsCompressBigint, tsDecompressBigint, getStatics_i64},
{TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", tsCompressFloat, tsDecompressFloat, getStatics_f},
{TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", tsCompressDouble, tsDecompressDouble, getStatics_d},
{TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", tsCompressString, tsDecompressString, getStatics_bin},
{TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", tsCompressTimestamp, tsDecompressTimestamp, getStatics_i64},
{TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", tsCompressString, tsDecompressString, getStatics_nchr},
{TSDB_DATA_TYPE_UTINYINT, 16, CHAR_BYTES, "TINYINT UNSIGNED", tsCompressTinyint, tsDecompressTinyint, getStatics_u8},
{TSDB_DATA_TYPE_USMALLINT, 17, SHORT_BYTES, "SMALLINT UNSIGNED", tsCompressSmallint, tsDecompressSmallint, getStatics_u16},
{TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", tsCompressInt, tsDecompressInt, getStatics_u32},
{TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", tsCompressBigint, tsDecompressBigint, getStatics_u64},
};
char tTokenTypeSwitcher[13] = {
@ -352,7 +402,7 @@ char tTokenTypeSwitcher[13] = {
};
bool isValidDataType(int32_t type) {
return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_NCHAR;
return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_UBIGINT;
}
void setVardataNull(char* val, int32_t type) {
@ -373,38 +423,58 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint8_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_BOOL_NULL;
*(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BOOL_NULL;
}
break;
case TSDB_DATA_TYPE_TINYINT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint8_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_TINYINT_NULL;
*(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_TINYINT_NULL;
}
break;
case TSDB_DATA_TYPE_SMALLINT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint16_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_SMALLINT_NULL;
*(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_SMALLINT_NULL;
}
break;
case TSDB_DATA_TYPE_INT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_INT_NULL;
*(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_INT_NULL;
}
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_BIGINT_NULL;
*(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BIGINT_NULL;
}
break;
case TSDB_DATA_TYPE_UTINYINT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UTINYINT_NULL;
}
break;
case TSDB_DATA_TYPE_USMALLINT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_USMALLINT_NULL;
}
break;
case TSDB_DATA_TYPE_UINT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UINT_NULL;
}
break;
case TSDB_DATA_TYPE_UBIGINT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UBIGINT_NULL;
}
break;
case TSDB_DATA_TYPE_FLOAT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_FLOAT_NULL;
*(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_FLOAT_NULL;
}
break;
case TSDB_DATA_TYPE_DOUBLE:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_DOUBLE_NULL;
*(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_DOUBLE_NULL;
}
break;
case TSDB_DATA_TYPE_NCHAR:
@ -415,20 +485,24 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
break;
default: {
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize) = TSDB_DATA_INT_NULL;
*(uint32_t *)(val + i * tDataTypes[TSDB_DATA_TYPE_INT].bytes) = TSDB_DATA_INT_NULL;
}
break;
}
}
}
static uint8_t nullBool = TSDB_DATA_BOOL_NULL;
static uint8_t nullTinyInt = TSDB_DATA_TINYINT_NULL;
static uint16_t nullSmallInt = TSDB_DATA_SMALLINT_NULL;
static uint32_t nullInt = TSDB_DATA_INT_NULL;
static uint64_t nullBigInt = TSDB_DATA_BIGINT_NULL;
static uint32_t nullFloat = TSDB_DATA_FLOAT_NULL;
static uint64_t nullDouble = TSDB_DATA_DOUBLE_NULL;
static uint8_t nullBool = TSDB_DATA_BOOL_NULL;
static uint8_t nullTinyInt = TSDB_DATA_TINYINT_NULL;
static uint16_t nullSmallInt = TSDB_DATA_SMALLINT_NULL;
static uint32_t nullInt = TSDB_DATA_INT_NULL;
static uint64_t nullBigInt = TSDB_DATA_BIGINT_NULL;
static uint32_t nullFloat = TSDB_DATA_FLOAT_NULL;
static uint64_t nullDouble = TSDB_DATA_DOUBLE_NULL;
static uint8_t nullTinyIntu = TSDB_DATA_UTINYINT_NULL;
static uint16_t nullSmallIntu = TSDB_DATA_USMALLINT_NULL;
static uint32_t nullIntu = TSDB_DATA_UINT_NULL;
static uint64_t nullBigIntu = TSDB_DATA_UBIGINT_NULL;
static union {
tstr str;
@ -436,38 +510,43 @@ static union {
} nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}};
static void *nullValues[] = {
&nullBool, &nullTinyInt, &nullSmallInt, &nullInt, &nullBigInt,
&nullFloat, &nullDouble, &nullBinary, &nullBigInt, &nullNchar,
&nullBool, &nullTinyInt, &nullSmallInt, &nullInt, &nullBigInt,
&nullFloat, &nullDouble, &nullBinary, &nullBigInt, &nullNchar,
&nullTinyIntu, &nullSmallIntu, &nullIntu, &nullBigIntu,
};
void *getNullValue(int32_t type) {
assert(type >= TSDB_DATA_TYPE_BOOL && type <= TSDB_DATA_TYPE_NCHAR);
assert(type >= TSDB_DATA_TYPE_BOOL && type <= TSDB_DATA_TYPE_UBIGINT);
return nullValues[type - 1];
}
void assignVal(char *val, const char *src, int32_t len, int32_t type) {
switch (type) {
case TSDB_DATA_TYPE_INT: {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_UTINYINT:
*((int8_t *)val) = GET_INT8_VAL(src);
break;
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT:
*((int16_t *)val) = GET_INT16_VAL(src);
break;
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_UINT:
*((int32_t *)val) = GET_INT32_VAL(src);
break;
}
case TSDB_DATA_TYPE_FLOAT:
SET_FLOAT_VAL(val, GET_FLOAT_VAL(src));
break;
case TSDB_DATA_TYPE_DOUBLE:
SET_DOUBLE_VAL(val, GET_DOUBLE_VAL(src));
break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_TIMESTAMP:
*((int64_t *)val) = GET_INT64_VAL(src);
break;
case TSDB_DATA_TYPE_SMALLINT:
*((int16_t *)val) = GET_INT16_VAL(src);
break;
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
*((int8_t *)val) = GET_INT8_VAL(src);
break;
case TSDB_DATA_TYPE_BINARY:
varDataCopy(val, src);
break;
@ -483,12 +562,14 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf) {
switch (type) {
case TSDB_DATA_TYPE_INT: {
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_UINT: {
SWAP(*(int32_t *)(pLeft), *(int32_t *)(pRight), int32_t);
break;
}
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_TIMESTAMP: {
SWAP(*(int64_t *)(pLeft), *(int64_t *)(pRight), int64_t);
break;
@ -497,7 +578,8 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf
SWAP(*(double *)(pLeft), *(double *)(pRight), double);
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT: {
SWAP(*(int16_t *)(pLeft), *(int16_t *)(pRight), int16_t);
break;
}
@ -508,7 +590,8 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf
}
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_UTINYINT: {
SWAP(*(int8_t *)(pLeft), *(int8_t *)(pRight), int8_t);
break;
}
@ -521,3 +604,41 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf
}
}
}
int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bool issigned) {
errno = 0;
int32_t ret = 0;
char* endPtr = NULL;
if (type == TK_FLOAT) {
double v = strtod(z, &endPtr);
if ((errno == ERANGE && v == HUGE_VALF) || isinf(v) || isnan(v)) {
ret = -1;
} else if ((issigned && (v < INT64_MIN || v > INT64_MAX)) || ((!issigned) && (v < 0 || v > UINT64_MAX))) {
ret = -1;
} else {
*value = (int64_t) round(v);
}
errno = 0;
return ret;
}
int32_t radix = 10;
if (type == TK_HEX) {
radix = 16;
} else if (type == TK_BIN) {
radix = 2;
}
// the string may be overflow according to errno
*value = issigned? strtoll(z, &endPtr, radix):strtoul(z, &endPtr, radix);
// not a valid integer number, return error
if (endPtr - z != n || errno == ERANGE) {
ret = -1;
}
errno = 0;
return ret;
}
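A brief sketch of the expected behaviour, assuming TK_HEX, TK_FLOAT and TK_INTEGER are the tokenizer codes referenced above:
int64_t v = 0;
tStrToInteger("0x1a", TK_HEX, 4, &v, true);      // radix 16: v == 26, returns 0
tStrToInteger("3.7", TK_FLOAT, 3, &v, true);     // parsed as double, rounded: v == 4, returns 0
tStrToInteger("99x", TK_INTEGER, 3, &v, true);   // trailing character: returns -1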

View File

@ -21,36 +21,48 @@
#include "tstoken.h"
#include "ttokendef.h"
#include "tutil.h"
#include "ttype.h"
// todo: support scientific notation and octal numbers
void tVariantCreate(tVariant *pVar, SStrToken *token) { tVariantCreateFromString(pVar, token->z, token->n, token->type); }
void tVariantCreate(tVariant *pVar, SStrToken *token) {
int32_t ret = 0;
int32_t type = token->type;
void tVariantCreateFromString(tVariant *pVar, char *pz, uint32_t len, uint32_t type) {
memset(pVar, 0, sizeof(tVariant));
switch (type) {
switch (token->type) {
case TSDB_DATA_TYPE_BOOL: {
int32_t k = strncasecmp(pz, "true", 4);
int32_t k = strncasecmp(token->z, "true", 4);
if (k == 0) {
pVar->i64Key = TSDB_TRUE;
pVar->i64 = TSDB_TRUE;
} else {
assert(strncasecmp(pz, "false", 5) == 0);
pVar->i64Key = TSDB_FALSE;
assert(strncasecmp(token->z, "false", 5) == 0);
pVar->i64 = TSDB_FALSE;
}
break;
}
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_INT:
pVar->i64Key = strtoll(pz, NULL, 10);
case TSDB_DATA_TYPE_INT:{
ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, true);
if (ret != 0) {
pVar->nType = -1; // -1 means error type
return;
}
break;
}
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_FLOAT:
pVar->dKey = strtod(pz, NULL);
case TSDB_DATA_TYPE_FLOAT: {
pVar->dKey = strtod(token->z, NULL);
break;
}
case TSDB_DATA_TYPE_BINARY: {
pVar->pz = strndup(pz, len);
pVar->pz = strndup(token->z, token->n);
pVar->nLen = strdequote(pVar->pz);
break;
}
@ -74,20 +86,36 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32
switch (type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
pVar->i64Key = GET_INT8_VAL(pz);
pVar->i64 = GET_INT8_VAL(pz);
break;
}
case TSDB_DATA_TYPE_UTINYINT: {
pVar->u64 = GET_UINT8_VAL(pz);
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
pVar->i64Key = GET_INT16_VAL(pz);
pVar->i64 = GET_INT16_VAL(pz);
break;
}
case TSDB_DATA_TYPE_USMALLINT: {
pVar->u64 = GET_UINT16_VAL(pz);
break;
}
case TSDB_DATA_TYPE_INT: {
pVar->i64Key = GET_INT32_VAL(pz);
pVar->i64 = GET_INT32_VAL(pz);
break;
}
case TSDB_DATA_TYPE_UINT: {
pVar->u64 = GET_UINT32_VAL(pz);
break;
}
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP: {
pVar->i64Key = GET_INT64_VAL(pz);
pVar->i64 = GET_INT64_VAL(pz);
break;
}
case TSDB_DATA_TYPE_UBIGINT: {
pVar->u64 = GET_UINT64_VAL(pz);
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
@ -115,7 +143,7 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32
}
default:
pVar->i64Key = GET_INT32_VAL(pVar);
pVar->i64 = GET_INT32_VAL(pz);
}
pVar->nType = type;
@ -141,6 +169,11 @@ void tVariantDestroy(tVariant *pVar) {
}
}
bool tVariantIsValid(tVariant *pVar) {
assert(pVar != NULL);
return isValidDataType(pVar->nType);
}
void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
if (pSrc == NULL || pDst == NULL) return;
@ -159,8 +192,8 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
}
if (pSrc->nType >= TSDB_DATA_TYPE_BOOL && pSrc->nType <= TSDB_DATA_TYPE_DOUBLE) {
pDst->i64Key = pSrc->i64Key;
if (IS_NUMERIC_TYPE(pSrc->nType) || (pSrc->nType == TSDB_DATA_TYPE_BOOL)) {
pDst->i64 = pSrc->i64;
} else if (pSrc->nType == TSDB_DATA_TYPE_ARRAY) { // this is only for string array
size_t num = taosArrayGetSize(pSrc->arr);
pDst->arr = taosArrayInit(num, sizeof(char*));
@ -172,7 +205,7 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
}
if (pDst->nType != TSDB_DATA_TYPE_ARRAY) {
pDst->nLen = tDataTypeDesc[pDst->nType].nSize;
pDst->nLen = tDataTypes[pDst->nType].bytes;
}
}
@ -189,30 +222,30 @@ int32_t tVariantCompare(const tVariant* p1, const tVariant* p2) {
return 1;
}
switch (p1->nType) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
if (p1->nLen == p2->nLen) {
return memcmp(p1->pz, p2->pz, p1->nLen);
} else {
return p1->nLen > p2->nLen? 1:-1;
}
};
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_DOUBLE:
if (p1->dKey == p2->dKey) {
return 0;
} else {
return p1->dKey > p2->dKey? 1:-1;
}
default:
if (p1->i64Key == p2->i64Key) {
return 0;
} else {
return p1->i64Key > p2->i64Key? 1:-1;
}
if (p1->nType == TSDB_DATA_TYPE_BINARY || p1->nType == TSDB_DATA_TYPE_NCHAR) {
if (p1->nLen == p2->nLen) {
return memcmp(p1->pz, p2->pz, p1->nLen);
} else {
return p1->nLen > p2->nLen? 1:-1;
}
} else if (p1->nType == TSDB_DATA_TYPE_FLOAT || p1->nType == TSDB_DATA_TYPE_DOUBLE) {
if (p1->dKey == p2->dKey) {
return 0;
} else {
return p1->dKey > p2->dKey? 1:-1;
}
} else if (IS_UNSIGNED_NUMERIC_TYPE(p1->nType)) {
if (p1->u64 == p2->u64) {
return 0;
} else {
return p1->u64 > p2->u64? 1:-1;
}
} else {
if (p1->i64 == p2->i64) {
return 0;
} else {
return p1->i64 > p2->i64? 1:-1;
}
}
}
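A small sketch of the new unsigned branch, assuming two non-null variants of the same type fall through the earlier null checks that are not visible in this hunk:
tVariant a = {0}, b = {0};
a.nType = TSDB_DATA_TYPE_UBIGINT; a.u64 = 10000000000000000000ULL;  // larger than INT64_MAX
b.nType = TSDB_DATA_TYPE_UBIGINT; b.u64 = 1;
int32_t r = tVariantCompare(&a, &b);   // expected 1: compared via u64, not as a negative i64
(void)r;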
@ -239,11 +272,15 @@ int32_t tVariantToString(tVariant *pVar, char *dst) {
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_INT:
return sprintf(dst, "%d", (int32_t)pVar->i64Key);
case TSDB_DATA_TYPE_UTINYINT:
case TSDB_DATA_TYPE_USMALLINT:
case TSDB_DATA_TYPE_UINT:
return sprintf(dst, "%d", (int32_t)pVar->i64);
case TSDB_DATA_TYPE_BIGINT:
return sprintf(dst, "%" PRId64, pVar->i64Key);
return sprintf(dst, "%" PRId64, pVar->i64);
case TSDB_DATA_TYPE_UBIGINT:
return sprintf(dst, "%" PRIu64, pVar->u64);
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_DOUBLE:
return sprintf(dst, "%.9lf", pVar->dKey);
@ -253,121 +290,6 @@ int32_t tVariantToString(tVariant *pVar, char *dst) {
}
}
#if 0
static int32_t doConvertToInteger(tVariant *pVariant, char *pDest, int32_t type, bool releaseVariantPtr) {
if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
setNull(pDest, type, tDataTypeDesc[type].nSize);
return 0;
}
if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
*((int64_t *)pDest) = pVariant->i64Key;
} else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) {
if ((pVariant->dKey < INT64_MIN) || (pVariant->dKey > INT64_MAX)) {
return -1;
}
*((int64_t *)pDest) = (int64_t)pVariant->dKey;
} else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
errno = 0;
char *endPtr = NULL;
SStrToken token = {0};
token.n = tSQLGetToken(pVariant->pz, &token.type);
if (token.type == TK_MINUS || token.type == TK_PLUS) {
token.n = tSQLGetToken(pVariant->pz + token.n, &token.type);
}
if (token.type == TK_FLOAT) {
double v = strtod(pVariant->pz, &endPtr);
if (releaseVariantPtr) {
free(pVariant->pz);
pVariant->nLen = 0;
}
if ((errno == ERANGE && v == -1) || (isinf(v) || isnan(v))) {
return -1;
}
if ((v < INT64_MIN) || (v > INT64_MAX)) {
return -1;
}
*((int64_t *)pDest) = (int64_t)v;
} else if (token.type == TK_INTEGER) {
int64_t val = strtoll(pVariant->pz, &endPtr, 10);
if (releaseVariantPtr) {
free(pVariant->pz);
pVariant->nLen = 0;
}
if (errno == ERANGE) {
return -1; // data overflow
}
*((int64_t *)pDest) = val;
} else if (token.type == TK_NULL) {
if (releaseVariantPtr) {
free(pVariant->pz);
pVariant->nLen = 0;
}
setNull(pDest, type, tDataTypeDesc[type].nSize);
} else {
return -1;
}
} else if (pVariant->nType == TSDB_DATA_TYPE_NCHAR) {
errno = 0;
wchar_t *endPtr = NULL;
SStrToken token = {0};
token.n = tSQLGetToken(pVariant->pz, &token.type);
if (token.type == TK_MINUS || token.type == TK_PLUS) {
token.n = tSQLGetToken(pVariant->pz + token.n, &token.type);
}
if (token.type == TK_FLOAT) {
double v = wcstod(pVariant->wpz, &endPtr);
if (releaseVariantPtr) {
free(pVariant->pz);
pVariant->nLen = 0;
}
if ((errno == ERANGE && v == -1) || (isinf(v) || isnan(v))) {
return -1;
}
if ((v < INT64_MIN) || (v > INT64_MAX)) {
return -1;
}
*((int64_t *)pDest) = (int64_t)v;
} else if (token.type == TK_NULL) {
if (releaseVariantPtr) {
free(pVariant->pz);
pVariant->nLen = 0;
}
setNull(pDest, type, tDataTypeDesc[type].nSize);
} else {
int64_t val = wcstoll(pVariant->wpz, &endPtr, 10);
if (releaseVariantPtr) {
free(pVariant->pz);
pVariant->nLen = 0;
}
if (errno == ERANGE) {
return -1; // data overflow
}
*((int64_t *)pDest) = val;
}
}
return 0;
}
#endif
static FORCE_INLINE int32_t convertToBoolImpl(char *pStr, int32_t len) {
if ((strncasecmp(pStr, "true", len) == 0) && (len == 4)) {
return TSDB_TRUE;
@ -385,15 +307,18 @@ static FORCE_INLINE int32_t wcsconvertToBoolImpl(wchar_t *pstr, int32_t len) {
return TSDB_TRUE;
} else if (wcsncasecmp(pstr, L"false", len) == 0 && (len == 5)) {
return TSDB_FALSE;
} else if (memcmp(pstr, L"null", wcslen(L"null")) == 0) {
return TSDB_DATA_BOOL_NULL;
} else {
return -1;
}
}
static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
const int32_t INITIAL_ALLOC_SIZE = 20;
const int32_t INITIAL_ALLOC_SIZE = 40;
char * pBuf = NULL;
// this is an in-place conversion for the tVariant, so a local buffer is needed
if (*pDest == pVariant->pz) {
pBuf = calloc(1, INITIAL_ALLOC_SIZE);
}
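
The scratch buffer for the in-place binary conversion grows from 20 to 40 bytes. A quick standalone check, independent of the patch, showing that 20 bytes cannot hold the longest 64-bit integer plus its terminator (and that doubles printed with %lf can be longer still):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    char buf[64];
    /* INT64_MIN renders as 20 characters, so with the terminating NUL it
     * no longer fits a 20-byte scratch buffer. */
    int n = snprintf(buf, sizeof(buf), "%" PRId64, INT64_MIN);
    printf("\"%s\" -> %d chars + NUL\n", buf, n);   /* 20 chars + NUL */
    return 0;
}
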
@ -413,12 +338,12 @@ static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
}
} else {
if (pVariant->nType >= TSDB_DATA_TYPE_TINYINT && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
sprintf(pBuf == NULL ? *pDest : pBuf, "%" PRId64, pVariant->i64Key);
if (IS_SIGNED_NUMERIC_TYPE(pVariant->nType)) {
sprintf(pBuf == NULL ? *pDest : pBuf, "%" PRId64, pVariant->i64);
} else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) {
sprintf(pBuf == NULL ? *pDest : pBuf, "%lf", pVariant->dKey);
} else if (pVariant->nType == TSDB_DATA_TYPE_BOOL) {
sprintf(pBuf == NULL ? *pDest : pBuf, "%s", (pVariant->i64Key == TSDB_TRUE) ? "TRUE" : "FALSE");
sprintf(pBuf == NULL ? *pDest : pBuf, "%s", (pVariant->i64 == TSDB_TRUE) ? "TRUE" : "FALSE");
} else if (pVariant->nType == 0) { // null data
setNull(pBuf == NULL ? *pDest : pBuf, TSDB_DATA_TYPE_BINARY, 0);
}
@ -432,28 +357,33 @@ static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
return 0;
}
// todo handle the error
static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
char tmpBuf[40] = {0};
char * pDst = tmpBuf;
int32_t nLen = 0;
if (pVariant->nType >= TSDB_DATA_TYPE_TINYINT && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
nLen = sprintf(pDst, "%" PRId64, pVariant->i64Key);
// convert the number to a string first, then convert it to a wide-character (nchar) string.
if (IS_SIGNED_NUMERIC_TYPE(pVariant->nType)) {
nLen = sprintf(pDst, "%" PRId64, pVariant->i64);
} else if (IS_UNSIGNED_NUMERIC_TYPE(pVariant->nType)) {
nLen = sprintf(pDst, "%"PRIu64, pVariant->u64);
} else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) {
nLen = sprintf(pDst, "%lf", pVariant->dKey);
} else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
pDst = pVariant->pz;
nLen = pVariant->nLen;
} else if (pVariant->nType == TSDB_DATA_TYPE_BOOL) {
nLen = sprintf(pDst, "%s", (pVariant->i64Key == TSDB_TRUE) ? "TRUE" : "FALSE");
nLen = sprintf(pDst, "%s", (pVariant->i64 == TSDB_TRUE) ? "TRUE" : "FALSE");
}
if (*pDest == pVariant->pz) {
wchar_t *pWStr = calloc(1, (nLen + 1) * TSDB_NCHAR_SIZE);
taosMbsToUcs4(pDst, nLen, (char *)pWStr, (nLen + 1) * TSDB_NCHAR_SIZE, NULL);
bool ret = taosMbsToUcs4(pDst, nLen, (char *)pWStr, (nLen + 1) * TSDB_NCHAR_SIZE, NULL);
if (!ret) {
return -1;
}
// free the original binary buffer first
if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
free(pVariant->wpz);
@ -468,11 +398,15 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
pVariant->wpz = (wchar_t *)tmp;
} else {
size_t output = -1;
taosMbsToUcs4(pDst, nLen, *pDest, (nLen + 1) * TSDB_NCHAR_SIZE, &output);
int32_t output = 0;
bool ret = taosMbsToUcs4(pDst, nLen, *pDest, (nLen + 1) * TSDB_NCHAR_SIZE, &output);
if (!ret) {
return -1;
}
if (pDestSize != NULL) {
*pDestSize = (int32_t)output;
*pDestSize = output;
}
}
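
toNchar now surfaces conversion failures: the boolean result of taosMbsToUcs4 is checked and -1 is returned instead of silently keeping a half-written buffer. A standalone sketch of the same propagate-the-failure pattern, using the standard mbstowcs purely for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <wchar.h>

/* Convert a multibyte string to a wide string and report failure to the
 * caller instead of continuing with a bogus result. */
static int to_wide(const char *src, wchar_t *dst, size_t dstChars) {
    size_t n = mbstowcs(dst, src, dstChars);
    if (n == (size_t)-1) {
        return -1;                 /* invalid multibyte input: tell the caller */
    }
    if (n < dstChars) {
        dst[n] = L'\0';
    }
    return 0;
}

int main(void) {
    wchar_t buf[32];
    if (to_wide("hello", buf, sizeof(buf) / sizeof(buf[0])) != 0) {
        fprintf(stderr, "conversion failed\n");
        return 1;
    }
    wprintf(L"%ls\n", buf);
    return 0;
}
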
@ -481,80 +415,59 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
static FORCE_INLINE int32_t convertToDouble(char *pStr, int32_t len, double *value) {
SStrToken stoken = {.z = pStr, .n = len};
if (TK_ILLEGAL == isValidNumber(&stoken)) {
if (TK_ILLEGAL == tGetNumericStringType(&stoken)) {
return -1;
}
*value = strtod(pStr, NULL);
return 0;
}
static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result, int32_t type, int64_t lowBnd,
int64_t upperBnd, bool releaseVariantPtr) {
static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result, int32_t type, bool issigned, bool releaseVariantPtr) {
if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
setNull((char *)result, type, tDataTypeDesc[type].nSize);
setNull((char *)result, type, tDataTypes[type].bytes);
return 0;
}
if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
*result = pVariant->i64Key;
} else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) {
*result = (int64_t)pVariant->dKey;
errno = 0;
if (IS_SIGNED_NUMERIC_TYPE(pVariant->nType) || (pVariant->nType == TSDB_DATA_TYPE_BOOL)) {
*result = pVariant->i64;
} else if (IS_UNSIGNED_NUMERIC_TYPE(pVariant->nType)) {
*result = pVariant->u64;
} else if (IS_FLOAT_TYPE(pVariant->nType)) {
*result = (int64_t) pVariant->dKey;
} else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
errno = 0;
char *endPtr = NULL;
SStrToken token = {0};
token.n = tSQLGetToken(pVariant->pz, &token.type);
if (token.type == TK_MINUS || token.type == TK_PLUS) {
token.n = tSQLGetToken(pVariant->pz + token.n, &token.type);
}
SStrToken token = {.z = pVariant->pz, .n = pVariant->nLen};
/*int32_t n = */tSQLGetToken(pVariant->pz, &token.type);
if (token.type == TK_NULL) {
if (releaseVariantPtr) {
free(pVariant->pz);
pVariant->nLen = 0;
}
setNull((char *)result, type, tDataTypeDesc[type].nSize);
setNull((char *)result, type, tDataTypes[type].bytes);
return 0;
}
SStrToken sToken = {.z = pVariant->pz, .n = pVariant->nLen};
if (TK_ILLEGAL == isValidNumber(&sToken)) {
// decide if it is a valid number
token.type = tGetNumericStringType(&token);
if (token.type == TK_ILLEGAL) {
return -1;
}
if (token.type == TK_FLOAT) {
double v = strtod(pVariant->pz, &endPtr);
if (releaseVariantPtr) {
free(pVariant->pz);
pVariant->nLen = 0;
}
if ((errno == ERANGE && v == -1) || (isinf(v) || isnan(v))) {
return -1;
}
*result = (int64_t)v;
} else if (token.type == TK_INTEGER) {
int64_t val = strtoll(pVariant->pz, &endPtr, 10);
if (releaseVariantPtr) {
free(pVariant->pz);
pVariant->nLen = 0;
}
if (errno == ERANGE) {
return -1; // data overflow
}
*result = val;
} else {
int64_t res = 0;
int32_t t = tStrToInteger(token.z, token.type, token.n, &res, issigned);
if (t != 0) {
return -1;
}
if (releaseVariantPtr) {
free(pVariant->pz);
pVariant->nLen = 0;
}
*result = res;
} else if (pVariant->nType == TSDB_DATA_TYPE_NCHAR) {
errno = 0;
wchar_t *endPtr = NULL;
@ -583,7 +496,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
free(pVariant->pz);
pVariant->nLen = 0;
}
setNull((char *)result, type, tDataTypeDesc[type].nSize);
setNull((char *)result, type, tDataTypes[type].bytes);
return 0;
} else {
int64_t val = wcstoll(pVariant->wpz, &endPtr, 10);
@ -599,19 +512,35 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
*result = val;
}
}
if ((*result <= lowBnd) || (*result > upperBnd)) {
return -1;
bool code = false;
switch(type) {
case TSDB_DATA_TYPE_TINYINT:
code = IS_VALID_TINYINT(*result); break;
case TSDB_DATA_TYPE_SMALLINT:
code = IS_VALID_SMALLINT(*result); break;
case TSDB_DATA_TYPE_INT:
code = IS_VALID_INT(*result); break;
case TSDB_DATA_TYPE_BIGINT:
code = IS_VALID_BIGINT(*result); break;
case TSDB_DATA_TYPE_UTINYINT:
code = IS_VALID_UTINYINT(*result); break;
case TSDB_DATA_TYPE_USMALLINT:
code = IS_VALID_USMALLINT(*result); break;
case TSDB_DATA_TYPE_UINT:
code = IS_VALID_UINT(*result); break;
case TSDB_DATA_TYPE_UBIGINT:
code = IS_VALID_UBIGINT(*result); break;
}
return 0;
return code? 0:-1;
}
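
The explicit lowBnd/upperBnd arguments are gone; the converted value is now validated by per-type IS_VALID_* macros chosen in the switch above. A rough sketch of what two such macros might look like, assuming plain range checks (the real TDengine definitions live in its type headers and may differ, for instance by reserving the signed minimum as the NULL sentinel):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the IS_VALID_* range macros; illustration only. */
#define IS_VALID_TINYINT(v)   ((v) >= INT8_MIN && (v) <= INT8_MAX)
#define IS_VALID_UTINYINT(v)  ((v) >= 0 && (v) <= UINT8_MAX)

int main(void) {
    int64_t v = 300;                                                /* too big for TINYINT */
    printf("%s\n", IS_VALID_TINYINT(v)    ? "ok" : "out of range"); /* out of range */
    printf("%s\n", IS_VALID_UTINYINT(200) ? "ok" : "out of range"); /* ok */
    return 0;
}
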
static int32_t convertToBool(tVariant *pVariant, int64_t *pDest) {
if (pVariant->nType == TSDB_DATA_TYPE_BOOL) {
*pDest = pVariant->i64Key; // in order to be compatible to null of bool
} else if (pVariant->nType >= TSDB_DATA_TYPE_TINYINT && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
*pDest = ((pVariant->i64Key != 0) ? TSDB_TRUE : TSDB_FALSE);
*pDest = pVariant->i64; // in order to be compatible to null of bool
} else if (IS_NUMERIC_TYPE(pVariant->nType)) {
*pDest = ((pVariant->i64 != 0) ? TSDB_TRUE : TSDB_FALSE);
} else if (pVariant->nType == TSDB_DATA_TYPE_FLOAT || pVariant->nType == TSDB_DATA_TYPE_DOUBLE) {
*pDest = ((pVariant->dKey != 0) ? TSDB_TRUE : TSDB_FALSE);
} else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
@ -638,8 +567,6 @@ static int32_t convertToBool(tVariant *pVariant, int64_t *pDest) {
/*
 * transfer data out of the variant; this serves as the implicit data conversion from the type of the
 * input SQL string (pVariant->nType) to the column type defined in the schema
*
* todo handle the return value
*/
int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool includeLengthPrefix) {
if (pVariant == NULL || (pVariant->nType != 0 && !isValidDataType(pVariant->nType))) {
@ -647,57 +574,82 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
}
errno = 0; // reset global error code
int64_t result;
switch (type) {
case TSDB_DATA_TYPE_BOOL: {
int64_t dst = 0;
if (convertToBool(pVariant, &dst) < 0) {
if (convertToBool(pVariant, &result) < 0) {
return -1;
}
*(int8_t *)payload = (int8_t)dst;
*(int8_t *)payload = (int8_t)result;
break;
}
case TSDB_DATA_TYPE_TINYINT: {
int64_t result = 0;
if (convertToInteger(pVariant, &result, type, INT8_MIN, INT8_MAX, false) < 0) {
if (convertToInteger(pVariant, &result, type, true, false) < 0) {
return -1;
}
*((int8_t *)payload) = (int8_t)result;
*((int8_t *)payload) = (int8_t) result;
break;
}
case TSDB_DATA_TYPE_UTINYINT: {
if (convertToInteger(pVariant, &result, type, false, false) < 0) {
return -1;
}
*((uint8_t *)payload) = (uint8_t) result;
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
int64_t result = 0;
if (convertToInteger(pVariant, &result, type, INT16_MIN, INT16_MAX, false) < 0) {
if (convertToInteger(pVariant, &result, type, true, false) < 0) {
return -1;
}
*((int16_t *)payload) = (int16_t)result;
break;
}
case TSDB_DATA_TYPE_USMALLINT: {
if (convertToInteger(pVariant, &result, type, false, false) < 0) {
return -1;
}
*((uint16_t *)payload) = (uint16_t)result;
break;
}
case TSDB_DATA_TYPE_INT: {
int64_t result = 0;
if (convertToInteger(pVariant, &result, type, INT32_MIN, INT32_MAX, false) < 0) {
if (convertToInteger(pVariant, &result, type, true, false) < 0) {
return -1;
}
*((int32_t *)payload) = (int32_t)result;
break;
}
case TSDB_DATA_TYPE_UINT: {
if (convertToInteger(pVariant, &result, type, false, false) < 0) {
return -1;
}
*((uint32_t *)payload) = (uint32_t)result;
break;
}
case TSDB_DATA_TYPE_BIGINT: {
int64_t result = 0;
if (convertToInteger(pVariant, &result, type, INT64_MIN, INT64_MAX, false) < 0) {
if (convertToInteger(pVariant, &result, type, true, false) < 0) {
return -1;
}
*((int64_t *)payload) = (int64_t)result;
break;
}
case TSDB_DATA_TYPE_UBIGINT: {
if (convertToInteger(pVariant, &result, type, false, false) < 0) {
return -1;
}
*((uint64_t *)payload) = (uint64_t)result;
break;
}
case TSDB_DATA_TYPE_FLOAT: {
if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
if (strncasecmp(TSDB_DATA_NULL_STR_L, pVariant->pz, pVariant->nLen) == 0 &&
@ -706,20 +658,19 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
return 0;
} else {
double value = -1;
int32_t ret;
ret = convertToDouble(pVariant->pz, pVariant->nLen, &value);
int32_t ret = convertToDouble(pVariant->pz, pVariant->nLen, &value);
if ((errno == ERANGE && (float)value == -1) || (ret != 0)) {
return -1;
}
SET_FLOAT_VAL(payload, value);
}
} else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
SET_FLOAT_VAL(payload, pVariant->i64Key);
} else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) {
} else if (pVariant->nType == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(pVariant->nType) || IS_UNSIGNED_NUMERIC_TYPE(pVariant->nType)) {
SET_FLOAT_VAL(payload, pVariant->i64);
} else if (IS_FLOAT_TYPE(pVariant->nType)) {
SET_FLOAT_VAL(payload, pVariant->dKey);
} else if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
*((int32_t *)payload) = TSDB_DATA_FLOAT_NULL;
*((uint32_t *)payload) = TSDB_DATA_FLOAT_NULL;
return 0;
}
@ -745,9 +696,9 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
SET_DOUBLE_VAL(payload, value);
}
} else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
SET_DOUBLE_VAL(payload, pVariant->i64Key);
} else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) {
} else if (pVariant->nType == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(pVariant->nType) || IS_UNSIGNED_NUMERIC_TYPE(pVariant->nType)) {
SET_DOUBLE_VAL(payload, pVariant->i64);
} else if (IS_FLOAT_TYPE(pVariant->nType)) {
SET_DOUBLE_VAL(payload, pVariant->dKey);
} else if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
*((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL;
@ -755,9 +706,10 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
}
double dv = GET_DOUBLE_VAL(payload);
if (isinf(dv) || isnan(dv) || dv > DBL_MAX || dv < -DBL_MAX) {
if (errno == ERANGE || isinf(dv) || isnan(dv)) {
return -1;
}
break;
}
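
For DOUBLE the overflow test now keys off errno left by the earlier string-to-double conversion instead of comparing against DBL_MAX (a comparison a finite value can never satisfy). A small standalone demonstration of the errno == ERANGE behaviour the new check relies on:

#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    errno = 0;
    double v = strtod("1e999", NULL);   /* overflows double */
    /* On overflow strtod returns HUGE_VAL and sets errno to ERANGE, which is
     * what the revised range check keys on together with isinf()/isnan(). */
    printf("v=%g, errno==ERANGE: %d, isinf: %d\n", v, errno == ERANGE, isinf(v));
    return 0;
}
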
@ -794,7 +746,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
*((int64_t *)payload) = TSDB_DATA_BIGINT_NULL;
} else {
*((int64_t *)payload) = pVariant->i64Key;
*((int64_t *)payload) = pVariant->i64;
}
break;
}
@ -805,7 +757,9 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
*(uint32_t *)payload = TSDB_DATA_NCHAR_NULL;
} else {
if (pVariant->nType != TSDB_DATA_TYPE_NCHAR) {
toNchar(pVariant, &payload, &newlen);
if (toNchar(pVariant, &payload, &newlen) != 0) {
return -1;
}
} else {
wcsncpy((wchar_t *)payload, pVariant->wpz, pVariant->nLen);
}
@ -817,9 +771,11 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
char *p = varDataVal(payload);
if (pVariant->nType != TSDB_DATA_TYPE_NCHAR) {
toNchar(pVariant, &p, &newlen);
if (toNchar(pVariant, &p, &newlen) != 0) {
return -1;
}
} else {
wcsncpy((wchar_t *)p, pVariant->wpz, pVariant->nLen);
memcpy(p, pVariant->wpz, pVariant->nLen);
newlen = pVariant->nLen;
}
@ -848,7 +804,7 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) {
switch (type) {
case TSDB_DATA_TYPE_BOOL: { // bool
if (convertToBool(pVariant, &pVariant->i64Key) < 0) {
if (convertToBool(pVariant, &pVariant->i64) < 0) {
return -1;
}
@ -859,7 +815,7 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) {
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_SMALLINT: {
convertToInteger(pVariant, &(pVariant->i64Key), type, INT64_MIN, INT64_MAX, true);
convertToInteger(pVariant, &(pVariant->i64), type, true, true);
pVariant->nType = TSDB_DATA_TYPE_BIGINT;
break;
}
@ -886,7 +842,7 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) {
free(pVariant->pz);
pVariant->dKey = v;
} else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
pVariant->dKey = (double)(pVariant->i64Key);
pVariant->dKey = (double)(pVariant->i64);
}
pVariant->nType = TSDB_DATA_TYPE_DOUBLE;
@ -901,7 +857,9 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) {
}
case TSDB_DATA_TYPE_NCHAR: {
if (pVariant->nType != TSDB_DATA_TYPE_NCHAR) {
toNchar(pVariant, &pVariant->pz, &pVariant->nLen);
if (toNchar(pVariant, &pVariant->pz, &pVariant->nLen) != 0) {
return -1;
}
}
pVariant->nType = type;
break;
@ -910,5 +868,3 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) {
return 0;
}

View File

@ -19,136 +19,137 @@ using System.Runtime.InteropServices;
namespace TDengineDriver
{
enum TDengineDataType {
TSDB_DATA_TYPE_NULL = 0, // 1 bytes
TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes
TSDB_DATA_TYPE_INT = 4, // 4 bytes
TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes
TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
TSDB_DATA_TYPE_BINARY = 8, // string
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
TSDB_DATA_TYPE_NCHAR = 10 // unicode string
}
enum TDengineInitOption
{
TSDB_OPTION_LOCALE = 0,
TSDB_OPTION_CHARSET = 1,
TSDB_OPTION_TIMEZONE = 2,
TDDB_OPTION_CONFIGDIR = 3,
TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
}
class TDengineMeta
{
public string name;
public short size;
public byte type;
public string TypeName()
enum TDengineDataType
{
switch ((TDengineDataType)type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
return "BOOLEAN";
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
return "BYTE";
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
return "SHORT";
case TDengineDataType.TSDB_DATA_TYPE_INT:
return "INT";
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
return "LONG";
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
return "FLOAT";
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
return "DOUBLE";
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
return "STRING";
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
return "TIMESTAMP";
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
return "undefine";
}
}
}
class TDengine
{
public const int TSDB_CODE_SUCCESS = 0;
[DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
static extern public void Init();
[DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
static extern public void Cleanup();
[DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
static extern public void Options(int option, string value);
[DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
[DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_errstr(IntPtr res);
static public string Error(IntPtr res)
{
IntPtr errPtr = taos_errstr(res);
return Marshal.PtrToStringAnsi(errPtr);
TSDB_DATA_TYPE_NULL = 0, // 1 bytes
TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes
TSDB_DATA_TYPE_INT = 4, // 4 bytes
TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes
TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
TSDB_DATA_TYPE_BINARY = 8, // string
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
TSDB_DATA_TYPE_NCHAR = 10 // unicode string
}
[DllImport("taos.dll", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
static extern public int ErrorNo(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Query(IntPtr conn, string sqlstr);
[DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
static extern public int AffectRows(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
static extern public int FieldCount(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_fetch_fields(IntPtr res);
static public List<TDengineMeta> FetchFields(IntPtr res)
enum TDengineInitOption
{
const int fieldSize = 68;
List<TDengineMeta> metas = new List<TDengineMeta>();
if (res == IntPtr.Zero)
{
return metas;
}
int fieldCount = FieldCount(res);
IntPtr fieldsPtr = taos_fetch_fields(res);
for (int i = 0; i < fieldCount; ++i)
{
int offset = i * fieldSize;
TDengineMeta meta = new TDengineMeta();
meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset);
meta.type = Marshal.ReadByte(fieldsPtr + offset + 65);
meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66);
metas.Add(meta);
}
return metas;
TSDB_OPTION_LOCALE = 0,
TSDB_OPTION_CHARSET = 1,
TSDB_OPTION_TIMEZONE = 2,
TDDB_OPTION_CONFIGDIR = 3,
TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
}
[DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FetchRows(IntPtr res);
class TDengineMeta
{
public string name;
public short size;
public byte type;
public string TypeName()
{
switch ((TDengineDataType)type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
return "BOOLEAN";
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
return "BYTE";
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
return "SHORT";
case TDengineDataType.TSDB_DATA_TYPE_INT:
return "INT";
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
return "LONG";
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
return "FLOAT";
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
return "DOUBLE";
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
return "STRING";
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
return "TIMESTAMP";
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
return "undefine";
}
}
}
[DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FreeResult(IntPtr res);
class TDengine
{
public const int TSDB_CODE_SUCCESS = 0;
[DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
}
}
[DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
static extern public void Init();
[DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
static extern public void Cleanup();
[DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
static extern public void Options(int option, string value);
[DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
[DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_errstr(IntPtr res);
static public string Error(IntPtr res)
{
IntPtr errPtr = taos_errstr(res);
return Marshal.PtrToStringAnsi(errPtr);
}
[DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
static extern public int ErrorNo(IntPtr res);
[DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Query(IntPtr conn, string sqlstr);
[DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
static extern public int AffectRows(IntPtr res);
[DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
static extern public int FieldCount(IntPtr res);
[DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_fetch_fields(IntPtr res);
static public List<TDengineMeta> FetchFields(IntPtr res)
{
const int fieldSize = 68;
List<TDengineMeta> metas = new List<TDengineMeta>();
if (res == IntPtr.Zero)
{
return metas;
}
int fieldCount = FieldCount(res);
IntPtr fieldsPtr = taos_fetch_fields(res);
for (int i = 0; i < fieldCount; ++i)
{
int offset = i * fieldSize;
TDengineMeta meta = new TDengineMeta();
meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset);
meta.type = Marshal.ReadByte(fieldsPtr + offset + 65);
meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66);
metas.Add(meta);
}
return metas;
}
[DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FetchRows(IntPtr res);
[DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FreeResult(IntPtr res);
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
}
}
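
The DllImport declarations above switch the library name from "taos.dll" to "taos", presumably so the same binding resolves both taos.dll on Windows and libtaos under Mono on Linux. They are thin wrappers over the native client API; below is a rough C sketch of the call sequence those entry points expose. The connection parameters and exact signatures are assumptions and should be checked against the installed taos.h:

#include <stdio.h>
#include <taos.h>

int main(void) {
    taos_init();

    /* host, user, password, database (NULL = none), port (0 = default) are
     * placeholder values for illustration. */
    TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
    if (conn == NULL) {
        fprintf(stderr, "failed to connect\n");
        return 1;
    }

    TAOS_RES *res = taos_query(conn, "show databases");
    if (taos_errno(res) != 0) {
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    } else {
        int nfields = taos_field_count(res);
        TAOS_ROW row;
        while ((row = taos_fetch_row(res)) != NULL) {
            printf("fetched a row with %d fields\n", nfields);
        }
    }
    taos_free_result(res);

    taos_close(conn);
    taos_cleanup();
    return 0;
}
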

View File

@ -1,32 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" output="target/classes" path="src/main/java">
<attributes>
<attribute name="optional" value="true"/>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry excluding="**" kind="src" output="target/classes" path="src/main/resources">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="src" output="target/test-classes" path="src/test/java">
<attributes>
<attribute name="optional" value="true"/>
<attribute name="maven.pomderived" value="true"/>
<attribute name="test" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="con" path="org.eclipse.m2e.MAVEN2_CLASSPATH_CONTAINER">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="output" path="target/classes"/>
</classpath>

View File

@ -1,23 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>taos-jdbcdriver</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.m2e.core.maven2Builder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
<nature>org.eclipse.m2e.core.maven2Nature</nature>
</natures>
</projectDescription>

View File

@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
PROJECT(TDengine)
@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.15-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.18-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})

View File

@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.15</version>
<version>2.0.18</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>

View File

@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.15</version>
<version>2.0.18</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@ -49,6 +49,7 @@
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
@ -73,7 +74,16 @@
<version>1.2.58</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-dbcp2 -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-dbcp2</artifactId>
<version>2.7.0</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
@ -116,7 +126,7 @@
<include>**/*Test.java</include>
</includes>
<excludes>
<exclude>**/BatchInsertTest.java</exclude>
<exclude>**/AppMemoryLeakTest.java</exclude>
<exclude>**/FailOverTest.java</exclude>
</excludes>
<testFailureIgnore>true</testFailureIgnore>

File diff suppressed because it is too large

View File

@ -1,68 +0,0 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.ResultSet;
import java.sql.SQLException;
/*
* TDengine only supports a subset of the standard SQL, thus this implemetation of the
* standard JDBC API contains more or less some adjustments customized for certain
* compatibility needs.
*/
public class CatalogResultSet extends TSDBResultSetWrapper {
public CatalogResultSet(ResultSet resultSet) {
super.setOriginalResultSet(resultSet);
}
@Override
public String getString(int columnIndex) throws SQLException {
if (columnIndex <= 1) {
return super.getString(columnIndex);
} else {
return null;
}
}
@Override
public boolean getBoolean(int columnIndex) throws SQLException {
if (columnIndex <= 1) {
return super.getBoolean(columnIndex);
} else {
return false;
}
}
@Override
public byte[] getBytes(int columnIndex) throws SQLException {
if (columnIndex <= 1) {
return super.getBytes(columnIndex);
} else {
return null;
}
}
@Override
public Object getObject(int columnIndex) throws SQLException {
if (columnIndex <= 1) {
return super.getObject(columnIndex);
} else {
return null;
}
}
}

View File

@ -16,40 +16,40 @@ package com.taosdata.jdbc;
public class ColumnMetaData {
private int colType = 0;
private String colName = null;
private int colSize = -1;
private int colIndex = 0;
private int colType = 0;
private String colName = null;
private int colSize = -1;
private int colIndex = 0;
public int getColSize() {
return colSize;
}
public int getColSize() {
return colSize;
}
public void setColSize(int colSize) {
this.colSize = colSize;
}
public void setColSize(int colSize) {
this.colSize = colSize;
}
public int getColType() {
return colType;
}
public int getColType() {
return colType;
}
public void setColType(int colType) {
this.colType = colType;
}
public void setColType(int colType) {
this.colType = colType;
}
public String getColName() {
return colName;
}
public String getColName() {
return colName;
}
public void setColName(String colName) {
this.colName = colName;
}
public void setColName(String colName) {
this.colName = colName;
}
public int getColIndex() {
return colIndex;
}
public int getColIndex() {
return colIndex;
}
public void setColIndex(int colIndex) {
this.colIndex = colIndex;
}
public void setColIndex(int colIndex) {
this.colIndex = colIndex;
}
}

View File

@ -1,51 +0,0 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.ResultSet;
/*
* TDengine only supports a subset of the standard SQL, thus this implemetation of the
* standard JDBC API contains more or less some adjustments customized for certain
* compatibility needs.
*/
public class GetColumnsResultSet extends TSDBResultSetWrapper {
private String catalog;
private String schemaPattern;
private String tableNamePattern;
private String columnNamePattern;
public GetColumnsResultSet(ResultSet resultSet, String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) {
super.setOriginalResultSet(resultSet);
this.catalog = catalog;
this.schemaPattern = schemaPattern;
this.tableNamePattern = tableNamePattern;
this.columnNamePattern = columnNamePattern;
}
@Override
public String getString(int columnIndex) {
switch (columnIndex) {
case 1:
return catalog;
case 2:
return null;
case 3:
return tableNamePattern;
default:
return null;
}
}
}

View File

@ -1,53 +0,0 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.ResultSet;
import java.sql.SQLException;
/*
* TDengine only supports a subset of the standard SQL, thus this implemetation of the
* standard JDBC API contains more or less some adjustments customized for certain
* compatibility needs.
*/
public class GetTablesResultSet extends TSDBResultSetWrapper {
private String catalog;
private String schemaPattern;
private String tableNamePattern;
private String[] types;
public GetTablesResultSet(ResultSet resultSet, String catalog, String schemaPattern, String tableNamePattern, String[] types) {
super.setOriginalResultSet(resultSet);
this.catalog = catalog;
this.schemaPattern = schemaPattern;
this.tableNamePattern = tableNamePattern;
this.types = types;
}
@Override
public String getString(int columnIndex) throws SQLException {
String ret = null;
switch (columnIndex) {
case 3:
return super.getString(1);
case 4:
return "table";
default:
return null;
}
}
}

View File

@ -19,68 +19,74 @@ import java.util.Map;
public abstract class TSDBConstants {
public static final String DEFAULT_PORT = "6200";
public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!";
public static final String INVALID_VARIABLES = "invalid variables";
public static Map<Integer, String> DATATYPE_MAP = null;
public static final String STATEMENT_CLOSED = "Statement already closed.";
public static final String DEFAULT_PORT = "6200";
public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!";
public static final String INVALID_VARIABLES = "invalid variables";
public static final String RESULT_SET_IS_CLOSED = "resultSet is closed.";
public static final long JNI_NULL_POINTER = 0L;
public static Map<Integer, String> DATATYPE_MAP = null;
public static final int JNI_SUCCESS = 0;
public static final int JNI_TDENGINE_ERROR = -1;
public static final int JNI_CONNECTION_NULL = -2;
public static final int JNI_RESULT_SET_NULL = -3;
public static final int JNI_NUM_OF_FIELDS_0 = -4;
public static final int JNI_SQL_NULL = -5;
public static final int JNI_FETCH_END = -6;
public static final int TSDB_DATA_TYPE_NULL = 0;
public static final int TSDB_DATA_TYPE_BOOL = 1;
public static final int TSDB_DATA_TYPE_TINYINT = 2;
public static final int TSDB_DATA_TYPE_SMALLINT = 3;
public static final int TSDB_DATA_TYPE_INT = 4;
public static final int TSDB_DATA_TYPE_BIGINT = 5;
public static final int TSDB_DATA_TYPE_FLOAT = 6;
public static final int TSDB_DATA_TYPE_DOUBLE = 7;
public static final int TSDB_DATA_TYPE_BINARY = 8;
public static final int TSDB_DATA_TYPE_TIMESTAMP = 9;
public static final int TSDB_DATA_TYPE_NCHAR = 10;
public static String WrapErrMsg(String msg) {
return "TDengine Error: " + msg;
}
public static final long JNI_NULL_POINTER = 0L;
public static String FixErrMsg(int code) {
switch (code) {
case JNI_TDENGINE_ERROR:
return WrapErrMsg("internal error of database!");
case JNI_CONNECTION_NULL:
return WrapErrMsg("invalid tdengine connection!");
case JNI_RESULT_SET_NULL:
return WrapErrMsg("invalid resultset pointer!");
case JNI_NUM_OF_FIELDS_0:
return WrapErrMsg("invalid num of fields!");
case JNI_SQL_NULL:
return WrapErrMsg("can't execute empty sql!");
case JNI_FETCH_END:
return WrapErrMsg("fetch to the end of resultset");
default:
break;
}
return WrapErrMsg("unkown error!");
}
public static final int JNI_SUCCESS = 0;
public static final int JNI_TDENGINE_ERROR = -1;
public static final int JNI_CONNECTION_NULL = -2;
public static final int JNI_RESULT_SET_NULL = -3;
public static final int JNI_NUM_OF_FIELDS_0 = -4;
public static final int JNI_SQL_NULL = -5;
public static final int JNI_FETCH_END = -6;
static {
DATATYPE_MAP = new HashMap<Integer, String>();
DATATYPE_MAP.put(1, "BOOL");
DATATYPE_MAP.put(2, "TINYINT");
DATATYPE_MAP.put(3, "SMALLINT");
DATATYPE_MAP.put(4, "INT");
DATATYPE_MAP.put(5, "BIGINT");
DATATYPE_MAP.put(6, "FLOAT");
DATATYPE_MAP.put(7, "DOUBLE");
DATATYPE_MAP.put(8, "BINARY");
DATATYPE_MAP.put(9, "TIMESTAMP");
DATATYPE_MAP.put(10, "NCHAR");
}
public static final int TSDB_DATA_TYPE_NULL = 0;
public static final int TSDB_DATA_TYPE_BOOL = 1;
public static final int TSDB_DATA_TYPE_TINYINT = 2;
public static final int TSDB_DATA_TYPE_SMALLINT = 3;
public static final int TSDB_DATA_TYPE_INT = 4;
public static final int TSDB_DATA_TYPE_BIGINT = 5;
public static final int TSDB_DATA_TYPE_FLOAT = 6;
public static final int TSDB_DATA_TYPE_DOUBLE = 7;
public static final int TSDB_DATA_TYPE_BINARY = 8;
public static final int TSDB_DATA_TYPE_TIMESTAMP = 9;
public static final int TSDB_DATA_TYPE_NCHAR = 10;
// nchar field's max length
public static final int maxFieldSize = 16 * 1024;
public static String WrapErrMsg(String msg) {
return "TDengine Error: " + msg;
}
public static String FixErrMsg(int code) {
switch (code) {
case JNI_TDENGINE_ERROR:
return WrapErrMsg("internal error of database!");
case JNI_CONNECTION_NULL:
return WrapErrMsg("invalid tdengine connection!");
case JNI_RESULT_SET_NULL:
return WrapErrMsg("invalid resultset pointer!");
case JNI_NUM_OF_FIELDS_0:
return WrapErrMsg("invalid num of fields!");
case JNI_SQL_NULL:
return WrapErrMsg("can't execute empty sql!");
case JNI_FETCH_END:
return WrapErrMsg("fetch to the end of resultset");
default:
break;
}
return WrapErrMsg("unkown error!");
}
static {
DATATYPE_MAP = new HashMap<>();
DATATYPE_MAP.put(1, "BOOL");
DATATYPE_MAP.put(2, "TINYINT");
DATATYPE_MAP.put(3, "SMALLINT");
DATATYPE_MAP.put(4, "INT");
DATATYPE_MAP.put(5, "BIGINT");
DATATYPE_MAP.put(6, "FLOAT");
DATATYPE_MAP.put(7, "DOUBLE");
DATATYPE_MAP.put(8, "BINARY");
DATATYPE_MAP.put(9, "TIMESTAMP");
DATATYPE_MAP.put(10, "NCHAR");
}
}

Some files were not shown because too many files have changed in this diff