Merge branch 'develop' into feature/TD-2581

wpan 2021-07-27 11:32:36 +08:00
commit e9f664f922
311 changed files with 23846 additions and 8642 deletions

.gitmodules vendored

@ -13,3 +13,6 @@
[submodule "deps/jemalloc"]
path = deps/jemalloc
url = https://github.com/jemalloc/jemalloc
[submodule "deps/TSZ"]
path = deps/TSZ
url = https://github.com/taosdata/TSZ.git


@ -83,6 +83,8 @@ IF (TD_ARM_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm64 is defined")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src)
ENDIF ()
IF (TD_ARM_32)
@ -91,6 +93,8 @@ IF (TD_ARM_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm32 is defined")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src)
ENDIF ()
IF (TD_MIPS_64)
@ -143,6 +147,7 @@ IF (TD_LINUX)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src)
ENDIF ()
IF (TD_DARWIN_64)
@ -164,6 +169,7 @@ IF (TD_DARWIN_64)
SET(RELEASE_FLAGS "-Og")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src)
ENDIF ()
IF (TD_WINDOWS)
@ -194,6 +200,7 @@ IF (TD_WINDOWS)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/regex)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/wepoll/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MsvcLibX/include)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src)
ENDIF ()
IF (TD_WINDOWS_64)


@ -91,3 +91,12 @@ SET(TD_MEMORY_SANITIZER FALSE)
IF (${MEMORY_SANITIZER} MATCHES "true")
SET(TD_MEMORY_SANITIZER TRUE)
ENDIF ()
IF (${TSZ_ENABLED} MATCHES "true")
# define add
MESSAGE(STATUS "build with TSZ enabled")
ADD_DEFINITIONS(-DTD_TSZ)
set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz" )
ELSE()
set(VAR_TSZ "" CACHE INTERNAL "global variant empty" )
ENDIF()


@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-*-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.33-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")


@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.1.4.1")
SET(TD_VER_NUMBER "2.1.5.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

deps/CMakeLists.txt vendored

@ -15,7 +15,6 @@ ADD_SUBDIRECTORY(cJson)
ADD_SUBDIRECTORY(wepoll)
ADD_SUBDIRECTORY(MsvcLibX)
ADD_SUBDIRECTORY(rmonotonic)
ADD_SUBDIRECTORY(lua)
IF (TD_LINUX AND TD_MQTT)
@ -38,3 +37,7 @@ IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
BUILD_COMMAND ${MAKE}
)
ENDIF ()
IF (${TSZ_ENABLED} MATCHES "true")
ADD_SUBDIRECTORY(TSZ)
ENDIF()

deps/TSZ vendored Submodule

@ -0,0 +1 @@
Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c


@ -126,7 +126,7 @@ taos> source <filename>;
$ taosdemo
```
This command automatically creates a super table named meters under the database test. Under this super table are 10,000 tables named "t0" through "t9999"; each table holds 10,000 records, and each record has four fields (ts, current, voltage, phase). Timestamps range from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupdId, where groupdId is set to 1 through 10 and location is set to "beijing" or "shanghai".
This command automatically creates a super table named meters under the database test. Under this super table are 10,000 tables named "d0" through "d9999"; each table holds 10,000 records, and each record has four fields (ts, current, voltage, phase). Timestamps range from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupdId, where groupdId is set to 1 through 10 and location is set to "beijing" or "shanghai".
Running this command takes roughly a few minutes and inserts 100 million records in total.
@ -156,10 +156,10 @@ taos> select count(*) from test.meters where location="beijing";
taos> select avg(current), max(voltage), min(phase) from test.meters where groupdId=10;
```
- For table t10, compute the average, maximum, and minimum aggregated over 10-second intervals:
- For table d10, compute the average, maximum, and minimum aggregated over 10-second intervals:
```mysql
taos> select avg(current), max(voltage), min(phase) from test.t10 interval(10s);
taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
```
**Note:** the taosdemo command itself has many options for configuring the number of tables, the number of records, and so on; run `taosdemo --help` for the full list. Feel free to experiment with different parameter settings.


@ -715,7 +715,7 @@ Query OK, 1 row(s) in set (0.001091s)
2. Range filters on multiple fields at the same time must be joined with the keyword AND; query filters across different columns joined with OR are not supported yet.
3. For a filter on a single field, only one time-range condition may be set per statement; for other (regular) columns or tag columns, the `OR` keyword can be used to combine filter conditions, for example: `((value > 20 AND value < 30) OR (value < 12))`.
4. Starting with version 2.0.17.0, filtering supports the BETWEEN AND syntax; for example, `WHERE col2 BETWEEN 1.5 AND 3.25` expresses the condition "1.5 ≤ col2 ≤ 3.25".
5. Starting with version 2.1.4.0, filtering supports the IN operator, for example `WHERE city IN ('Beijing', 'Shanghai')`. Note: BOOL values may be written as `{true, false}` or `{0, 1}`, but not as integers other than 0 and 1; FLOAT and DOUBLE values are subject to floating-point precision, so a value in the set matches a row only if the two are equal within that precision. <!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER -->
5. Starting with version 2.1.4.0, filtering supports the IN operator, for example `WHERE city IN ('Beijing', 'Shanghai')`. Note: BOOL values may be written as `{true, false}` or `{0, 1}`, but not as integers other than 0 and 1; FLOAT and DOUBLE values are subject to floating-point precision, so a value in the set matches a row only if the two are equal within that precision; the IN operator is also supported on TIMESTAMP columns other than the primary key. <!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER -->
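As a rough illustration (not part of the original document; the column city is hypothetical and borrowed from the IN example above), the BETWEEN AND and IN filters described above could be combined in one query:
```mysql
SELECT * FROM meters
  WHERE voltage BETWEEN 215 AND 235
    AND city IN ('Beijing', 'Shanghai');
```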
<!--
<a class="anchor" id="having"></a>
@ -1338,7 +1338,8 @@ SELECT function_list FROM stb_name
- Query filtering, aggregation, and similar operations are executed independently for each window. Aggregation queries currently support three ways of partitioning windows (a short illustrative sketch follows this excerpt):
1. Time window: the width of the aggregation window is specified by the INTERVAL keyword, with a minimum interval of 10 milliseconds (10a); an offset is also supported (the offset must be smaller than the interval), i.e. the offset of the window partitioning relative to "UTC time 0". The SLIDING clause specifies the forward increment of the aggregation window, i.e. how far the window slides forward each time. When SLIDING equals INTERVAL, the sliding window becomes a tumbling window.
* Starting with version 2.1.5.0, the minimum interval allowed in the INTERVAL clause is lowered to 1 microsecond (1u); of course, if the time precision of the queried DATABASE is set to milliseconds, the minimum allowed interval is 1 millisecond (1a).
2. State window: an integer (boolean) or string value identifies the state of the device at the time a record is produced. Records with the same state value belong to the same state window, and the window closes when the value changes. The column holding the state value is given as the parameter of the STATE_WINDOW clause.
* **Note:** when the INTERVAL clause is used, except in very special cases, the timezone parameter in the taos.cfg configuration files of both the client and the server must be set to the same value, to avoid the severe performance impact of time-handling functions repeatedly converting across time zones.
2. State window: an integer or boolean value identifies the state of the device at the time a record is produced. Records with the same state value belong to the same state window, and the window closes when the value changes. The column holding the state value is given as the parameter of the STATE_WINDOW clause.
3. Session window: the column containing the timestamp is specified by the ts_col parameter of the SESSION clause. Whether two adjacent records belong to the same session is determined by the difference of their timestamps: if the difference is within tol_val, the records still belong to the same window; if the change in time exceeds tol_val, the next window is opened automatically.
- The WHERE clause can specify the start and end time of the query as well as other filter conditions.
- The FILL clause specifies the fill mode to use when data is missing in a window interval. The fill modes include the following:
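A minimal sketch of the window clauses described above (not part of the original document; it reuses the d10 table from the earlier taosdemo example, and the interval, sliding, and tolerance values are arbitrary):
```mysql
SELECT AVG(current), MAX(voltage) FROM test.d10
  INTERVAL(1m) SLIDING(30s);

SELECT COUNT(*) FROM test.d10
  SESSION(ts, 10s);
```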


@ -1132,7 +1132,7 @@ TDengine supports aggregations over data, they are listed below:
```
Function: Return the difference between the max value and the min value of a column in a table/STable.
Return Data Type: Same as applicable fields.
Return Data Type: Double.
Applicable Fields: All types except binary, nchar, bool.


@ -44,6 +44,7 @@ echo "version=${version}"
#docker manifest rm tdengine/tdengine
#docker manifest rm tdengine/tdengine:${version}
if [ "$verType" == "beta" ]; then
docker manifest rm tdengine/tdengine:latest
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
@ -51,11 +52,12 @@ if [ "$verType" == "beta" ]; then
docker manifest push tdengine/tdengine-beta:${version}
elif [ "$verType" == "stable" ]; then
docker manifest rm tdengine/tdengine:latest
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine:latest
docker manifest push tdengine/tdengine:${version}
docker manifest push tdengine/tdengine:${version}
else
echo "unknow verType, nor stabel or beta"


@ -67,8 +67,10 @@ done
if [ "$verType" == "beta" ]; then
dockername=${cpuType}-${verType}
dirName=${pkgFile%-beta*}
elif [ "$verType" == "stable" ]; then
dockername=${cpuType}
dirName=${pkgFile%-Linux*}
else
echo "unknow verType, nor stabel or beta"
exit 1
@ -84,11 +86,10 @@ comunityArchiveDir=/nas/TDengine/v$version/community # community versionpac
cd ${scriptDir}
cp -f ${comunityArchiveDir}/${pkgFile} .
dirName=${pkgFile%-Linux*}
echo "dirName=${dirName}"
docker build --rm -f "Dockerfile" -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName}
docker build --rm -f "Dockerfile" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker push tdengine/tdengine-${dockername}:${version}


@ -182,7 +182,13 @@ function install_jemalloc() {
${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi
${csudo} ldconfig
if [ -d /etc/ld.so.conf.d ]; then
${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
fi
fi
}


@ -421,7 +421,7 @@ function install_service() {
}
function update_TDengine() {
echo -e "${GREEN}Start to update TDEngine...${NC}"
echo -e "${GREEN}Start to update TDengine...${NC}"
# Stop the service if running
if [ "$osType" != "Darwin" ]; then


@ -59,7 +59,7 @@ pkg_name=${install_dir}-${osType}-${cpuType}
# exit 1
# fi
if [ "$verType" == "beta" ]; then
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}


@ -41,10 +41,10 @@ fi
if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then
#strip ${build_dir}/bin/taosd
#strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
else
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo \
${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb"
fi
@ -139,7 +139,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/C# ${install_dir}/examples
fi
# Copy driver
mkdir -p ${install_dir}/driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
# Copy connector
@ -168,7 +168,7 @@ fi
# exit 1
cd ${release_dir}
cd ${release_dir}
# install_dir has been distinguishes cluster from edege, so comments this code
pkg_name=${install_dir}-${osType}-${cpuType}
@ -182,7 +182,7 @@ pkg_name=${install_dir}-${osType}-${cpuType}
# exit 1
# fi
if [ "$verType" == "beta" ]; then
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}


@ -41,10 +41,10 @@ fi
if [ "$osType" != "Darwin" ]; then
# if [ "$pagMode" == "lite" ]; then
# strip ${build_dir}/bin/powerd
# strip ${build_dir}/bin/powerd
# strip ${build_dir}/bin/power
# bin_files="${build_dir}/bin/power ${script_dir}/remove_client_power.sh"
# else
# else
# bin_files="${build_dir}/bin/power ${build_dir}/bin/powerdemo ${script_dir}/remove_client_power.sh ${script_dir}/set_core.sh"
# fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
@ -67,6 +67,39 @@ mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi
sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg
@ -77,11 +110,11 @@ if [ "$osType" != "Darwin" ]; then
strip ${build_dir}/bin/taos
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin
else
else
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
@ -158,15 +191,15 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py
cp -r ${examples_dir}/R ${install_dir}/examples
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/taosdemo.go
fi
# Copy driver
mkdir -p ${install_dir}/driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
# Copy connector
@ -188,11 +221,11 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py
fi
# Copy release note
@ -200,7 +233,7 @@ fi
# exit 1
cd ${release_dir}
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${osType}-${cpuType}
@ -217,8 +250,8 @@ fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stable or beta"
exit 1


@ -215,7 +215,7 @@ pkg_name=${install_dir}-${osType}-${cpuType}
# exit 1
# fi
if [ "$verType" == "beta" ]; then
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}


@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.1.4.1'
version: '2.1.5.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.1.4.1
- usr/lib/libtaos.so.2.1.5.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so


@ -23,6 +23,8 @@
static SBnThread tsBnThread;
static void *bnThreadFunc(void *arg) {
setThreadName("bnThreadd");
while (1) {
pthread_mutex_lock(&tsBnThread.mutex);
if (tsBnThread.stop) {


@ -11,7 +11,7 @@ IF (TD_LINUX)
# set the static lib name
ADD_LIBRARY(taos_static STATIC ${SRC})
TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m rt)
TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m rt ${VAR_TSZ})
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
@ -21,7 +21,7 @@ IF (TD_LINUX)
IF (TD_LINUX_64)
TARGET_LINK_LIBRARIES(taos lua)
ENDIF ()
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
#set version of .so
@ -37,13 +37,13 @@ ELSEIF (TD_DARWIN)
# set the static lib name
ADD_LIBRARY(taos_static STATIC ${SRC})
TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m)
TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m lua)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m)
TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m lua)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
#set version of .dylib
@ -68,19 +68,19 @@ ELSEIF (TD_WINDOWS)
IF (NOT TD_GODLL)
SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def)
ENDIF ()
TARGET_LINK_LIBRARIES(taos trpc tutil query)
TARGET_LINK_LIBRARIES(taos trpc tutil query lua)
ELSEIF (TD_DARWIN)
SET(CMAKE_MACOSX_RPATH 1)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)
ADD_LIBRARY(taos_static STATIC ${SRC})
TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m)
TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m lua)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m)
TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m lua)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)


@ -95,11 +95,23 @@ typedef struct SVgroupTableInfo {
SArray *itemList; // SArray<STableIdInfo>
} SVgroupTableInfo;
typedef struct SBlockKeyTuple {
TSKEY skey;
void* payloadAddr;
} SBlockKeyTuple;
typedef struct SBlockKeyInfo {
int32_t maxBytesAlloc;
SBlockKeyTuple* pKeyTuple;
} SBlockKeyInfo;
int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *len);
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta);
void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks* dataBuf);
int tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* pBlkKeyInfo);
int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows);
void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo);
void doRetrieveSubqueryData(SSchedMsg *pMsg);
@ -108,6 +120,7 @@ SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint
uint32_t offset);
void* tscDestroyBlockArrayList(SArray* pDataBlockList);
void* tscDestroyUdfArrayList(SArray* pUdfList);
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
@ -262,7 +275,8 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo);
int tscGetSTableVgroupInfo(SSqlObj* pSql, SQueryInfo* pQueryInfo);
int tscGetTableMeta(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo);
int tscGetTableMetaEx(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, bool createIfNotExists);
int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool createIfNotExists, bool onlyLocal);
int32_t tscGetUdfFromNode(SSqlObj *pSql, SQueryInfo* pQueryInfo);
void tscResetForNextRetrieve(SSqlRes* pRes);
void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo);
@ -309,10 +323,9 @@ bool hasMoreClauseToTry(SSqlObj* pSql);
void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta);
void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp);
void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp);
int tscSetMgmtEpSetFromCfg(const char *first, const char *second, SRpcCorEpSet *corEpSet);
int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVgroupNameList, __async_cb_func_t fp);
int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVgroupNameList, SArray* pUdfList, __async_cb_func_t fp, bool metaClone);
int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t length, SArray* pNameArray);


@ -88,13 +88,43 @@ typedef struct SBoundColumn {
int32_t offset; // all column offset value
} SBoundColumn;
typedef struct {
uint16_t schemaColIdx;
uint16_t boundIdx;
uint16_t finalIdx;
} SBoundIdxInfo;
typedef enum _COL_ORDER_STATUS {
ORDER_STATUS_UNKNOWN = 0,
ORDER_STATUS_ORDERED = 1,
ORDER_STATUS_DISORDERED = 2,
} EOrderStatus;
typedef struct SParsedDataColInfo {
int16_t numOfCols;
int16_t numOfBound;
int32_t *boundedColumns;
SBoundColumn *cols;
int16_t numOfCols;
int16_t numOfBound;
int32_t * boundedColumns; // bounded column idx according to schema
SBoundColumn * cols;
SBoundIdxInfo *colIdxInfo;
int8_t orderStatus; // bounded columns:
} SParsedDataColInfo;
#define IS_DATA_COL_ORDERED(s) ((s) == (int8_t)ORDER_STATUS_ORDERED)
typedef struct {
SSchema * pSchema;
int16_t sversion;
int32_t flen;
uint16_t nCols;
void * buf;
void * pDataBlock;
SSubmitBlk *pSubmitBlk;
} SMemRowBuilder;
typedef struct {
TDRowLenT allNullLen;
} SMemRowHelper;
typedef struct STableDataBlocks {
SName tableName;
int8_t tsSource; // where does the UNIX timestamp come from, server or client
@ -108,13 +138,15 @@ typedef struct STableDataBlocks {
uint32_t size;
STableMeta *pTableMeta; // the tableMeta of current table, the table meta will be used during submit, keep a ref to avoid to be removed from cache
char *pData;
SParsedDataColInfo boundColumnInfo;
bool cloned;
SParsedDataColInfo boundColumnInfo;
// for parameter ('?') binding
uint32_t numOfAllocedParams;
uint32_t numOfParams;
SParamInfo *params;
uint32_t numOfAllocedParams;
uint32_t numOfParams;
SParamInfo * params;
SMemRowHelper rowHelper;
} STableDataBlocks;
typedef struct {
@ -128,6 +160,7 @@ typedef struct SInsertStatementParam {
SHashObj *pTableBlockHashList; // data block for each table
SArray *pDataBlocks; // SArray<STableDataBlocks*>. Merged submit block for each vgroup
int8_t schemaAttached; // denote if submit block is built with table schema or not
uint8_t payloadType; // EPayloadType. 0: K-V payload for non-prepare insert, 1: rawPayload for prepare insert
STagData tagData; // NOTE: pTagData->data is used as a variant length array
int32_t batchSize; // for parameter ('?') binding and batch processing
@ -139,6 +172,14 @@ typedef struct SInsertStatementParam {
char *sql; // current sql statement position
} SInsertStatementParam;
typedef enum {
PAYLOAD_TYPE_KV = 0,
PAYLOAD_TYPE_RAW = 1,
} EPayloadType;
#define IS_RAW_PAYLOAD(t) \
(((int)(t)) == PAYLOAD_TYPE_RAW) // 0: K-V payload for non-prepare insert, 1: rawPayload for prepare insert
// TODO extract sql parser supporter
typedef struct {
int command;
@ -146,6 +187,7 @@ typedef struct {
SInsertStatementParam insertParam;
char reserve1[3]; // fix bus error on arm32
int32_t count; // todo remove it
bool subCmd;
char reserve2[3]; // fix bus error on arm32
int16_t numOfCols;
@ -242,6 +284,7 @@ typedef struct SSqlObj {
void * pStream;
void * pSubscription;
char * sqlstr;
void * pBuf; // table meta buffer
char parseRetry;
char retry;
char maxRetry;
@ -369,6 +412,7 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
int32_t tscValidateSqlInfo(SSqlObj *pSql, struct SSqlInfo *pInfo);
int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows);
extern int32_t sentinel;
extern SHashObj *tscVgroupMap;
extern SHashObj *tscTableMetaInfo;
@ -381,10 +425,15 @@ extern int tscRefId;
extern int tscNumOfObj; // number of existed sqlObj in current process.
extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);
void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables);
int16_t getNewResColId(SSqlCmd* pCmd);
int32_t schemaIdxCompar(const void *lhs, const void *rhs);
int32_t boundIdxCompar(const void *lhs, const void *rhs);
int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen);
int32_t getExtendedRowSize(STableComInfo *tinfo);
#ifdef __cplusplus
}
#endif


@ -946,3 +946,34 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI
return JNI_SUCCESS;
}
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(JNIEnv *env, jobject jobj,
jobjectArray lines, jlong conn) {
TAOS *taos = (TAOS *)conn;
if (taos == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
int numLines = (*env)->GetArrayLength(env, lines);
char** c_lines = calloc(numLines, sizeof(char*));
for (int i = 0; i < numLines; ++i) {
jstring line = (jstring) ((*env)->GetObjectArrayElement(env, lines, i));
c_lines[i] = (char*)(*env)->GetStringUTFChars(env, line, 0);
}
int code = taos_insert_lines(taos, c_lines, numLines);
for (int i = 0; i < numLines; ++i) {
jstring line = (jstring) ((*env)->GetObjectArrayElement(env, lines, i));
(*env)->ReleaseStringUTFChars(env, line, c_lines[i]);
}
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s", jobj, taos, tstrerror(code));
return JNI_TDENGINE_ERROR;
}
return code;
}


@ -7,11 +7,16 @@ taos_connect_auth
taos_close
taos_stmt_init
taos_stmt_prepare
taos_stmt_set_tbname_tags
taos_stmt_set_tbname
taos_stmt_is_insert
taos_stmt_num_params
taos_stmt_bind_param
taos_stmt_add_batch
taos_stmt_execute
taos_stmt_use_result
taos_stmt_close
taos_stmt_errstr
taos_query
taos_fetch_row
taos_result_precision
@ -37,6 +42,4 @@ taos_consume
taos_unsubscribe
taos_open_stream
taos_close_stream
taos_fetch_block
taos_load_table_info


@ -602,6 +602,15 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
continue;
}
aAggs[functionId].mergeFunc(&pCtx[j]);
}
} else {
@ -610,6 +619,15 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
continue;
}
aAggs[functionId].xFinalize(&pCtx[j]);
}
@ -626,7 +644,11 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
}
for(int32_t j = 0; j < numOfExpr; ++j) {
aAggs[pCtx[j].functionId].init(&pCtx[j]);
if (pCtx[j].functionId < 0) {
continue;
}
aAggs[pCtx[j].functionId].init(&pCtx[j], pCtx[j].resultInfo);
}
for (int32_t j = 0; j < numOfExpr; ++j) {
@ -638,6 +660,15 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
continue;
}
aAggs[functionId].mergeFunc(&pCtx[j]);
}
}
@ -651,6 +682,15 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
continue;
}
aAggs[functionId].mergeFunc(&pCtx[j]);
}
}
@ -871,7 +911,6 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
{
if (pAggInfo->hasDataBlockForNewGroup) {
pAggInfo->binfo.pRes->info.rows = 0;
pAggInfo->hasPrev = false; // now we start from a new group data set.
// not belongs to the same group, return the result of current group;
@ -880,7 +919,13 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
{ // reset output buffer
for(int32_t j = 0; j < pOperator->numOfOutput; ++j) {
aAggs[pAggInfo->binfo.pCtx[j].functionId].init(&pAggInfo->binfo.pCtx[j]);
SQLFunctionCtx* pCtx = &pAggInfo->binfo.pCtx[j];
if (pCtx->functionId < 0) {
clearOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity);
continue;
}
aAggs[pCtx->functionId].init(pCtx, pCtx->resultInfo);
}
}
@ -933,6 +978,14 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
continue;
}
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pAggInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pAggInfo->binfo.pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
continue;
}
aAggs[functionId].xFinalize(&pAggInfo->binfo.pCtx[j]);
}


@ -38,10 +38,33 @@ enum {
TSDB_USE_CLI_TS = 1,
};
static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE;
static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE;
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows);
static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema,
char *str, char **end);
int32_t getExtendedRowSize(STableComInfo *tinfo) {
return tinfo->rowSize + PAYLOAD_HEADER_LEN + PAYLOAD_COL_HEAD_LEN * tinfo->numOfColumns;
}
int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen) {
pHelper->allNullLen = allNullColsLen; // TODO: get allNullColsLen when creating or altering table meta
if (pHelper->allNullLen == 0) {
for (uint16_t i = 0; i < nCols; ++i) {
uint8_t type = pSSchema[i].type;
int32_t typeLen = TYPE_BYTES[type];
pHelper->allNullLen += typeLen;
if (TSDB_DATA_TYPE_BINARY == type) {
pHelper->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES);
} else if (TSDB_DATA_TYPE_NCHAR == type) {
int len = VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE;
pHelper->allNullLen += len;
}
}
}
return 0;
}
static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
errno = 0;
*value = strtold(pToken->z, endPtr);
@ -378,6 +401,342 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
return TSDB_CODE_SUCCESS;
}
static FORCE_INLINE TDRowLenT tsSetPayloadColValue(char *payloadStart, char *payload, int16_t columnId,
uint8_t columnType, const void *value, uint16_t valueLen, TDRowTLenT tOffset) {
payloadColSetId(payload, columnId);
payloadColSetType(payload, columnType);
memcpy(POINTER_SHIFT(payloadStart,tOffset), value, valueLen);
return valueLen;
}
static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *payloadStart, char *primaryKeyStart,
char *payload, char *msg, char **str, bool primaryKey, int16_t timePrec,
TDRowTLenT tOffset, TDRowLenT *sizeAppend, TDRowLenT *dataRowColDeltaLen,
TDRowLenT *kvRowColLen) {
int64_t iv;
int32_t ret;
char * endptr = NULL;
if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z);
}
switch (pSchema->type) {
case TSDB_DATA_TYPE_BOOL: { // bool
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_BOOL), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
} else {
if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) {
if (strncmp(pToken->z, "true", pToken->n) == 0) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &TRUE_VALUE,
TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]);
} else if (strncmp(pToken->z, "false", pToken->n) == 0) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &FALSE_VALUE,
TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]);
} else {
return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z);
}
} else if (pToken->type == TK_INTEGER) {
iv = strtoll(pToken->z, NULL, 10);
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]);
} else if (pToken->type == TK_FLOAT) {
double dv = strtod(pToken->z, NULL);
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]);
} else {
return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z);
}
}
break;
}
case TSDB_DATA_TYPE_TINYINT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_TINYINT), TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z);
} else if (!IS_VALID_TINYINT(iv)) {
return tscInvalidOperationMsg(msg, "data overflow", pToken->z);
}
uint8_t tmpVal = (uint8_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TINYINT]);
}
break;
case TSDB_DATA_TYPE_UTINYINT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_UTINYINT), TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z);
} else if (!IS_VALID_UTINYINT(iv)) {
return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z);
}
uint8_t tmpVal = (uint8_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT]);
}
break;
case TSDB_DATA_TYPE_SMALLINT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_SMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z);
} else if (!IS_VALID_SMALLINT(iv)) {
return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z);
}
int16_t tmpVal = (int16_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT]);
}
break;
case TSDB_DATA_TYPE_USMALLINT:
if (isNullStr(pToken)) {
*sizeAppend =
tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_USMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z);
} else if (!IS_VALID_USMALLINT(iv)) {
return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z);
}
uint16_t tmpVal = (uint16_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT]);
}
break;
case TSDB_DATA_TYPE_INT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_INT), TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid int data", pToken->z);
} else if (!IS_VALID_INT(iv)) {
return tscInvalidOperationMsg(msg, "int data overflow", pToken->z);
}
int32_t tmpVal = (int32_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_INT]);
}
break;
case TSDB_DATA_TYPE_UINT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_UINT), TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z);
} else if (!IS_VALID_UINT(iv)) {
return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z);
}
uint32_t tmpVal = (uint32_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UINT]);
}
break;
case TSDB_DATA_TYPE_BIGINT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_BIGINT), TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z);
} else if (!IS_VALID_BIGINT(iv)) {
return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z);
}
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &iv,
TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BIGINT]);
}
break;
case TSDB_DATA_TYPE_UBIGINT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_UBIGINT), TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset);
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z);
} else if (!IS_VALID_UBIGINT((uint64_t)iv)) {
return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z);
}
uint64_t tmpVal = (uint64_t)iv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT]);
}
break;
case TSDB_DATA_TYPE_FLOAT:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_FLOAT), TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset);
} else {
double dv;
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
}
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) ||
isnan(dv)) {
return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
}
float tmpVal = (float)dv;
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_FLOAT]);
}
break;
case TSDB_DATA_TYPE_DOUBLE:
if (isNullStr(pToken)) {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_DOUBLE), TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset);
} else {
double dv;
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
}
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) {
return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
}
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &dv,
TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE]);
}
break;
case TSDB_DATA_TYPE_BINARY:
// binary data cannot be null-terminated char string, otherwise the last char of the string is lost
if (pToken->type == TK_NULL) {
payloadColSetId(payload, pSchema->colId);
payloadColSetType(payload, pSchema->type);
memcpy(POINTER_SHIFT(payloadStart, tOffset), getNullValue(TSDB_DATA_TYPE_BINARY), VARSTR_HEADER_SIZE + CHAR_BYTES);
*sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + CHAR_BYTES);
} else { // too long values will return invalid sql, not be truncated automatically
if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor
return tscInvalidOperationMsg(msg, "string data overflow", pToken->z);
}
// STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n);
payloadColSetId(payload, pSchema->colId);
payloadColSetType(payload, pSchema->type);
varDataSetLen(POINTER_SHIFT(payloadStart,tOffset), pToken->n);
memcpy(varDataVal(POINTER_SHIFT(payloadStart,tOffset)), pToken->z, pToken->n);
*sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + pToken->n);
*dataRowColDeltaLen += (TDRowLenT)(pToken->n - CHAR_BYTES);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + pToken->n);
}
break;
case TSDB_DATA_TYPE_NCHAR:
if (pToken->type == TK_NULL) {
payloadColSetId(payload, pSchema->colId);
payloadColSetType(payload, pSchema->type);
memcpy(POINTER_SHIFT(payloadStart,tOffset), getNullValue(TSDB_DATA_TYPE_NCHAR), VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
*sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
} else {
// if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long'
int32_t output = 0;
payloadColSetId(payload, pSchema->colId);
payloadColSetType(payload, pSchema->type);
if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(POINTER_SHIFT(payloadStart,tOffset)),
pSchema->bytes - VARSTR_HEADER_SIZE, &output)) {
char buf[512] = {0};
snprintf(buf, tListLen(buf), "%s", strerror(errno));
return tscInvalidOperationMsg(msg, buf, pToken->z);
}
varDataSetLen(POINTER_SHIFT(payloadStart,tOffset), output);
*sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + output);
*dataRowColDeltaLen += (TDRowLenT)(output - sizeof(uint32_t));
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + output);
}
break;
case TSDB_DATA_TYPE_TIMESTAMP: {
if (pToken->type == TK_NULL) {
if (primaryKey) {
// When building SKVRow primaryKey, we should not skip even with NULL value.
int64_t tmpVal = 0;
*sizeAppend = tsSetPayloadColValue(payloadStart, primaryKeyStart, pSchema->colId, pSchema->type, &tmpVal,
TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]);
} else {
*sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
getNullValue(TSDB_DATA_TYPE_TIMESTAMP),
TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset);
}
} else {
int64_t tmpVal;
if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z);
}
*sizeAppend = tsSetPayloadColValue(payloadStart, primaryKey ? primaryKeyStart : payload, pSchema->colId,
pSchema->type, &tmpVal, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset);
*kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]);
}
break;
}
}
return TSDB_CODE_SUCCESS;
}
/*
* The server time/client time should not be mixed up in one sql string
* Do not employ sort operation is not involved if server time is used.
@ -414,24 +773,38 @@ int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
return TSDB_CODE_SUCCESS;
}
int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len,
char *tmpTokenBuf, SInsertStatementParam* pInsertParam) {
int32_t index = 0;
SStrToken sToken = {0};
char *payload = pDataBlocks->pData + pDataBlocks->size;
int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, char *tmpTokenBuf,
SInsertStatementParam *pInsertParam) {
int32_t index = 0;
SStrToken sToken = {0};
SMemRowHelper *pHelper = &pDataBlocks->rowHelper;
char * payload = pDataBlocks->pData + pDataBlocks->size;
SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo;
SSchema *schema = tscGetTableSchema(pDataBlocks->pTableMeta);
SSchema * schema = tscGetTableSchema(pDataBlocks->pTableMeta);
TDRowTLenT dataRowLen = pHelper->allNullLen;
TDRowTLenT kvRowLen = TD_MEM_ROW_KV_VER_SIZE;
TDRowTLenT payloadValOffset = 0;
TDRowLenT colValOffset = 0;
ASSERT(dataRowLen > 0);
payloadSetNCols(payload, spd->numOfBound);
payloadValOffset = payloadValuesOffset(payload); // rely on payloadNCols
// payloadSetTLen(payload, payloadValOffset);
char *kvPrimaryKeyStart = payload + PAYLOAD_HEADER_LEN; // primaryKey in 1st column tuple
char *kvStart = kvPrimaryKeyStart + PAYLOAD_COL_HEAD_LEN; // the column tuple behind the primaryKey
// 1. set the parsed value from sql string
int32_t rowSize = 0;
for (int i = 0; i < spd->numOfBound; ++i) {
// the start position in data block buffer of current value in sql
int32_t colIndex = spd->boundedColumns[i];
char *start = payload + spd->cols[colIndex].offset;
SSchema *pSchema = &schema[colIndex];
rowSize += pSchema->bytes;
char *start = payload + spd->cols[colIndex].offset;
SSchema *pSchema = &schema[colIndex]; // get colId here
index = 0;
sToken = tStrGetToken(*str, &index, true);
@ -453,7 +826,8 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
int16_t type = sToken.type;
if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) {
type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) ||
(sToken.n == 0) || (type == TK_RP)) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, "invalid data or symbol", sToken.z);
}
@ -467,10 +841,10 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
if (sToken.n >= TSDB_MAX_BYTES_PER_ROW) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, "too long string", sToken.z);
}
for (uint32_t k = 1; k < sToken.n - 1; ++k) {
if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) {
tmpTokenBuf[j] = sToken.z[k + 1];
tmpTokenBuf[j] = sToken.z[k + 1];
cnt++;
j++;
@ -487,42 +861,54 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
sToken.n -= 2 + cnt;
}
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
int32_t ret = tsParseOneColumn(pSchema, &sToken, start, pInsertParam->msg, str, isPrimaryKey, timePrec);
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
TDRowLenT dataRowDeltaColLen = 0; // When combine the data as SDataRow, the delta len between all NULL columns.
TDRowLenT kvRowColLen = 0;
TDRowLenT colValAppended = 0;
if (!IS_DATA_COL_ORDERED(spd->orderStatus)) {
ASSERT(spd->colIdxInfo != NULL);
if(!isPrimaryKey) {
kvStart = POINTER_SHIFT(kvPrimaryKeyStart, spd->colIdxInfo[i].finalIdx * PAYLOAD_COL_HEAD_LEN);
} else {
ASSERT(spd->colIdxInfo[i].finalIdx == 0);
}
}
// the primary key locates in 1st column
int32_t ret = tsParseOneColumnKV(pSchema, &sToken, payload, kvPrimaryKeyStart, kvStart, pInsertParam->msg, str,
isPrimaryKey, timePrec, payloadValOffset + colValOffset, &colValAppended,
&dataRowDeltaColLen, &kvRowColLen);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z);
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
}
// 2. set the null value for the columns that do not assign values
if (spd->numOfBound < spd->numOfCols) {
char *ptr = payload;
for (int32_t i = 0; i < spd->numOfCols; ++i) {
if (!spd->cols[i].hasVal) { // current column do not have any value to insert, set it to null
if (schema[i].type == TSDB_DATA_TYPE_BINARY) {
varDataSetLen(ptr, sizeof(int8_t));
*(uint8_t*) varDataVal(ptr) = TSDB_DATA_BINARY_NULL;
} else if (schema[i].type == TSDB_DATA_TYPE_NCHAR) {
varDataSetLen(ptr, sizeof(int32_t));
*(uint32_t*) varDataVal(ptr) = TSDB_DATA_NCHAR_NULL;
} else {
setNull(ptr, schema[i].type, schema[i].bytes);
}
if (isPrimaryKey) {
if (tsCheckTimestamp(pDataBlocks, payloadValues(payload)) != TSDB_CODE_SUCCESS) {
tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z);
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
payloadColSetOffset(kvPrimaryKeyStart, colValOffset);
} else {
payloadColSetOffset(kvStart, colValOffset);
if (IS_DATA_COL_ORDERED(spd->orderStatus)) {
kvStart += PAYLOAD_COL_HEAD_LEN; // move to next column
}
ptr += schema[i].bytes;
}
rowSize = (int32_t)(ptr - payload);
colValOffset += colValAppended;
kvRowLen += kvRowColLen;
dataRowLen += dataRowDeltaColLen;
}
*len = rowSize;
if (kvRowLen < dataRowLen) {
payloadSetType(payload, SMEM_ROW_KV);
} else {
payloadSetType(payload, SMEM_ROW_DATA);
}
*len = (int32_t)(payloadValOffset + colValOffset);
payloadSetTLen(payload, *len);
return TSDB_CODE_SUCCESS;
}
@ -536,6 +922,27 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
return left > right ? 1 : -1;
}
}
int32_t schemaIdxCompar(const void *lhs, const void *rhs) {
uint16_t left = *(uint16_t *)lhs;
uint16_t right = *(uint16_t *)rhs;
if (left == right) {
return 0;
} else {
return left > right ? 1 : -1;
}
}
int32_t boundIdxCompar(const void *lhs, const void *rhs) {
uint16_t left = *(uint16_t *)POINTER_SHIFT(lhs, sizeof(uint16_t));
uint16_t right = *(uint16_t *)POINTER_SHIFT(rhs, sizeof(uint16_t));
if (left == right) {
return 0;
} else {
return left > right ? 1 : -1;
}
}
int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SInsertStatementParam *pInsertParam,
int32_t* numOfRows, char *tmpTokenBuf) {
@ -551,21 +958,26 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
int32_t precision = tinfo.precision;
int32_t extendedRowSize = getExtendedRowSize(&tinfo);
initSMemRowHelper(&pDataBlock->rowHelper, tscGetTableSchema(pDataBlock->pTableMeta),
tscGetNumOfColumns(pDataBlock->pTableMeta), 0);
while (1) {
index = 0;
sToken = tStrGetToken(*str, &index, false);
if (sToken.n == 0 || sToken.type != TK_LP) break;
*str += index;
if ((*numOfRows) >= maxRows || pDataBlock->size + tinfo.rowSize >= pDataBlock->nAllocSize) {
if ((*numOfRows) >= maxRows || pDataBlock->size + extendedRowSize >= pDataBlock->nAllocSize) {
int32_t tSize;
code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
code = tscAllocateMemIfNeed(pDataBlock, extendedRowSize, &tSize);
if (code != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
strcpy(pInsertParam->msg, "client out of memory");
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
ASSERT(tSize > maxRows);
ASSERT(tSize >= maxRows);
maxRows = tSize;
}
@ -601,9 +1013,10 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) {
pColInfo->numOfCols = numOfCols;
pColInfo->numOfBound = numOfCols;
pColInfo->orderStatus = ORDER_STATUS_ORDERED;
pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t));
pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn));
pColInfo->colIdxInfo = NULL;
for (int32_t i = 0; i < pColInfo->numOfCols; ++i) {
if (i > 0) {
@ -643,7 +1056,7 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int3
return TSDB_CODE_SUCCESS;
}
static int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows) {
int32_t FORCE_INLINE tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows) {
pBlocks->tid = pTableMeta->id.tid;
pBlocks->uid = pTableMeta->id.uid;
pBlocks->sversion = pTableMeta->sversion;
@ -657,7 +1070,7 @@ static int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta,
}
// data block is disordered, sort it in ascending order
void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks *dataBuf) {
SSubmitBlk *pBlocks = (SSubmitBlk *)dataBuf->pData;
// size is less than the total size, since duplicated rows may be removed yet.
@ -701,11 +1114,89 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
dataBuf->prevTS = INT64_MIN;
}
// data block is disordered, sort it in ascending order
int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlkKeyInfo) {
SSubmitBlk *pBlocks = (SSubmitBlk *)dataBuf->pData;
int16_t nRows = pBlocks->numOfRows;
// size is less than the total size, since duplicated rows may be removed yet.
// if use server time, this block must be ordered
if (dataBuf->tsSource == TSDB_USE_SERVER_TS) {
assert(dataBuf->ordered);
}
// allocate memory
size_t nAlloc = nRows * sizeof(SBlockKeyTuple);
if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) {
size_t nRealAlloc = nAlloc + 10 * sizeof(SBlockKeyTuple);
char * tmp = trealloc(pBlkKeyInfo->pKeyTuple, nRealAlloc);
if (tmp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp;
pBlkKeyInfo->maxBytesAlloc = (int32_t)nRealAlloc;
}
memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc);
SBlockKeyTuple *pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
char * pBlockData = pBlocks->data;
TDRowTLenT totolPayloadTLen = 0;
TDRowTLenT payloadTLen = 0;
int n = 0;
while (n < nRows) {
pBlkKeyTuple->skey = payloadTSKey(pBlockData);
pBlkKeyTuple->payloadAddr = pBlockData;
payloadTLen = payloadTLen(pBlockData);
#if 0
ASSERT(payloadNCols(pBlockData) <= 4096);
ASSERT(payloadTLen(pBlockData) < 65536);
#endif
totolPayloadTLen += payloadTLen;
// next loop
pBlockData += payloadTLen;
++pBlkKeyTuple;
++n;
}
if (!dataBuf->ordered) {
pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
qsort(pBlkKeyTuple, nRows, sizeof(SBlockKeyTuple), rowDataCompar);
pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
int32_t i = 0;
int32_t j = 1;
while (j < nRows) {
TSKEY ti = (pBlkKeyTuple + i)->skey;
TSKEY tj = (pBlkKeyTuple + j)->skey;
if (ti == tj) {
totolPayloadTLen -= payloadTLen(pBlkKeyTuple + j);
++j;
continue;
}
int32_t nextPos = (++i);
if (nextPos != j) {
memmove(pBlkKeyTuple + nextPos, pBlkKeyTuple + j, sizeof(SBlockKeyTuple));
}
++j;
}
dataBuf->ordered = true;
pBlocks->numOfRows = i + 1;
}
dataBuf->size = sizeof(SSubmitBlk) + totolPayloadTLen;
dataBuf->prevTS = INT64_MIN;
return 0;
}
static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta);
int32_t maxNumOfRows;
int32_t code = tscAllocateMemIfNeed(dataBuf, tinfo.rowSize, &maxNumOfRows);
int32_t code = tscAllocateMemIfNeed(dataBuf, getExtendedRowSize(&tinfo), &maxNumOfRows);
if (TSDB_CODE_SUCCESS != code) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@ -991,7 +1482,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
code = tscGetTableMetaEx(pSql, pTableMetaInfo, true);
code = tscGetTableMetaEx(pSql, pTableMetaInfo, true, false);
if (TSDB_CODE_TSC_ACTION_IN_PROGRESS == code) {
return code;
}
@ -1002,7 +1493,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
}
sql = sToken.z;
code = tscGetTableMetaEx(pSql, pTableMetaInfo, false);
code = tscGetTableMetaEx(pSql, pTableMetaInfo, false, false);
if (pInsertParam->sql == NULL) {
assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS);
}
@ -1038,10 +1529,11 @@ static int32_t validateDataSource(SInsertStatementParam *pInsertParam, int32_t t
static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo* pColInfo, SSchema* pSchema,
char* str, char **end) {
pColInfo->numOfBound = 0;
int32_t nCols = pColInfo->numOfCols;
memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * pColInfo->numOfCols);
for(int32_t i = 0; i < pColInfo->numOfCols; ++i) {
pColInfo->numOfBound = 0;
memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * nCols);
for (int32_t i = 0; i < nCols; ++i) {
pColInfo->cols[i].hasVal = false;
}
@ -1056,6 +1548,8 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
goto _clean;
}
bool isOrdered = true;
int32_t lastColIdx = -1; // last column found
while (1) {
index = 0;
sToken = tStrGetToken(str, &index, false);
@ -1076,7 +1570,8 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
bool findColumnIndex = false;
// todo speedup by using hash list
for (int32_t t = 0; t < pColInfo->numOfCols; ++t) {
int32_t nScanned = 0, t = lastColIdx + 1;
while (t < nCols) {
if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
if (pColInfo->cols[t].hasVal == true) {
code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z);
@ -1085,10 +1580,39 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
pColInfo->cols[t].hasVal = true;
pColInfo->boundedColumns[pColInfo->numOfBound] = t;
pColInfo->numOfBound += 1;
++pColInfo->numOfBound;
findColumnIndex = true;
if (isOrdered && (lastColIdx > t)) {
isOrdered = false;
}
lastColIdx = t;
break;
}
++t;
++nScanned;
}
if (!findColumnIndex) {
t = 0;
int32_t nRemain = nCols - nScanned;
while (t < nRemain) {
if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
if (pColInfo->cols[t].hasVal == true) {
code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z);
goto _clean;
}
pColInfo->cols[t].hasVal = true;
pColInfo->boundedColumns[pColInfo->numOfBound] = t;
++pColInfo->numOfBound;
findColumnIndex = true;
if (isOrdered && (lastColIdx > t)) {
isOrdered = false;
}
lastColIdx = t;
break;
}
++t;
}
}
if (!findColumnIndex) {
@ -1097,10 +1621,32 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
}
}
memset(&pColInfo->boundedColumns[pColInfo->numOfBound], 0 , sizeof(int32_t) * (pColInfo->numOfCols - pColInfo->numOfBound));
pColInfo->orderStatus = isOrdered ? ORDER_STATUS_ORDERED : ORDER_STATUS_DISORDERED;
if (!isOrdered) {
pColInfo->colIdxInfo = tcalloc(pColInfo->numOfBound, sizeof(SBoundIdxInfo));
if (pColInfo->colIdxInfo == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _clean;
}
SBoundIdxInfo *pColIdx = pColInfo->colIdxInfo;
for (uint16_t i = 0; i < pColInfo->numOfBound; ++i) {
pColIdx[i].schemaColIdx = (uint16_t)pColInfo->boundedColumns[i];
pColIdx[i].boundIdx = i;
}
qsort(pColIdx, pColInfo->numOfBound, sizeof(SBoundIdxInfo), schemaIdxCompar);
for (uint16_t i = 0; i < pColInfo->numOfBound; ++i) {
pColIdx[i].finalIdx = i;
}
qsort(pColIdx, pColInfo->numOfBound, sizeof(SBoundIdxInfo), boundIdxCompar);
}
memset(&pColInfo->boundedColumns[pColInfo->numOfBound], 0,
sizeof(int32_t) * (pColInfo->numOfCols - pColInfo->numOfBound));
return TSDB_CODE_SUCCESS;
_clean:
_clean:
pInsertParam->sql = NULL;
return code;
}
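/*
 * A note on the out-of-order case above: colIdxInfo is built with a
 * sort/assign/sort-back trick so that, for every bound column in the order the
 * user listed it, finalIdx gives its rank among the bound columns when
 * re-ordered by schema position. A minimal sketch of the same idea with
 * hypothetical names (bySchema/byBound stand in for schemaIdxCompar and
 * boundIdxCompar, whose bodies are not shown in this diff):
 */
#if 0
typedef struct { uint16_t schemaColIdx, boundIdx, finalIdx; } IdxInfo;

static int bySchema(const void *a, const void *b) {
  return ((const IdxInfo *)a)->schemaColIdx - ((const IdxInfo *)b)->schemaColIdx;
}
static int byBound(const void *a, const void *b) {
  return ((const IdxInfo *)a)->boundIdx - ((const IdxInfo *)b)->boundIdx;
}

static void buildFinalIdx(IdxInfo *p, int n) {
  qsort(p, n, sizeof(IdxInfo), bySchema);          // order by schema position
  for (int i = 0; i < n; ++i) p[i].finalIdx = i;   // rank in schema order
  qsort(p, n, sizeof(IdxInfo), byBound);           // restore the user's bind order
}
#endif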
@ -1358,7 +1904,6 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
int tsParseSql(SSqlObj *pSql, bool initial) {
int32_t ret = TSDB_CODE_SUCCESS;
SSqlCmd* pCmd = &pSql->cmd;
if (!initial) {
tscDebug("0x%"PRIx64" resume to parse sql: %s", pSql->self, pCmd->insertParam.sql);
}
@ -1494,21 +2039,24 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
}
STableDataBlocks *pTableDataBlock = NULL;
int32_t ret =
tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
int32_t ret = tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta,
&pTableDataBlock, NULL);
if (ret != TSDB_CODE_SUCCESS) {
pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
tscAllocateMemIfNeed(pTableDataBlock, tinfo.rowSize, &maxRows);
tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(&tinfo), &maxRows);
tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW);
if (tokenBuf == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
initSMemRowHelper(&pTableDataBlock->rowHelper, tscGetTableSchema(pTableDataBlock->pTableMeta),
tscGetNumOfColumns(pTableDataBlock->pTableMeta), 0);
while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
line[--readLen] = 0;

File diff suppressed because it is too large

View File

@ -47,6 +47,7 @@ typedef struct SNormalStmt {
typedef struct SMultiTbStmt {
bool nameSet;
bool tagSet;
bool subSet;
uint64_t currentUid;
char *sqlstr;
uint32_t tbNum;
@ -54,6 +55,7 @@ typedef struct SMultiTbStmt {
SStrToken stbname;
SStrToken values;
SArray *tags;
STableDataBlocks *lastBlock;
SHashObj *pTableHash;
SHashObj *pTableBlockHashList; // data block for each table
} SMultiTbStmt;
@ -347,11 +349,11 @@ int32_t fillTablesColumnsNull(SSqlObj* pSql) {
////////////////////////////////////////////////////////////////////////////////
// functions for insertion statement preparation
static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) {
if (bind->is_null != NULL && *(bind->is_null)) {
setNull(data + param->offset, param->type, param->bytes);
return TSDB_CODE_SUCCESS;
}
static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) {
if (bind->is_null != NULL && *(bind->is_null)) {
setNull(data + param->offset, param->type, param->bytes);
return TSDB_CODE_SUCCESS;
}
#if 0
if (0) {
@ -746,25 +748,25 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param,
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_UTINYINT:
size = 1;
*(uint8_t *)(data + param->offset) = *(uint8_t *)bind->buffer;
break;
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT:
size = 2;
*(uint16_t *)(data + param->offset) = *(uint16_t *)bind->buffer;
break;
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_FLOAT:
size = 4;
*(uint32_t *)(data + param->offset) = *(uint32_t *)bind->buffer;
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
size = 8;
*(uint64_t *)(data + param->offset) = *(uint64_t *)bind->buffer;
break;
case TSDB_DATA_TYPE_BINARY:
@ -790,7 +792,6 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param,
return TSDB_CODE_TSC_INVALID_VALUE;
}
memcpy(data + param->offset, bind->buffer, size);
if (param->offset == 0) {
if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
tscError("invalid timestamp");
@ -801,6 +802,58 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param,
return TSDB_CODE_SUCCESS;
}
static int32_t insertStmtGenLastBlock(STableDataBlocks** lastBlock, STableDataBlocks* pBlock) {
*lastBlock = (STableDataBlocks*)malloc(sizeof(STableDataBlocks));
memcpy(*lastBlock, pBlock, sizeof(STableDataBlocks));
(*lastBlock)->cloned = true;
(*lastBlock)->pData = NULL;
(*lastBlock)->ordered = true;
(*lastBlock)->prevTS = INT64_MIN;
(*lastBlock)->size = sizeof(SSubmitBlk);
(*lastBlock)->tsSource = -1;
return TSDB_CODE_SUCCESS;
}
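/*
 * Note: the cloned flag set above pairs with the tscDestroyDataBlock() change
 * in this changeset; shared members (params, pTableMeta, bound-column info)
 * are freed only for non-cloned blocks, so this template can be copied per
 * table without double frees.
 */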
static int32_t insertStmtGenBlock(STscStmt* pStmt, STableDataBlocks** pBlock, STableMeta* pTableMeta, SName* name) {
int32_t code = 0;
if (pStmt->mtb.lastBlock == NULL) {
tscError("no previous data block");
return TSDB_CODE_TSC_APP_ERROR;
}
int32_t msize = tscGetTableMetaSize(pTableMeta);
int32_t tsize = sizeof(STableDataBlocks) + msize;
void *t = malloc(tsize);
*pBlock = t;
memcpy(*pBlock, pStmt->mtb.lastBlock, sizeof(STableDataBlocks));
t = (char *)t + sizeof(STableDataBlocks);
(*pBlock)->pTableMeta = t;
memcpy((*pBlock)->pTableMeta, pTableMeta, msize);
(*pBlock)->pData = malloc((*pBlock)->nAllocSize);
(*pBlock)->vgId = (*pBlock)->pTableMeta->vgId;
tNameAssign(&(*pBlock)->tableName, name);
SSubmitBlk* blk = (SSubmitBlk*)(*pBlock)->pData;
memset(blk, 0, sizeof(*blk));
code = tsSetBlockInfo(blk, pTableMeta, 0);
if (code != TSDB_CODE_SUCCESS) {
STMT_RET(code);
}
return TSDB_CODE_SUCCESS;
}
static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) {
if (bind->buffer_type != param->type || !isValidDataType(param->type)) {
@ -1172,7 +1225,7 @@ static void insertBatchClean(STscStmt* pStmt) {
static int insertBatchStmtExecute(STscStmt* pStmt) {
int32_t code = 0;
if(pStmt->mtb.nameSet == false) {
tscError("0x%"PRIx64" no table name set", pStmt->pSql->self);
return invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "no table name set");
@ -1227,11 +1280,11 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
pStmt->mtb.tbname = sToken;
pStmt->mtb.nameSet = false;
if (pStmt->mtb.pTableHash == NULL) {
pStmt->mtb.pTableHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
pStmt->mtb.pTableHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
}
if (pStmt->mtb.pTableBlockHashList == NULL) {
pStmt->mtb.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
pStmt->mtb.pTableBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
}
pStmt->mtb.tagSet = true;
@ -1522,6 +1575,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags) {
STscStmt* pStmt = (STscStmt*)stmt;
int32_t code = 0;
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
@ -1559,6 +1613,11 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData;
pCmd->batchSize = pBlk->numOfRows;
if (pBlk->numOfRows == 0) {
(*t1)->prevTS = INT64_MIN;
}
tsSetBlockInfo(pBlk, (*t1)->pTableMeta, pBlk->numOfRows);
taosHashPut(pCmd->insertParam.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)t1, POINTER_BYTES);
@ -1566,6 +1625,51 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
STMT_RET(TSDB_CODE_SUCCESS);
}
if (pStmt->mtb.subSet && taosHashGetSize(pStmt->mtb.pTableHash) > 0) {
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
char sTableName[TSDB_TABLE_FNAME_LEN];
strncpy(sTableName, pTableMeta->sTableName, sizeof(sTableName));
SStrToken tname = {0};
tname.type = TK_STRING;
tname.z = (char *)name;
tname.n = (uint32_t)strlen(name);
SName fullname = {0};
tscSetTableFullName(&fullname, &tname, pSql);
memcpy(&pTableMetaInfo->name, &fullname, sizeof(fullname));
code = tscGetTableMetaEx(pSql, pTableMetaInfo, false, true);
if (code != TSDB_CODE_SUCCESS) {
STMT_RET(code);
}
pTableMeta = pTableMetaInfo->pTableMeta;
if (strcmp(sTableName, pTableMeta->sTableName)) {
tscError("0x%"PRIx64" only tables belongs to one stable is allowed", pSql->self);
STMT_RET(TSDB_CODE_TSC_APP_ERROR);
}
STableDataBlocks* pBlock = NULL;
insertStmtGenBlock(pStmt, &pBlock, pTableMeta, &pTableMetaInfo->name);
pCmd->batchSize = 0;
pStmt->mtb.currentUid = pTableMeta->id.uid;
pStmt->mtb.tbNum++;
taosHashPut(pCmd->insertParam.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid));
tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid);
STMT_RET(TSDB_CODE_SUCCESS);
}
if (pStmt->mtb.tagSet) {
pStmt->mtb.tbname = tscReplaceStrToken(&pSql->sqlstr, &pStmt->mtb.tbname, name);
} else {
@ -1594,7 +1698,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
pCmd->insertParam.pTableBlockHashList = hashList;
}
int32_t code = tsParseSql(pStmt->pSql, true);
code = tsParseSql(pStmt->pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
// wait for the callback function to post the semaphore
tsem_wait(&pStmt->pSql->rspSem);
@ -1622,6 +1726,10 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid));
if (pStmt->mtb.lastBlock == NULL) {
insertStmtGenLastBlock(&pStmt->mtb.lastBlock, pBlock);
}
tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid);
}
@ -1629,7 +1737,17 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
}
int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name) {
STscStmt* pStmt = (STscStmt*)stmt;
pStmt->mtb.subSet = true;
return taos_stmt_set_tbname_tags(stmt, name, NULL);
}
int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) {
STscStmt* pStmt = (STscStmt*)stmt;
pStmt->mtb.subSet = false;
return taos_stmt_set_tbname_tags(stmt, name, NULL);
}
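/*
 * A minimal usage sketch of the bind flow these entry points enable, assuming
 * an already-connected TAOS *taos handle and a subtable "d1001" with a
 * timestamp column plus one value column (names and SQL are illustrative;
 * error handling omitted):
 */
#if 0
static void exampleBind(TAOS *taos, TAOS_BIND *params) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  taos_stmt_prepare(stmt, "insert into ? values(?, ?)", 0);
  taos_stmt_set_tbname(stmt, "d1001");   // or taos_stmt_set_sub_tbname() for a subtable of a known super table
  taos_stmt_bind_param(stmt, params);    // params: TAOS_BIND array, one entry per column
  taos_stmt_add_batch(stmt);
  taos_stmt_execute(stmt);
  taos_stmt_close(stmt);
}
#endif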
@ -1653,6 +1771,7 @@ int taos_stmt_close(TAOS_STMT* stmt) {
if (pStmt->pSql && pStmt->pSql->res.code != 0) {
rmMeta = true;
}
tscDestroyDataBlock(pStmt->mtb.lastBlock, rmMeta);
pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, rmMeta);
taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList);
pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL;
@ -1687,6 +1806,8 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
pStmt->last = STMT_BIND;
tscDebug("tableId:%" PRIu64 ", try to bind one row", pStmt->mtb.currentUid);
STMT_RET(insertStmtBindParam(pStmt, bind));
} else {
STMT_RET(normalStmtBindParam(pStmt, bind));
@ -1806,6 +1927,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) {
pStmt->last = STMT_EXECUTE;
pStmt->pSql->cmd.insertParam.payloadType = PAYLOAD_TYPE_RAW;
if (pStmt->multiTbInsert) {
ret = insertBatchStmtExecute(pStmt);
} else {

View File

@ -16,6 +16,7 @@
#include "os.h"
#include "tscLog.h"
#include "tsclient.h"
#include "tsocket.h"
#include "ttimer.h"
#include "tutil.h"
#include "taosmsg.h"
@ -228,7 +229,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
SHeartBeatMsg *pHeartbeat = pMsg;
int allocedQueriesNum = pHeartbeat->numOfQueries;
int allocedStreamsNum = pHeartbeat->numOfStreams;
pHeartbeat->numOfQueries = 0;
SQueryDesc *pQdesc = (SQueryDesc *)pHeartbeat->pData;
@ -252,6 +253,16 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
//pQdesc->useconds = htobe64(pSql->res.useconds);
pQdesc->useconds = htobe64(now - pSql->stime);
pQdesc->qId = htobe64(pSql->res.qId);
pQdesc->sqlObjId = htobe64(pSql->self);
pQdesc->pid = pHeartbeat->pid;
if (pSql->cmd.pQueryInfo->stableQuery == true) {
pQdesc->numOfSub = pSql->subState.numOfSub;
} else {
pQdesc->numOfSub = 1;
}
pQdesc->numOfSub = htonl(pQdesc->numOfSub);
taosGetFqdn(pQdesc->fqdn);
pHeartbeat->numOfQueries++;
pQdesc++;

File diff suppressed because it is too large

View File

@ -13,7 +13,10 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <tscompression.h>
#include "os.h"
#include "qPlan.h"
#include "qTableMeta.h"
#include "tcmdtype.h"
#include "tlockfree.h"
#include "trpc.h"
@ -21,10 +24,8 @@
#include "tscLog.h"
#include "tscProfile.h"
#include "tscUtil.h"
#include "qTableMeta.h"
#include "tsclient.h"
#include "ttimer.h"
#include "qPlan.h"
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
@ -502,6 +503,7 @@ int doBuildAndSendMsg(SSqlObj *pSql) {
pCmd->command == TSDB_SQL_INSERT ||
pCmd->command == TSDB_SQL_CONNECT ||
pCmd->command == TSDB_SQL_HB ||
pCmd->command == TSDB_SQL_RETRIEVE_FUNC ||
pCmd->command == TSDB_SQL_STABLEVGROUP) {
pRes->code = tscBuildMsg[pCmd->command](pSql, NULL);
}
@ -544,7 +546,7 @@ int tscBuildAndSendRequest(SSqlObj *pSql, SQueryInfo* pQueryInfo) {
type = pQueryInfo->type;
// when numOfTables equals 0, it must be a heartbeat
assert((pQueryInfo->numOfTables == 0 && pQueryInfo->command == TSDB_SQL_HB) || pQueryInfo->numOfTables > 0);
assert((pQueryInfo->numOfTables == 0 && (pQueryInfo->command == TSDB_SQL_HB || pSql->cmd.command == TSDB_SQL_RETRIEVE_FUNC)) || pQueryInfo->numOfTables > 0);
}
tscDebug("0x%"PRIx64" SQL cmd:%s will be processed, name:%s, type:%d", pSql->self, sqlCmd[pCmd->command], name, type);
@ -1042,6 +1044,34 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += sizeof(int32_t);
}
// support only one udf
if (pQueryInfo->pUdfInfo != NULL && taosArrayGetSize(pQueryInfo->pUdfInfo) > 0) {
pQueryMsg->udfContentOffset = htonl((int32_t) (pMsg - pCmd->payload));
for(int32_t i = 0; i < taosArrayGetSize(pQueryInfo->pUdfInfo); ++i) {
SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, i);
*(int8_t*) pMsg = pUdfInfo->resType;
pMsg += sizeof(pUdfInfo->resType);
*(int16_t*) pMsg = htons(pUdfInfo->resBytes);
pMsg += sizeof(pUdfInfo->resBytes);
STR_TO_VARSTR(pMsg, pUdfInfo->name);
pMsg += varDataTLen(pMsg);
*(int32_t*) pMsg = htonl(pUdfInfo->funcType);
pMsg += sizeof(pUdfInfo->funcType);
*(int32_t*) pMsg = htonl(pUdfInfo->bufSize);
pMsg += sizeof(pUdfInfo->bufSize);
pQueryMsg->udfContentLen = htonl(pUdfInfo->contLen);
memcpy(pMsg, pUdfInfo->content, pUdfInfo->contLen);
pMsg += pUdfInfo->contLen;
}
}
memcpy(pMsg, pSql->sqlstr, sqlLen);
pMsg += sqlLen;
@ -1077,6 +1107,18 @@ int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildCreateFuncMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
SCreateFuncMsg *pCreateFuncMsg = (SCreateFuncMsg *)pCmd->payload;
pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_FUNCTION;
pCmd->payloadLen = sizeof(SCreateFuncMsg) + htonl(pCreateFuncMsg->codeLen);
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCreateDnodeMsg);
@ -1201,6 +1243,17 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropFuncMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_FUNCTION;
pCmd->payloadLen = sizeof(SDropFuncMsg);
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMDropTableMsg);
@ -1304,11 +1357,19 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SShowMsg *pShowMsg = (SShowMsg *)pCmd->payload;
SShowInfo *pShowInfo = &pInfo->pMiscInfo->showOpt;
SShowMsg *pShowMsg = (SShowMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
if (pShowInfo->showType == TSDB_MGMT_TABLE_FUNCTION) {
pShowMsg->type = pShowInfo->showType;
pShowMsg->payloadLen = 0;
pCmd->payloadLen = sizeof(SShowMsg);
if (tNameIsEmpty(&pTableMetaInfo->name)) {
return TSDB_CODE_SUCCESS;
}
if (tNameIsEmpty(&pTableMetaInfo->name)) {
pthread_mutex_lock(&pObj->mutex);
tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
pthread_mutex_unlock(&pObj->mutex);
@ -1316,7 +1377,6 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
tNameGetFullDbName(&pTableMetaInfo->name, pShowMsg->db);
}
SShowInfo *pShowInfo = &pInfo->pMiscInfo->showOpt;
pShowMsg->type = pShowInfo->showType;
if (pShowInfo->showType != TSDB_MGMT_TABLE_VNODES) {
@ -1829,6 +1889,29 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
int tscBuildRetrieveFuncMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
char *pMsg = pCmd->payload;
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
int32_t numOfFuncs = (int32_t)taosArrayGetSize(pQueryInfo->pUdfInfo);
SRetrieveFuncMsg *pRetrieveFuncMsg = (SRetrieveFuncMsg *)pMsg;
pRetrieveFuncMsg->num = htonl(numOfFuncs);
pMsg += sizeof(SRetrieveFuncMsg);
for(int32_t i = 0; i < numOfFuncs; ++i) {
SUdfInfo* pUdf = taosArrayGet(pQueryInfo->pUdfInfo, i);
STR_TO_NET_VARSTR(pMsg, pUdf->name);
pMsg += varDataNetTLen(pMsg);
}
pCmd->msgType = TSDB_MSG_TYPE_CM_RETRIEVE_FUNC;
pCmd->payloadLen = (int32_t)(pMsg - pCmd->payload);
return TSDB_CODE_SUCCESS;
}
int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
STscObj *pObj = pSql->pTscObj;
@ -1883,7 +1966,6 @@ static int32_t tableMetaMsgConvert(STableMetaMsg* pMetaMsg) {
pMetaMsg->vgroup.vgId = htonl(pMetaMsg->vgroup.vgId);
pMetaMsg->uid = htobe64(pMetaMsg->uid);
// pMetaMsg->contLen = htonl(pMetaMsg->contLen);
pMetaMsg->numOfColumns = htons(pMetaMsg->numOfColumns);
if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) &&
@ -1948,13 +2030,14 @@ static void doAddTableMetaToLocalBuf(STableMeta* pTableMeta, STableMetaMsg* pMet
int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
// The super tableMeta already exists, create it according to tableMeta and add it to hash map
STableMeta* pSupTableMeta = createSuperTableMeta(pMetaMsg);
if (updateSTable) {
STableMeta* pSupTableMeta = createSuperTableMeta(pMetaMsg);
uint32_t size = tscGetTableMetaSize(pSupTableMeta);
int32_t code = taosHashPut(tscTableMetaInfo, pTableMeta->sTableName, len, pSupTableMeta, size);
assert(code == TSDB_CODE_SUCCESS);
uint32_t size = tscGetTableMetaSize(pSupTableMeta);
int32_t code = taosHashPut(tscTableMetaInfo, pTableMeta->sTableName, len, pSupTableMeta, size);
assert(code == TSDB_CODE_SUCCESS);
tfree(pSupTableMeta);
tfree(pSupTableMeta);
}
CChildTableMeta* cMeta = tscCreateChildMeta(pTableMeta);
taosHashPut(tscTableMetaInfo, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), cMeta, sizeof(CChildTableMeta));
@ -2044,12 +2127,64 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t
return pVgroupInfo;
}
int tscProcessRetrieveFuncRsp(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SUdfFuncMsg* pFuncMsg = (SUdfFuncMsg *)pSql->res.pRsp;
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
pFuncMsg->num = htonl(pFuncMsg->num);
assert(pFuncMsg->num == taosArrayGetSize(pQueryInfo->pUdfInfo));
char* pMsg = pFuncMsg->content;
for(int32_t i = 0; i < pFuncMsg->num; ++i) {
SFunctionInfoMsg* pFunc = (SFunctionInfoMsg*) pMsg;
for(int32_t j = 0; j < pFuncMsg->num; ++j) {
SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, j);
if (strcmp(pUdfInfo->name, pFunc->name) != 0) {
continue;
}
if (pUdfInfo->content) {
continue;
}
pUdfInfo->resBytes = htons(pFunc->resBytes);
pUdfInfo->resType = pFunc->resType;
pUdfInfo->funcType = htonl(pFunc->funcType);
pUdfInfo->contLen = htonl(pFunc->len);
pUdfInfo->bufSize = htonl(pFunc->bufSize);
pUdfInfo->content = malloc(pUdfInfo->contLen);
memcpy(pUdfInfo->content, pFunc->content, pUdfInfo->contLen);
pMsg += sizeof(SFunctionInfoMsg) + pUdfInfo->contLen;
}
}
// master sqlObj locates in param
SSqlObj* parent = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)pSql->param);
if(parent == NULL) {
return pSql->res.code;
}
SQueryInfo* parQueryInfo = tscGetQueryInfo(&parent->cmd);
assert(parent->signature == parent && (int64_t)pSql->param == parent->self);
taosArrayDestroy(parQueryInfo->pUdfInfo);
parQueryInfo->pUdfInfo = pQueryInfo->pUdfInfo; // assigned to parent sql obj.
pQueryInfo->pUdfInfo = NULL;
return TSDB_CODE_SUCCESS;
}
int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
char *rsp = pSql->res.pRsp;
SMultiTableMeta *pMultiMeta = (SMultiTableMeta *)rsp;
pMultiMeta->numOfTables = htonl(pMultiMeta->numOfTables);
pMultiMeta->numOfVgroup = htonl(pMultiMeta->numOfVgroup);
pMultiMeta->numOfUdf = htonl(pMultiMeta->numOfUdf);
rsp += sizeof(SMultiTableMeta);
@ -2059,46 +2194,66 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
}
SSqlCmd *pParentCmd = &pParentSql->cmd;
SHashObj *pSet = taosHashInit(pMultiMeta->numOfVgroup, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
SHashObj *pSet = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
char* buf = NULL;
char* pMsg = pMultiMeta->meta;
if (pMultiMeta->compressed) {
buf = malloc(pMultiMeta->rawLen - sizeof(SMultiTableMeta));
int32_t len = tsDecompressString(pMultiMeta->meta, pMultiMeta->contLen - sizeof(SMultiTableMeta), 1,
buf, pMultiMeta->rawLen - sizeof(SMultiTableMeta), ONE_STAGE_COMP, NULL, 0);
assert(len == pMultiMeta->rawLen - sizeof(SMultiTableMeta));
pMsg = buf;
}
for (int32_t i = 0; i < pMultiMeta->numOfTables; i++) {
STableMetaMsg *pMetaMsg = (STableMetaMsg *)pMsg;
int32_t code = tableMetaMsgConvert(pMetaMsg);
if (code != TSDB_CODE_SUCCESS) {
taosHashCleanup(pSet);
taosReleaseRef(tscObjRef, pParentSql->self);
tfree(buf);
return code;
}
bool freeMeta = false;
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, pMetaMsg->tableFname);
tfree(pTableMeta);
taosHashCleanup(pSet);
taosReleaseRef(tscObjRef, pParentSql->self);
tfree(buf);
return TSDB_CODE_TSC_INVALID_VALUE;
}
SName sn = {0};
tNameFromString(&sn, pMetaMsg->tableFname, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
const char* tableName = tNameGetTableName(&sn);
size_t keyLen = strlen(tableName);
if (pMultiMeta->metaClone == 1 || pTableMeta->tableType == TSDB_SUPER_TABLE) {
STableMetaVgroupInfo p = {.pTableMeta = pTableMeta,};
STableMetaVgroupInfo p = {.pTableMeta = pTableMeta,};
taosHashPut(pParentCmd->pTableMetaMap, tableName, keyLen, &p, sizeof(STableMetaVgroupInfo));
const char* tableName = tNameGetTableName(&sn);
size_t keyLen = strlen(tableName);
taosHashPut(pParentCmd->pTableMetaMap, tableName, keyLen, &p, sizeof(STableMetaVgroupInfo));
} else {
freeMeta = true;
}
bool addToBuf = false;
if (taosHashGet(pSet, &pMetaMsg->uid, sizeof(pMetaMsg->uid)) == NULL) {
addToBuf = true;
taosHashPut(pSet, &pMetaMsg->uid, sizeof(pMetaMsg->uid), "", 0);
// for each super table, only update meta information once
bool updateStableMeta = false;
if (pTableMeta->tableType == TSDB_CHILD_TABLE && taosHashGet(pSet, &pMetaMsg->suid, sizeof(pMetaMsg->suid)) == NULL) {
updateStableMeta = true;
taosHashPut(pSet, &pTableMeta->suid, sizeof(pMetaMsg->suid), "", 0);
}
// create the tableMeta and add it into the TableMeta map
doAddTableMetaToLocalBuf(pTableMeta, pMetaMsg, addToBuf);
doAddTableMetaToLocalBuf(pTableMeta, pMetaMsg, updateStableMeta);
// if the vgroup is not updated in current process, update it.
// for each vgroup, only update the information once.
int64_t vgId = pMetaMsg->vgroup.vgId;
if (pTableMeta->tableType != TSDB_SUPER_TABLE && taosHashGet(pSet, &vgId, sizeof(vgId)) == NULL) {
doUpdateVgroupInfo(pTableMeta, &pMetaMsg->vgroup);
@ -2106,6 +2261,9 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
}
pMsg += pMetaMsg->contLen;
if (freeMeta) {
tfree(pTableMeta);
}
}
for(int32_t i = 0; i < pMultiMeta->numOfVgroup; ++i) {
@ -2116,16 +2274,53 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
assert(p != NULL);
int32_t size = 0;
if (p->pVgroupInfo!= NULL) {
tscVgroupInfoClear(p->pVgroupInfo);
//tfree(p->pTableMeta);
}
p->pVgroupInfo = createVgroupInfoFromMsg(pMsg, &size, pSql->self);
pMsg += size;
}
SQueryInfo* pQueryInfo = tscGetQueryInfo(pParentCmd);
if (pMultiMeta->numOfUdf > 0) {
assert(pQueryInfo->pUdfInfo != NULL);
}
for(int32_t i = 0; i < pMultiMeta->numOfUdf; ++i) {
SFunctionInfoMsg* pFunc = (SFunctionInfoMsg*) pMsg;
for(int32_t j = 0; j < pMultiMeta->numOfUdf; ++j) {
SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, j);
if (strcmp(pUdfInfo->name, pFunc->name) != 0) {
continue;
}
if (pUdfInfo->content) {
continue;
}
pUdfInfo->resBytes = htons(pFunc->resBytes);
pUdfInfo->resType = pFunc->resType;
pUdfInfo->funcType = htonl(pFunc->funcType);
pUdfInfo->contLen = htonl(pFunc->len);
pUdfInfo->bufSize = htonl(pFunc->bufSize);
pUdfInfo->content = malloc(pUdfInfo->contLen);
memcpy(pUdfInfo->content, pFunc->content, pUdfInfo->contLen);
pMsg += sizeof(SFunctionInfoMsg) + pUdfInfo->contLen;
}
}
pSql->res.code = TSDB_CODE_SUCCESS;
pSql->res.numOfTotal = pMultiMeta->numOfTables;
tscDebug("0x%"PRIx64" load multi-tableMeta from mnode, numOfTables:%d", pSql->self, pMultiMeta->numOfTables);
taosHashCleanup(pSet);
taosReleaseRef(tscObjRef, pParentSql->self);
tfree(buf);
return TSDB_CODE_SUCCESS;
}
@ -2508,7 +2703,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
return code;
}
int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVgroupNameList, __async_cb_func_t fp) {
int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVgroupNameList, SArray* pUdfList, __async_cb_func_t fp, bool metaClone) {
SSqlObj *pNew = calloc(1, sizeof(SSqlObj));
if (NULL == pNew) {
tscError("0x%"PRIx64" failed to allocate sqlobj to get multiple table meta", pSql->self);
@ -2521,8 +2716,9 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
int32_t numOfTable = (int32_t) taosArrayGetSize(pNameList);
int32_t numOfVgroupList = (int32_t) taosArrayGetSize(pVgroupNameList);
int32_t numOfUdf = pUdfList ? (int32_t)taosArrayGetSize(pUdfList) : 0;
int32_t size = (numOfTable + numOfVgroupList) * TSDB_TABLE_FNAME_LEN + sizeof(SMultiTableInfoMsg);
int32_t size = (numOfTable + numOfVgroupList) * TSDB_TABLE_FNAME_LEN + TSDB_FUNC_NAME_LEN * numOfUdf + sizeof(SMultiTableInfoMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, size)) {
tscError("0x%"PRIx64" malloc failed for payload to get table meta", pSql->self);
tscFreeSqlObj(pNew);
@ -2530,14 +2726,16 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
}
SMultiTableInfoMsg* pInfo = (SMultiTableInfoMsg*) pNew->cmd.payload;
pInfo->metaClone = metaClone? 1:0;
pInfo->numOfTables = htonl((uint32_t) taosArrayGetSize(pNameList));
pInfo->numOfVgroups = htonl((uint32_t) taosArrayGetSize(pVgroupNameList));
pInfo->numOfUdfs = htonl(numOfUdf);
char* start = pInfo->tableNames;
int32_t len = 0;
for(int32_t i = 0; i < numOfTable; ++i) {
char* name = taosArrayGetP(pNameList, i);
if (i < numOfTable - 1 || numOfVgroupList > 0) {
if (i < numOfTable - 1 || numOfVgroupList > 0 || numOfUdf > 0) {
len = sprintf(start, "%s,", name);
} else {
len = sprintf(start, "%s", name);
@ -2548,7 +2746,7 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
for(int32_t i = 0; i < numOfVgroupList; ++i) {
char* name = taosArrayGetP(pVgroupNameList, i);
if (i < numOfVgroupList - 1) {
if (i < numOfVgroupList - 1 || numOfUdf > 0) {
len = sprintf(start, "%s,", name);
} else {
len = sprintf(start, "%s", name);
@ -2557,12 +2755,23 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
start += len;
}
for(int32_t i = 0; i < numOfUdf; ++i) {
SUdfInfo * u = taosArrayGet(pUdfList, i);
if (i < numOfUdf - 1) {
len = sprintf(start, "%s,", u->name);
} else {
len = sprintf(start, "%s", u->name);
}
start += len;
}
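  // For example, with tables "db.t1" and "db.t2", one vgroup name "db.stb" and
  // one udf "myfun" (hypothetical names), the text section assembled above
  // would read "db.t1,db.t2,db.stb,myfun".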
pNew->cmd.payloadLen = (int32_t) ((start - pInfo->tableNames) + sizeof(SMultiTableInfoMsg));
pNew->cmd.msgType = TSDB_MSG_TYPE_CM_TABLES_META;
registerSqlObj(pNew);
tscDebug("0x%"PRIx64" new pSqlObj:0x%"PRIx64" to get %d tableMeta, vgroupInfo:%d, msg size:%d", pSql->self,
pNew->self, numOfTable, numOfVgroupList, pNew->cmd.payloadLen);
tscDebug("0x%"PRIx64" new pSqlObj:0x%"PRIx64" to get %d tableMeta, vgroupInfo:%d, udf:%d, msg size:%d", pSql->self,
pNew->self, numOfTable, numOfVgroupList, numOfUdf, pNew->cmd.payloadLen);
pNew->fp = fp;
pNew->param = (void *)pSql->self;
@ -2578,7 +2787,7 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
return code;
}
int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool autocreate) {
int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool autocreate, bool onlyLocal) {
assert(tIsValidName(&pTableMetaInfo->name));
uint32_t size = tscGetTableMetaMaxSize();
@ -2606,31 +2815,92 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
// TODO resize the tableMeta
char buf[80*1024] = {0};
assert(size < 80*1024);
assert(size < 80 * TSDB_MAX_COLUMNS);
if (!pSql->pBuf) {
if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
}
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
// in case of child table, here only get the
if (pMeta->tableType == TSDB_CHILD_TABLE) {
int32_t code = tscCreateTableMetaFromSTableMeta(pTableMetaInfo->pTableMeta, name, buf);
int32_t code = tscCreateTableMetaFromSTableMeta(pTableMetaInfo->pTableMeta, name, pSql->pBuf);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
}
}
return TSDB_CODE_SUCCESS;
}
if (onlyLocal) {
return TSDB_CODE_TSC_NO_META_CACHED;
}
return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
}
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
return tscGetTableMetaImpl(pSql, pTableMetaInfo, false);
return tscGetTableMetaImpl(pSql, pTableMetaInfo, false, false);
}
int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool createIfNotExists) {
return tscGetTableMetaImpl(pSql, pTableMetaInfo, createIfNotExists);
int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool createIfNotExists, bool onlyLocal) {
return tscGetTableMetaImpl(pSql, pTableMetaInfo, createIfNotExists, onlyLocal);
}
int32_t tscGetUdfFromNode(SSqlObj *pSql, SQueryInfo* pQueryInfo) {
SSqlObj *pNew = calloc(1, sizeof(SSqlObj));
if (NULL == pNew) {
tscError("%p malloc failed for new sqlobj to get user-defined functions", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pNew->pTscObj = pSql->pTscObj;
pNew->signature = pNew;
pNew->cmd.command = TSDB_SQL_RETRIEVE_FUNC;
if (tscAddQueryInfo(&pNew->cmd) != TSDB_CODE_SUCCESS) {
tscError("%p malloc failed for new queryinfo", pSql);
tscFreeSqlObj(pNew);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SQueryInfo *pNewQueryInfo = tscGetQueryInfo(&pNew->cmd);
pNewQueryInfo->pUdfInfo = taosArrayInit(4, sizeof(SUdfInfo));
for(int32_t i = 0; i < taosArrayGetSize(pQueryInfo->pUdfInfo); ++i) {
SUdfInfo info = {0};
SUdfInfo* p1 = taosArrayGet(pQueryInfo->pUdfInfo, i);
info = *p1;
info.name = strdup(p1->name);
taosArrayPush(pNewQueryInfo->pUdfInfo, &info);
}
pNew->cmd.active = pNewQueryInfo;
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) {
tscError("%p malloc failed for payload to get table meta", pSql);
tscFreeSqlObj(pNew);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
tscDebug("%p new pSqlObj:%p to retrieve udf", pSql, pNew);
registerSqlObj(pNew);
pNew->fp = tscTableMetaCallBack;
pNew->param = (void *)pSql->self;
tscDebug("%p metaRid from %" PRId64 " to %" PRId64 , pSql, pSql->metaRid, pNew->self);
pSql->metaRid = pNew->self;
int32_t code = tscBuildAndSendRequest(pNew, NULL);
if (code == TSDB_CODE_SUCCESS) {
code = TSDB_CODE_TSC_ACTION_IN_PROGRESS; // notify application that current process needs to be terminated
}
return code;
}
/**
@ -2682,7 +2952,6 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, SQueryInfo* pQueryInfo) {
if (allVgroupInfoRetrieved(pQueryInfo)) {
return TSDB_CODE_SUCCESS;
}
SSqlObj *pNew = calloc(1, sizeof(SSqlObj));
pNew->pTscObj = pSql->pTscObj;
pNew->signature = pNew;
@ -2734,6 +3003,7 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_CREATE_DB] = tscBuildCreateDbMsg;
tscBuildMsg[TSDB_SQL_CREATE_USER] = tscBuildUserMsg;
tscBuildMsg[TSDB_SQL_CREATE_FUNCTION] = tscBuildCreateFuncMsg;
tscBuildMsg[TSDB_SQL_CREATE_ACCT] = tscBuildAcctMsg;
tscBuildMsg[TSDB_SQL_ALTER_ACCT] = tscBuildAcctMsg;
@ -2742,6 +3012,7 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg;
tscBuildMsg[TSDB_SQL_DROP_FUNCTION] = tscBuildDropFuncMsg;
tscBuildMsg[TSDB_SQL_SYNC_DB_REPLICA] = tscBuildSyncDbReplicaMsg;
tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg;
tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildUserMsg;
@ -2757,6 +3028,7 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_USE_DB] = tscBuildUseDbMsg;
// tscBuildMsg[TSDB_SQL_META] = tscBuildTableMetaMsg;
tscBuildMsg[TSDB_SQL_STABLEVGROUP] = tscBuildSTableVgroupMsg;
tscBuildMsg[TSDB_SQL_RETRIEVE_FUNC] = tscBuildRetrieveFuncMsg;
tscBuildMsg[TSDB_SQL_HB] = tscBuildHeartBeatMsg;
tscBuildMsg[TSDB_SQL_SHOW] = tscBuildShowMsg;
@ -2775,6 +3047,7 @@ void tscInitMsgsFp() {
tscProcessMsgRsp[TSDB_SQL_META] = tscProcessTableMetaRsp;
tscProcessMsgRsp[TSDB_SQL_STABLEVGROUP] = tscProcessSTableVgroupRsp;
tscProcessMsgRsp[TSDB_SQL_MULTI_META] = tscProcessMultiTableMetaRsp;
tscProcessMsgRsp[TSDB_SQL_RETRIEVE_FUNC] = tscProcessRetrieveFuncRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW] = tscProcessShowRsp;
tscProcessMsgRsp[TSDB_SQL_RETRIEVE] = tscProcessRetrieveRspFromNode; // rsp handled by same function.
@ -2804,3 +3077,4 @@ void tscInitMsgsFp() {
tscKeepConn[TSDB_SQL_FETCH] = 1;
tscKeepConn[TSDB_SQL_HB] = 1;
}

View File

@ -566,7 +566,7 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
if ((pQueryInfo == NULL) || tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
if ((pQueryInfo == NULL) || pQueryInfo->globalMerge) {
return true;
}
@ -679,7 +679,7 @@ static void tscKillSTableQuery(SSqlObj *pSql) {
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
if (!tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
if (!pQueryInfo->globalMerge) {
return;
}
@ -730,7 +730,7 @@ void taos_stop_query(TAOS_RES *res) {
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
if (pQueryInfo->globalMerge) {
assert(pSql->rpcRid <= 0);
tscKillSTableQuery(pSql);
} else {
@ -945,30 +945,35 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
return TSDB_CODE_TSC_DISCONNECTED;
}
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
pSql->pTscObj = taos;
pSql->signature = pSql;
pSql->fp = NULL; // todo set the correct callback function pointer
pSql->cmd.pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
int32_t length = (int32_t)strlen(tableNameList);
if (length == 0) {
return TSDB_CODE_SUCCESS;
}
if (length > MAX_TABLE_NAME_LENGTH) {
tscError("0x%"PRIx64" tableNameList too long, length:%d, maximum allowed:%d", pSql->self, length, MAX_TABLE_NAME_LENGTH);
tscFreeSqlObj(pSql);
tscError("tableNameList too long, length:%d, maximum allowed:%d", length, MAX_TABLE_NAME_LENGTH);
return TSDB_CODE_TSC_INVALID_OPERATION;
}
char *str = calloc(1, length + 1);
if (str == NULL) {
tscError("0x%"PRIx64" failed to allocate sql string buffer", pSql->self);
tscFreeSqlObj(pSql);
tscError("failed to allocate sql string buffer, size:%d", length);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
strtolower(str, tableNameList);
SArray* plist = taosArrayInit(4, POINTER_BYTES);
SArray* vgroupList = taosArrayInit(4, POINTER_BYTES);
if (plist == NULL || vgroupList == NULL) {
tfree(str);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
tscAllocPayload(&pSql->cmd, 1024);
pSql->pTscObj = taos;
pSql->signature = pSql;
int32_t code = (uint8_t) tscTransferTableNameList(pSql, str, length, plist);
free(str);
@ -978,10 +983,11 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
return code;
}
pSql->cmd.pTableMetaMap = taosHashInit(taosArrayGetSize(plist), taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
registerSqlObj(pSql);
tscDebug("0x%"PRIx64" load multiple table meta, tableNameList: %s pObj:%p", pSql->self, tableNameList, pObj);
code = getMultiTableMetaFromMnode(pSql, plist, vgroupList, loadMultiTableMetaCallback);
code = getMultiTableMetaFromMnode(pSql, plist, vgroupList, NULL, loadMultiTableMetaCallback, false);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
code = TSDB_CODE_SUCCESS;
}

View File

@ -23,6 +23,7 @@
#include "tscSubquery.h"
#include "qTableMeta.h"
#include "tsclient.h"
#include "qUdf.h"
#include "qUtil.h"
#include "qPlan.h"
@ -32,7 +33,7 @@ typedef struct SInsertSupporter {
} SInsertSupporter;
static void freeJoinSubqueryObj(SSqlObj* pSql);
static bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql);
//static bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql);
static int32_t tsCompare(int32_t order, int64_t left, int64_t right) {
if (left == right) {
@ -107,6 +108,9 @@ bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
subState->states[idx] = 1;
bool done = allSubqueryDone(pParentSql);
if (!done) {
tscDebug("0x%"PRIx64" sub:%p,%d completed, total:%d", pParentSql->self, pSql, idx, pParentSql->subState.numOfSub);
}
pthread_mutex_unlock(&subState->mutex);
return done;
}
@ -416,7 +420,9 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) {
}
// tscFieldInfoClear(&pSupporter->fieldsInfo);
if (pSupporter->fieldsInfo.internalField != NULL) {
taosArrayDestroy(pSupporter->fieldsInfo.internalField);
}
if (pSupporter->pTSBuf != NULL) {
tsBufDestroy(pSupporter->pTSBuf);
pSupporter->pTSBuf = NULL;
@ -430,7 +436,8 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) {
}
if (pSupporter->pVgroupTables != NULL) {
taosArrayDestroy(pSupporter->pVgroupTables);
//taosArrayDestroy(pSupporter->pVgroupTables);
tscFreeVgroupTableInfo(pSupporter->pVgroupTables);
pSupporter->pVgroupTables = NULL;
}
@ -705,6 +712,15 @@ static void updateQueryTimeRange(SQueryInfo* pQueryInfo, STimeWindow* win) {
pQueryInfo->window = *win;
}
int32_t tagValCompar(const void* p1, const void* p2) {
const STidTags* t1 = (const STidTags*) varDataVal(p1);
const STidTags* t2 = (const STidTags*) varDataVal(p2);
__compar_fn_t func = getComparFunc(t1->padding, 0);
return func(t1->tag, t2->tag);
}
int32_t tidTagsCompar(const void* p1, const void* p2) {
const STidTags* t1 = (const STidTags*) (p1);
const STidTags* t2 = (const STidTags*) (p2);
@ -713,28 +729,7 @@ int32_t tidTagsCompar(const void* p1, const void* p2) {
return (t1->vgId > t2->vgId) ? 1 : -1;
}
tstr* tag1 = (tstr*) t1->tag;
tstr* tag2 = (tstr*) t2->tag;
if (tag1->len != tag2->len) {
return (tag1->len > tag2->len)? 1: -1;
}
return strncmp(tag1->data, tag2->data, tag1->len);
}
int32_t tagValCompar(const void* p1, const void* p2) {
const STidTags* t1 = (const STidTags*) varDataVal(p1);
const STidTags* t2 = (const STidTags*) varDataVal(p2);
tstr* tag1 = (tstr*) t1->tag;
tstr* tag2 = (tstr*) t2->tag;
if (tag1->len != tag2->len) {
return (tag1->len > tag2->len)? 1: -1;
}
return memcmp(tag1->data, tag2->data, tag1->len);
return tagValCompar(p1, p2);
}
void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables) {
@ -865,6 +860,12 @@ static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSq
return true;
}
static void setTidTagType(SJoinSupporter* p, uint8_t type) {
for (int32_t i = 0; i < p->num; ++i) {
STidTags * tag = (STidTags*) varDataVal(p->pIdTagList + i * p->tagSize);
tag->padding = type;
}
}
static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray* resList) {
int16_t joinNum = pParentSql->subState.numOfSub;
@ -884,13 +885,17 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
for (int32_t i = 0; i < joinNum; i++) {
SJoinSupporter* p = pParentSql->pSubs[i]->param;
setTidTagType(p, pColSchema->type);
ctxlist[i].p = p;
ctxlist[i].res = taosArrayInit(p->num, size);
tscDebug("Join %d - num:%d", i, p->num);
// sort according to the tag value
qsort(p->pIdTagList, p->num, p->tagSize, tagValCompar);
if (p->pIdTagList != NULL) {
qsort(p->pIdTagList, p->num, p->tagSize, tagValCompar);
}
if (!checkForDuplicateTagVal(pColSchema, p, pParentSql)) {
for (int32_t j = 0; j <= i; j++) {
@ -1174,7 +1179,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// no data exists in next vnode, mark the <tid, tags> query completed
// only when no subquery remains unfinished, proceed to get the intersection of the <tid, tags> tuple sets.
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
tscDebug("0x%"PRIx64" tagRetrieve:%p,%d completed, total:%d", pParentSql->self, tres, pSupporter->subqueryIndex, pParentSql->subState.numOfSub);
//tscDebug("0x%"PRIx64" tagRetrieve:%p,%d completed, total:%d", pParentSql->self, tres, pSupporter->subqueryIndex, pParentSql->subState.numOfSub);
return;
}
@ -1442,7 +1447,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
}
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d completed, total:%d", pParentSql->self, pSql->self, pSupporter->subqueryIndex, pState->numOfSub);
//tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d completed, total:%d", pParentSql->self, pSql->self, pSupporter->subqueryIndex, pState->numOfSub);
return;
}
@ -1892,7 +1897,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
int16_t type = 0;
int32_t inter = 0;
getResultDataInfo(s->type, s->bytes, TSDB_FUNC_TID_TAG, 0, &type, &bytes, &inter, 0, 0);
getResultDataInfo(s->type, s->bytes, TSDB_FUNC_TID_TAG, 0, &type, &bytes, &inter, 0, 0, NULL);
SSchema s1 = {.colId = s->colId, .type = (uint8_t)type, .bytes = bytes};
pSupporter->tagSize = s1.bytes;
@ -2291,6 +2296,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SArray* pColList = pNewQueryInfo->colList;
pNewQueryInfo->colList = NULL;
pNewQueryInfo->fillType = TSDB_FILL_NONE;
tscClearSubqueryInfo(pCmd);
tscFreeSqlResult(pSql);
@ -2439,9 +2445,9 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
tOrderDescriptor *pDesc = NULL;
pRes->qId = 0x1; // hack the qhandle check
const uint32_t nBufferSize = (1u << 16u); // 64KB
const uint32_t nBufferSize = (1u << 18u); // 256KB
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SSubqueryState *pState = &pSql->subState;
@ -2802,6 +2808,28 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
tscFreeRetrieveSup(pSql);
// set the command flag must be after the semaphore been correctly set.
if (pParentSql->cmd.command != TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
pParentSql->cmd.command = TSDB_SQL_RETRIEVE_GLOBALMERGE;
SQueryInfo *pQueryInfo2 = tscGetQueryInfo(&pParentSql->cmd);
size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t j = 0; j < size; ++j) {
SExprInfo* pExprInfo = tscExprGet(pQueryInfo2, j);
int32_t functionId = pExprInfo->base.functionId;
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo2->pUdfInfo, -1 * functionId - 1);
code = initUdfInfo(pUdfInfo);
if (code != TSDB_CODE_SUCCESS) {
pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql);
}
}
}
}
if (pParentSql->res.code == TSDB_CODE_SUCCESS) {
(*pParentSql->fp)(pParentSql->param, pParentSql, 0);
} else {
@ -3056,9 +3084,10 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
pParentObj->cmd.insertParam.schemaAttached = 1;
}
}
if (!subAndCheckDone(tres, pParentObj, pSupporter->index)) {
tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, pSupporter->index, pParentObj->subState.numOfSub);
// concurrency problem: another thread may already have released pParentObj
//tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, suppIdx, pParentObj->subState.numOfSub);
return;
}
@ -3469,20 +3498,20 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
bool allSubqueryExhausted = true;
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
if (pSql->pSubs[i] == NULL) {
continue;
}
SSqlRes *pRes1 = &pSql->pSubs[i]->res;
SSqlCmd *pCmd1 = &pSql->pSubs[i]->cmd;
SQueryInfo *pQueryInfo1 = tscGetQueryInfo(pCmd1);
assert(pQueryInfo1->numOfTables == 1);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo1, 0);
/*
* if the global limitation is not reached, and current result has not exhausted, or next more vnodes are
* available, goes on
@ -3493,14 +3522,14 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
break;
}
}
hasData = !allSubqueryExhausted;
} else { // otherwise, in case inner join, if any subquery exhausted, query completed.
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
if (pSql->pSubs[i] == 0) {
continue;
}
SSqlRes * pRes1 = &pSql->pSubs[i]->res;
SQueryInfo *pQueryInfo1 = tscGetQueryInfo(&pSql->pSubs[i]->cmd);

View File

@ -28,6 +28,7 @@
#include "tconfig.h"
#include "ttimezone.h"
#include "tlocale.h"
#include "qScript.h"
// global, not configurable
#define TSC_VAR_NOT_RELEASE 1
@ -148,6 +149,8 @@ void taos_init_imp(void) {
taosInitNotes();
rpcInit();
scriptEnvPoolInit();
tscDebug("starting to initialize TAOS client ...");
tscDebug("Local End Point is:%s", tsLocalEp);
}
@ -202,7 +205,9 @@ void taos_cleanup(void) {
if (atomic_val_compare_exchange_32(&sentinel, TSC_VAR_NOT_RELEASE, TSC_VAR_RELEASED) != TSC_VAR_NOT_RELEASE) {
return;
}
if (tscEmbedded == 0) {
scriptEnvPoolCleanup();
}
taosHashCleanup(tscTableMetaInfo);
tscTableMetaInfo = NULL;

View File

@ -34,48 +34,48 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo);
int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *len) {
int32_t n = 0;
switch (type) {
case TSDB_DATA_TYPE_NULL:
n = sprintf(str, "null");
break;
case TSDB_DATA_TYPE_BOOL:
n = sprintf(str, (*(int8_t*)buf) ? "true" : "false");
break;
case TSDB_DATA_TYPE_TINYINT:
n = sprintf(str, "%d", *(int8_t*)buf);
break;
case TSDB_DATA_TYPE_SMALLINT:
n = sprintf(str, "%d", *(int16_t*)buf);
break;
case TSDB_DATA_TYPE_INT:
n = sprintf(str, "%d", *(int32_t*)buf);
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP:
n = sprintf(str, "%" PRId64, *(int64_t*)buf);
break;
case TSDB_DATA_TYPE_FLOAT:
n = sprintf(str, "%f", GET_FLOAT_VAL(buf));
break;
case TSDB_DATA_TYPE_DOUBLE:
n = sprintf(str, "%f", GET_DOUBLE_VAL(buf));
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
if (bufSize < 0) {
tscError("invalid buf size");
return TSDB_CODE_TSC_INVALID_VALUE;
}
*str = '"';
memcpy(str + 1, buf, bufSize);
*(str + bufSize + 1) = '"';
@ -250,6 +250,15 @@ bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) {
for (int32_t i = 0; i < numOfExprs; ++i) {
int32_t functionId = tscExprGet(pQueryInfo, i)->base.functionId;
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * functionId - 1);
if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) {
return false;
}
continue;
}
if (functionId != TSDB_FUNC_PRJ &&
functionId != TSDB_FUNC_TAGPRJ &&
functionId != TSDB_FUNC_TAG &&
@ -300,6 +309,16 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) {
f != TSDB_FUNC_DERIVATIVE) {
return false;
}
if (f < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * f - 1);
if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) {
return false;
}
continue;
}
}
return true;
@ -331,7 +350,7 @@ bool tscHasColumnFilter(SQueryInfo* pQueryInfo) {
size_t size = taosArrayGetSize(pQueryInfo->colList);
for (int32_t i = 0; i < size; ++i) {
SColumn* pCol = taosArrayGet(pQueryInfo->colList, i);
SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i);
if (pCol->info.flist.numOfFilters > 0) {
return true;
}
@ -372,6 +391,10 @@ bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo) {
}
}
if (tscIsProjectionQuery(pQueryInfo)) {
return false;
}
return false;
}
@ -560,6 +583,15 @@ bool isSimpleAggregateRv(SQueryInfo* pQueryInfo) {
}
int32_t functionId = pExpr->base.functionId;
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * functionId - 1);
if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) {
return true;
}
continue;
}
if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
@ -1212,7 +1244,7 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilters);
pOutput->precision = pSqlObjList[0]->res.precision;
SSchema* schema = NULL;
if (px->numOfTables > 1) {
SOperatorInfo** p = calloc(px->numOfTables, POINTER_BYTES);
@ -1315,8 +1347,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
return;
}
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
SQueryInfo* pQueryInfo = pCmd->pQueryInfo;
while(pQueryInfo != NULL) {
SQueryInfo* p = pQueryInfo->sibling;
@ -1334,6 +1365,12 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
tfree(pUpQueryInfo);
}
if (pQueryInfo->udfCopy) {
pQueryInfo->pUdfInfo = taosArrayDestroy(pQueryInfo->pUdfInfo);
} else {
pQueryInfo->pUdfInfo = tscDestroyUdfArrayList(pQueryInfo->pUdfInfo);
}
freeQueryInfoImpl(pQueryInfo);
clearAllTableMetaInfo(pQueryInfo, removeMeta);
@ -1482,6 +1519,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
pSql->signature = NULL;
pSql->fp = NULL;
tfree(pSql->sqlstr);
tfree(pSql->pBuf);
tfree(pSql->pSubs);
pSql->subState.numOfSub = 0;
@ -1493,7 +1531,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
memset(pCmd->payload, 0, (size_t)pCmd->allocSize);
tfree(pCmd->payload);
pCmd->allocSize = 0;
tsem_destroy(&pSql->rspSem);
memset(pSql, 0, sizeof(*pSql));
free(pSql);
@ -1502,6 +1540,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo) {
tfree(pColInfo->boundedColumns);
tfree(pColInfo->cols);
tfree(pColInfo->colIdxInfo);
}
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
@ -1510,12 +1549,6 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
}
tfree(pDataBlock->pData);
tfree(pDataBlock->params);
// free the refcount for metermeta
if (pDataBlock->pTableMeta != NULL) {
tfree(pDataBlock->pTableMeta);
}
if (removeMeta) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
@ -1524,7 +1557,17 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
tscDestroyBoundColumnInfo(&pDataBlock->boundColumnInfo);
if (!pDataBlock->cloned) {
tfree(pDataBlock->params);
// free the refcount for metermeta
if (pDataBlock->pTableMeta != NULL) {
tfree(pDataBlock->pTableMeta);
}
tscDestroyBoundColumnInfo(&pDataBlock->boundColumnInfo);
}
tfree(pDataBlock);
}
@ -1567,6 +1610,47 @@ void* tscDestroyBlockArrayList(SArray* pDataBlockList) {
return NULL;
}
void freeUdfInfo(SUdfInfo* pUdfInfo) {
if (pUdfInfo == NULL) {
return;
}
if (pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY]) {
(*(udfDestroyFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY])(&pUdfInfo->init);
}
tfree(pUdfInfo->name);
if (pUdfInfo->path) {
unlink(pUdfInfo->path);
}
tfree(pUdfInfo->path);
tfree(pUdfInfo->content);
taosCloseDll(pUdfInfo->handle);
}
// todo refactor
void* tscDestroyUdfArrayList(SArray* pUdfList) {
if (pUdfList == NULL) {
return NULL;
}
size_t size = taosArrayGetSize(pUdfList);
for (int32_t i = 0; i < size; i++) {
SUdfInfo* udf = taosArrayGet(pUdfList, i);
freeUdfInfo(udf);
}
taosArrayDestroy(pUdfList);
return NULL;
}
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta) {
if (pBlockHashTable == NULL) {
return NULL;
@ -1662,12 +1746,14 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff
dataBuf->nAllocSize = dataBuf->headerSize * 2;
}
dataBuf->pData = calloc(1, dataBuf->nAllocSize);
//dataBuf->pData = calloc(1, dataBuf->nAllocSize);
dataBuf->pData = malloc(dataBuf->nAllocSize);
if (dataBuf->pData == NULL) {
tscError("failed to allocated memory, reason:%s", strerror(errno));
tfree(dataBuf);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
memset(dataBuf->pData, 0, sizeof(SSubmitBlk));
//Here we keep the tableMeta to avoid it being removed by other threads.
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
@ -1715,11 +1801,108 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
return TSDB_CODE_SUCCESS;
}
static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bool includeSchema) {
static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) {
SSchema* pSchema = pBuilder->pSchema;
char* p = (char*)pBuilder->buf;
int toffset = 0;
uint16_t nCols = pBuilder->nCols;
uint8_t memRowType = payloadType(p);
uint16_t nColsBound = payloadNCols(p);
if (pBuilder->nCols <= 0 || nColsBound <= 0) {
return NULL;
}
char* pVals = POINTER_SHIFT(p, payloadValuesOffset(p));
SMemRow* memRow = (SMemRow)pBuilder->pDataBlock;
memRowSetType(memRow, memRowType);
// ----------------- Raw payload structure for row:
/* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
* | |<----------------- flen ------------->|<--- value part --->|
* |SMemRowType| dataTLen | nCols | colId | colType | offset | ... | value |...|...|... |
* +-----------+----------+----------+--------------------------------------|--------------------|
* | uint8_t | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ... |.......|...|...|... |
* +-----------+----------+----------+--------------------------------------+--------------------|
* 1. offsets in the column data tuple are measured from the value part, to avoid uint16_t overflow.
* 2. dataTLen: total length including the header and body.
*/
if (memRowType == SMEM_ROW_DATA) {
SDataRow trow = (SDataRow)memRowDataBody(memRow);
dataRowSetLen(trow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBuilder->flen));
dataRowSetVersion(trow, pBuilder->sversion);
p = (char*)payloadBody(pBuilder->buf);
uint16_t i = 0, j = 0;
while (j < nCols) {
if (i >= nColsBound) {
break;
}
int16_t colId = payloadColId(p);
if (colId == pSchema[j].colId) {
// ASSERT(payloadColType(p) == pSchema[j].type);
tdAppendColVal(trow, POINTER_SHIFT(pVals, payloadColOffset(p)), pSchema[j].type, toffset);
toffset += TYPE_BYTES[pSchema[j].type];
p = payloadNextCol(p);
++i;
++j;
} else if (colId < pSchema[j].colId) {
p = payloadNextCol(p);
++i;
} else {
tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset);
toffset += TYPE_BYTES[pSchema[j].type];
++j;
}
}
while (j < nCols) {
tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset);
toffset += TYPE_BYTES[pSchema[j].type];
++j;
}
#if 0 // no need anymore
while (i < nColsBound) {
p = payloadNextCol(p);
++i;
}
#endif
} else if (memRowType == SMEM_ROW_KV) {
SKVRow kvRow = (SKVRow)memRowKvBody(memRow);
kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nColsBound));
kvRowSetNCols(kvRow, nColsBound);
memRowSetKvVersion(memRow, pBuilder->sversion);
p = (char*)payloadBody(pBuilder->buf);
int i = 0;
while (i < nColsBound) {
int16_t colId = payloadColId(p);
uint8_t colType = payloadColType(p);
tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, toffset);
toffset += sizeof(SColIdx);
p = payloadNextCol(p);
++i;
}
} else {
ASSERT(0);
}
int32_t rowTLen = memRowTLen(memRow);
pBuilder->pDataBlock = (char*)pBuilder->pDataBlock + rowTLen; // next row
pBuilder->pSubmitBlk->dataLen += rowTLen;
return memRow;
}
// Erase the empty space reserved for binary data
static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SInsertStatementParam* insertParam,
SBlockKeyTuple* blkKeyTuple) {
// TODO: optimize this function, handle the case when binary is not present
STableMeta* pTableMeta = pTableDataBlock->pTableMeta;
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
SSchema* pSchema = tscGetTableSchema(pTableMeta);
STableMeta* pTableMeta = pTableDataBlock->pTableMeta;
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
SSchema* pSchema = tscGetTableSchema(pTableMeta);
SSubmitBlk* pBlock = pDataBlock;
memcpy(pDataBlock, pTableDataBlock->pData, sizeof(SSubmitBlk));
@ -1728,7 +1911,7 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
int32_t flen = 0; // original total length of row
// schema needs to be included into the submit data block
if (includeSchema) {
if (insertParam->schemaAttached) {
int32_t numOfCols = tscGetNumOfColumns(pTableDataBlock->pTableMeta);
for(int32_t j = 0; j < numOfCols; ++j) {
STColumn* pCol = (STColumn*) pDataBlock;
@ -1754,21 +1937,39 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
char* p = pTableDataBlock->pData + sizeof(SSubmitBlk);
pBlock->dataLen = 0;
int32_t numOfRows = htons(pBlock->numOfRows);
for (int32_t i = 0; i < numOfRows; ++i) {
SDataRow trow = (SDataRow) pDataBlock;
dataRowSetLen(trow, (uint16_t)(TD_DATA_ROW_HEAD_SIZE + flen));
dataRowSetVersion(trow, pTableMeta->sversion);
int toffset = 0;
for (int32_t j = 0; j < tinfo.numOfColumns; j++) {
tdAppendColVal(trow, p, pSchema[j].type, pSchema[j].bytes, toffset);
toffset += TYPE_BYTES[pSchema[j].type];
p += pSchema[j].bytes;
if (IS_RAW_PAYLOAD(insertParam->payloadType)) {
for (int32_t i = 0; i < numOfRows; ++i) {
SMemRow memRow = (SMemRow)pDataBlock;
memRowSetType(memRow, SMEM_ROW_DATA);
SDataRow trow = memRowDataBody(memRow);
dataRowSetLen(trow, (uint16_t)(TD_DATA_ROW_HEAD_SIZE + flen));
dataRowSetVersion(trow, pTableMeta->sversion);
int toffset = 0;
for (int32_t j = 0; j < tinfo.numOfColumns; j++) {
tdAppendColVal(trow, p, pSchema[j].type, toffset);
toffset += TYPE_BYTES[pSchema[j].type];
p += pSchema[j].bytes;
}
pDataBlock = (char*)pDataBlock + memRowTLen(memRow);
pBlock->dataLen += memRowTLen(memRow);
}
} else {
SMemRowBuilder rowBuilder;
rowBuilder.pSchema = pSchema;
rowBuilder.sversion = pTableMeta->sversion;
rowBuilder.flen = flen;
rowBuilder.nCols = tinfo.numOfColumns;
rowBuilder.pDataBlock = pDataBlock;
rowBuilder.pSubmitBlk = pBlock;
rowBuilder.buf = p;
pDataBlock = (char*)pDataBlock + dataRowLen(trow);
pBlock->dataLen += dataRowLen(trow);
for (int32_t i = 0; i < numOfRows; ++i) {
rowBuilder.buf = (blkKeyTuple + i)->payloadAddr;
tdGenMemRowFromBuilder(&rowBuilder);
}
}
int32_t len = pBlock->dataLen + pBlock->schemaLen;
@ -1779,7 +1980,7 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
}
static int32_t getRowExpandSize(STableMeta* pTableMeta) {
int32_t result = TD_DATA_ROW_HEAD_SIZE;
int32_t result = TD_MEM_ROW_DATA_HEAD_SIZE;
int32_t columns = tscGetNumOfColumns(pTableMeta);
SSchema* pSchema = tscGetTableSchema(pTableMeta);
for(int32_t i = 0; i < columns; i++) {
@ -1793,16 +1994,14 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) {
static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeBlockMap) {
pInsertParam->numOfTables = (int32_t) taosHashGetSize(pInsertParam->pTableBlockHashList);
if (pInsertParam->pTableNameList == NULL) {
pInsertParam->pTableNameList = calloc(pInsertParam->numOfTables, POINTER_BYTES);
} else {
memset(pInsertParam->pTableNameList, 0, pInsertParam->numOfTables * POINTER_BYTES);
pInsertParam->pTableNameList = malloc(pInsertParam->numOfTables * POINTER_BYTES);
}
STableDataBlocks **p1 = taosHashIterate(pInsertParam->pTableBlockHashList, NULL);
int32_t i = 0;
while(p1) {
STableDataBlocks* pBlocks = *p1;
tfree(pInsertParam->pTableNameList[i]);
//tfree(pInsertParam->pTableNameList[i]);
pInsertParam->pTableNameList[i++] = tNameDup(&pBlocks->tableName);
p1 = taosHashIterate(pInsertParam->pTableBlockHashList, p1);
@ -1814,14 +2013,18 @@ static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeB
}
int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap) {
const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES);
const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
int code = 0;
bool isRawPayload = IS_RAW_PAYLOAD(pInsertParam->payloadType);
void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES);
STableDataBlocks** p = taosHashIterate(pInsertParam->pTableBlockHashList, NULL);
STableDataBlocks* pOneTableBlock = *p;
SBlockKeyInfo blkKeyInfo = {0}; // share by pOneTableBlock
while(pOneTableBlock) {
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
if (pBlocks->numOfRows > 0) {
@ -1835,37 +2038,54 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pInsertParam->objectId, ret);
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
tfree(blkKeyInfo.pKeyTuple);
return ret;
}
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
if (dataBuf->nAllocSize < destSize) {
while (dataBuf->nAllocSize < destSize) {
dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5);
}
dataBuf->nAllocSize = (uint32_t)(destSize * 1.5);
char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize);
if (tmp != NULL) {
dataBuf->pData = tmp;
memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
//memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
} else { // failed to allocate memory, free already allocated memory and return error code
tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pInsertParam->objectId, dataBuf->nAllocSize);
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
tfree(dataBuf->pData);
tfree(blkKeyInfo.pKeyTuple);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
}
tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
if (isRawPayload) {
tscSortRemoveDataBlockDupRowsRaw(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize * (pBlocks->numOfRows - 1);
tscDebug("0x%"PRIx64" name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName),
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
tscDebug("0x%" PRIx64 " name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64,
pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid,
pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
} else {
if ((code = tscSortRemoveDataBlockDupRows(pOneTableBlock, &blkKeyInfo)) != 0) {
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
tfree(dataBuf->pData);
tfree(blkKeyInfo.pKeyTuple);
return code;
}
ASSERT(blkKeyInfo.pKeyTuple != NULL && pBlocks->numOfRows > 0);
SBlockKeyTuple* pLastKeyTuple = blkKeyInfo.pKeyTuple + pBlocks->numOfRows - 1;
tscDebug("0x%" PRIx64 " name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64,
pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid,
pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey);
}
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
pBlocks->tid = htonl(pBlocks->tid);
@ -1875,7 +2095,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
pBlocks->schemaLen = 0;
// erase the empty space reserved for binary data
int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pInsertParam->schemaAttached);
int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pInsertParam, blkKeyInfo.pKeyTuple);
assert(finalLen <= len);
dataBuf->size += (finalLen + sizeof(SSubmitBlk));
@ -1903,6 +2123,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
// free the table data blocks;
pInsertParam->pDataBlocks = pVnodeDataBlockList;
taosHashCleanup(pVnodeDataBlockHashList);
tfree(blkKeyInfo.pKeyTuple);
return TSDB_CODE_SUCCESS;
}
@ -2163,7 +2384,7 @@ void tscFieldInfoCopy(SFieldInfo* pFieldInfo, const SFieldInfo* pSrc, const SArr
assert(pfield->pExpr->pExpr != NULL);
p.pExpr = calloc(1, sizeof(SExprInfo));
tscExprAssign(p.pExpr, pfield->pExpr);
}
}
taosArrayPush(pFieldInfo->internalField, &p);
}
@ -2993,6 +3214,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
int32_t tscAddQueryInfo(SSqlCmd* pCmd) {
assert(pCmd != NULL);
SQueryInfo* pQueryInfo = calloc(1, sizeof(SQueryInfo));
if (pQueryInfo == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@ -3072,8 +3294,9 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
pQueryInfo->tsBuf = NULL;
pQueryInfo->fillType = pSrc->fillType;
pQueryInfo->fillVal = NULL;
pQueryInfo->numOfFillVal = 0;
pQueryInfo->clauseLimit = pSrc->clauseLimit;
pQueryInfo->prjOffset = pSrc->prjOffset;
pQueryInfo->prjOffset = pSrc->prjOffset;
pQueryInfo->numOfTables = 0;
pQueryInfo->window = pSrc->window;
pQueryInfo->sessionWindow = pSrc->sessionWindow;
@ -3112,11 +3335,12 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
}
if (pSrc->fillType != TSDB_FILL_NONE) {
pQueryInfo->fillVal = malloc(pSrc->fieldsInfo.numOfOutput * sizeof(int64_t));
pQueryInfo->fillVal = calloc(1, pSrc->fieldsInfo.numOfOutput * sizeof(int64_t));
if (pQueryInfo->fillVal == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
pQueryInfo->numOfFillVal = pSrc->fieldsInfo.numOfOutput;
memcpy(pQueryInfo->fillVal, pSrc->fillVal, pSrc->fieldsInfo.numOfOutput * sizeof(int64_t));
}
@ -3148,6 +3372,14 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
tscAddTableMetaInfo(pQueryInfo, &p1->name, pMeta, p1->vgroupList, p1->tagColList, p1->pVgroupTables);
}
SArray *pUdfInfo = NULL;
if (pSrc->pUdfInfo) {
pUdfInfo = taosArrayDup(pSrc->pUdfInfo);
}
pQueryInfo->pUdfInfo = pUdfInfo;
pQueryInfo->udfCopy = true;
_error:
return code;
}
@ -3194,7 +3426,9 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) {
info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn);
}
info->itemList = taosArrayDup(pInfo->itemList);
if (pInfo->itemList) {
info->itemList = taosArrayDup(pInfo->itemList);
}
}
SArray* tscVgroupTableInfoDup(SArray* pVgroupTables) {
@ -3419,6 +3653,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNew->pTscObj = pSql->pTscObj;
pNew->signature = pNew;
pNew->sqlstr = strdup(pSql->sqlstr);
tsem_init(&pNew->rspSem, 0, 0);
SSqlCmd* pnCmd = &pNew->cmd;
memcpy(pnCmd, pCmd, sizeof(SSqlCmd));
@ -3445,6 +3680,11 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
SQueryInfo* pNewQueryInfo = tscGetQueryInfo(pnCmd);
if (pQueryInfo->pUdfInfo) {
pNewQueryInfo->pUdfInfo = taosArrayDup(pQueryInfo->pUdfInfo);
pNewQueryInfo->udfCopy = true;
}
pNewQueryInfo->command = pQueryInfo->command;
pnCmd->active = pNewQueryInfo;
@ -3458,11 +3698,13 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->tsBuf = NULL;
pNewQueryInfo->fillType = pQueryInfo->fillType;
pNewQueryInfo->fillVal = NULL;
pNewQueryInfo->numOfFillVal = 0;
pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit;
pNewQueryInfo->prjOffset = pQueryInfo->prjOffset;
pNewQueryInfo->numOfTables = 0;
pNewQueryInfo->pTableMetaInfo = NULL;
pNewQueryInfo->bufLen = pQueryInfo->bufLen;
pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
if (pNewQueryInfo->buf == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -3493,11 +3735,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
}
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
pNewQueryInfo->fillVal = malloc(pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
//just make the memory sanitizer happy
//refactor later
pNewQueryInfo->fillVal = calloc(1, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
if (pNewQueryInfo->fillVal == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
pNewQueryInfo->numOfFillVal = pQueryInfo->fieldsInfo.numOfOutput;
memcpy(pNewQueryInfo->fillVal, pQueryInfo->fillVal, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
}
@ -3774,7 +4019,7 @@ bool tscIsUpdateQuery(SSqlObj* pSql) {
}
SSqlCmd* pCmd = &pSql->cmd;
return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || TSDB_SQL_USE_DB == pCmd->command);
return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || TSDB_SQL_RESET_CACHE == pCmd->command || TSDB_SQL_USE_DB == pCmd->command);
}
char* tscGetSqlStr(SSqlObj* pSql) {
@ -3964,17 +4209,21 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) {
//back up the total number of results first
int64_t num = pRes->numOfTotal + pRes->numOfClauseTotal;
// Don't free final since it may be recorded and used later in the APP
TAOS_FIELD* finalBk = pRes->final;
pRes->final = NULL;
tscFreeSqlResult(pSql);
pRes->final = finalBk;
pRes->numOfTotal = num;
for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
taos_free_result(pSql->pSubs[i]);
}
tfree(pSql->pSubs);
pSql->subState.numOfSub = 0;
pSql->fp = fp;
tscDebug("0x%"PRIx64" try data in the next subclause", pSql->self);
@ -4235,7 +4484,7 @@ STableMeta* tscTableMetaDup(STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
size_t size = tscGetTableMetaSize(pTableMeta);
STableMeta* p = calloc(1, size);
STableMeta* p = malloc(size);
memcpy(p, pTableMeta, size);
return p;
}
@ -4307,7 +4556,7 @@ int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaI
int32_t inter = 0;
getResultDataInfo(pSource->base.colType, pSource->base.colBytes, functionId, 0, &pse->resType,
&pse->resBytes, &inter, 0, false);
&pse->resBytes, &inter, 0, false, NULL);
pse->colType = pse->resType;
pse->colBytes = pse->resBytes;
@ -4374,8 +4623,14 @@ static int32_t createGlobalAggregateExpr(SQueryAttr* pQueryAttr, SQueryInfo* pQu
functionId = TSDB_FUNC_STDDEV;
}
SUdfInfo* pUdfInfo = NULL;
if (functionId < 0) {
pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * functionId - 1);
}
getResultDataInfo(pExpr->base.colType, pExpr->base.colBytes, functionId, 0, &pse->resType, &pse->resBytes, &inter,
0, false);
0, false, pUdfInfo);
}
}
@ -4451,7 +4706,8 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->order = pQueryInfo->order;
pQueryAttr->fillType = pQueryInfo->fillType;
pQueryAttr->havingNum = pQueryInfo->havingFieldNum;
pQueryAttr->pUdfInfo = pQueryInfo->pUdfInfo;
if (pQueryInfo->order.order == TSDB_ORDER_ASC) { // TODO refactor
pQueryAttr->window = pQueryInfo->window;
} else {
@ -4517,7 +4773,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
if (pQueryAttr->fillType != TSDB_FILL_NONE) {
pQueryAttr->fillVal = calloc(pQueryAttr->numOfOutput, sizeof(int64_t));
memcpy(pQueryAttr->fillVal, pQueryInfo->fillVal, pQueryAttr->numOfOutput * sizeof(int64_t));
memcpy(pQueryAttr->fillVal, pQueryInfo->fillVal, pQueryInfo->numOfFillVal * sizeof(int64_t));
}
pQueryAttr->srcRowSize = 0;
@ -4563,8 +4819,13 @@ static int32_t doAddTableName(char* nextStr, char** str, SArray* pNameArray, SSq
strncpy(tablename, *str, TSDB_TABLE_FNAME_LEN);
len = (int32_t) strlen(tablename);
} else {
memcpy(tablename, *str, nextStr - (*str));
len = (int32_t)(nextStr - (*str));
if (len >= TSDB_TABLE_NAME_LEN) {
sprintf(pCmd->payload, "table name too long");
return TSDB_CODE_TSC_INVALID_OPERATION;
}
memcpy(tablename, *str, nextStr - (*str));
tablename[len] = '\0';
}
@ -4576,9 +4837,8 @@ static int32_t doAddTableName(char* nextStr, char** str, SArray* pNameArray, SSq
// Check if the table name available or not
if (tscValidateName(&sToken) != TSDB_CODE_SUCCESS) {
code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
sprintf(pCmd->payload, "table name is invalid");
return code;
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
}
SName name = {0};
@ -4594,6 +4854,20 @@ static int32_t doAddTableName(char* nextStr, char** str, SArray* pNameArray, SSq
return TSDB_CODE_SUCCESS;
}
int32_t nameComparFn(const void* n1, const void* n2) {
int32_t ret = strcmp(*(char**)n1, *(char**)n2);
if (ret == 0) {
return 0;
} else {
return ret > 0? 1:-1;
}
}
static void freeContent(void* p) {
char* ptr = *(char**)p;
tfree(ptr);
}
int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t length, SArray* pNameArray) {
SSqlCmd *pCmd = &pSql->cmd;
@ -4629,12 +4903,19 @@ int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t lengt
}
}
if (taosArrayGetSize(pNameArray) > TSDB_MULTI_TABLEMETA_MAX_NUM) {
size_t len = taosArrayGetSize(pNameArray);
if (len == 1) {
return TSDB_CODE_SUCCESS;
}
if (len > TSDB_MULTI_TABLEMETA_MAX_NUM) {
code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
sprintf(pCmd->payload, "tables over the max number");
return code;
}
taosArraySort(pNameArray, nameComparFn);
taosArrayRemoveDuplicate(pNameArray, nameComparFn, freeContent);
return TSDB_CODE_SUCCESS;
}

View File

@ -41,8 +41,10 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MGMT, "mgmt" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_DB, "create-db" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_TABLE, "create-table" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_FUNCTION, "create-function" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_DB, "drop-db" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_TABLE, "drop-table" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_FUNCTION, "drop-function" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_ACCT, "create-acct" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_USER, "create-user" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_ACCT, "drop-acct" )
@ -74,6 +76,7 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_STABLEVGROUP, "stable-vgroup" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MULTI_META, "multi-meta" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_HB, "heart-beat" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_FUNC, "retrieve-function" )
// SQL below for client local
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_LOCAL, "local" )

View File

@ -31,6 +31,14 @@ extern "C" {
memcpy(varDataVal(x), (str), __len); \
} while (0);
#define STR_TO_NET_VARSTR(x, str) \
do { \
VarDataLenT __len = (VarDataLenT)strlen(str); \
*(VarDataLenT *)(x) = htons(__len); \
memcpy(varDataVal(x), (str), __len); \
} while (0);
#define STR_WITH_MAXSIZE_TO_VARSTR(x, str, _maxs) \
do { \
char *_e = stpncpy(varDataVal(x), (str), (_maxs)-VARSTR_HEADER_SIZE); \
@ -45,10 +53,10 @@ extern "C" {
// ----------------- TSDB COLUMN DEFINITION
typedef struct {
int8_t type; // Column type
int16_t colId; // column ID
int16_t bytes; // column bytes
int16_t offset; // point offset in SDataRow after the header part
int8_t type; // Column type
int16_t colId; // column ID
uint16_t bytes; // column bytes
uint16_t offset; // point offset in SDataRow after the header part.
} STColumn;
#define colType(col) ((col)->type)
@ -159,10 +167,11 @@ static FORCE_INLINE int tkeyComparFn(const void *tkey1, const void *tkey2) {
return 0;
}
}
// ----------------- Data row structure
/* A data row, the format is like below:
* |<--------------------+--------------------------- len ---------------------------------->|
* |<------------------------------------------------ len ---------------------------------->|
* |<-- Head -->|<--------- flen -------------->| |
* +---------------------+---------------------------------+---------------------------------+
* | uint16_t | int16_t | | |
@ -176,8 +185,8 @@ typedef void *SDataRow;
#define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t))
#define dataRowLen(r) (*(uint16_t *)(r))
#define dataRowVersion(r) *(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))
#define dataRowLen(r) (*(TDRowLenT *)(r)) // 0~65535
#define dataRowVersion(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)))
#define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE)
#define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r)))
#define dataRowKey(r) tdGetKey(dataRowTKey(r))
@ -193,20 +202,19 @@ void tdInitDataRow(SDataRow row, STSchema *pSchema);
SDataRow tdDataRowDup(SDataRow row);
// offset here not include dataRow header length
static FORCE_INLINE int tdAppendColVal(SDataRow row, void *value, int8_t type, int32_t bytes, int32_t offset) {
static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) {
ASSERT(value != NULL);
int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE;
char * ptr = (char *)POINTER_SHIFT(row, dataRowLen(row));
if (IS_VAR_DATA_TYPE(type)) {
*(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row);
memcpy(ptr, value, varDataTLen(value));
memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value));
dataRowLen(row) += varDataTLen(value);
} else {
if (offset == 0) {
ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP);
TKEY tvalue = tdGetTKEY(*(TSKEY *)value);
memcpy(POINTER_SHIFT(row, toffset), (void *)(&tvalue), TYPE_BYTES[type]);
memcpy(POINTER_SHIFT(row, toffset), (const void *)(&tvalue), TYPE_BYTES[type]);
} else {
memcpy(POINTER_SHIFT(row, toffset), value, TYPE_BYTES[type]);
}
@ -237,17 +245,21 @@ typedef struct SDataCol {
TSKEY ts; // only used in last NULL column
} SDataCol;
#define isAllRowsNull(pCol) ((pCol)->len == 0)
static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints);
void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints);
void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints);
void dataColSetOffset(SDataCol *pCol, int nEle);
bool isNEleNull(SDataCol *pCol, int nEle);
void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints);
// Get the data pointer from a column-wised data
static FORCE_INLINE void *tdGetColDataOfRow(SDataCol *pCol, int row) {
static FORCE_INLINE const void *tdGetColDataOfRow(SDataCol *pCol, int row) {
if (isAllRowsNull(pCol)) {
return getNullValue(pCol->type);
}
if (IS_VAR_DATA_TYPE(pCol->type)) {
return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
} else {
@ -279,7 +291,7 @@ typedef struct {
} SDataCols;
#define keyCol(pCols) (&((pCols)->cols[0])) // Key column
#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)]
#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)] // the idx row of column-wised data
#define dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx))
static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) {
if (pCols->numOfRows) {
@ -323,13 +335,13 @@ void tdResetDataCols(SDataCols *pCols);
int tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
SDataCols *tdFreeDataCols(SDataCols *pCols);
void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols);
int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset);
// ----------------- K-V data row structure
/*
/* |<-------------------------------------- len -------------------------------------------->|
* |<----- header ----->|<--------------------------- body -------------------------------->|
* +----------+----------+---------------------------------+---------------------------------+
* | int16_t | int16_t | | |
* | uint16_t | int16_t | | |
* +----------+----------+---------------------------------+---------------------------------+
* | len | ncols | cols index | data part |
* +----------+----------+---------------------------------+---------------------------------+
@ -337,14 +349,14 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge
typedef void *SKVRow;
typedef struct {
int16_t colId;
int16_t offset;
int16_t colId;
uint16_t offset;
} SColIdx;
#define TD_KV_ROW_HEAD_SIZE (2 * sizeof(int16_t))
#define TD_KV_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t))
#define kvRowLen(r) (*(int16_t *)(r))
#define kvRowNCols(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)))
#define kvRowLen(r) (*(TDRowLenT *)(r))
#define kvRowNCols(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(uint16_t)))
#define kvRowSetLen(r, len) kvRowLen(r) = (len)
#define kvRowSetNCols(r, n) kvRowNCols(r) = (n)
#define kvRowColIdx(r) (SColIdx *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE)
@ -354,6 +366,9 @@ typedef struct {
#define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i))
#define kvRowFree(r) tfree(r)
#define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r))
#define kvRowTKey(r) (*(TKEY *)(kvRowValues(r)))
#define kvRowKey(r) tdGetKey(kvRowTKey(r))
#define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r))
SKVRow tdKVRowDup(SKVRow row);
int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value);
@ -377,13 +392,44 @@ static FORCE_INLINE void *tdGetKVRowValOfCol(SKVRow row, int16_t colId) {
return kvRowColVal(row, (SColIdx *)ret);
}
static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) {
return taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_EQ);
}
// offset here not include kvRow header length
static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t offset) {
ASSERT(value != NULL);
int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE;
SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset);
char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row));
pColIdx->colId = colId;
pColIdx->offset = kvRowLen(row); // offset of pColIdx including the TD_KV_ROW_HEAD_SIZE
if (IS_VAR_DATA_TYPE(type)) {
memcpy(ptr, value, varDataTLen(value));
kvRowLen(row) += varDataTLen(value);
} else {
if (offset == 0) {
ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP);
TKEY tvalue = tdGetTKEY(*(TSKEY *)value);
memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]);
} else {
memcpy(ptr, value, TYPE_BYTES[type]);
}
kvRowLen(row) += TYPE_BYTES[type];
}
return 0;
}
// ----------------- K-V data row builder
typedef struct {
int16_t tCols;
int16_t nCols;
SColIdx *pColIdx;
int16_t alloc;
int16_t size;
uint16_t alloc;
uint16_t size;
void * buf;
} SKVRowBuilder;
@ -419,8 +465,146 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId,
return 0;
}
// ----------------- SMemRow appended with sequential data row structure
/*
* |---------|------------------------------------------------- len ---------------------------------->|
* |<-------- Head ------>|<--------- flen -------------->| |
* |---------+---------------------+---------------------------------+---------------------------------+
* | uint8_t | uint16_t | int16_t | | |
* |---------+----------+----------+---------------------------------+---------------------------------+
* | flag | len | sversion | First part | Second part |
* +---------+----------+----------+---------------------------------+---------------------------------+
*
* NOTE: timestamp in this row structure is TKEY instead of TSKEY
*/
// ----------------- SMemRow appended with extended K-V data row structure
/* |--------------------|------------------------------------------------ len ---------------------------------->|
* |<------------- Head ------------>|<--------- flen -------------->| |
* |--------------------+----------+--------------------------------------------+---------------------------------+
* | uint8_t | int16_t | uint16_t | int16_t | | |
* |---------+----------+----------+----------+---------------------------------+---------------------------------+
* | flag | sversion | len | ncols | cols index | data part |
* |---------+----------+----------+----------+---------------------------------+---------------------------------+
*/
typedef void *SMemRow;
#define TD_MEM_ROW_TYPE_SIZE sizeof(uint8_t)
#define TD_MEM_ROW_KV_VER_SIZE sizeof(int16_t)
#define TD_MEM_ROW_KV_TYPE_VER_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE)
#define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE)
// #define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE)
#define SMEM_ROW_DATA 0U // SDataRow
#define SMEM_ROW_KV 1U // SKVRow
#define memRowType(r) (*(uint8_t *)(r))
#define isDataRow(r) (SMEM_ROW_DATA == memRowType(r))
#define isKvRow(r) (SMEM_ROW_KV == memRowType(r))
#define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag
#define memRowKvBody(r) \
POINTER_SHIFT(r, TD_MEM_ROW_KV_TYPE_VER_SIZE) // section after flag + sversion as to reuse SKVRow
#define memRowDataLen(r) (*(TDRowLenT *)memRowDataBody(r)) // 0~65535
#define memRowKvLen(r) (*(TDRowLenT *)memRowKvBody(r)) // 0~65535
#define memRowDataTLen(r) \
((TDRowTLenT)(memRowDataLen(r) + TD_MEM_ROW_TYPE_SIZE)) // using uint32_t/int32_t to store the TLen
#define memRowKvTLen(r) ((TDRowTLenT)(memRowKvLen(r) + TD_MEM_ROW_KV_TYPE_VER_SIZE))
#define memRowLen(r) (isDataRow(r) ? memRowDataLen(r) : memRowKvLen(r))
#define memRowTLen(r) (isDataRow(r) ? memRowDataTLen(r) : memRowKvTLen(r)) // using uint32_t/int32_t to store the TLen
#define memRowDataVersion(r) dataRowVersion(memRowDataBody(r))
#define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE))
#define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version
#define memRowSetKvVersion(r, v) (memRowKvVersion(r) = (v))
#define memRowTuple(r) (isDataRow(r) ? dataRowTuple(memRowDataBody(r)) : kvRowValues(memRowKvBody(r)))
#define memRowTKey(r) (isDataRow(r) ? dataRowTKey(memRowDataBody(r)) : kvRowTKey(memRowKvBody(r)))
#define memRowKey(r) (isDataRow(r) ? dataRowKey(memRowDataBody(r)) : kvRowKey(memRowKvBody(r)))
#define memRowSetTKey(r, k) \
do { \
if (isDataRow(r)) { \
dataRowTKey(memRowDataBody(r)) = (k); \
} else { \
kvRowTKey(memRowKvBody(r)) = (k); \
} \
} while (0)
#define memRowSetType(r, t) (memRowType(r) = (t))
#define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l))
#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowKvSetVersion(r, v))
#define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r))
#define memRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_MEM_ROW_DATA_HEAD_SIZE)
#define memRowDeleted(r) TKEY_IS_DELETED(memRowTKey(r))
SMemRow tdMemRowDup(SMemRow row);
void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols);
// NOTE: offset here including the header size
static FORCE_INLINE void *tdGetKvRowDataOfCol(void *row, int32_t offset) { return POINTER_SHIFT(row, offset); }
// NOTE: offset here including the header size
static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int8_t type, int32_t offset) {
if (isDataRow(row)) {
return tdGetRowDataOfCol(row, type, offset);
} else if (isKvRow(row)) {
return tdGetKvRowDataOfCol(row, offset);
} else {
ASSERT(0);
}
return NULL;
}
// ----------------- Raw payload structure for row:
/* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
* | |<----------------- flen ------------->|<--- value part --->|
* |SMemRowType| dataTLen | nCols | colId | colType | offset | ... | value |...|...|... |
* +-----------+----------+----------+--------------------------------------|--------------------|
* | uint8_t | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ... |.......|...|...|... |
* +-----------+----------+----------+--------------------------------------+--------------------|
* 1. offsets in the column data tuple are measured from the value part, to avoid uint16_t overflow.
* 2. dataTLen: total length including the header and body.
*/
#define PAYLOAD_NCOLS_LEN sizeof(uint16_t)
#define PAYLOAD_NCOLS_OFFSET (sizeof(uint8_t) + sizeof(TDRowTLenT))
#define PAYLOAD_HEADER_LEN (PAYLOAD_NCOLS_OFFSET + PAYLOAD_NCOLS_LEN)
#define PAYLOAD_ID_LEN sizeof(int16_t)
#define PAYLOAD_ID_TYPE_LEN (sizeof(int16_t) + sizeof(uint8_t))
#define PAYLOAD_COL_HEAD_LEN (PAYLOAD_ID_TYPE_LEN + sizeof(uint16_t))
#define PAYLOAD_PRIMARY_COL_LEN (PAYLOAD_ID_TYPE_LEN + sizeof(TSKEY))
#define payloadBody(r) POINTER_SHIFT(r, PAYLOAD_HEADER_LEN)
#define payloadType(r) (*(uint8_t *)(r))
#define payloadSetType(r, t) (payloadType(r) = (t))
#define payloadTLen(r) (*(TDRowTLenT *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)) // including total header
#define payloadSetTLen(r, l) (payloadTLen(r) = (l))
#define payloadNCols(r) (*(TDRowLenT *)POINTER_SHIFT(r, PAYLOAD_NCOLS_OFFSET))
#define payloadSetNCols(r, n) (payloadNCols(r) = (n))
#define payloadValuesOffset(r) \
(PAYLOAD_HEADER_LEN + payloadNCols(r) * PAYLOAD_COL_HEAD_LEN) // avoid using the macro in loop
#define payloadValues(r) POINTER_SHIFT(r, payloadValuesOffset(r)) // avoid using the macro in loop
#define payloadColId(c) (*(int16_t *)(c))
#define payloadColType(c) (*(uint8_t *)POINTER_SHIFT(c, PAYLOAD_ID_LEN))
#define payloadColOffset(c) (*(uint16_t *)POINTER_SHIFT(c, PAYLOAD_ID_TYPE_LEN))
#define payloadColValue(c) POINTER_SHIFT(c, payloadColOffset(c))
#define payloadColSetId(c, i) (payloadColId(c) = (i))
#define payloadColSetType(c, t) (payloadColType(c) = (t))
#define payloadColSetOffset(c, o) (payloadColOffset(c) = (o))
#define payloadTSKey(r) (*(TSKEY *)POINTER_SHIFT(r, payloadValuesOffset(r)))
#define payloadTKey(r) (*(TKEY *)POINTER_SHIFT(r, payloadValuesOffset(r)))
#define payloadKey(r) tdGetKey(payloadTKey(r))
static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SHIFT(pCol, PAYLOAD_COL_HEAD_LEN); }
#ifdef __cplusplus
}
#endif
#endif // _TD_DATA_FORMAT_H_
#endif // _TD_DATA_FORMAT_H_

View File

@ -205,6 +205,16 @@ extern int32_t wDebugFlag;
extern int32_t cqDebugFlag;
extern int32_t debugFlag;
#ifdef TD_TSZ
// lossy
extern char lossyColumns[];
extern double fPrecision;
extern double dPrecision;
extern uint32_t maxRange;
extern uint32_t curRange;
extern char Compressor[];
#endif
typedef struct {
char dir[TSDB_FILENAME_LEN];
int level;

View File

@ -18,7 +18,58 @@
#include "ttype.h"
#include "tutil.h"
#include "tarithoperator.h"
#include "tcompare.h"
//GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i]));
#define ARRAY_LIST_OP_DIV(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \
{ \
int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; \
int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; \
\
if ((len1) == (len2)) { \
for (; i < (len2) && i >= 0; i += step, (out) += 1) { \
if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \
SET_DOUBLE_NULL(out); \
continue; \
} \
double v, z = 0.0; \
GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \
if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
SET_DOUBLE_NULL(out); \
continue; \
} \
*(out) = (double)(left)[i] op(right)[i]; \
} \
} else if ((len1) == 1) { \
for (; i >= 0 && i < (len2); i += step, (out) += 1) { \
if (isNull((char *)(left), _left_type) || isNull((char *)&(right)[i], _right_type)) { \
SET_DOUBLE_NULL(out); \
continue; \
} \
double v, z = 0.0; \
GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \
if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
SET_DOUBLE_NULL(out); \
continue; \
} \
*(out) = (double)(left)[0] op(right)[i]; \
} \
} else if ((len2) == 1) { \
for (; i >= 0 && i < (len1); i += step, (out) += 1) { \
if (isNull((char *)&(left)[i], _left_type) || isNull((char *)(right), _right_type)) { \
SET_DOUBLE_NULL(out); \
continue; \
} \
double v, z = 0.0; \
GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[0])); \
if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
SET_DOUBLE_NULL(out); \
continue; \
} \
*(out) = (double)(left)[i] op(right)[0]; \
} \
} \
}
#define ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \
{ \
int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; \
@ -62,6 +113,12 @@
SET_DOUBLE_NULL(out); \
continue; \
} \
double v, z = 0.0; \
GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \
if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
SET_DOUBLE_NULL(out); \
continue; \
} \
*(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[i])) * (right)[i]; \
} \
} else if (len1 == 1) { \
@ -70,6 +127,12 @@
SET_DOUBLE_NULL(out); \
continue; \
} \
double v, z = 0.0; \
GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \
if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
SET_DOUBLE_NULL(out); \
continue; \
} \
*(out) = (double)(left)[0] - ((int64_t)(((double)(left)[0]) / (right)[i])) * (right)[i]; \
} \
} else if ((len2) == 1) { \
@ -78,6 +141,12 @@
SET_DOUBLE_NULL(out); \
continue; \
} \
double v, z = 0.0; \
GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[0])); \
if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
SET_DOUBLE_NULL(out); \
continue; \
} \
*(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[0])) * (right)[0]; \
} \
} \
@ -90,7 +159,7 @@
#define ARRAY_LIST_MULTI(left, right, _left_type, _right_type, len1, len2, out, _ord) \
ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, *, TSDB_DATA_TYPE_DOUBLE, _ord)
#define ARRAY_LIST_DIV(left, right, _left_type, _right_type, len1, len2, out, _ord) \
ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, /, TSDB_DATA_TYPE_DOUBLE, _ord)
ARRAY_LIST_OP_DIV(left, right, _left_type, _right_type, len1, len2, out, /, TSDB_DATA_TYPE_DOUBLE, _ord)
#define ARRAY_LIST_REM(left, right, _left_type, _right_type, len1, len2, out, _ord) \
ARRAY_LIST_OP_REM(left, right, _left_type, _right_type, len1, len2, out, %, TSDB_DATA_TYPE_DOUBLE, _ord)

View File

@ -198,6 +198,14 @@ SDataRow tdDataRowDup(SDataRow row) {
return trow;
}
SMemRow tdMemRowDup(SMemRow row) {
SMemRow trow = malloc(memRowTLen(row));
if (trow == NULL) return NULL;
memRowCpy(trow, row);
return trow;
}
void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints) {
pDataCol->type = colType(pCol);
pDataCol->colId = colColId(pCol);
@ -217,11 +225,22 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints)
*pBuf = POINTER_SHIFT(*pBuf, pDataCol->spaceSize);
}
}
// value from timestamp should be TKEY here instead of TSKEY
void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints) {
void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) {
ASSERT(pCol != NULL && value != NULL);
if (isAllRowsNull(pCol)) {
if (isNull(value, pCol->type)) {
// all null value yet, just return
return;
}
if (numOfRows > 0) {
// Find the first non-null value, fill all previous values as NULL
dataColSetNEleNull(pCol, numOfRows, maxPoints);
}
}
if (IS_VAR_DATA_TYPE(pCol->type)) {
// set offset
pCol->dataOff[numOfRows] = pCol->len;
@ -243,7 +262,7 @@ bool isNEleNull(SDataCol *pCol, int nEle) {
return true;
}
void dataColSetNullAt(SDataCol *pCol, int index) {
FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->dataOff[index] = pCol->len;
char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
@ -399,8 +418,7 @@ void tdResetDataCols(SDataCols *pCols) {
}
}
}
void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) {
static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) {
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row));
int rcol = 0;
@ -419,7 +437,8 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols)
while (dcol < pCols->numOfCols) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
dataColSetNullAt(pDataCol, pCols->numOfRows);
// dataColSetNullAt(pDataCol, pCols->numOfRows);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
dcol++;
continue;
}
@ -433,7 +452,8 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols)
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
dataColSetNullAt(pDataCol, pCols->numOfRows);
// dataColSetNullAt(pDataCol, pCols->numOfRows);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
dcol++;
}
}
@ -441,6 +461,62 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols)
pCols->numOfRows++;
}
static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols) {
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < kvRowKey(row));
int rcol = 0;
int dcol = 0;
if (kvRowDeleted(row)) {
for (; dcol < pCols->numOfCols; dcol++) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (dcol == 0) {
dataColAppendVal(pDataCol, kvRowValues(row), pCols->numOfRows, pCols->maxPoints);
} else {
dataColSetNullAt(pDataCol, pCols->numOfRows);
}
}
} else {
int nRowCols = kvRowNCols(row);
while (dcol < pCols->numOfCols) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
// dataColSetNullAt(pDataCol, pCols->numOfRows);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
++dcol;
continue;
}
SColIdx *colIdx = kvRowColIdxAt(row, rcol);
if (colIdx->colId == pDataCol->colId) {
void *value = tdGetKvRowDataOfCol(row, colIdx->offset);
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
++dcol;
++rcol;
} else if (colIdx->colId < pDataCol->colId) {
++rcol;
} else {
// dataColSetNullAt(pDataCol, pCols->numOfRows);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
++dcol;
}
}
}
pCols->numOfRows++;
}
void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols) {
if (isDataRow(row)) {
tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols);
} else if (isKvRow(row)) {
tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols);
} else {
ASSERT(0);
}
}
int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset) {
ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
ASSERT(target->numOfCols == source->numOfCols);
@ -563,7 +639,7 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
nrow = malloc(kvRowLen(row) + sizeof(SColIdx) + diff);
if (nrow == NULL) return -1;
kvRowSetLen(nrow, kvRowLen(row) + (int16_t)sizeof(SColIdx) + diff);
kvRowSetLen(nrow, kvRowLen(row) + (uint16_t)sizeof(SColIdx) + diff);
kvRowSetNCols(nrow, kvRowNCols(row) + 1);
if (ptr == NULL) {
@ -605,8 +681,8 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place
memcpy(pOldVal, value, varDataTLen(value));
} else { // need to reallocate the memory
int16_t diff = varDataTLen(value) - varDataTLen(pOldVal);
int16_t nlen = kvRowLen(row) + diff;
uint16_t diff = varDataTLen(value) - varDataTLen(pOldVal);
uint16_t nlen = kvRowLen(row) + diff;
ASSERT(nlen > 0);
nrow = malloc(nlen);
if (nrow == NULL) return -1;
@ -708,4 +784,4 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) {
memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size);
return row;
}
}

View File

@ -244,6 +244,19 @@ int32_t tsdbDebugFlag = 131;
int32_t cqDebugFlag = 131;
int32_t fsDebugFlag = 135;
#ifdef TD_TSZ
//
// lossy compress 6
//
char lossyColumns[32] = "";  // "float|double" means all float and double columns can be lossily compressed; set to empty to disable lossy compression.
// below option can take effect when tsLossyColumns not empty
double fPrecision = 1E-8; // float column precision
double dPrecision = 1E-16; // double column precision
uint32_t maxRange = 500; // max range
uint32_t curRange = 100; // range
char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
#endif
int32_t (*monStartSystemFp)() = NULL;
void (*monStopSystemFp)() = NULL;
void (*monExecuteSQLFp)(char *sql) = NULL;
@ -1517,6 +1530,62 @@ static void doInitGlobalConfig(void) {
cfg.ptrLength = tListLen(tsTempDir);
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
#ifdef TD_TSZ
// lossy compress
cfg.option = "lossyColumns";
cfg.ptr = lossyColumns;
cfg.valType = TAOS_CFG_VTYPE_STRING;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 0;
cfg.maxValue = 0;
cfg.ptrLength = tListLen(lossyColumns);
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "fPrecision";
cfg.ptr = &fPrecision;
cfg.valType = TAOS_CFG_VTYPE_DOUBLE;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = MIN_FLOAT;
cfg.maxValue = MAX_FLOAT;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "dPrecision";
cfg.ptr = &dPrecision;
cfg.valType = TAOS_CFG_VTYPE_DOUBLE;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = MIN_FLOAT;
cfg.maxValue = MAX_FLOAT;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "maxRange";
cfg.ptr = &maxRange;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 0;
cfg.maxValue = 65536;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "range";
cfg.ptr = &curRange;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 0;
cfg.maxValue = 65536;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
#endif
}
void taosInitGlobalCfg() {

View File

@ -306,7 +306,7 @@ bool tIsValidName(const SName* name) {
SName* tNameDup(const SName* name) {
assert(name != NULL);
SName* p = calloc(1, sizeof(SName));
SName* p = malloc(sizeof(SName));
memcpy(p, name, sizeof(SName));
return p;
}

View File

@ -38,7 +38,11 @@ const int32_t TYPE_BYTES[15] = {
#define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \
do { \
(__sum) += (_list)[(_index)]; \
if (_list[(_index)] >= (INT64_MAX - (__sum))) { \
__sum = INT64_MAX; \
} else { \
(__sum) += (_list)[(_index)]; \
} \
if ((__min) > (_list)[(_index)]) { \
(__min) = (_list)[(_index)]; \
(__minIndex) = (_index); \
@ -518,30 +522,32 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
}
}
static uint8_t nullBool = TSDB_DATA_BOOL_NULL;
static uint8_t nullTinyInt = TSDB_DATA_TINYINT_NULL;
static uint16_t nullSmallInt = TSDB_DATA_SMALLINT_NULL;
static uint32_t nullInt = TSDB_DATA_INT_NULL;
static uint64_t nullBigInt = TSDB_DATA_BIGINT_NULL;
static uint32_t nullFloat = TSDB_DATA_FLOAT_NULL;
static uint64_t nullDouble = TSDB_DATA_DOUBLE_NULL;
static uint8_t nullTinyIntu = TSDB_DATA_UTINYINT_NULL;
static uint16_t nullSmallIntu = TSDB_DATA_USMALLINT_NULL;
static uint32_t nullIntu = TSDB_DATA_UINT_NULL;
static uint64_t nullBigIntu = TSDB_DATA_UBIGINT_NULL;
static uint8_t nullBool = TSDB_DATA_BOOL_NULL;
static uint8_t nullTinyInt = TSDB_DATA_TINYINT_NULL;
static uint16_t nullSmallInt = TSDB_DATA_SMALLINT_NULL;
static uint32_t nullInt = TSDB_DATA_INT_NULL;
static uint64_t nullBigInt = TSDB_DATA_BIGINT_NULL;
static uint32_t nullFloat = TSDB_DATA_FLOAT_NULL;
static uint64_t nullDouble = TSDB_DATA_DOUBLE_NULL;
static uint8_t nullTinyIntu = TSDB_DATA_UTINYINT_NULL;
static uint16_t nullSmallIntu = TSDB_DATA_USMALLINT_NULL;
static uint32_t nullIntu = TSDB_DATA_UINT_NULL;
static uint64_t nullBigIntu = TSDB_DATA_UBIGINT_NULL;
static SBinaryNullT nullBinary = {1, TSDB_DATA_BINARY_NULL};
static SNCharNullT nullNchar = {4, TSDB_DATA_NCHAR_NULL};
static union {
tstr str;
char pad[sizeof(tstr) + 4];
} nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}};
// static union {
// tstr str;
// char pad[sizeof(tstr) + 4];
// } nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}};
static void *nullValues[] = {
static const void *nullValues[] = {
&nullBool, &nullTinyInt, &nullSmallInt, &nullInt, &nullBigInt,
&nullFloat, &nullDouble, &nullBinary, &nullBigInt, &nullNchar,
&nullTinyIntu, &nullSmallIntu, &nullIntu, &nullBigIntu,
};
void *getNullValue(int32_t type) {
const void *getNullValue(int32_t type) {
assert(type >= TSDB_DATA_TYPE_BOOL && type <= TSDB_DATA_TYPE_UBIGINT);
return nullValues[type - 1];
}

@ -1 +1 @@
Subproject commit 3530c6df097134a410bacec6b3cd013ef38a61aa
Subproject commit 4a4d79099b076b8ff12d5b4fdbcba54049a6866d

View File

@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-*-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.33-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})

View File

@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.32</version>
<version>2.0.33</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>

View File

@ -37,13 +37,6 @@
<maven.test.jvmargs></maven.test.jvmargs>
</properties>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13.1</version>
<scope>test</scope>
</dependency>
<!-- for restful -->
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
@ -52,12 +45,18 @@
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.58</version>
<version>1.2.76</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>30.0-jre</version>
<version>30.1.1-jre</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13.1</version>
<scope>test</scope>
</dependency>
</dependencies>

View File

@ -1,54 +1,62 @@
# Java Connector
## TAOS-JDBCDriver Overview
TDengine provides a `taos-jdbcdriver` implementation that follows the JDBC standard (3.0) API specification; it can be searched for and downloaded from the Maven central repository [Sonatype Repository][1].
To make it easy for Java applications to use TDengine, a `taos-jdbcdriver` implementation following the JDBC standard (3.0) API specification is provided. It can currently be searched for and downloaded via [Sonatype Repository][1].
`taos-jdbcdriver` is implemented in two forms: JDBC-JNI and JDBC-RESTful (JDBC-RESTful is supported since taos-jdbcdriver-2.0.18). JDBC-JNI calls native methods of the client library (libtaos.so or taos.dll), while JDBC-RESTful internally wraps the RESTful interface.
Because TDengine is developed in C, the taos-jdbcdriver driver package depends on the system's native library.
![tdengine-connector](https://www.taosdata.com/cn/documentation/user/pages/images/tdengine-jdbc-connector.png)
* libtaos.so
After TDengine is successfully installed on a Linux system, the required native library libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on the default Linux search path and does not need to be specified separately.
* taos.dll
After the client is installed on a Windows system, the taos.dll file required by the driver package is automatically copied to the default system search path C:/Windows/System32 and likewise does not need to be specified separately.
> Note: when developing on Windows you need to install the corresponding Windows client of TDengine; since no standalone client is currently provided for Linux, TDengine itself must be installed there before the driver can be used.
The figure above shows three ways in which a Java application can access TDengine through the connector:
TDengine's JDBC driver implementation stays as close to relational database drivers as possible, but the differences in the objects and technical characteristics served by a time-series database versus a relational/object database mean that taos-jdbcdriver does not fully implement the JDBC standard. Keep the following points in mind when using it:
* JDBC-JNI: the Java application on physical node 1 (pnode1) uses the JDBC-JNI API to call the client API (libtaos.so or taos.dll) directly, sending write and query requests to the taosd instance on physical node 2 (pnode2).
* RESTful: the application sends SQL to the RESTful connector on physical node 2 (pnode2), which in turn calls the client API (libtaos.so).
* JDBC-RESTful: the Java application uses the JDBC-RESTful API to wrap the SQL into a RESTful request and sends it to the RESTful connector on physical node 2.
* TDengine does not provide delete or update operations on individual data records, and the driver has no corresponding methods.
* Since delete and update are not supported, transactions are not supported either.
* Union operations across tables are currently not supported.
* Nested queries are currently not supported. `Each Connection instance can have at most one open ResultSet; if a new query is executed while the previous ResultSet has not been closed, TSDBJDBCDriver automatically closes the previous ResultSet`.
TDengine's JDBC driver implementation stays as close to relational database drivers as possible, but the differences between time-series databases and relational/object databases mean that `taos-jdbcdriver` also differs somewhat from a traditional JDBC driver. Keep the following points in mind when using it:
* TDengine currently does not support deleting individual data records.
* Transactions are currently not supported.
* Nested queries are currently not supported.
* Each Connection instance can have at most one open ResultSet; if a new query is executed while the previous ResultSet has not been closed, taos-jdbcdriver automatically closes the previous ResultSet.
## TAOS-JDBCDriver versions and the supported TDengine and JDK versions
## Comparison of JDBC-JNI and JDBC-RESTful
| taos-jdbcdriver version | TDengine version | JDK version |
| --- | --- | --- |
| 1.0.3 | 1.6.1.x and above | 1.8.x |
| 1.0.2 | 1.6.1.x and above | 1.8.x |
| 1.0.1 | 1.6.1.x and above | 1.8.x |
<table >
<tr align="center"><th>Comparison item</th><th>JDBC-JNI</th><th>JDBC-RESTful</th></tr>
<tr align="center">
<td>Supported operating systems</td>
<td>Linux, Windows</td>
<td>All platforms</td>
</tr>
<tr align="center">
<td>Client installation required</td>
<td>Required</td>
<td>Not required</td>
</tr>
<tr align="center">
<td>Client upgrade required after a server upgrade</td>
<td>Required</td>
<td>Not required</td>
</tr>
<tr align="center">
<td>Write performance</td>
<td colspan="2">JDBC-RESTful reaches 50% to 90% of JDBC-JNI</td>
</tr>
<tr align="center">
<td>Query performance</td>
<td colspan="2">No difference between JDBC-RESTful and JDBC-JNI</td>
</tr>
</table>
## TDengine DataType and Java DataType
Note: unlike the JNI mode, the RESTful interface is stateless, so the `USE db_name` statement takes no effect; under RESTful, every reference to a table name or super table name must carry the database name prefix.
TDengine currently supports timestamp, numeric, character, and boolean types, which map to the corresponding Java types as follows (a small read-back sketch follows the table):
| TDengine DataType | Java DataType |
| --- | --- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT |java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## 如何获取 TAOS-JDBCDriver
## 如何获取 taos-jdbcdriver
### maven 仓库
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
@ -56,56 +64,86 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
maven 项目中使用如下 pom.xml 配置即可:
```xml
<dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>1.0.3</version>
</dependency>
</dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.18</version>
</dependency>
```
### 源码编译打包
下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。
下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。
## 使用说明
## JDBC的使用说明
### 获取连接
如下所示配置即可获取 TDengine Connection
#### 指定URL获取连接
通过指定URL获取连接如下所示
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
> 端口 6030 为默认连接端口JDBC URL 中的 log 为系统本身的监控数据库。
以上示例,使用 **JDBC-RESTful** 的 driver建立了到 hostname 为 taosdemo.com端口为 6041数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。
使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要:
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
3. 使用 6041 作为连接端口。
如果希望获得更好的写入和查询性能Java 应用可以使用 **JDBC-JNI** 的driver如下所示
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
以上示例,使用了 JDBC-JNI 的 driver建立了到 hostname 为 taosdemo.com端口为 6030TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。
**注意**:使用 JDBC-JNI 的 drivertaos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
* libtaos.so
在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so该目录包含在 Linux 自动扫描路径上,无需单独指定。
* taos.dll
在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
> 在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14]Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
JDBC-JNI 的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。
TDengine 的 JDBC URL 规范格式为:
`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下:
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
url中的配置参数如下
* user登录 TDengine 用户名,默认值 root。
* password用户登录密码默认值 taosdata。
* charset客户端使用的字符集默认值为系统字符集。
* cfgdir客户端配置文件目录路径Linux OS 上默认值 /etc/taos Windows OS 上默认值 C:/TDengine/cfg。
* charset客户端使用的字符集默认值为系统字符集。
* locale客户端语言环境默认值系统当前 locale。
* timezone客户端使用的时区默认值为系统当前时区。
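As an illustration (a sketch with a hypothetical host and database, not from the original text), a URL carrying all of the optional parameters listed above looks like this:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class UrlParametersSketch {
    public static void main(String[] args) throws SQLException {
        // every parameter after '?' is optional; unspecified ones fall back to the defaults above
        String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test"
                + "?user=root&password=taosdata"
                + "&cfgdir=/etc/taos"
                + "&charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8";
        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```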
以上参数可以在 3 处配置,`优先级由高到低`分别如下:
1. JDBC URL 参数
如上所述,可以在 JDBC URL 的参数中指定。
2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
#### 指定URL和Properties获取连接
除了通过指定的 URL 获取连接,还可以使用 Properties 指定建立连接时的参数,如下所示:
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
// Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
// String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
@ -114,22 +152,68 @@ public Connection getConn() throws Exception{
}
```
3. 客户端配置文件 taos.cfg
以上示例,建立一个到 hostname 为 taosdemo.com端口为 6030数据库名为 test 的连接。注释为使用 JDBC-RESTful 时的方法。这个连接在 url 中指定了用户名(user)为 root密码password为 taosdata并在 connProps 中指定了使用的字符集、语言环境、时区等信息。
linux 系统默认配置文件为 /var/lib/taos/taos.cfgwindows 系统默认配置文件路径为 C:\TDengine\cfg\taos.cfg。
```properties
# client default username
# defaultUser root
properties 中的配置参数如下:
* TSDBDriver.PROPERTY_KEY_USER登录 TDengine 用户名,默认值 root。
* TSDBDriver.PROPERTY_KEY_PASSWORD用户登录密码默认值 taosdata。
* TSDBDriver.PROPERTY_KEY_CONFIG_DIR客户端配置文件目录路径Linux OS 上默认值 /etc/taos Windows OS 上默认值 C:/TDengine/cfg。
* TSDBDriver.PROPERTY_KEY_CHARSET客户端使用的字符集默认值为系统字符集。
* TSDBDriver.PROPERTY_KEY_LOCALE客户端语言环境默认值系统当前 locale。
* TSDBDriver.PROPERTY_KEY_TIME_ZONE客户端使用的时区默认值为系统当前时区。
# client default password
# defaultPass taosdata
#### 使用客户端配置文件建立连接
当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。
如下所示:
1. 在 Java 应用中不指定 hostname 和 port
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
return conn;
}
```
2. 在配置文件中指定 firstEp 和 secondEp
```
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
# second fully qualified domain name (FQDN) for TDengine system, for cluster only
secondEp cluster_node2:6030
# default system charset
# charset UTF-8
# charset UTF-8
# system locale
# locale en_US.UTF-8
```
以上示例jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时JDBC 会尝试使用 secondEp 连接集群。
TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可以正常建立到集群的连接。
> 注意:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件Linux OS 上默认值 /etc/taos/taos.cfg Windows OS 上默认值 C:/TDengine/cfg/taos.cfg。
#### 配置参数的优先级
通过以上 3 种方式获取连接,如果配置参数在 url、Properties、客户端配置文件中有重复则参数的`优先级由高到低`分别如下:
1. JDBC URL 参数,如上所述,可以在 JDBC URL 的参数中指定。
2. Properties connProps
3. 客户端配置文件 taos.cfg
例如:在 url 中指定了 password 为 taosdata在 Properties 中指定了 password 为 taosdemo那么JDBC 会使用 url 中的 password 建立连接。
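A short sketch of that precedence rule (hypothetical endpoint, not from the original text): the password from the URL wins over the one set in Properties.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;

public class ParameterPrecedenceSketch {
    public static void main(String[] args) throws SQLException {
        // password appears both in the URL (taosdata) and in Properties (taosdemo);
        // following the priority order above, the URL value is the one actually used
        String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata";
        Properties props = new Properties();
        props.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdemo");
        props.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        try (Connection conn = DriverManager.getConnection(jdbcUrl, props)) {
            System.out.println("connected with URL credentials: " + !conn.isClosed());
        }
    }
}
```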
> 更多详细配置请参考[客户端配置][13]
### 创建数据库和表
@ -146,6 +230,7 @@ stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
> 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。
### 插入数据
@ -156,6 +241,7 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now
System.out.println("insert " + affectedRows + " rows.");
```
> now 为系统内部函数,默认为服务器当前时间。
> `now + 1s` 代表服务器当前时间往后加 1 秒数字后面代表时间单位a(毫秒), s(秒), m(分), h(小时), d(天)w(周), n(月), y(年)。
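The fragment below is a sketch reusing the `stmt` and table `tb` from the example above; it shows the relative-timestamp syntax with different units:

```java
// now - 1h / now - 1m are evaluated on the server relative to its current time
int rows = stmt.executeUpdate(
        "insert into tb values (now - 1h, 20, 11.2) (now - 1m, 22, 10.8) (now, 23, 10.3)");
System.out.println("inserted " + rows + " rows");  // expected: inserted 3 rows
```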
@ -177,8 +263,150 @@ while(resultSet.next()){
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
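For illustration (a sketch reusing `resultSet` from the query above), the same row can be read either by 1-based index or, preferably, by column name:

```java
while (resultSet.next()) {
    Timestamp ts = resultSet.getTimestamp(1);           // 1-based column index
    int temperature = resultSet.getInt("temperature");  // column name (recommended)
    float humidity = resultSet.getFloat("humidity");
    System.out.printf("%s, %d, %f%n", ts, temperature, humidity);
}
```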
### 处理异常
在报错后通过SQLException可以获取到错误的信息和错误码
```java
try (Statement statement = connection.createStatement()) {
// executeQuery
ResultSet resultSet = statement.executeQuery(sql);
// print result
printResult(resultSet);
} catch (SQLException e) {
System.out.println("ERROR Message: " + e.getMessage());
System.out.println("ERROR Code: " + e.getErrorCode());
e.printStackTrace();
}
```
JDBC连接器可能报错的错误码包括3种JDBC driver本身的报错错误码在0x2301到0x2350之间JNI方法的报错错误码在0x2351到0x2400之间TDengine其他功能模块的报错。
具体的错误码请参考:
* https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
* https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h
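A sketch (not from the original text) of how those ranges can be used to tell the three error sources apart:

```java
try (Statement statement = connection.createStatement()) {
    statement.executeQuery("select * from not_exist_table");  // deliberately failing query
} catch (SQLException e) {
    int code = e.getErrorCode();
    if (code >= 0x2301 && code <= 0x2350) {
        System.out.println("JDBC driver error: " + e.getMessage());
    } else if (code >= 0x2351 && code <= 0x2400) {
        System.out.println("JNI layer error: " + e.getMessage());
    } else {
        System.out.println("server-side error (see taoserror.h): " + e.getMessage());
    }
}
```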
### <a class="anchor" id="stmt-java"></a>通过参数绑定写入数据
从 2.1.2.0 版本开始TDengine 的 **JDBC-JNI** 实现大幅改进了参数绑定方式对数据写入INSERT场景的支持。采用这种方式写入数据时能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。(注意:**JDBC-RESTful** 实现并不提供参数绑定这种使用方式。)
```java
Statement stmt = conn.createStatement();
Random r = new Random();
// INSERT 语句中VALUES 部分允许指定具体的数据列;如果采取自动建表,则 TAGS 部分需要设定全部 TAGS 列的参数值:
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)");
// 设定数据表名:
s.setTableName("w1");
// 设定 TAGS 取值:
s.setTagInt(0, r.nextInt(10));
s.setTagString(1, "Beijing");
int numOfRows = 10;
// VALUES 部分以逐列的方式进行设置:
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
ArrayList<Integer> s1 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
s1.add(r.nextInt(100));
}
s.setInt(1, s1);
ArrayList<String> s2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
s2.add("test" + r.nextInt(100));
}
s.setString(2, s2, 10);
// AddBatch 之后,缓存并未清空。为避免混乱,并不推荐在 ExecuteBatch 之前再次绑定新一批的数据:
s.columnDataAddBatch();
// 执行绑定数据后的语句:
s.columnDataExecuteBatch();
// 执行语句后清空缓存。在清空之后,可以复用当前的对象,绑定新的一批数据(可以是新表名、新 TAGS 值、新 VALUES 值):
s.columnDataClearBatch();
// 执行完毕,释放资源:
s.columnDataCloseBatch();
```
用于设定 TAGS 取值的方法总共有:
```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```
用于设定 VALUES 数据列的取值的方法总共有:
```java
public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException
public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException
public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException
public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException
public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException
public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。
### <a class="anchor" id="subscribe"></a>订阅
#### 创建
```java
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
```
`subscribe` 方法的三个参数含义如下:
* topic订阅的主题即名称此参数是订阅的唯一标识
* sql订阅的查询语句此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据
* restart如果订阅已经存在是重新开始还是继续之前的订阅
如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
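As a contrast to the example above (a sketch, not from the original text), passing `restart = true` discards any saved progress for the same topic and consumes from the beginning again:

```java
// same topic and SQL, but restart = true starts consumption from scratch
TSDBSubscribe freshSub = ((TSDBConnection) conn).subscribe("topic", "select * from meters", true);
```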
#### 消费数据
```java
int total = 0;
while(true) {
TSDBResultSet rs = sub.consume();
int count = 0;
while(rs.next()) {
count++;
}
total += count;
System.out.printf("%d rows consumed, total %d\n", count, total);
Thread.sleep(1000);
}
```
`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
#### 关闭订阅
```java
sub.close(true);
```
`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。
### 关闭资源
@ -187,12 +415,17 @@ resultSet.close();
stmt.close();
conn.close();
```
> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
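A try-with-resources sketch (not from the original text, table name hypothetical) that closes the ResultSet, Statement and Connection automatically and therefore avoids the leak mentioned above:

```java
String url = "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata";
try (Connection conn = DriverManager.getConnection(url);
     Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("select * from tb")) {
    while (rs.next()) {
        System.out.println(rs.getTimestamp("ts") + ": " + rs.getInt("temperature"));
    }
} catch (SQLException e) {
    e.printStackTrace();
}
```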
## 与连接池使用
**HikariCP**
* 引入相应 HikariCP maven 依赖:
```xml
<dependency>
<groupId>com.zaxxer</groupId>
@ -202,31 +435,34 @@ conn.close();
```
* 使用示例如下:
```java
public static void main(String[] args) throws SQLException {
HikariConfig config = new HikariConfig();
// jdbc properties
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
config.setUsername("root");
config.setPassword("taosdata");
config.setMinimumIdle(3); //minimum number of idle connection
// connection pool configurations
config.setMinimumIdle(10); //minimum number of idle connection
config.setMaximumPoolSize(10); //maximum number of connection in the pool
config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
config.setIdleTimeout(60000); // max idle time for recycle idle connection
config.setConnectionTestQuery("describe log.dn"); //validation query
config.setValidationTimeout(3000); //validation query timeout
config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool
config.setMaxLifetime(0); // maximum life time for each connection
config.setIdleTimeout(0); // max idle time for recycle idle connection
config.setConnectionTestQuery("select server_status()"); //validation query
HikariDataSource ds = new HikariDataSource(config); //create datasource
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
//query or insert
// ...
connection.close(); // put back to connection pool
}
```
> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
> 更多 HikariCP 使用问题请查看[官方说明][5]
@ -243,40 +479,32 @@ conn.close();
```
* 使用示例如下:
```java
public static void main(String[] args) throws Exception {
Properties properties = new Properties();
properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
properties.put("username","root");
properties.put("password","taosdata");
properties.put("maxActive","10"); //maximum number of connection in the pool
properties.put("initialSize","3");//initial number of connection
properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
properties.put("minIdle","3");//minimum number of connection in the pool
properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
properties.put("validationQuery","describe log.dn"); //validation query
properties.put("testWhileIdle","true"); // test connection while idle
properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
//create druid datasource
DataSource ds = DruidDataSourceFactory.createDataSource(properties);
Connection connection = ds.getConnection(); // get connection
DruidDataSource dataSource = new DruidDataSource();
// jdbc properties
dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
dataSource.setUrl(url);
dataSource.setUsername("root");
dataSource.setPassword("taosdata");
// pool configurations
dataSource.setInitialSize(10);
dataSource.setMinIdle(10);
dataSource.setMaxActive(10);
dataSource.setMaxWait(30000);
dataSource.setValidationQuery("select server_status()");
Connection connection = dataSource.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // put back to connection pool
}
```
> 更多 druid 使用问题请查看[官方说明][6]
**注意事项**
@ -291,29 +519,64 @@ server_status()|
Query OK, 1 row(s) in set (0.000141s)
```
## 与框架使用
* Spring JdbcTemplate 中使用 taos-jdbcdriver可参考 [SpringJdbcTemplate][11]
* Springboot + Mybatis 中使用,可参考 [springbootdemo][12]
## <a class="anchor" id="version"></a>TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
| 2.0.31 | 2.1.3.0 及以上 | 1.8.x |
| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
## TDengine DataType 和 Java DataType
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
| TDengine DataType | Java DataType |
| ----------------- | ------------------ |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT | java.lang.Short |
| TINYINT | java.lang.Byte |
| BOOL | java.lang.Boolean |
| BINARY | byte array |
| NCHAR | java.lang.String |
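A sketch (hypothetical table `test.all_types`, reusing a `conn` from the earlier examples) that reads one column of each mapped type through the corresponding getter:

```java
try (Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("select * from test.all_types")) {
    while (rs.next()) {
        Timestamp ts = rs.getTimestamp("ts");     // TIMESTAMP
        int i32      = rs.getInt("c_int");        // INT
        long i64     = rs.getLong("c_bigint");    // BIGINT
        float f32    = rs.getFloat("c_float");    // FLOAT
        double f64   = rs.getDouble("c_double");  // DOUBLE
        short i16    = rs.getShort("c_smallint"); // SMALLINT
        byte i8      = rs.getByte("c_tinyint");   // TINYINT
        boolean b    = rs.getBoolean("c_bool");   // BOOL
        byte[] bin   = rs.getBytes("c_binary");   // BINARY -> byte[]
        String nchar = rs.getString("c_nchar");   // NCHAR -> String
        System.out.printf("%s %d %d %f %f %d %d %b %s %s%n",
                ts, i32, i64, f32, f64, i16, i8, b, new String(bin), nchar);
    }
}
```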
## 常见问题
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
**原因**:程序没有找到依赖的本地函数库 taos。
**解决方法**windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
**解决方法**windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
**原因**:目前 TDengine 只支持 64 位 JDK。
**解决方法**:重新安装 64 位 JDK。
* 其它问题请参考 [Issues][7]
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[3]: https://github.com/taosdata/TDengine
@ -324,6 +587,9 @@ Query OK, 1 row(s) in set (0.000141s)
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[10]: https://maven.aliyun.com/mvn/search
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
[13]: https://www.taosdata.com/cn/documentation/administrator/#client
[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
[15]: https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF

View File

@ -171,11 +171,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
// do nothing
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
private void checkResultSetTypeAndResultSetConcurrency(int resultSetType, int resultSetConcurrency) throws SQLException {
switch (resultSetType) {
case ResultSet.TYPE_FORWARD_ONLY:
break;
@ -194,7 +190,14 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
checkResultSetTypeAndResultSetConcurrency(resultSetType, resultSetConcurrency);
return createStatement();
}
@ -203,24 +206,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
switch (resultSetType) {
case ResultSet.TYPE_FORWARD_ONLY:
break;
case ResultSet.TYPE_SCROLL_INSENSITIVE:
case ResultSet.TYPE_SCROLL_SENSITIVE:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
switch (resultSetConcurrency) {
case ResultSet.CONCUR_READ_ONLY:
break;
case ResultSet.CONCUR_UPDATABLE:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
checkResultSetTypeAndResultSetConcurrency(resultSetType, resultSetConcurrency);
return prepareStatement(sql);
}

View File

@ -2,6 +2,7 @@ package com.taosdata.jdbc;
import java.sql.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public abstract class AbstractStatement extends WrapperImpl implements Statement {
@ -196,13 +197,44 @@ public abstract class AbstractStatement extends WrapperImpl implements Statement
if (batchedArgs == null || batchedArgs.isEmpty())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_BATCH_IS_EMPTY);
String clientInfo = getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE);
boolean batchErrorIgnore = clientInfo == null ? TSDBConstants.DEFAULT_BATCH_ERROR_IGNORE : Boolean.parseBoolean(clientInfo);
if (batchErrorIgnore) {
return executeBatchIgnoreException();
}
return executeBatchThrowException();
}
private int[] executeBatchIgnoreException() {
return batchedArgs.stream().mapToInt(sql -> {
try {
boolean isSelect = execute(sql);
if (isSelect) {
return SUCCESS_NO_INFO;
} else {
return getUpdateCount();
}
} catch (SQLException e) {
return EXECUTE_FAILED;
}
}).toArray();
}
private int[] executeBatchThrowException() throws BatchUpdateException {
int[] res = new int[batchedArgs.size()];
for (int i = 0; i < batchedArgs.size(); i++) {
boolean isSelect = execute(batchedArgs.get(i));
if (isSelect) {
res[i] = SUCCESS_NO_INFO;
} else {
res[i] = getUpdateCount();
try {
boolean isSelect = execute(batchedArgs.get(i));
if (isSelect) {
res[i] = SUCCESS_NO_INFO;
} else {
res[i] = getUpdateCount();
}
} catch (SQLException e) {
String reason = e.getMessage();
int[] updateCounts = Arrays.copyOfRange(res, 0, i);
throw new BatchUpdateException(reason, updateCounts, e);
}
}
return res;

View File

@ -326,7 +326,7 @@ public class EmptyResultSet implements ResultSet {
@Override
public int getFetchDirection() throws SQLException {
return 0;
return ResultSet.FETCH_FORWARD;
}
@Override
@ -341,12 +341,12 @@ public class EmptyResultSet implements ResultSet {
@Override
public int getType() throws SQLException {
return 0;
return ResultSet.TYPE_FORWARD_ONLY;
}
@Override
public int getConcurrency() throws SQLException {
return 0;
return ResultSet.CONCUR_READ_ONLY;
}
@Override
@ -746,7 +746,7 @@ public class EmptyResultSet implements ResultSet {
@Override
public int getHoldability() throws SQLException {
return 0;
return ResultSet.CLOSE_CURSORS_AT_COMMIT;
}
@Override

View File

@ -74,6 +74,8 @@ public abstract class TSDBConstants {
public static final String DEFAULT_PRECISION = "ms";
public static final boolean DEFAULT_BATCH_ERROR_IGNORE = false;
public static int typeName2JdbcType(String type) {
switch (type.toUpperCase()) {
case "TIMESTAMP":

View File

@ -100,6 +100,11 @@ public class TSDBDriver extends AbstractDriver {
*/
public static final String PROPERTY_KEY_TIMESTAMP_FORMAT = "timestampFormat";
/**
* continue process commands in executeBatch
*/
public static final String PROPERTY_KEY_BATCH_ERROR_IGNORE = "batchErrorIgnore";
private TSDBDatabaseMetaData dbMetaData = null;
static {

View File

@ -40,7 +40,7 @@ public class TSDBErrorNumbers {
public static final int ERROR_JNI_FETCH_END = 0x2358; // fetch to the end of resultSet
public static final int ERROR_JNI_OUT_OF_MEMORY = 0x2359; // JNI alloc memory failed
private static final Set<Integer> errorNumbers = new HashSet();
private static final Set<Integer> errorNumbers = new HashSet<>();
static {
errorNumbers.add(ERROR_CONNECTION_CLOSED);

View File

@ -348,4 +348,13 @@ public class TSDBJNIConnector {
}
private native int closeStmt(long stmt, long con);
public void insertLines(String[] lines) throws SQLException {
int code = insertLinesImp(lines, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to insertLines");
}
}
private native int insertLinesImp(String[] lines, long conn);
}

View File

@ -19,6 +19,7 @@ import com.taosdata.jdbc.utils.NullType;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
@ -463,6 +464,25 @@ public class TSDBResultSetRowData {
data.set(col, tsObj);
}
/**
* this implementation is used for TDengine old version
*/
public void setTimestamp(int col, long ts) {
//TODO: this implementation contains logical error
// when precision is us the (long ts) is 16 digital number
// when precision is ms, the (long ts) is 13 digital number
// we need a JNI function like this:
// public void setTimestamp(int col, long epochSecond, long nanoAdjustment)
if (ts < 1_0000_0000_0000_0L) {
data.set(col, new Timestamp(ts));
} else {
long epochSec = ts / 1000_000L;
long nanoAdjustment = ts % 1000_000L * 1000L;
Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
data.set(col, timestamp);
}
}
public Timestamp getTimestamp(int col, int nativeType) {
Object obj = data.get(col - 1);
if (obj == null)

View File

@ -61,7 +61,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return;
// parse row data
for (int rowIndex = 0; rowIndex < data.size(); rowIndex++) {
List<Object> row = new ArrayList();
List<Object> row = new ArrayList<>();
JSONArray jsonRow = data.getJSONArray(rowIndex);
for (int colIndex = 0; colIndex < this.metaData.getColumnCount(); colIndex++) {
row.add(parseColumnData(jsonRow, colIndex, columns.get(colIndex).taos_type));

View File

@ -9,6 +9,7 @@ import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
@ -34,7 +35,11 @@ public class HttpClientPoolUtil {
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).build();
httpClient = HttpClients.custom()
.setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY)
.setConnectionManager(connectionManager)
.setRetryHandler(new DefaultHttpRequestRetryHandler(3, true))
.build();
}
}

View File

@ -4,14 +4,14 @@ public class OSUtils {
private static final String OS = System.getProperty("os.name").toLowerCase();
public static boolean isWindows() {
return OS.indexOf("win") >= 0;
return OS.contains("win");
}
public static boolean isMac() {
return OS.indexOf("mac") >= 0;
return OS.contains("mac");
}
public static boolean isLinux() {
return OS.indexOf("nux") >= 0;
return OS.contains("nux");
}
}

View File

@ -16,7 +16,7 @@ package com.taosdata.jdbc.utils;
public class SqlSyntaxValidator {
private static final String[] SQL = {"select", "insert", "import", "create", "use", "alter", "drop", "set", "show", "describe"};
private static final String[] SQL = {"select", "insert", "import", "create", "use", "alter", "drop", "set", "show", "describe", "reset"};
private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set"};
private static final String[] querySQL = {"select", "show", "describe"};
@ -61,29 +61,11 @@ public class SqlSyntaxValidator {
public static boolean isUseSql(String sql) {
return sql.trim().toLowerCase().startsWith("use");
// || sql.trim().toLowerCase().matches("create\\s*database.*") || sql.toLowerCase().toLowerCase().matches("drop\\s*database.*");
}
public static boolean isShowSql(String sql) {
return sql.trim().toLowerCase().startsWith("show");
}
public static boolean isDescribeSql(String sql) {
return sql.trim().toLowerCase().startsWith("describe");
}
public static boolean isInsertSql(String sql) {
return sql.trim().toLowerCase().startsWith("insert") || sql.trim().toLowerCase().startsWith("import");
}
public static boolean isSelectSql(String sql) {
return sql.trim().toLowerCase().startsWith("select");
}
public static boolean isShowDatabaseSql(String sql) {
return sql.trim().toLowerCase().matches("show\\s*databases");
}
}

View File

@ -7,9 +7,9 @@ import java.util.concurrent.atomic.AtomicLong;
public class TaosInfo implements TaosInfoMBean {
private static volatile TaosInfo instance;
private AtomicLong connect_open = new AtomicLong();
private AtomicLong connect_close = new AtomicLong();
private AtomicLong statement_count = new AtomicLong();
private final AtomicLong connect_open = new AtomicLong();
private final AtomicLong connect_close = new AtomicLong();
private final AtomicLong statement_count = new AtomicLong();
static {
try {

View File

@ -5,7 +5,6 @@ import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;
import com.taosdata.jdbc.enums.TimestampPrecision;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.sql.Date;
import java.sql.Time;
@ -23,8 +22,7 @@ import java.util.stream.IntStream;
public class Utils {
private static Pattern ptn = Pattern.compile(".*?'");
private static final Pattern ptn = Pattern.compile(".*?'");
private static final DateTimeFormatter milliSecFormatter = new DateTimeFormatterBuilder().appendPattern("yyyy-MM-dd HH:mm:ss.SSS").toFormatter();
private static final DateTimeFormatter microSecFormatter = new DateTimeFormatterBuilder().appendPattern("yyyy-MM-dd HH:mm:ss.SSSSSS").toFormatter();
private static final DateTimeFormatter nanoSecFormatter = new DateTimeFormatterBuilder().appendPattern("yyyy-MM-dd HH:mm:ss.SSSSSSSSS").toFormatter();
@ -75,7 +73,7 @@ public class Utils {
public static String escapeSingleQuota(String origin) {
Matcher m = ptn.matcher(origin);
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
int end = 0;
while (m.find()) {
end = m.end();
@ -88,7 +86,7 @@ public class Utils {
sb.append(seg);
}
} else { // len > 1
sb.append(seg.substring(0, seg.length() - 2));
sb.append(seg, 0, seg.length() - 2);
char lastcSec = seg.charAt(seg.length() - 2);
if (lastcSec == '\\') {
sb.append("\\'");
@ -110,15 +108,27 @@ public class Utils {
return rawSql;
// toLowerCase
String preparedSql = rawSql.trim().toLowerCase();
String[] clause = new String[]{"values\\s*\\([\\s\\S]*?\\)", "tags\\s*\\([\\s\\S]*?\\)", "where[\\s\\S]*"};
String[] clause = new String[]{"tags\\s*\\([\\s\\S]*?\\)", "where[\\s\\S]*"};
Map<Integer, Integer> placeholderPositions = new HashMap<>();
RangeSet<Integer> clauseRangeSet = TreeRangeSet.create();
findPlaceholderPosition(preparedSql, placeholderPositions);
// find tags and where clause's position
findClauseRangeSet(preparedSql, clause, clauseRangeSet);
// find values clause's position
findValuesClauseRangeSet(preparedSql, clauseRangeSet);
return transformSql(rawSql, parameters, placeholderPositions, clauseRangeSet);
}
private static void findValuesClauseRangeSet(String preparedSql, RangeSet<Integer> clauseRangeSet) {
Matcher matcher = Pattern.compile("(values|,)\\s*(\\([^)]*\\))").matcher(preparedSql);
while (matcher.find()) {
int start = matcher.start(2);
int end = matcher.end(2);
clauseRangeSet.add(Range.closedOpen(start, end));
}
}
private static void findClauseRangeSet(String preparedSql, String[] regexArr, RangeSet<Integer> clauseRangeSet) {
clauseRangeSet.clear();
for (String regex : regexArr) {
@ -126,7 +136,7 @@ public class Utils {
while (matcher.find()) {
int start = matcher.start();
int end = matcher.end();
clauseRangeSet.add(Range.closed(start, end));
clauseRangeSet.add(Range.closedOpen(start, end));
}
}
}
@ -184,7 +194,7 @@ public class Utils {
public static String formatTimestamp(Timestamp timestamp) {
int nanos = timestamp.getNanos();
if (nanos % 1000000l != 0)
if (nanos % 1000000L != 0)
return timestamp.toLocalDateTime().format(microSecFormatter);
return timestamp.toLocalDateTime().format(milliSecFormatter);
}

View File

@ -5,7 +5,6 @@ import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import javax.management.OperationsException;
import java.sql.*;
import java.util.Properties;

View File

@ -1,16 +1,16 @@
package com.taosdata.jdbc;
import org.junit.Test;
import static org.junit.Assert.*;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.ArrayList;
import java.util.List;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.lang.management.ThreadMXBean;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TSDBJNIConnectorTest {
@ -114,6 +114,10 @@ public class TSDBJNIConnectorTest {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_RESULT_SET_NULL);
}
// close statement
connector.executeQuery("use d");
String[] lines = new String[] {"st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
"st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns"};
connector.insertLines(lines);
// close connection
connector.closeConnection();

View File

@ -1,6 +1,5 @@
package com.taosdata.jdbc;
import com.taosdata.jdbc.rs.RestfulParameterMetaData;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;

View File

@ -2,7 +2,6 @@ package com.taosdata.jdbc;
import org.junit.*;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.*;
import java.util.ArrayList;
@ -26,7 +25,7 @@ public class TSDBPreparedStatementTest {
long ts = System.currentTimeMillis();
pstmt_insert.setTimestamp(1, new Timestamp(ts));
pstmt_insert.setInt(2, 2);
pstmt_insert.setLong(3, 3l);
pstmt_insert.setLong(3, 3L);
pstmt_insert.setFloat(4, 3.14f);
pstmt_insert.setDouble(5, 3.1415);
pstmt_insert.setShort(6, (short) 6);
@ -53,8 +52,8 @@ public class TSDBPreparedStatementTest {
Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
Assert.assertEquals(2, rs.getInt(2));
Assert.assertEquals(2, rs.getInt("f1"));
Assert.assertEquals(3l, rs.getLong(3));
Assert.assertEquals(3l, rs.getLong("f2"));
Assert.assertEquals(3L, rs.getLong(3));
Assert.assertEquals(3L, rs.getLong("f2"));
Assert.assertEquals(3.14f, rs.getFloat(4), 0.0);
Assert.assertEquals(3.14f, rs.getFloat("f3"), 0.0);
Assert.assertEquals(3.1415, rs.getDouble(5), 0.0);
@ -312,14 +311,14 @@ public class TSDBPreparedStatementTest {
Random r = new Random();
s.setTableName("weather_test");
ArrayList<Long> ts = new ArrayList<Long>();
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
int random = 10 + r.nextInt(5);
ArrayList<String> s2 = new ArrayList<String>();
ArrayList<String> s2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
if (i % random == 0) {
s2.add(null);
@ -330,7 +329,7 @@ public class TSDBPreparedStatementTest {
s.setNString(1, s2, 4);
random = 10 + r.nextInt(5);
ArrayList<Float> s3 = new ArrayList<Float>();
ArrayList<Float> s3 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
if (i % random == 0) {
s3.add(null);
@ -341,7 +340,7 @@ public class TSDBPreparedStatementTest {
s.setFloat(2, s3);
random = 10 + r.nextInt(5);
ArrayList<Double> s4 = new ArrayList<Double>();
ArrayList<Double> s4 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
if (i % random == 0) {
s4.add(null);
@ -352,7 +351,7 @@ public class TSDBPreparedStatementTest {
s.setDouble(3, s4);
random = 10 + r.nextInt(5);
ArrayList<Long> ts2 = new ArrayList<Long>();
ArrayList<Long> ts2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
if (i % random == 0) {
ts2.add(null);
@ -379,13 +378,13 @@ public class TSDBPreparedStatementTest {
if (i % random == 0) {
sb.add(null);
} else {
sb.add(i % 2 == 0 ? true : false);
sb.add(i % 2 == 0);
}
}
s.setBoolean(6, sb);
random = 10 + r.nextInt(5);
ArrayList<String> s5 = new ArrayList<String>();
ArrayList<String> s5 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
if (i % random == 0) {
s5.add(null);
@ -424,14 +423,14 @@ public class TSDBPreparedStatementTest {
Random r = new Random();
s.setTableName("weather_test");
ArrayList<Long> ts = new ArrayList<Long>();
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
int random = 10 + r.nextInt(5);
ArrayList<String> s2 = new ArrayList<String>();
ArrayList<String> s2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
if (i % random == 0) {
s2.add(null);
@ -442,7 +441,7 @@ public class TSDBPreparedStatementTest {
s.setNString(1, s2, 4);
random = 10 + r.nextInt(5);
ArrayList<String> s3 = new ArrayList<String>();
ArrayList<String> s3 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
if (i % random == 0) {
s3.add(null);
@ -471,7 +470,7 @@ public class TSDBPreparedStatementTest {
public void bindDataWithSingleTagTest() throws SQLException {
Statement stmt = conn.createStatement();
String types[] = new String[]{"tinyint", "smallint", "int", "bigint", "bool", "float", "double", "binary(10)", "nchar(10)"};
String[] types = new String[]{"tinyint", "smallint", "int", "bigint", "bool", "float", "double", "binary(10)", "nchar(10)"};
for (String type : types) {
stmt.execute("drop table if exists weather_test");
@ -510,21 +509,21 @@ public class TSDBPreparedStatementTest {
}
ArrayList<Long> ts = new ArrayList<Long>();
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
int random = 10 + r.nextInt(5);
ArrayList<String> s2 = new ArrayList<String>();
ArrayList<String> s2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
s2.add("分支" + i % 4);
}
s.setNString(1, s2, 10);
random = 10 + r.nextInt(5);
ArrayList<String> s3 = new ArrayList<String>();
ArrayList<String> s3 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
s3.add("test" + i % 4);
}
@ -561,13 +560,13 @@ public class TSDBPreparedStatementTest {
s.setTagString(1, "test");
ArrayList<Long> ts = new ArrayList<Long>();
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
ArrayList<String> s2 = new ArrayList<String>();
ArrayList<String> s2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
s2.add("test" + i % 4);
}
@ -788,7 +787,7 @@ public class TSDBPreparedStatementTest {
public void setBigDecimal() throws SQLException {
// given
long ts = System.currentTimeMillis();
BigDecimal bigDecimal = new BigDecimal(3.14444);
BigDecimal bigDecimal = new BigDecimal("3.14444");
// when
pstmt_insert.setTimestamp(1, new Timestamp(ts));
@ -841,13 +840,13 @@ public class TSDBPreparedStatementTest {
}
@Test
public void setBytes() throws SQLException, IOException {
public void setBytes() throws SQLException {
// given
long ts = System.currentTimeMillis();
byte[] f8 = "{\"name\": \"john\", \"age\": 10, \"address\": \"192.168.1.100\"}".getBytes();
// when
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setTimestamp(1, new Timestamp(ts));
pstmt_insert.setBytes(9, f8);
int result = pstmt_insert.executeUpdate();
@ -999,7 +998,7 @@ public class TSDBPreparedStatementTest {
long ts = System.currentTimeMillis();
pstmt_insert.setTimestamp(1, new Timestamp(ts));
pstmt_insert.setInt(2, 2);
pstmt_insert.setLong(3, 3l);
pstmt_insert.setLong(3, 3L);
pstmt_insert.setFloat(4, 3.14f);
pstmt_insert.setDouble(5, 3.1415);
pstmt_insert.setShort(6, (short) 6);

View File

@ -95,13 +95,13 @@ public class TSDBResultSetTest {
@Test
public void getBigDecimal() throws SQLException {
BigDecimal f1 = rs.getBigDecimal("f1");
Assert.assertEquals(1609430400000l, f1.longValue());
Assert.assertEquals(1609430400000L, f1.longValue());
BigDecimal f2 = rs.getBigDecimal("f2");
Assert.assertEquals(1, f2.intValue());
BigDecimal f3 = rs.getBigDecimal("f3");
Assert.assertEquals(100l, f3.longValue());
Assert.assertEquals(100L, f3.longValue());
BigDecimal f4 = rs.getBigDecimal("f4");
Assert.assertEquals(3.1415f, f4.floatValue(), 0.00000f);
@ -125,13 +125,13 @@ public class TSDBResultSetTest {
Assert.assertEquals(1, Ints.fromByteArray(f2));
byte[] f3 = rs.getBytes("f3");
Assert.assertEquals(100l, Longs.fromByteArray(f3));
Assert.assertEquals(100L, Longs.fromByteArray(f3));
byte[] f4 = rs.getBytes("f4");
Assert.assertEquals(3.1415f, Float.valueOf(new String(f4)), 0.000000f);
Assert.assertEquals(3.1415f, Float.parseFloat(new String(f4)), 0.000000f);
byte[] f5 = rs.getBytes("f5");
Assert.assertEquals(3.1415926, Double.valueOf(new String(f5)), 0.000000f);
Assert.assertEquals(3.1415926, Double.parseDouble(new String(f5)), 0.000000f);
byte[] f6 = rs.getBytes("f6");
Assert.assertTrue(Arrays.equals("abc".getBytes(), f6));
@ -223,7 +223,7 @@ public class TSDBResultSetTest {
Object f3 = rs.getObject("f3");
Assert.assertEquals(Long.class, f3.getClass());
Assert.assertEquals(100l, f3);
Assert.assertEquals(100L, f3);
Object f4 = rs.getObject("f4");
Assert.assertEquals(Float.class, f4.getClass());
@ -421,7 +421,7 @@ public class TSDBResultSetTest {
@Test(expected = SQLFeatureNotSupportedException.class)
public void updateLong() throws SQLException {
rs.updateLong(1, 1l);
rs.updateLong(1, 1L);
}
@Test(expected = SQLFeatureNotSupportedException.class)

View File

@ -3,12 +3,13 @@ package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBDriver;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.sql.*;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
public class BadLocaleSettingTest {

View File

@ -0,0 +1,144 @@
package com.taosdata.jdbc.cases;
import org.junit.*;
import java.sql.*;
import java.util.stream.IntStream;
public class BatchErrorIgnoreTest {
private static final String host = "127.0.0.1";
@Test
public void batchErrorThrowException() throws SQLException {
// given
Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
// when
try (Statement stmt = conn.createStatement()) {
IntStream.range(1, 6).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + ")").forEach(sql -> {
try {
stmt.addBatch(sql);
} catch (SQLException e) {
e.printStackTrace();
}
});
stmt.addBatch("insert into t11 values(now, 11)");
IntStream.range(6, 11).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + "),(now + 1s, " + (10 * i) + ")").forEach(sql -> {
try {
stmt.addBatch(sql);
} catch (SQLException e) {
e.printStackTrace();
}
});
stmt.addBatch("select count(*) from test.weather");
stmt.executeBatch();
} catch (BatchUpdateException e) {
int[] updateCounts = e.getUpdateCounts();
Assert.assertEquals(5, updateCounts.length);
Assert.assertEquals(1, updateCounts[0]);
Assert.assertEquals(1, updateCounts[1]);
Assert.assertEquals(1, updateCounts[2]);
Assert.assertEquals(1, updateCounts[3]);
Assert.assertEquals(1, updateCounts[4]);
}
}
@Test
public void batchErrorIgnore() throws SQLException {
// given
Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata&batchErrorIgnore=true");
// when
int[] results = null;
try (Statement stmt = conn.createStatement()) {
IntStream.range(1, 6).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + ")").forEach(sql -> {
try {
stmt.addBatch(sql);
} catch (SQLException e) {
e.printStackTrace();
}
});
stmt.addBatch("insert into t11 values(now, 11)");
IntStream.range(6, 11).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + "),(now + 1s, " + (10 * i) + ")").forEach(sql -> {
try {
stmt.addBatch(sql);
} catch (SQLException e) {
e.printStackTrace();
}
});
stmt.addBatch("select count(*) from test.weather");
results = stmt.executeBatch();
} catch (SQLException e) {
e.printStackTrace();
}
// then
assert results != null;
Assert.assertEquals(12, results.length);
Assert.assertEquals(1, results[0]);
Assert.assertEquals(1, results[1]);
Assert.assertEquals(1, results[2]);
Assert.assertEquals(1, results[3]);
Assert.assertEquals(1, results[4]);
Assert.assertEquals(Statement.EXECUTE_FAILED, results[5]);
Assert.assertEquals(2, results[6]);
Assert.assertEquals(2, results[7]);
Assert.assertEquals(2, results[8]);
Assert.assertEquals(2, results[9]);
Assert.assertEquals(2, results[10]);
Assert.assertEquals(Statement.SUCCESS_NO_INFO, results[11]);
}
@Before
public void before() {
try {
Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
Statement stmt = conn.createStatement();
stmt.execute("use test");
stmt.execute("drop table if exists weather");
stmt.execute("create table weather (ts timestamp, f1 float) tags(t1 int)");
IntStream.range(1, 11).mapToObj(i -> "create table t" + i + " using weather tags(" + i + ")").forEach(sql -> {
try {
stmt.execute(sql);
} catch (SQLException e) {
e.printStackTrace();
}
});
stmt.close();
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
@BeforeClass
public static void beforeClass() {
try {
Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists test");
stmt.execute("create database if not exists test");
stmt.close();
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
@AfterClass
public static void afterClass() {
try {
Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists test");
stmt.close();
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -1,6 +1,7 @@
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBDriver;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -9,13 +10,13 @@ import java.util.Properties;
public class ConnectMultiTaosdByRestfulWithDifferentTokenTest {
private static String host1 = "192.168.17.156";
private static String user1 = "root";
private static String password1 = "tqueue";
private static final String host1 = "192.168.17.156";
private static final String user1 = "root";
private static final String password1 = "tqueue";
private Connection conn1;
private static String host2 = "192.168.17.82";
private static String user2 = "root";
private static String password2 = "taosdata";
private static final String host2 = "192.168.17.82";
private static final String user2 = "root";
private static final String password2 = "taosdata";
private Connection conn2;
@Test
@ -30,6 +31,7 @@ public class ConnectMultiTaosdByRestfulWithDifferentTokenTest {
try (Statement stmt = connection.createStatement()) {
ResultSet rs = stmt.executeQuery("select server_status()");
ResultSetMetaData meta = rs.getMetaData();
Assert.assertNotNull(meta);
while (rs.next()) {
}
} catch (SQLException e) {

View File

@ -13,7 +13,7 @@ import java.util.Properties;
public class DriverAutoloadTest {
private Properties properties;
private String host = "127.0.0.1";
private final String host = "127.0.0.1";
@Test
public void testRestful() throws SQLException {

View File

@ -13,9 +13,9 @@ import java.util.Random;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class InsertDbwithoutUseDbTest {
private static String host = "127.0.0.1";
private static final String host = "127.0.0.1";
private static Properties properties;
private static Random random = new Random(System.currentTimeMillis());
private static final Random random = new Random(System.currentTimeMillis());
@Test
public void case001() throws ClassNotFoundException, SQLException {

View File

@ -8,14 +8,14 @@ public class InsertSpecialCharacterJniTest {
private static final String host = "127.0.0.1";
private static Connection conn;
private static String dbName = "spec_char_test";
private static String tbname1 = "test";
private static String tbname2 = "weather";
private static String special_character_str_1 = "$asd$$fsfsf$";
private static String special_character_str_2 = "\\\\asdfsfsf\\\\";
private static String special_character_str_3 = "\\\\asdfsfsf\\";
private static String special_character_str_4 = "?asd??fsf?sf?";
private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
private static final String dbName = "spec_char_test";
private static final String tbname1 = "test";
private static final String tbname2 = "weather";
private static final String special_character_str_1 = "$asd$$fsfsf$";
private static final String special_character_str_2 = "\\\\asdfsfsf\\\\";
private static final String special_character_str_3 = "\\\\asdfsfsf\\";
private static final String special_character_str_4 = "?asd??fsf?sf?";
private static final String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
@Test
public void testCase01() throws SQLException {

View File

@ -8,14 +8,14 @@ public class InsertSpecialCharacterRestfulTest {
private static final String host = "127.0.0.1";
private static Connection conn;
private static String dbName = "spec_char_test";
private static String tbname1 = "test";
private static String tbname2 = "weather";
private static String special_character_str_1 = "$asd$$fsfsf$";
private static String special_character_str_2 = "\\\\asdfsfsf\\\\";
private static String special_character_str_3 = "\\\\asdfsfsf\\";
private static String special_character_str_4 = "?asd??fsf?sf?";
private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
private static final String dbName = "spec_char_test";
private static final String tbname1 = "test";
private static final String tbname2 = "weather";
private static final String special_character_str_1 = "$asd$$fsfsf$";
private static final String special_character_str_2 = "\\\\asdfsfsf\\\\";
private static final String special_character_str_3 = "\\\\asdfsfsf\\";
private static final String special_character_str_4 = "?asd??fsf?sf?";
private static final String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
@Test
public void testCase01() throws SQLException {

View File

@ -8,13 +8,13 @@ import java.util.Properties;
public class InvalidResultSetPointerTest {
private static String host = "127.0.0.1";
private static final String host = "127.0.0.1";
private static final String dbName = "test";
private static final String stbName = "stb";
private static final String tbName = "tb";
private static Connection connection;
private static int numOfSTb = 30000;
private static int numOfTb = 3;
private static final int numOfSTb = 30000;
private static final int numOfTb = 3;
private static int numOfThreads = 100;
@Test
@ -74,7 +74,7 @@ public class InvalidResultSetPointerTest {
b = numOfSTb % numOfThreads;
}
multiThreadingClass instance[] = new multiThreadingClass[numOfThreads];
multiThreadingClass[] instance = new multiThreadingClass[numOfThreads];
int last = 0;
for (int i = 0; i < numOfThreads; i++) {

View File

@ -9,8 +9,7 @@ import java.util.concurrent.TimeUnit;
public class MultiThreadsWithSameStatementTest {
private class Service {
private static class Service {
public Connection conn;
public Statement stmt;

View File

@ -0,0 +1,51 @@
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBDriver;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
public class ResetQueryCacheTest {
static Connection connection;
static Statement statement;
static String host = "127.0.0.1";
@Before
public void init() {
try {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
} catch (SQLException e) {
return;
}
}
@Test
public void testResetQueryCache() throws SQLException {
String resetSql = "reset query cache";
statement.execute(resetSql);
}
@After
public void close() {
try {
if (statement != null)
statement.close();
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -16,9 +16,9 @@ import static org.junit.Assert.assertEquals;
public class StableTest {
private static Connection connection;
private static String dbName = "test";
private static String stbName = "st";
private static String host = "127.0.0.1";
private static final String dbName = "test";
private static final String stbName = "st";
private static final String host = "127.0.0.1";
@BeforeClass
public static void createDatabase() {

View File

@ -25,7 +25,7 @@ public class TaosInfoMonitorTest {
return null;
}).collect(Collectors.toList());
connectionList.stream().forEach(conn -> {
connectionList.forEach(conn -> {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("show databases");
while (rs.next()) {
@ -37,7 +37,7 @@ public class TaosInfoMonitorTest {
}
});
connectionList.stream().forEach(conn -> {
connectionList.forEach(conn -> {
try {
conn.close();
TimeUnit.MILLISECONDS.sleep(100);

View File

@ -22,7 +22,7 @@ public class TimestampPrecisionInNanoInJniTest {
private static final long timestamp3 = (timestamp1 + 10) * 1000_000 + 123456;
private static final Format format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
private static final String date1 = format.format(new Date(timestamp1));
private static final String date4 = format.format(new Date(timestamp1 + 10l));
private static final String date4 = format.format(new Date(timestamp1 + 10L));
private static final String date2 = date1 + "123455";
private static final String date3 = date4 + "123456";

View File

@ -44,7 +44,7 @@ public class UnsignedNumberJniTest {
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals(32767, rs.getShort(3));
Assert.assertEquals(2147483647, rs.getInt(4));
Assert.assertEquals(9223372036854775807l, rs.getLong(5));
Assert.assertEquals(9223372036854775807L, rs.getLong(5));
}
} catch (SQLException e) {
e.printStackTrace();

View File

@ -45,7 +45,7 @@ public class UnsignedNumberRestfulTest {
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals(32767, rs.getShort(3));
Assert.assertEquals(2147483647, rs.getInt(4));
Assert.assertEquals(9223372036854775807l, rs.getLong(5));
Assert.assertEquals(9223372036854775807L, rs.getLong(5));
}
} catch (SQLException e) {
e.printStackTrace();

View File

@ -95,13 +95,13 @@ public class RestfulResultSetTest {
public void getBigDecimal() throws SQLException {
BigDecimal f1 = rs.getBigDecimal("f1");
long actual = (f1 == null) ? 0 : f1.longValue();
Assert.assertEquals(1609430400000l, actual);
Assert.assertEquals(1609430400000L, actual);
BigDecimal f2 = rs.getBigDecimal("f2");
Assert.assertEquals(1, f2.intValue());
BigDecimal f3 = rs.getBigDecimal("f3");
Assert.assertEquals(100l, f3.longValue());
Assert.assertEquals(100L, f3.longValue());
BigDecimal f4 = rs.getBigDecimal("f4");
Assert.assertEquals(3.1415f, f4.floatValue(), 0.00000f);
@ -125,13 +125,13 @@ public class RestfulResultSetTest {
Assert.assertEquals(1, Ints.fromByteArray(f2));
byte[] f3 = rs.getBytes("f3");
Assert.assertEquals(100l, Longs.fromByteArray(f3));
Assert.assertEquals(100L, Longs.fromByteArray(f3));
byte[] f4 = rs.getBytes("f4");
Assert.assertEquals(3.1415f, Float.valueOf(new String(f4)), 0.000000f);
Assert.assertEquals(3.1415f, Float.parseFloat(new String(f4)), 0.000000f);
byte[] f5 = rs.getBytes("f5");
Assert.assertEquals(3.1415926, Double.valueOf(new String(f5)), 0.000000f);
Assert.assertEquals(3.1415926, Double.parseDouble(new String(f5)), 0.000000f);
byte[] f6 = rs.getBytes("f6");
Assert.assertEquals("abc", new String(f6));
@ -222,7 +222,7 @@ public class RestfulResultSetTest {
Object f3 = rs.getObject("f3");
Assert.assertEquals(Long.class, f3.getClass());
Assert.assertEquals(100l, f3);
Assert.assertEquals(100L, f3);
Object f4 = rs.getObject("f4");
Assert.assertEquals(Float.class, f4.getClass());
@ -434,7 +434,7 @@ public class RestfulResultSetTest {
@Test(expected = SQLFeatureNotSupportedException.class)
public void updateLong() throws SQLException {
rs.updateLong(1, 1l);
rs.updateLong(1, 1L);
}
@Test(expected = SQLFeatureNotSupportedException.class)

View File

@ -10,17 +10,17 @@ public class OSUtilsTest {
@Test
public void inWindows() {
Assert.assertEquals(OS.indexOf("win") >= 0, OSUtils.isWindows());
Assert.assertEquals(OS.contains("win"), OSUtils.isWindows());
}
@Test
public void isMac() {
Assert.assertEquals(OS.indexOf("mac") >= 0, OSUtils.isMac());
Assert.assertEquals(OS.contains("mac"), OSUtils.isMac());
}
@Test
public void isLinux() {
Assert.assertEquals(OS.indexOf("nux") >= 0, OSUtils.isLinux());
Assert.assertEquals(OS.contains("nux"), OSUtils.isLinux());
}
@Before

View File

@ -21,47 +21,4 @@ public class TimestampUtil {
SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat);
return sdf.format(new Date(time));
}
public static class TimeTuple {
public Long start;
public Long end;
public Long timeGap;
TimeTuple(long start, long end, long timeGap) {
this.start = start;
this.end = end;
this.timeGap = timeGap;
}
}
public static TimeTuple range(long start, long timeGap, long size) {
long now = System.currentTimeMillis();
if (timeGap < 1)
timeGap = 1;
if (start == 0)
start = now - size * timeGap;
// if size is less than 1, throw
if (size < 1)
throw new IllegalArgumentException("size less than 1.");
// if even a timeGap of 1 already overruns now, shift start forward
if (start + size > now) {
start = now - size;
return new TimeTuple(start, now, 1);
}
long end = start + (long) (timeGap * size);
if (end > now) {
// shrink timeGap so the range fits within now
end = now;
double gap = (end - start) / (size * 1.0f);
if (gap < 1.0f) {
timeGap = 1;
start = end - size;
} else {
timeGap = (long) gap;
end = start + (long) (timeGap * size);
}
}
return new TimeTuple(start, end, timeGap);
}
}

View File

@ -17,14 +17,14 @@ public class UtilsTest {
Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
// given
s = "\'''''a\\'";
s = "'''''a\\'";
// when
news = Utils.escapeSingleQuota(s);
// then
Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
// given
s = "\'\'\'\''a\\'";
s = "'''''a\\'";
// when
news = Utils.escapeSingleQuota(s);
// then
@ -32,7 +32,7 @@ public class UtilsTest {
}
@Test
public void getNativeSqlReplaceQuestionMarks() {
public void lowerCase() {
// given
String nativeSql = "insert into ?.? (ts, temperature, humidity) using ?.? tags(?,?) values(now, ?, ?)";
Object[] parameters = Stream.of("test", "t1", "test", "weather", "beijing", 1, 12.2, 4).toArray();
@ -46,7 +46,7 @@ public class UtilsTest {
}
@Test
public void getNativeSqlReplaceQuestionMarks2() {
public void upperCase() {
// given
String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?)";
Object[] parameters = Stream.of("d1", 1, 123, 3.14, 220, 4).toArray();
@ -59,9 +59,49 @@ public class UtilsTest {
Assert.assertEquals(expected, actual);
}
@Test
public void multiValues() {
// given
String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?),(?,?,?,?)";
Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5).toArray();
// when
String actual = Utils.getNativeSql(nativeSql, parameters);
// then
String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,'abc',4),(200,3.1415,'xyz',5)";
Assert.assertEquals(expected, actual);
}
@Test
public void getNativeSqlReplaceNothing() {
public void lineTerminator() {
// given
String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,\r\n?,?),(?,?,?,?)";
Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5).toArray();
// when
String actual = Utils.getNativeSql(nativeSql, parameters);
// then
String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,\r\n'abc',4),(200,3.1415,'xyz',5)";
Assert.assertEquals(expected, actual);
}
@Test
public void lineTerminatorAndMultiValues() {
String nativeSql = "INSERT Into ? TAGS(?) VALUES(?,?,\r\n?,?),(?,? ,\r\n?,?) t? tags (?) Values (?,?,?\r\n,?),(?,?,?,?) t? Tags(?) values (?,?,?,?) , (?,?,?,?)";
Object[] parameters = Stream.of("t1", "abc", 100, 1.1, "xxx", "xxx", 200, 2.2, "xxx", "xxx", 2, "bcd", 300, 3.3, "xxx", "xxx", 400, 4.4, "xxx", "xxx", 3, "cde", 500, 5.5, "xxx", "xxx", 600, 6.6, "xxx", "xxx").toArray();
// when
String actual = Utils.getNativeSql(nativeSql, parameters);
// then
String expected = "INSERT Into t1 TAGS('abc') VALUES(100,1.1,\r\n'xxx','xxx'),(200,2.2 ,\r\n'xxx','xxx') t2 tags ('bcd') Values (300,3.3,'xxx'\r\n,'xxx'),(400,4.4,'xxx','xxx') t3 Tags('cde') values (500,5.5,'xxx','xxx') , (600,6.6,'xxx','xxx')";
Assert.assertEquals(expected, actual);
}
@Test
public void replaceNothing() {
// given
String nativeSql = "insert into test.t1 (ts, temperature, humidity) using test.weather tags('beijing',1) values(now, 12.2, 4)";
@ -73,7 +113,7 @@ public class UtilsTest {
}
@Test
public void getNativeSqlReplaceNothing2() {
public void replaceNothing2() {
// given
String nativeSql = "insert into test.t1 (ts, temperature, humidity) using test.weather tags('beijing',1) values(now, 12.2, 4)";
Object[] parameters = Stream.of("test", "t1", "test", "weather", "beijing", 1, 12.2, 4).toArray();
@ -86,7 +126,7 @@ public class UtilsTest {
}
@Test
public void getNativeSqlReplaceNothing3() {
public void replaceNothing3() {
// given
String nativeSql = "insert into ?.? (ts, temperature, humidity) using ?.? tags(?,?) values(now, ?, ?)";

View File

@ -403,6 +403,20 @@ class CTaosInterface(object):
"""
return CTaosInterface.libtaos.taos_affected_rows(result)
@staticmethod
def insertLines(connection, lines):
'''
insert through line protocol
@lines: list of str
@rtype: tsdb error codes
'''
numLines = len(lines)
c_lines_type = ctypes.c_char_p*numLines
c_lines = c_lines_type()
for i in range(numLines):
c_lines[i] = ctypes.c_char_p(lines[i].encode('utf-8'))
return CTaosInterface.libtaos.taos_insert_lines(connection, c_lines, ctypes.c_int(numLines))
@staticmethod
def subscribe(connection, restart, topic, sql, interval):
"""Create a subscription

View File

@ -66,6 +66,14 @@ class TDengineConnection(object):
self._conn, restart, topic, sql, interval)
return TDengineSubscription(sub)
def insertLines(self, lines):
"""
insert lines through line protocol
"""
if self._conn is None:
return None
return CTaosInterface.insertLines(self._conn, lines)
def cursor(self):
"""Return a new Cursor object using the connection.
"""

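A minimal usage sketch of the new insertLines API, assuming a local taosd with default credentials and an existing test database (the connection parameters and the sample line-protocol record below are illustrative only):

    import taos  # TDengine Python connector

    # Assumes a local taosd reachable with default credentials and a "test" database.
    conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", database="test")

    # Each element is one line-protocol record; insertLines() marshals the list into a
    # ctypes char* array and hands it to taos_insert_lines() in the C client library.
    lines = [
        'st,t1=3i64,t2=4f64 c1=3i64,c3=L"pass",c2=false 1626006833639000000ns',
    ]
    conn.insertLines(lines)
    conn.close()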
View File

@ -476,21 +476,23 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
cDebug("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr);
int32_t size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_DATA_ROW_HEAD_SIZE + pObj->rowSize;
int32_t size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_MEM_ROW_DATA_HEAD_SIZE + pObj->rowSize;
char *buffer = calloc(size, 1);
SWalHead *pHead = (SWalHead *)buffer;
SSubmitMsg *pMsg = (SSubmitMsg *) (buffer + sizeof(SWalHead));
SSubmitBlk *pBlk = (SSubmitBlk *) (buffer + sizeof(SWalHead) + sizeof(SSubmitMsg));
SDataRow trow = (SDataRow)pBlk->data;
tdInitDataRow(trow, pSchema);
SMemRow trow = (SMemRow)pBlk->data;
SDataRow dataRow = (SDataRow)memRowDataBody(trow);
memRowSetType(trow, SMEM_ROW_DATA);
tdInitDataRow(dataRow, pSchema);
for (int32_t i = 0; i < pSchema->numOfCols; i++) {
STColumn *c = pSchema->columns + i;
void* val = row[i];
void *val = row[i];
if (val == NULL) {
val = getNullValue(c->type);
val = (void *)getNullValue(c->type);
} else if (c->type == TSDB_DATA_TYPE_BINARY) {
val = ((char*)val) - sizeof(VarDataLenT);
} else if (c->type == TSDB_DATA_TYPE_NCHAR) {
@ -500,9 +502,9 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
memcpy((char *)val + sizeof(VarDataLenT), buf, len);
varDataLen(val) = len;
}
tdAppendColVal(trow, val, c->type, c->bytes, c->offset);
tdAppendColVal(dataRow, val, c->type, c->offset);
}
pBlk->dataLen = htonl(dataRowLen(trow));
pBlk->dataLen = htonl(memRowDataTLen(trow));
pBlk->schemaLen = 0;
pBlk->uid = htobe64(pObj->uid);
@ -511,7 +513,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
pBlk->sversion = htonl(pSchema->version);
pBlk->padding = 0;
pHead->len = sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + dataRowLen(trow);
pHead->len = sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + memRowDataTLen(trow);
pMsg->header.vgId = htonl(pContext->vgId);
pMsg->header.contLen = htonl(pHead->len);

View File

@ -18,7 +18,7 @@ ELSE ()
ENDIF ()
ADD_EXECUTABLE(taosd ${SRC})
TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lz4 balance sync ${LINK_JEMALLOC})
TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lua lz4 balance sync ${LINK_JEMALLOC})
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosd taos_static)

View File

@ -150,6 +150,8 @@ static void *dnodeProcessMPeerQueue(void *param) {
SMnodeMsg *pPeerMsg;
int32_t type;
void * unUsed;
setThreadName("dnodeMPeerQ");
while (1) {
if (taosReadQitemFromQset(tsMPeerQset, &type, (void **)&pPeerMsg, &unUsed) == 0) {

View File

@ -155,6 +155,8 @@ static void *dnodeProcessMReadQueue(void *param) {
int32_t type;
void * unUsed;
setThreadName("dnodeMReadQ");
while (1) {
if (taosReadQitemFromQset(tsMReadQset, &type, (void **)&pRead, &unUsed) == 0) {
dDebug("qset:%p, mnode read got no message from qset, exiting", tsMReadQset);

View File

@ -168,7 +168,9 @@ static void *dnodeProcessMWriteQueue(void *param) {
SMnodeMsg *pWrite;
int32_t type;
void * unUsed;
setThreadName("dnodeMWriteQ");
while (1) {
if (taosReadQitemFromQset(tsMWriteQset, &type, (void **)&pWrite, &unUsed) == 0) {
dDebug("qset:%p, mnode write got no message from qset, exiting", tsMWriteQset);

View File

@ -41,6 +41,9 @@
#include "dnodeTelemetry.h"
#include "module.h"
#include "mnode.h"
#include "qScript.h"
#include "tcache.h"
#include "tscompression.h"
#if !defined(_MODULE) || !defined(_TD_LINUX)
int32_t moduleStart() { return 0; }
@ -84,6 +87,7 @@ static SStep tsDnodeSteps[] = {
{"dnode-shell", dnodeInitShell, dnodeCleanupShell},
{"dnode-statustmr", dnodeInitStatusTimer,dnodeCleanupStatusTimer},
{"dnode-telemetry", dnodeInitTelemetry, dnodeCleanupTelemetry},
{"dnode-script", scriptEnvPoolInit, scriptEnvPoolCleanup},
};
static SStep tsDnodeCompactSteps[] = {
@ -205,6 +209,7 @@ void dnodeCleanUpSystem() {
dnodeCleanupComponents();
taos_cleanup();
taosCloseLog();
taosStopCacheRefreshWorker();
}
}
@ -234,6 +239,12 @@ static void dnodeCheckDataDirOpenned(char *dir) {
}
static int32_t dnodeInitStorage() {
#ifdef TD_TSZ
// compress module init
tsCompressInit();
#endif
// storage module init
if (tsDiskCfgNum == 1 && dnodeCreateDir(tsDataDir) < 0) {
dError("failed to create dir: %s, reason: %s", tsDataDir, strerror(errno));
return -1;
@ -266,7 +277,7 @@ static int32_t dnodeInitStorage() {
return -1;
}
}
//TODO(dengyihao): no need to init here
//TODO(dengyihao): no need to init here
if (dnodeCreateDir(tsMnodeDir) < 0) {
dError("failed to create dir: %s, reason: %s", tsMnodeDir, strerror(errno));
return -1;
@ -309,7 +320,15 @@ static int32_t dnodeInitStorage() {
return 0;
}
static void dnodeCleanupStorage() { tfsDestroy(); }
static void dnodeCleanupStorage() {
// storage destroy
tfsDestroy();
#ifdef TD_TSZ
// compress destroy
tsCompressExit();
#endif
}
bool dnodeIsFirstDeploy() {
return strcmp(tsFirst, tsLocalEp) == 0;

View File

@ -48,9 +48,11 @@ int32_t dnodeInitShell() {
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DNODE] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TP] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_FUNCTION] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SYNC_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_TP] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_FUNCTION] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_TP] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TABLE]= dnodeDispatchToMWriteQueue;
@ -72,6 +74,7 @@ int32_t dnodeInitShell() {
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLES_META] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SHOW] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE_FUNC] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_NETWORK_TEST] = dnodeSendStartupStep;

View File

@ -245,6 +245,8 @@ static void* telemetryThread(void* param) {
clock_gettime(CLOCK_REALTIME, &end);
end.tv_sec += 300; // wait 5 minutes before send first report
setThreadName("telemetryThrd");
while (!tsExit) {
int r = 0;
struct timespec ts = end;

View File

@ -103,6 +103,8 @@ static void *dnodeProcessMgmtQueue(void *wparam) {
int32_t qtype;
void * handle;
setThreadName("dnodeMgmtQ");
while (1) {
if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pMgmt, &handle) == 0) {
dDebug("qdnode mgmt got no message from qset:%p, , exit", pPool->qset);

View File

@ -118,6 +118,11 @@ static void *dnodeProcessReadQueue(void *wparam) {
SVReadMsg * pRead;
int32_t qtype;
void * pVnode;
char name[16];
memset(name, 0, 16);
snprintf(name, 16, "%s-dnReadQ", pPool->name);
setThreadName(name);
while (1) {
if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pRead, &pVnode) == 0) {

View File

@ -191,6 +191,8 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
taosBlockSIGPIPE();
dDebug("dnode vwrite worker:%d is running", pWorker->workerId);
setThreadName("dnodeWriteQ");
while (1) {
numOfMsgs = taosReadAllQitemsFromQset(pWorker->qset, pWorker->qall, &pVnode);
if (numOfMsgs == 0) {
@ -202,12 +204,12 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
for (int32_t i = 0; i < numOfMsgs; ++i) {
taosGetQitem(pWorker->qall, &qtype, (void **)&pWrite);
dTrace("msg:%p, app:%p type:%s will be processed in vwrite queue, qtype:%s hver:%" PRIu64, pWrite,
pWrite->rpcMsg.ahandle, taosMsg[pWrite->pHead.msgType], qtypeStr[qtype], pWrite->pHead.version);
pWrite->rpcMsg.ahandle, taosMsg[pWrite->walHead.msgType], qtypeStr[qtype], pWrite->walHead.version);
pWrite->code = vnodeProcessWrite(pVnode, &pWrite->pHead, qtype, pWrite);
pWrite->code = vnodeProcessWrite(pVnode, &pWrite->walHead, qtype, pWrite);
if (pWrite->code <= 0) atomic_add_fetch_32(&pWrite->processedCount, 1);
if (pWrite->code > 0) pWrite->code = 0;
if (pWrite->code == 0 && pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true;
if (pWrite->code == 0 && pWrite->walHead.msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true;
dTrace("msg:%p is processed in vwrite queue, code:0x%x", pWrite, pWrite->code);
}
@ -222,7 +224,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
dnodeSendRpcVWriteRsp(pVnode, pWrite, pWrite->code);
} else {
if (qtype == TAOS_QTYPE_FWD) {
vnodeConfirmForward(pVnode, pWrite->pHead.version, pWrite->code, pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT);
vnodeConfirmForward(pVnode, pWrite->walHead.version, pWrite->code, pWrite->walHead.msgType != TSDB_MSG_TYPE_SUBMIT);
}
if (pWrite->rspRet.rsp) {
rpcFreeCont(pWrite->rspRet.rsp);

Some files were not shown because too many files have changed in this diff.