diff --git a/.appveyor.yml b/.appveyor.yml
index fe4816688b..ee1dc91767 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -1,30 +1,49 @@
version: 1.0.{build}
-os: Visual Studio 2015
+image:
+ - Visual Studio 2015
+ - macos
environment:
matrix:
- ARCH: amd64
- ARCH: x86
+matrix:
+ exclude:
+ - image: macos
+ ARCH: x86
+for:
+ -
+ matrix:
+ only:
+ - image: Visual Studio 2015
+ clone_folder: c:\dev\TDengine
+ clone_depth: 1
-clone_folder: c:\dev\TDengine
-clone_depth: 1
+ init:
+ - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%
-init:
- - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%
+ before_build:
+ - cd c:\dev\TDengine
+ - md build
-before_build:
- - cd c:\dev\TDengine
- - md build
-
-build_script:
- - cd build
- - cmake -G "NMake Makefiles" ..
- - nmake install
+ build_script:
+ - cd build
+ - cmake -G "NMake Makefiles" ..
+ - nmake install
+ -
+ matrix:
+ only:
+ - image: macos
+ clone_depth: 1
+ build_script:
+ - mkdir debug
+ - cd debug
+ - cmake .. > /dev/null
+ - make > /dev/null
notifications:
- provider: Email
to:
- sangshuduo@gmail.com
-
on_build_success: true
on_build_failure: true
on_build_status_changed: true
diff --git a/.drone.yml b/.drone.yml
new file mode 100644
index 0000000000..f7ee4e976f
--- /dev/null
+++ b/.drone.yml
@@ -0,0 +1,198 @@
+---
+kind: pipeline
+name: test_amd64
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: gcc
+ commands:
+ - apt-get update
+ - apt-get install -y cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ trigger:
+ event:
+ - pull_request
+ when:
+ branch:
+ - develop
+ - master
+---
+kind: pipeline
+name: test_arm64
+
+platform:
+ os: linux
+ arch: arm64
+
+steps:
+- name: build
+ image: gcc
+ commands:
+ - apt-get update
+ - apt-get install -y cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake .. -DCPUTYPE=aarch64 > /dev/null
+ - make
+ trigger:
+ event:
+ - pull_request
+ when:
+ branch:
+ - develop
+ - master
+---
+kind: pipeline
+name: test_arm
+
+platform:
+ os: linux
+ arch: arm
+
+steps:
+- name: build
+ image: arm32v7/ubuntu:bionic
+ commands:
+ - apt-get update
+ - apt-get install -y cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake .. -DCPUTYPE=aarch32 > /dev/null
+ - make
+ trigger:
+ event:
+ - pull_request
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: build_trusty
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ubuntu:trusty
+ commands:
+ - apt-get update
+ - apt-get install -y gcc cmake3 build-essential git binutils-2.26
+
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ trigger:
+ event:
+ - pull_request
+ when:
+ branch:
+ - develop
+ - master
+---
+kind: pipeline
+name: build_xenial
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ubuntu:xenial
+ commands:
+ - apt-get update
+ - apt-get install -y gcc cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ trigger:
+ event:
+ - pull_request
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: build_bionic
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ubuntu:bionic
+ commands:
+ - apt-get update
+ - apt-get install -y gcc cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ trigger:
+ event:
+ - pull_request
+ when:
+ branch:
+ - develop
+ - master
+---
+kind: pipeline
+name: build_centos7
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ansible/centos7-ansible
+ commands:
+ - yum install -y gcc gcc-c++ make cmake
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ trigger:
+ event:
+ - pull_request
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: goodbye
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: 64-bit
+ image: alpine
+ commands:
+ - echo 64-bit is good.
+ when:
+ branch:
+ - develop
+ - master
+
+
+depends_on:
+- test_arm64
+- test_amd64
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 1ff1108056..da47590a2f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@ build/
.vscode/
.idea/
cmake-build-debug/
+cmake-build-release/
cscope.out
.DS_Store
debug/
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 0617d75976..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,296 +0,0 @@
-#
-# Configuration
-#
-#
-# Build Matrix
-#
-branches:
- only:
- - master
- - develop
- - coverity_scan
- - /^.*ci-.*$/
-
-matrix:
- - os: linux
- dist: focal
- language: c
-
- git:
- - depth: 1
-
- compiler: gcc
- env: DESC="linux/gcc build and test"
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - net-tools
- - python3-pip
- - python3-setuptools
- - valgrind
- - psmisc
- - unixodbc
- - unixodbc-dev
- - mono-complete
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - cmake .. > /dev/null
- - make > /dev/null
-
- after_success:
- - travis_wait 20
- - |-
- case $TRAVIS_OS_NAME in
- linux)
- cd ${TRAVIS_BUILD_DIR}/debug
- make install > /dev/null || travis_terminate $?
-
- py3ver=`python3 --version|awk '{print $2}'|cut -d "." -f 1,2` && apt install python$py3ver-dev
- pip3 install psutil
- pip3 install guppy3
- pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
-
- cd ${TRAVIS_BUILD_DIR}/tests/examples/C#/taosdemo
- mcs -out:taosdemo *.cs || travis_terminate $?
- pkill -TERM -x taosd
- fuser -k -n tcp 6030
- sleep 1
- ${TRAVIS_BUILD_DIR}/debug/build/bin/taosd -c ${TRAVIS_BUILD_DIR}/debug/test/cfg > /dev/null &
- sleep 5
- mono taosdemo -Q DEFAULT -y || travis_terminate $?
- pkill -KILL -x taosd
- fuser -k -n tcp 6030
- sleep 1
-
- cd ${TRAVIS_BUILD_DIR}/tests
- ./test-all.sh smoke || travis_terminate $?
- sleep 1
-
- cd ${TRAVIS_BUILD_DIR}/tests/pytest
- pkill -TERM -x taosd
- fuser -k -n tcp 6030
- sleep 1
- ./crash_gen.sh -a -p -t 4 -s 2000|| travis_terminate $?
- sleep 1
-
- cd ${TRAVIS_BUILD_DIR}/tests/pytest
- ./valgrind-test.sh 2>&1 > mem-error-out.log
- sleep 1
-
-
- # Color setting
- RED='\033[0;31m'
- GREEN='\033[1;32m'
- GREEN_DARK='\033[0;32m'
- GREEN_UNDERLINE='\033[4;32m'
- NC='\033[0m'
-
- grep 'start to execute\|ERROR SUMMARY' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-mem-error-out.log
-
- for memError in `grep 'ERROR SUMMARY' uniq-mem-error-out.log | awk '{print $4}'`
- do
- if [ -n "$memError" ]; then
- if [ "$memError" -gt 12 ]; then
- echo -e "${RED} ## Memory errors number valgrind reports is $memError.\
- More than our threshold! ## ${NC}"
- travis_terminate $memError
- fi
- fi
- done
-
- grep 'start to execute\|definitely lost:' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-definitely-lost-out.log
- for defiMemError in `grep 'definitely lost:' uniq-definitely-lost-out.log | awk '{print $7}'`
- do
- if [ -n "$defiMemError" ]; then
- if [ "$defiMemError" -gt 13 ]; then
- echo -e "${RED} ## Memory errors number valgrind reports \
- Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
- travis_terminate $defiMemError
- fi
- fi
- done
-
- ;;
- esac
-
- - os: linux
- dist: bionic
- language: c
- compiler: gcc
- env: COVERITY_SCAN=true
- git:
- - depth: 1
-
- script:
- - echo "this job is for coverity scan"
-
- addons:
- coverity_scan:
- # GitHub project metadata
- # ** specific to your project **
- project:
- name: TDengine
- version: 2.x
- description: TDengine
-
- # Where email notification of build analysis results will be sent
- notification_email: sdsang@taosdata.com, slguan@taosdata.com
-
- # Commands to prepare for build_command
- # ** likely specific to your build **
- build_command_prepend: cmake . > /dev/null
-
- # The command that will be added as an argument to "cov-build" to compile your project for analysis,
- # ** likely specific to your build **
- build_command: make
-
- # Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'.
- # Take care in resource usage, and consider the build frequency allowances per
- # https://scan.coverity.com/faq#frequency
- branch_pattern: coverity_scan
-
- - os: linux
- dist: trusty
- language: c
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - binutils-2.26
- - unixodbc
- - unixodbc-dev
- env:
- - DESC="trusty/gcc-4.8/bintuils-2.26 build"
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - cmake .. > /dev/null
- - export PATH=/usr/lib/binutils-2.26/bin:$PATH && make
-
- - os: linux
- dist: bionic
- language: c
- compiler: clang
- env: DESC="linux/clang build"
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - unixodbc
- - unixodbc-dev
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - cmake .. > /dev/null
- - make > /dev/null
-
- - os: linux
- arch: arm64
- dist: bionic
- language: c
- compiler: clang
- env: DESC="arm64 linux/clang build"
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
- cmake .. -DCPUTYPE=aarch64 > /dev/null;
- else
- cmake .. > /dev/null;
- fi
- - make > /dev/null
-
- - os: linux
- arch: arm64
- dist: xenial
- language: c
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - unixodbc
- - unixodbc-dev
- env:
- - DESC="arm64 xenial build"
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
- cmake .. -DCPUTYPE=aarch64 > /dev/null;
- else
- cmake .. > /dev/null;
- fi
- - make > /dev/null
-
- - os: osx
- osx_image: xcode11.4
- language: c
- compiler: clang
- env: DESC="mac/clang build"
- git:
- - depth: 1
- addons:
- homebrew:
- - cmake
- - unixodbc
-
- script:
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
- - cmake .. > /dev/null
- - make > /dev/null
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e0d6e82923..6f50aca079 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,7 +3,7 @@ IF (CMAKE_VERSION VERSION_LESS 3.0)
PROJECT(TDengine CXX)
SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}")
SET(PROJECT_VERSION_MINOR "${LIB_MINOR_VERSION}")
- SET(PROJECT_VERSION_PATCH"${LIB_PATCH_VERSION}")
+ SET(PROJECT_VERSION_PATCH "${LIB_PATCH_VERSION}")
SET(PROJECT_VERSION "${LIB_VERSION_STRING}")
ELSE ()
CMAKE_POLICY(SET CMP0048 NEW)
@@ -42,6 +42,13 @@ INCLUDE(cmake/env.inc)
INCLUDE(cmake/version.inc)
INCLUDE(cmake/install.inc)
+IF (CMAKE_SYSTEM_NAME MATCHES "Linux")
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pipe -Wall -Wshadow -Werror")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pipe -Wall -Wshadow -Werror")
+ENDIF ()
+MESSAGE(STATUS "CMAKE_C_FLAGS: ${CMAKE_C_FLAGS}")
+MESSAGE(STATUS "CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}")
+
ADD_SUBDIRECTORY(deps)
ADD_SUBDIRECTORY(src)
ADD_SUBDIRECTORY(tests)
diff --git a/Jenkinsfile b/Jenkinsfile
index dfe9ed4389..b48dca0241 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -94,6 +94,7 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
+ pip3 install ${WKC}/src/connector/python
'''
return 1
}
diff --git a/README.md b/README.md
index 45a955f458..78f902babe 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[](https://travis-ci.org/taosdata/TDengine)
+[](https://cloud.drone.io/taosdata/TDengine)
[](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[](https://bestpractices.coreinfrastructure.org/projects/4201)
diff --git a/cmake/define.inc b/cmake/define.inc
index e825dce024..57351e5478 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -157,7 +157,7 @@ IF (TD_WINDOWS)
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()
- SET(DEBUG_FLAGS "/Zi /W3 /GL")
+ SET(DEBUG_FLAGS "/fsanitize=address /Zi /W3 /GL")
SET(RELEASE_FLAGS "/W0 /O3 /GL")
ENDIF ()
diff --git a/cmake/env.inc b/cmake/env.inc
index efcc996176..3989993953 100755
--- a/cmake/env.inc
+++ b/cmake/env.inc
@@ -32,6 +32,7 @@ ENDIF ()
#
# Set compiler options
+SET(COMMON_C_FLAGS "${COMMON_FLAGS} -std=gnu99")
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_FLAGS} ${RELEASE_FLAGS}")
diff --git a/cmake/install.inc b/cmake/install.inc
index 9e325531d5..f8b3b7c3c6 100755
--- a/cmake/install.inc
+++ b/cmake/install.inc
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.28-dist.jar DESTINATION connector/jdbc)
+ INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.29.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
diff --git a/cmake/version.inc b/cmake/version.inc
index 8035b31cc7..0ee23f319a 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "2.0.20.0")
+ SET(TD_VER_NUMBER "2.1.0.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/deps/rmonotonic/src/monotonic.c b/deps/rmonotonic/src/monotonic.c
index 1470f91b56..c6d2df9097 100644
--- a/deps/rmonotonic/src/monotonic.c
+++ b/deps/rmonotonic/src/monotonic.c
@@ -36,6 +36,15 @@ static char monotonic_info_string[32];
static long mono_ticksPerMicrosecond = 0;
+#ifdef _TD_NINGSI_60
+// implement __rdtsc in ningsi60
+uint64_t __rdtsc(){
+ unsigned int lo,hi;
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ return ((uint64_t)hi << 32) | lo;
+}
+#endif
+
static monotime getMonotonicUs_x86() {
return __rdtsc() / mono_ticksPerMicrosecond;
}
diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md
index aba10a14e3..50b31a55d3 100644
--- a/documentation20/cn/00.index/docs.md
+++ b/documentation20/cn/00.index/docs.md
@@ -117,9 +117,9 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
## 常用工具
* [TDengine样例导入工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
-* [TDengine性能对比测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
+* [TDengine写入性能测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
* [IDEA数据库管理工具可视化使用TDengine](https://www.taosdata.com/blog/2020/08/27/1767.html)
-* [基于eletron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI)
+* [基于Electron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI)
* [DataX,支持TDengine的离线数据采集/同步工具](https://github.com/wgzhao/DataX)(文档:[读取插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/reader/tdenginereader.md)、[写入插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/writer/tdenginewriter.md))
## TDengine与其他数据库的对比测试
diff --git a/documentation20/cn/06.queries/docs.md b/documentation20/cn/06.queries/docs.md
index a161778a72..5557134aac 100644
--- a/documentation20/cn/06.queries/docs.md
+++ b/documentation20/cn/06.queries/docs.md
@@ -12,7 +12,7 @@ TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, G
- 时间戳对齐的连接查询(Join Query: 隐式连接)操作
- 多种聚合/计算函数: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff等
-例如:在TAOS Shell中,从表d1001中查询出vlotage > 215的记录,按时间降序排列,仅仅输出2条。
+例如:在TAOS Shell中,从表d1001中查询出voltage > 215的记录,按时间降序排列,仅仅输出2条。
```mysql
taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
ts | current | voltage | phase |
diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md
index 3442a2248c..5eec33e2f1 100644
--- a/documentation20/cn/08.connector/01.java/docs.md
+++ b/documentation20/cn/08.connector/01.java/docs.md
@@ -16,7 +16,6 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
* TDengine 目前不支持针对单条数据记录的删除操作。
* 目前不支持事务操作。
-* 目前不支持表间的 union 操作。
* 目前不支持嵌套查询(nested query)。
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。
@@ -447,7 +446,7 @@ Query OK, 1 row(s) in set (0.000141s)
-## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
+## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index 54cc1a9e03..9edeb78c68 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -32,7 +32,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
**Linux**
-**1. 从涛思官网(https://www.taosdata.com/cn/all-downloads/)下载**
+**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载**
* X64硬件环境:TDengine-client-2.x.x.x-Linux-x64.tar.gz
@@ -68,7 +68,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
**Windows x64/x86**
-**1. 从涛思官网(https://www.taosdata.com/cn/all-downloads/)下载 :**
+**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载 :**
* X64硬件环境:TDengine-client-2.X.X.X-Windows-x64.exe
@@ -345,11 +345,11 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
* taos:已经建立好的数据库连接
* sql:SQL查询语句(仅能使用查询语句)
* fp:用户定义的回调函数指针,每次流式计算完成后,TDengine将查询的结果(TAOS_ROW)、查询状态(TAOS_RES)、用户定义参数(PARAM)传递给回调函数,在回调函数内,用户可以使用taos_num_fields获取结果集列数,taos_fetch_fields获取结果集每列数据的类型。
- * stime:是流式计算开始的时间,如果是0,表示从现在开始,如果不为零,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数)
+ * stime:是流式计算开始的时间。如果是“64位整数最小值”,表示从现在开始;如果不为“64位整数最小值”,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数)。
* param:是应用提供的用于回调的一个参数,回调时,提供给应用
* callback: 第二个回调函数,会在连续查询自动停止时被调用。
- 返回值为NULL,表示创建成功,返回值不为空,表示成功。
+ 返回值为NULL,表示创建失败;返回值不为空,表示成功。
- `void taos_close_stream (TAOS_STREAM *tstr)`
@@ -400,27 +400,22 @@ Python连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/
#### Linux
-用户可以在源代码的src/connector/python(或者tar.gz的/connector/python)文件夹下找到python2和python3的connector安装包。用户可以通过pip命令安装:
+用户可以在源代码的src/connector/python(或者tar.gz的/connector/python)文件夹下找到connector安装包。用户可以通过pip命令安装:
- `pip install src/connector/python/linux/python2/`
+ `pip install src/connector/python/`
或
- `pip3 install src/connector/python/linux/python3/`
+ `pip3 install src/connector/python/`
#### Windows
在已安装Windows TDengine 客户端的情况下, 将文件"C:\TDengine\driver\taos.dll" 拷贝到 "C:\windows\system32" 目录下, 然后进入Windwos cmd 命令行界面
```cmd
-cd C:\TDengine\connector\python\windows
-python -m pip install python2\
-```
-或
-```cmd
-cd C:\TDengine\connector\python\windows
-python -m pip install python3\
+cd C:\TDengine\connector\python
+python -m pip install .
```
-* 如果机器上没有pip命令,用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。
+* 如果机器上没有pip命令,用户可将src/connector/python下的taos文件夹拷贝到应用程序的目录使用。
对于windows 客户端,安装TDengine windows 客户端后,将C:\TDengine\driver\taos.dll拷贝到C:\windows\system32目录下即可。
### 使用
diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md
index 79380f3bbd..6a2ead3766 100644
--- a/documentation20/cn/09.connections/docs.md
+++ b/documentation20/cn/09.connections/docs.md
@@ -16,7 +16,7 @@ TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafanaplugin
以CentOS 7.2操作系统为例,将grafanaplugin目录拷贝到/var/lib/grafana/plugins目录下,重新启动grafana即可。
```bash
-sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/tdengine
+sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
```
### 使用 Grafana
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index 72fcd05d52..bfa0456c7d 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -144,7 +144,7 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数
- numOfMnodes:系统中管理节点个数。默认值:3。
- balance:是否启动负载均衡。0:否,1:是。默认值:1。
- mnodeEqualVnodeNum: 一个mnode等同于vnode消耗的个数。默认值:4。
-- offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*100(即100天)。
+- offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*10(即10天)。
- statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。
- maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。
- maxVgroupsPerDb: 每个数据库中能够使用的最大vgroup个数。
@@ -462,31 +462,31 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
| 关键字列表 | | | | |
| ---------- | ----------- | ------------ | ---------- | --------- |
-| ABLOCKS | CONNECTIONS | HAVING | MODULES | SLIMIT |
-| ABORT | COPY | ID | NCHAR | SMALLINT |
-| ACCOUNT | COUNT | IF | NE | SPREAD |
-| ACCOUNTS | CREATE | IGNORE | NONE | STABLE |
-| ADD | CTIME | IMMEDIATE | NOT | STABLES |
-| AFTER | DATABASE | IMPORT | NOTNULL | STAR |
-| ALL | DATABASES | IN | NOW | STATEMENT |
-| ALTER | DAYS | INITIALLY | OF | STDDEV |
-| AND | DEFERRED | INSERT | OFFSET | STREAM |
-| AS | DELIMITERS | INSTEAD | OR | STREAMS |
-| ASC | DESC | INTEGER | ORDER | STRING |
-| ATTACH | DESCRIBE | INTERVAL | PASS | SUM |
-| AVG | DETACH | INTO | PERCENTILE | TABLE |
-| BEFORE | DIFF | IP | PLUS | TABLES |
-| BEGIN | DISTINCT | IS | PRAGMA | TAG |
-| BETWEEN | DIVIDE | ISNULL | PREV | TAGS |
-| BIGINT | DNODE | JOIN | PRIVILEGE | TBLOCKS |
-| BINARY | DNODES | KEEP | QUERIES | TBNAME |
-| BITAND | DOT | KEY | QUERY | TIMES |
-| BITNOT | DOUBLE | KILL | RAISE | TIMESTAMP |
-| BITOR | DROP | LAST | REM | TINYINT |
-| BOOL | EACH | LE | REPLACE | TOP |
-| BOTTOM | END | LEASTSQUARES | REPLICA | TOPIC |
-| BY | EQ | LIKE | RESET | TRIGGER |
-| CACHE | EXISTS | LIMIT | RESTRICT | UMINUS |
+| ABLOCKS | CONNECTIONS | HAVING | MODULES | SMALLINT |
+| ABORT | COPY | ID | NCHAR | SPREAD |
+| ACCOUNT | COUNT | IF | NE | STABLE |
+| ACCOUNTS | CREATE | IGNORE | NONE | STABLES |
+| ADD | CTIME | IMMEDIATE | NOT | STAR |
+| AFTER | DATABASE | IMPORT | NOTNULL | STATEMENT |
+| ALL | DATABASES | IN | NOW | STDDEV |
+| ALTER | DAYS | INITIALLY | OF | STREAM |
+| AND | DEFERRED | INSERT | OFFSET | STREAMS |
+| AS | DELIMITERS | INSTEAD | OR | STRING |
+| ASC | DESC | INTEGER | ORDER | SUM |
+| ATTACH | DESCRIBE | INTERVAL | PASS | TABLE |
+| AVG | DETACH | INTO | PERCENTILE | TABLES |
+| BEFORE | DIFF | IP | PLUS | TAG |
+| BEGIN | DISTINCT | IS | PRAGMA | TAGS |
+| BETWEEN | DIVIDE | ISNULL | PREV | TBLOCKS |
+| BIGINT | DNODE | JOIN | PRIVILEGE | TBNAME |
+| BINARY | DNODES | KEEP | QUERIES | TIMES |
+| BITAND | DOT | KEY | QUERY | TIMESTAMP |
+| BITNOT | DOUBLE | KILL | RAISE | TINYINT |
+| BITOR | DROP | LAST | REM | TOP |
+| BOOL | EACH | LE | REPLACE | TOPIC |
+| BOTTOM | END | LEASTSQUARES | REPLICA | TRIGGER |
+| BY | EQ | LIKE | RESET | UMINUS |
+| CACHE | EXISTS | LIMIT | RESTRICT | UNION |
| CASCADE | EXPLAIN | LINEAR | ROW | UPLUS |
| CHANGE | FAIL | LOCAL | ROWS | USE |
| CLOG | FILL | LP | RP | USER |
@@ -498,5 +498,5 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
| CONCAT | GLOB | METRICS | SHOW | VIEW |
| CONFIGS | GRANTS | MIN | SLASH | WAVG |
| CONFLICT | GROUP | MINUS | SLIDING | WHERE |
-| CONNECTION | GT | MNODES | | |
+| CONNECTION | GT | MNODES | SLIMIT | |
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 04c90748f2..fbb82ee140 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -135,6 +135,14 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
SHOW DATABASES;
```
+- **显示一个数据库的创建语句**
+
+ ```mysql
+ SHOW CREATE DATABASE db_name;
+ ```
+ 常用于数据库迁移。对一个已经存在的数据库,返回其创建语句;在另一个集群中执行该语句,就能得到一个设置完全相同的 Database。
+
+
## 表管理
- **创建数据表**
@@ -200,6 +208,13 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
通配符匹配:1)’%’ (百分号)匹配0到任意个字符;2)’\_’下划线匹配一个字符。
+- **显示一个数据表的创建语句**
+
+ ```mysql
+ SHOW CREATE TABLE tb_name;
+ ```
+ 常用于数据库迁移。对一个已经存在的数据表,返回其创建语句;在另一个集群中执行该语句,就能得到一个结构完全相同的数据表。
+
- **在线修改显示字符宽度**
```mysql
@@ -265,6 +280,13 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```
查看数据库内全部 STable,及其相关信息,包括 STable 的名称、创建时间、列数量、标签(TAG)数量、通过该 STable 建表的数量。
+- **显示一个超级表的创建语句**
+
+ ```mysql
+ SHOW CREATE STABLE stb_name;
+ ```
+ 常用于数据库迁移。对一个已经存在的超级表,返回其创建语句;在另一个集群中执行该语句,就能得到一个结构完全相同的超级表。
+
- **获取超级表的结构信息**
```mysql
@@ -407,7 +429,7 @@ SELECT select_expr [, select_expr ...]
[INTERVAL (interval_val [, interval_offset])]
[SLIDING sliding_val]
[FILL fill_val]
- [GROUP BY col_list ]
+ [GROUP BY col_list]
[ORDER BY col_list { DESC | ASC }]
[SLIMIT limit_val [SOFFSET offset_val]]
[LIMIT limit_val [OFFSET offset_val]]
@@ -647,7 +669,7 @@ Query OK, 1 row(s) in set (0.001091s)
3. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
+### UNION ALL 操作符
+
+```mysql
+SELECT ...
+UNION ALL SELECT ...
+[UNION ALL SELECT ...]
+```
+
+TDengine 支持 UNION ALL 操作符。也就是说,如果多个 SELECT 子句返回结果集的结构完全相同(列名、列类型、列数、顺序),那么可以通过 UNION ALL 把这些结果集合并到一起。目前只支持 UNION ALL 模式,也即在结果集的合并过程中是不去重的。
+
### SQL 示例
- 对于下面的例子,表tb1用以下语句创建
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index 36870b2ebe..28be037e6c 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -58,7 +58,12 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
-cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_path}/connector
+if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then
+ cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin
+else
+ echo "grafanaplugin bundled directory not found!"
+ exit 1
+fi
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
diff --git a/packaging/release.sh b/packaging/release.sh
index 68f947ccab..1e54bc2872 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -1,11 +1,11 @@
#!/bin/bash
#
-# Generate the deb package for ubunt, or rpm package for centos, or tar.gz package for other linux os
+# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
set -e
#set -x
-# releash.sh -v [cluster | edge]
+# release.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
# -V [stable | beta]
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec
index 92c917cb3d..9910e20bfe 100644
--- a/packaging/rpm/tdengine.spec
+++ b/packaging/rpm/tdengine.spec
@@ -66,7 +66,12 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
-cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/connector
+if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then
+ cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin
+else
+ echo "grafanaplugin bundled directory not found!"
+ exit 1
+fi
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index dca3dd2ff6..178a248cfe 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -607,6 +607,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"
@@ -630,6 +631,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
@@ -655,6 +657,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
diff --git a/packaging/tools/install_arbi.sh b/packaging/tools/install_arbi.sh
index a89d2257dc..f47c3672cb 100755
--- a/packaging/tools/install_arbi.sh
+++ b/packaging/tools/install_arbi.sh
@@ -205,6 +205,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
diff --git a/packaging/tools/install_arbi_power.sh b/packaging/tools/install_arbi_power.sh
index 4b12913760..3f27175151 100755
--- a/packaging/tools/install_arbi_power.sh
+++ b/packaging/tools/install_arbi_power.sh
@@ -205,6 +205,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh
index ba6ace4009..9f28435cb5 100755
--- a/packaging/tools/install_power.sh
+++ b/packaging/tools/install_power.sh
@@ -577,6 +577,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${powerd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/powerd' >> ${powerd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/power/bin/startPre.sh' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${powerd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${powerd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${powerd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${powerd_service_config}"
@@ -599,6 +600,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
@@ -624,6 +626,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 1fd0e943b1..d6ace0a063 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -243,9 +243,17 @@ function install_data() {
}
function install_connector() {
- ${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin ${install_main_dir}/connector
+ if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then
+ ${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin
+ else
+ echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+ fi
+ if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then
+ ${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector
+ else
+ echo "WARNING: go connector not found, please check if want to use it!"
+ fi
${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
- ${csudo} cp -rf ${source_dir}/src/connector/go ${install_main_dir}/connector
${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
}
@@ -333,6 +341,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"
diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh
index 30e9fa51a7..d0eeffc86a 100755
--- a/packaging/tools/makeclient.sh
+++ b/packaging/tools/makeclient.sh
@@ -117,10 +117,18 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
- cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
- cp -r ${connector_dir}/python ${install_dir}/connector/
- cp -r ${connector_dir}/go ${install_dir}/connector
- cp -r ${connector_dir}/nodejs ${install_dir}/connector
+ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+ else
+ echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+ fi
+ if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+ cp -r ${connector_dir}/go ${install_dir}/connector
+ else
+ echo "WARNING: go connector not found, please check if want to use it!"
+ fi
+ cp -r ${connector_dir}/python ${install_dir}/connector
+ cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh
index 181536b7f1..8241319e4f 100755
--- a/packaging/tools/makeclient_power.sh
+++ b/packaging/tools/makeclient_power.sh
@@ -144,24 +144,23 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
- cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
- cp -r ${connector_dir}/python ${install_dir}/connector/
- cp -r ${connector_dir}/go ${install_dir}/connector
+ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+ else
+ echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
+ fi
+ if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+ cp -r ${connector_dir}/go ${install_dir}/connector
+ else
+ echo "WARNING: go connector not found, please check if want to use it!"
+ fi
+ cp -r ${connector_dir}/python ${install_dir}/connector
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py
- sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
- sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
- sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
- sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
+ sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index 36b1fe5bd8..d114d5eef8 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -131,9 +131,17 @@ connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
- cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
- cp -r ${connector_dir}/python ${install_dir}/connector/
- cp -r ${connector_dir}/go ${install_dir}/connector
+ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+ else
+ echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
+ fi
+ if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+ cp -r ${connector_dir}/go ${install_dir}/connector
+ else
+ echo "WARNING: go connector not found, please check if want to use it!"
+ fi
+ cp -r ${connector_dir}/python ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
# Copy release note
diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh
index 554e7884b1..633a135c14 100755
--- a/packaging/tools/makepkg_power.sh
+++ b/packaging/tools/makepkg_power.sh
@@ -166,24 +166,24 @@ connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
- cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
+
+ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+ else
+ echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+ fi
+ if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+ cp -r ${connector_dir}/go ${install_dir}/connector
+ else
+ echo "WARNING: go connector not found, please check if want to use it!"
+ fi
cp -r ${connector_dir}/python ${install_dir}/connector/
- cp -r ${connector_dir}/go ${install_dir}/connector
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py
- sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
- sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
- sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
- sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
+ sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh
index 8665b3fec3..3aa8083175 100755
--- a/packaging/tools/post.sh
+++ b/packaging/tools/post.sh
@@ -405,6 +405,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 31343ed293..43006928a6 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,7 @@
name: tdengine
base: core18
-version: '2.0.20.0'
+
+version: '2.1.0.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@@ -72,7 +73,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- - usr/lib/libtaos.so.2.0.20.0
+ - usr/lib/libtaos.so.2.1.0.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h
index 15ef54b7b1..f0349c2b3d 100644
--- a/src/client/inc/tscSubquery.h
+++ b/src/client/inc/tscSubquery.h
@@ -48,6 +48,8 @@ void tscLockByThread(int64_t *lockedBy);
void tscUnlockByThread(int64_t *lockedBy);
+int tsInsertInitialCheck(SSqlObj *pSql);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index b09d34cd46..506b579c15 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -20,7 +20,7 @@
extern "C" {
#endif
-#include
+#include "tsched.h"
#include "exception.h"
#include "os.h"
#include "qExtbuffer.h"
@@ -170,9 +170,12 @@ void tscFieldInfoCopy(SFieldInfo* pFieldInfo, const SFieldInfo* pSrc, const SArr
static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
-int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
+int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize);
void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, uint64_t uid);
+int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
+void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
+
int32_t tscGetResRowLength(SArray* pExprList);
SExprInfo* tscExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
@@ -316,7 +319,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
-int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
+int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
diff --git a/src/client/inc/tschemautil.h b/src/client/inc/tschemautil.h
index a9dcd230a6..0026a27e19 100644
--- a/src/client/inc/tschemautil.h
+++ b/src/client/inc/tschemautil.h
@@ -21,8 +21,8 @@ extern "C" {
#endif
#include "taosmsg.h"
-#include "tstoken.h"
#include "tsclient.h"
+#include "ttoken.h"
/**
* get the number of tags of this table
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index 9a22961f30..fa19deb5cc 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -84,6 +84,7 @@ typedef struct STableMeta {
typedef struct STableMetaInfo {
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
+ uint32_t tableMetaSize;
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray
@@ -154,13 +155,12 @@ typedef struct STagCond {
typedef struct SParamInfo {
int32_t idx;
- char type;
+ uint8_t type;
uint8_t timePrec;
int16_t bytes;
uint32_t offset;
} SParamInfo;
-
typedef struct SBoundColumn {
bool hasVal; // denote if current column has bound or not
int32_t offset; // all column offset value
@@ -386,7 +386,8 @@ typedef struct SSqlObj {
tsem_t rspSem;
SSqlCmd cmd;
SSqlRes res;
-
+ bool isBind;
+
SSubqueryState subState;
struct SSqlObj **pSubs;
diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
index b3060e2c82..04bccc1a4a 100644
--- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
+++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
@@ -100,7 +100,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: isUpdateQueryImp
- * Signature: (J)J
+ * Signature: (JJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp
(JNIEnv *env, jobject jobj, jlong con, jlong tres);
@@ -185,6 +185,44 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp
(JNIEnv *, jobject, jlong, jbyteArray);
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: prepareStmtImp
+ * Signature: ([BJ)I
+ */
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp
+ (JNIEnv *, jobject, jbyteArray, jlong);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: setBindTableNameImp
+ * Signature: (JLjava/lang/String;J)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp
+ (JNIEnv *, jobject, jlong, jstring, jlong);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: bindColDataImp
+ * Signature: (J[B[B[BIIIIJ)J
+ */
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp
+(JNIEnv *, jobject, jlong, jbyteArray, jbyteArray, jbyteArray, jint, jint, jint, jint, jlong);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: executeBatchImp
+ * Signature: (JJ)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: executeBatchImp
+ * Signature: (JJ)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c
index 7447e36ac9..da7da17aa3 100644
--- a/src/client/src/TSDBJNIConnector.c
+++ b/src/client/src/TSDBJNIConnector.c
@@ -687,4 +687,194 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrec
}
return taos_result_precision(result);
-}
\ No newline at end of file
+}
+
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(JNIEnv *env, jobject jobj, jbyteArray jsql, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ if (jsql == NULL) {
+ jniError("jobj:%p, conn:%p, empty sql string", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ jsize len = (*env)->GetArrayLength(env, jsql);
+
+ char *str = (char *) calloc(1, sizeof(char) * (len + 1));
+ if (str == NULL) {
+ jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon);
+ return JNI_OUT_OF_MEMORY;
+ }
+
+ (*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str);
+ if ((*env)->ExceptionCheck(env)) {
+ // todo handle error
+ }
+
+ TAOS_STMT* pStmt = taos_stmt_init(tscon);
+ int32_t code = taos_stmt_prepare(pStmt, str, len);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ free(str);
+ return (jlong) pStmt;
+}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp(JNIEnv *env, jobject jobj, jlong stmt, jstring jname, jlong conn) {
+ TAOS *tsconn = (TAOS *)conn;
+ if (tsconn == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT* pStmt = (TAOS_STMT*) stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn);
+ return JNI_SQL_NULL;
+ }
+
+ const char *name = (*env)->GetStringUTFChars(env, jname, NULL);
+
+ int32_t code = taos_stmt_set_tbname((void*)stmt, name);
+ if (code != TSDB_CODE_SUCCESS) {
+ (*env)->ReleaseStringUTFChars(env, jname, name);
+
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ jniDebug("jobj:%p, conn:%p, set stmt bind table name:%s", jobj, tsconn, name);
+
+ (*env)->ReleaseStringUTFChars(env, jname, name);
+ return JNI_SUCCESS;
+}
+
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(JNIEnv *env, jobject jobj, jlong stmt,
+ jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList, jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT* pStmt = (TAOS_STMT*) stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ // todo refactor
+ jsize len = (*env)->GetArrayLength(env, colDataList);
+ char *colBuf = (char *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, colDataList, 0, len, (jbyte *)colBuf);
+ if ((*env)->ExceptionCheck(env)) {
+ // todo handle error
+ }
+
+ len = (*env)->GetArrayLength(env, lengthList);
+ char *lengthArray = (char*) calloc(1, len);
+ (*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte*) lengthArray);
+ if ((*env)->ExceptionCheck(env)) {
+ }
+
+ len = (*env)->GetArrayLength(env, nullList);
+ char *nullArray = (char*) calloc(1, len);
+ (*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte*) nullArray);
+ if ((*env)->ExceptionCheck(env)) {
+ }
+
+ // bind multi-rows with only one invoke.
+ TAOS_MULTI_BIND* b = calloc(1, sizeof(TAOS_MULTI_BIND));
+
+ b->num = numOfRows;
+ b->buffer_type = dataType; // todo check data type
+ b->buffer_length = IS_VAR_DATA_TYPE(dataType)? dataBytes:tDataTypes[dataType].bytes;
+ b->is_null = nullArray;
+ b->buffer = colBuf;
+ b->length = (int32_t*)lengthArray;
+
+ // set the length and is_null array
+ switch(dataType) {
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ case TSDB_DATA_TYPE_BIGINT: {
+ int32_t bytes = tDataTypes[dataType].bytes;
+ for(int32_t i = 0; i < numOfRows; ++i) {
+ b->length[i] = bytes;
+ }
+ break;
+ }
+
+ case TSDB_DATA_TYPE_NCHAR:
+ case TSDB_DATA_TYPE_BINARY: {
+ // do nothing
+ }
+ }
+
+ int32_t code = taos_stmt_bind_single_param_batch(pStmt, b, colIndex);
+ tfree(b->length);
+ tfree(b->buffer);
+ tfree(b->is_null);
+ tfree(b);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ return JNI_SUCCESS;
+}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT *pStmt = (TAOS_STMT*) stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ taos_stmt_add_batch(pStmt);
+ int32_t code = taos_stmt_execute(pStmt);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ jniDebug("jobj:%p, conn:%p, batch execute", jobj, tscon);
+ return JNI_SUCCESS;
+}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT *pStmt = (TAOS_STMT*) stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ int32_t code = taos_stmt_close(pStmt);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon);
+ return JNI_SUCCESS;
+}
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 89c79ade7e..f8ec36c55a 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -326,6 +326,7 @@ TAOS_ROW tscFetchRow(void *param) {
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
+ pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
@@ -679,6 +680,9 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
assert(pTableMetaInfo->pTableMeta != NULL);
const char* tableName = tNameGetTableName(&pTableMetaInfo->name);
+ if (pSql->cmd.command == TSDB_SQL_SHOW_CREATE_STABLE && !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
int32_t code = TSDB_CODE_SUCCESS;
@@ -709,13 +713,12 @@ static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- SCreateBuilder *param = (SCreateBuilder *)malloc(sizeof(SCreateBuilder));
+ SCreateBuilder *param = (SCreateBuilder *)calloc(1, sizeof(SCreateBuilder));
if (param == NULL) {
free(pInterSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
-
- strncpy(param->buf, tNameGetTableName(&pTableMetaInfo->name), TSDB_TABLE_NAME_LEN);
+ tNameGetDbName(&pTableMetaInfo->name, param->buf);
param->pParentSql = pSql;
param->pInterSql = pInterSql;
@@ -907,7 +910,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
*/
pRes->qId = 0x1;
pRes->numOfRows = 0;
- } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE) {
+ } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE || pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE) {
pRes->code = tscProcessShowCreateTable(pSql);
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) {
pRes->code = tscProcessShowCreateDatabase(pSql);
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index 3866db6c35..d55521c0c8 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -29,8 +29,7 @@
#include "taosdef.h"
#include "tscLog.h"
-#include "tscSubquery.h"
-#include "tstoken.h"
+#include "ttoken.h"
#include "tdataformat.h"
@@ -68,7 +67,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
} else if (strncmp(pToken->z, "0", 1) == 0 && pToken->n == 1) {
// do nothing
} else if (pToken->type == TK_INTEGER) {
- useconds = tsosStr2int64(pToken->z);
+ useconds = taosStr2int64(pToken->z);
} else {
// strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm);
if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
@@ -386,7 +385,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
* The server time/client time should not be mixed up in one sql string
* Do not employ sort operation is not involved if server time is used.
*/
-static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
+int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
// once the data block is disordered, we do NOT keep previous timestamp any more
if (!pDataBlocks->ordered) {
return TSDB_CODE_SUCCESS;
@@ -411,6 +410,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
if (k <= pDataBlocks->prevTS && (pDataBlocks->tsSource == TSDB_USE_CLI_TS)) {
pDataBlocks->ordered = false;
+ tscWarn("NOT ordered input timestamp");
}
pDataBlocks->prevTS = k;
@@ -463,23 +463,24 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
// Remove quotation marks
if (TK_STRING == sToken.type) {
// delete escape character: \\, \', \"
- char delim = sToken.z[0];
+ char delim = sToken.z[0];
+
int32_t cnt = 0;
int32_t j = 0;
for (uint32_t k = 1; k < sToken.n - 1; ++k) {
- if (sToken.z[k] == delim || sToken.z[k] == '\\') {
- if (sToken.z[k + 1] == delim) {
- cnt++;
+ if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) {
tmpTokenBuf[j] = sToken.z[k + 1];
- j++;
- k++;
- continue;
- }
+
+ cnt++;
+ j++;
+ k++;
+ continue;
}
tmpTokenBuf[j] = sToken.z[k];
j++;
}
+
tmpTokenBuf[j] = 0;
sToken.z = tmpTokenBuf;
sToken.n -= 2 + cnt;
@@ -576,12 +577,13 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
index = 0;
sToken = tStrGetToken(*str, &index, false);
- *str += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str);
code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
- return -1;
+ return code;
}
+
+ *str += index;
(*numOfRows)++;
}
@@ -693,6 +695,8 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
pBlocks->numOfRows = i + 1;
dataBuf->size = sizeof(SSubmitBlk) + dataBuf->rowSize * pBlocks->numOfRows;
}
+
+ dataBuf->prevTS = INT64_MIN;
}
static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
@@ -705,15 +709,10 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
}
code = TSDB_CODE_TSC_INVALID_SQL;
- char *tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \"
- if (NULL == tmpTokenBuf) {
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
+ char tmpTokenBuf[16*1024] = {0}; // used for deleting Escape character: \\, \', \"
int32_t numOfRows = 0;
code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);
-
- free(tmpTokenBuf);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -934,6 +933,42 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
}
+ /* parse columns after super table tags values.
+ * insert into table_name using super_table(tag_name1, tag_name2) tags(tag_val1, tag_val2)
+ * (normal_col1, normal_col2) values(normal_col1_val, normal_col2_val);
+ * */
+ index = 0;
+ sToken = tStrGetToken(sql, &index, false);
+ sql += index;
+ int numOfColsAfterTags = 0;
+ if (sToken.type == TK_LP) {
+ if (*boundColumn != NULL) {
+ return tscSQLSyntaxErrMsg(pCmd->payload, "bind columns again", sToken.z);
+ } else {
+ *boundColumn = &sToken.z[0];
+ }
+
+ while (1) {
+ index = 0;
+ sToken = tStrGetToken(sql, &index, false);
+
+ if (sToken.type == TK_RP) {
+ break;
+ }
+
+ sql += index;
+ ++numOfColsAfterTags;
+ }
+
+ if (numOfColsAfterTags == 0 && (*boundColumn) != NULL) {
+ return TSDB_CODE_TSC_INVALID_SQL;
+ }
+
+ sToken = tStrGetToken(sql, &index, false);
+ }
+
+ sql = sToken.z;
+
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
}
@@ -975,7 +1010,7 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken) {
psTblToken->n = len;
psTblToken->type = TK_ID;
- tSQLGetToken(psTblToken->z, &psTblToken->type);
+ tGetToken(psTblToken->z, &psTblToken->type);
return tscValidateName(psTblToken);
}
@@ -1262,7 +1297,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean;
}
- if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
+ if ((pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) && taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
goto _clean;
}
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index 50c79ccff1..e76219b320 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -24,6 +24,7 @@
#include "tscSubquery.h"
int tsParseInsertSql(SSqlObj *pSql);
+int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start);
////////////////////////////////////////////////////////////////////////////////
// functions for normal statement preparation
@@ -43,10 +44,32 @@ typedef struct SNormalStmt {
tVariant* params;
} SNormalStmt;
+typedef struct SMultiTbStmt {
+ bool nameSet;
+ uint64_t currentUid;
+ uint32_t tbNum;
+ SStrToken tbname;
+ SHashObj *pTableHash;
+ SHashObj *pTableBlockHashList; // data block for each table
+} SMultiTbStmt;
+
+typedef enum {
+ STMT_INIT = 1,
+ STMT_PREPARE,
+ STMT_SETTBNAME,
+ STMT_BIND,
+ STMT_BIND_COL,
+ STMT_ADD_BATCH,
+ STMT_EXECUTE
+} STMT_ST;
+
typedef struct STscStmt {
bool isInsert;
+ bool multiTbInsert;
+ int16_t last;
STscObj* taos;
SSqlObj* pSql;
+ SMultiTbStmt mtb;
SNormalStmt normal;
} STscStmt;
@@ -135,7 +158,7 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
break;
default:
- tscDebug("param %d: type mismatch or invalid", i);
+ tscDebug("0x%"PRIx64" bind column%d: type mismatch or invalid", stmt->pSql->self, i);
return TSDB_CODE_TSC_INVALID_VALUE;
}
}
@@ -151,7 +174,7 @@ static int normalStmtPrepare(STscStmt* stmt) {
while (sql[i] != 0) {
SStrToken token = {0};
- token.n = tSQLGetToken(sql + i, &token.type);
+ token.n = tGetToken(sql + i, &token.type);
if (token.type == TK_QUESTION) {
sql[i] = 0;
@@ -253,14 +276,69 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
return taosStringBuilderGetResult(&sb, NULL);
}
+static int fillColumnsNull(STableDataBlocks* pBlock, int32_t rowNum) {
+ SParsedDataColInfo* spd = &pBlock->boundColumnInfo;
+ int32_t offset = 0;
+ SSchema *schema = (SSchema*)pBlock->pTableMeta->schema;
+
+ for (int32_t i = 0; i < spd->numOfCols; ++i) {
+ if (!spd->cols[i].hasVal) { // current column do not have any value to insert, set it to null
+ for (int32_t n = 0; n < rowNum; ++n) {
+ char *ptr = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * n + offset;
+
+ if (schema[i].type == TSDB_DATA_TYPE_BINARY) {
+ varDataSetLen(ptr, sizeof(int8_t));
+ *(uint8_t*) varDataVal(ptr) = TSDB_DATA_BINARY_NULL;
+ } else if (schema[i].type == TSDB_DATA_TYPE_NCHAR) {
+ varDataSetLen(ptr, sizeof(int32_t));
+ *(uint32_t*) varDataVal(ptr) = TSDB_DATA_NCHAR_NULL;
+ } else {
+ setNull(ptr, schema[i].type, schema[i].bytes);
+ }
+ }
+ }
+
+ offset += schema[i].bytes;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+int32_t fillTablesColumnsNull(SSqlObj* pSql) {
+ SSqlCmd* pCmd = &pSql->cmd;
+
+ STableDataBlocks** p = taosHashIterate(pCmd->pTableBlockHashList, NULL);
+
+ STableDataBlocks* pOneTableBlock = *p;
+ while(pOneTableBlock) {
+ SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
+ if (pBlocks->numOfRows > 0 && pOneTableBlock->boundColumnInfo.numOfBound < pOneTableBlock->boundColumnInfo.numOfCols) {
+ fillColumnsNull(pOneTableBlock, pBlocks->numOfRows);
+ }
+
+ p = taosHashIterate(pCmd->pTableBlockHashList, p);
+ if (p == NULL) {
+ break;
+ }
+
+ pOneTableBlock = *p;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+
////////////////////////////////////////////////////////////////////////////////
// functions for insertion statement preparation
-static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
+static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) {
if (bind->is_null != NULL && *(bind->is_null)) {
setNull(data + param->offset, param->type, param->bytes);
return TSDB_CODE_SUCCESS;
}
+#if 0
if (0) {
// allow user bind param data with different type
union {
@@ -641,6 +719,7 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
}
}
}
+#endif
if (bind->buffer_type != param->type) {
return TSDB_CODE_TSC_INVALID_VALUE;
@@ -690,29 +769,106 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
}
memcpy(data + param->offset, bind->buffer, size);
+ if (param->offset == 0) {
+ if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
+ tscError("invalid timestamp");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) {
+ if (bind->buffer_type != param->type || !isValidDataType(param->type)) {
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ if (IS_VAR_DATA_TYPE(param->type) && bind->length == NULL) {
+ tscError("BINARY/NCHAR no length");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ for (int i = 0; i < bind->num; ++i) {
+ char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (rowNum + i);
+
+ if (bind->is_null != NULL && bind->is_null[i]) {
+ setNull(data + param->offset, param->type, param->bytes);
+ continue;
+ }
+
+ if (!IS_VAR_DATA_TYPE(param->type)) {
+ memcpy(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, tDataTypes[param->type].bytes);
+
+ if (param->offset == 0) {
+ if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
+ tscError("invalid timestamp");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ }
+ } else if (param->type == TSDB_DATA_TYPE_BINARY) {
+ if (bind->length[i] > (uintptr_t)param->bytes) {
+ tscError("binary length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ int16_t bsize = (short)bind->length[i];
+ STR_WITH_SIZE_TO_VARSTR(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, bsize);
+ } else if (param->type == TSDB_DATA_TYPE_NCHAR) {
+ if (bind->length[i] > (uintptr_t)param->bytes) {
+ tscError("nchar string length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ int32_t output = 0;
+ if (!taosMbsToUcs4((char *)bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
+ tscError("convert nchar string to UCS4_LE failed:%s", (char*)((char *)bind->buffer + bind->buffer_length * i));
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ varDataSetLen(data + param->offset, output);
+ }
+ }
+
return TSDB_CODE_SUCCESS;
}
static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
SSqlCmd* pCmd = &stmt->pSql->cmd;
-
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
-
- STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- if (pCmd->pTableBlockHashList == NULL) {
- pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
- }
+ STscStmt* pStmt = (STscStmt*)stmt;
STableDataBlocks* pBlock = NULL;
- int32_t ret =
- tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
- pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
- if (ret != 0) {
- // todo handle error
+ if (pStmt->multiTbInsert) {
+ if (pCmd->pTableBlockHashList == NULL) {
+ tscError("0x%"PRIx64" Table block hash list is empty", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
+ if (t1 == NULL) {
+ tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pStmt->pSql->self, pStmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pBlock = *t1;
+ } else {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
+
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ if (pCmd->pTableBlockHashList == NULL) {
+ pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ }
+
+ int32_t ret =
+ tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ if (ret != 0) {
+ return ret;
+ }
}
- uint32_t totalDataSize = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
+ uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + 1) * pBlock->rowSize;
if (totalDataSize > pBlock->nAllocSize) {
const double factor = 1.5;
@@ -729,9 +885,9 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
SParamInfo* param = &pBlock->params[j];
- int code = doBindParam(data, param, &bind[param->idx]);
+ int code = doBindParam(pBlock, data, param, &bind[param->idx], 1);
if (code != TSDB_CODE_SUCCESS) {
- tscDebug("param %d: type mismatch or invalid", param->idx);
+ tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
return code;
}
}
@@ -739,9 +895,134 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
return TSDB_CODE_SUCCESS;
}
+
+static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
+ SSqlCmd* pCmd = &stmt->pSql->cmd;
+ STscStmt* pStmt = (STscStmt*)stmt;
+ int rowNum = bind->num;
+
+ STableDataBlocks* pBlock = NULL;
+
+ if (pStmt->multiTbInsert) {
+ if (pCmd->pTableBlockHashList == NULL) {
+ tscError("0x%"PRIx64" Table block hash list is empty", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
+ if (t1 == NULL) {
+ tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pStmt->pSql->self, pStmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pBlock = *t1;
+ } else {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
+
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ if (pCmd->pTableBlockHashList == NULL) {
+ pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ }
+
+ int32_t ret =
+ tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ assert(colIdx == -1 || (colIdx >= 0 && colIdx < pBlock->numOfParams));
+
+ uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + rowNum) * pBlock->rowSize;
+ if (totalDataSize > pBlock->nAllocSize) {
+ const double factor = 1.5;
+
+ void* tmp = realloc(pBlock->pData, (uint32_t)(totalDataSize * factor));
+ if (tmp == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ pBlock->pData = (char*)tmp;
+ pBlock->nAllocSize = (uint32_t)(totalDataSize * factor);
+ }
+
+ if (colIdx == -1) {
+ for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
+ SParamInfo* param = &pBlock->params[j];
+ if (bind[param->idx].num != rowNum) {
+ tscError("0x%"PRIx64" param %d: num[%d:%d] not match", pStmt->pSql->self, param->idx, rowNum, bind[param->idx].num);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
+ return code;
+ }
+ }
+
+ pCmd->batchSize += rowNum - 1;
+ } else {
+ SParamInfo* param = &pBlock->params[colIdx];
+
+ int code = doBindBatchParam(pBlock, param, bind, pCmd->batchSize);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
+ return code;
+ }
+
+ if (colIdx == (pBlock->numOfParams - 1)) {
+ pCmd->batchSize += rowNum - 1;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+static int insertStmtUpdateBatch(STscStmt* stmt) {
+ SSqlObj* pSql = stmt->pSql;
+ SSqlCmd* pCmd = &pSql->cmd;
+ STableDataBlocks* pBlock = NULL;
+
+ if (pCmd->batchSize > INT16_MAX) {
+ tscError("too many record:%d", pCmd->batchSize);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (taosHashGetSize(pCmd->pTableBlockHashList) == 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&stmt->mtb.currentUid, sizeof(stmt->mtb.currentUid));
+ if (t1 == NULL) {
+ tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pSql->self, stmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pBlock = *t1;
+
+ STableMeta* pTableMeta = pBlock->pTableMeta;
+
+ pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
+ SSubmitBlk* pBlk = (SSubmitBlk*) pBlock->pData;
+ pBlk->numOfRows = pCmd->batchSize;
+ pBlk->dataLen = 0;
+ pBlk->uid = pTableMeta->id.uid;
+ pBlk->tid = pTableMeta->id.tid;
+
+ return TSDB_CODE_SUCCESS;
+}
+
static int insertStmtAddBatch(STscStmt* stmt) {
SSqlCmd* pCmd = &stmt->pSql->cmd;
++pCmd->batchSize;
+
+ if (stmt->multiTbInsert) {
+ return insertStmtUpdateBatch(stmt);
+ }
+
return TSDB_CODE_SUCCESS;
}
@@ -798,6 +1079,8 @@ static int insertStmtExecute(STscStmt* stmt) {
pBlk->uid = pTableMeta->id.uid;
pBlk->tid = pTableMeta->id.tid;
+ fillTablesColumnsNull(stmt->pSql);
+
int code = tscMergeTableDataBlocks(stmt->pSql, false);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -834,6 +1117,88 @@ static int insertStmtExecute(STscStmt* stmt) {
return pSql->res.code;
}
+static void insertBatchClean(STscStmt* pStmt) {
+ SSqlCmd *pCmd = &pStmt->pSql->cmd;
+ SSqlObj *pSql = pStmt->pSql;
+ int32_t size = taosHashGetSize(pCmd->pTableBlockHashList);
+
+ // data block reset
+ pCmd->batchSize = 0;
+
+ for(int32_t i = 0; i < size; ++i) {
+ if (pCmd->pTableNameList && pCmd->pTableNameList[i]) {
+ tfree(pCmd->pTableNameList[i]);
+ }
+ }
+
+ tfree(pCmd->pTableNameList);
+
+/*
+ STableDataBlocks** p = taosHashIterate(pCmd->pTableBlockHashList, NULL);
+
+ STableDataBlocks* pOneTableBlock = *p;
+
+ while (1) {
+ SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
+
+ pOneTableBlock->size = sizeof(SSubmitBlk);
+
+ pBlocks->numOfRows = 0;
+
+ p = taosHashIterate(pCmd->pTableBlockHashList, p);
+ if (p == NULL) {
+ break;
+ }
+
+ pOneTableBlock = *p;
+ }
+*/
+
+ pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
+ pCmd->numOfTables = 0;
+
+ taosHashEmpty(pCmd->pTableBlockHashList);
+ tscFreeSqlResult(pSql);
+ tscFreeSubobj(pSql);
+ tfree(pSql->pSubs);
+ pSql->subState.numOfSub = 0;
+}
+
+static int insertBatchStmtExecute(STscStmt* pStmt) {
+ int32_t code = 0;
+
+ if(pStmt->mtb.nameSet == false) {
+ tscError("0x%"PRIx64" no table name set", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->pSql->retry = pStmt->pSql->maxRetry + 1; //no retry
+
+ if (taosHashGetSize(pStmt->pSql->cmd.pTableBlockHashList) <= 0) { // merge according to vgId
+ tscError("0x%"PRIx64" no data block to insert", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ fillTablesColumnsNull(pStmt->pSql);
+
+ if ((code = tscMergeTableDataBlocks(pStmt->pSql, false)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ code = tscHandleMultivnodeInsert(pStmt->pSql);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ // wait for the callback function to post the semaphore
+ tsem_wait(&pStmt->pSql->rspSem);
+
+ insertBatchClean(pStmt);
+
+ return pStmt->pSql->res.code;
+}
+
////////////////////////////////////////////////////////////////////////////////
// interface functions
@@ -865,7 +1230,9 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
pSql->signature = pSql;
pSql->pTscObj = pObj;
pSql->maxRetry = TSDB_MAX_REPLICA;
+ pSql->isBind = true;
pStmt->pSql = pSql;
+ pStmt->last = STMT_INIT;
return pStmt;
}
@@ -878,6 +1245,13 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
return TSDB_CODE_TSC_DISCONNECTED;
}
+ if (pStmt->last != STMT_INIT) {
+ tscError("prepare status error, last:%d", pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->last = STMT_PREPARE;
+
SSqlObj* pSql = pStmt->pSql;
size_t sqlLen = strlen(sql);
@@ -916,6 +1290,36 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
registerSqlObj(pSql);
+ int32_t ret = TSDB_CODE_SUCCESS;
+
+ if ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS) {
+ return ret;
+ }
+
+ int32_t index = 0;
+ SStrToken sToken = tStrGetToken(pCmd->curSql, &index, false);
+
+ if (sToken.n == 0) {
+ return TSDB_CODE_TSC_INVALID_SQL;
+ }
+
+ if (sToken.n == 1 && sToken.type == TK_QUESTION) {
+ pStmt->multiTbInsert = true;
+ pStmt->mtb.tbname = sToken;
+ pStmt->mtb.nameSet = false;
+ if (pStmt->mtb.pTableHash == NULL) {
+ pStmt->mtb.pTableHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+ }
+ if (pStmt->mtb.pTableBlockHashList == NULL) {
+ pStmt->mtb.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ }
+
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pStmt->multiTbInsert = false;
+ memset(&pStmt->mtb, 0, sizeof(pStmt->mtb));
+
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
// wait for the callback function to post the semaphore
@@ -930,6 +1334,104 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
return normalStmtPrepare(pStmt);
}
+int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+ SSqlObj* pSql = pStmt->pSql;
+ SSqlCmd* pCmd = &pSql->cmd;
+
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
+ if (name == NULL) {
+ terrno = TSDB_CODE_TSC_APP_ERROR;
+ tscError("0x%"PRIx64" name is NULL", pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pStmt->multiTbInsert == false || !tscIsInsertData(pSql->sqlstr)) {
+ terrno = TSDB_CODE_TSC_APP_ERROR;
+ tscError("0x%"PRIx64" not multi table insert", pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pStmt->last == STMT_INIT || pStmt->last == STMT_BIND || pStmt->last == STMT_BIND_COL) {
+ tscError("0x%"PRIx64" settbname status error, last:%d", pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->last = STMT_SETTBNAME;
+
+ uint64_t* uid = (uint64_t*)taosHashGet(pStmt->mtb.pTableHash, name, strlen(name));
+ if (uid != NULL) {
+ pStmt->mtb.currentUid = *uid;
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pStmt->mtb.pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
+ if (t1 == NULL) {
+ tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pSql->self, pStmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData;
+ pCmd->batchSize = pBlk->numOfRows;
+
+ taosHashPut(pCmd->pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)t1, POINTER_BYTES);
+
+ tscDebug("0x%"PRIx64" table:%s is already prepared, uid:%" PRIu64, pSql->self, name, pStmt->mtb.currentUid);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pStmt->mtb.tbname = tscReplaceStrToken(&pSql->sqlstr, &pStmt->mtb.tbname, name);
+ pStmt->mtb.nameSet = true;
+
+ tscDebug("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
+
+ pSql->cmd.parseFinished = 0;
+ pSql->cmd.numOfParams = 0;
+ pSql->cmd.batchSize = 0;
+
+ if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) {
+ SHashObj* hashList = pCmd->pTableBlockHashList;
+ pCmd->pTableBlockHashList = NULL;
+ tscResetSqlCmd(pCmd, true);
+ pCmd->pTableBlockHashList = hashList;
+ }
+
+ int32_t code = tsParseSql(pStmt->pSql, true);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ // wait for the callback function to post the semaphore
+ tsem_wait(&pStmt->pSql->rspSem);
+
+ code = pStmt->pSql->res.code;
+ }
+
+ if (code == TSDB_CODE_SUCCESS) {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
+
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ STableDataBlocks* pBlock = NULL;
+ code = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ SSubmitBlk* blk = (SSubmitBlk*)pBlock->pData;
+ blk->numOfRows = 0;
+
+ pStmt->mtb.currentUid = pTableMeta->id.uid;
+ pStmt->mtb.tbNum++;
+
+ taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
+ taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid));
+
+ tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid);
+ }
+
+ return code;
+}
+
int taos_stmt_close(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
if (!pStmt->isInsert) {
@@ -942,6 +1444,13 @@ int taos_stmt_close(TAOS_STMT* stmt) {
}
free(normal->parts);
free(normal->sql);
+ } else {
+ if (pStmt->multiTbInsert) {
+ taosHashCleanup(pStmt->mtb.pTableHash);
+ pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, true);
+ taosHashCleanup(pStmt->pSql->cmd.pTableBlockHashList);
+ pStmt->pSql->cmd.pTableBlockHashList = NULL;
+ }
}
taos_free_result(pStmt->pSql);
@@ -951,18 +1460,122 @@ int taos_stmt_close(TAOS_STMT* stmt) {
int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
STscStmt* pStmt = (STscStmt*)stmt;
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
if (pStmt->isInsert) {
+ if (pStmt->multiTbInsert) {
+ if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ } else {
+ if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ }
+
+ pStmt->last = STMT_BIND;
+
return insertStmtBindParam(pStmt, bind);
} else {
return normalStmtBindParam(pStmt, bind);
}
}
+
+int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
+ if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
+ tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (!pStmt->isInsert) {
+ tscError("0x%"PRIx64" not or invalid batch insert", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pStmt->multiTbInsert) {
+ if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ } else {
+ if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ }
+
+ pStmt->last = STMT_BIND;
+
+ return insertStmtBindParamBatch(pStmt, bind, -1);
+}
+
+int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
+ if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
+ tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (!pStmt->isInsert) {
+ tscError("0x%"PRIx64" not or invalid batch insert", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pStmt->multiTbInsert) {
+ if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ } else {
+ if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL && pStmt->last != STMT_EXECUTE) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ }
+
+ pStmt->last = STMT_BIND_COL;
+
+ return insertStmtBindParamBatch(pStmt, bind, colIdx);
+}
+
+
+
int taos_stmt_add_batch(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
if (pStmt->isInsert) {
+ if (pStmt->last != STMT_BIND && pStmt->last != STMT_BIND_COL) {
+ tscError("0x%"PRIx64" add batch status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->last = STMT_ADD_BATCH;
+
return insertStmtAddBatch(pStmt);
}
+
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
}
@@ -977,8 +1590,24 @@ int taos_stmt_reset(TAOS_STMT* stmt) {
int taos_stmt_execute(TAOS_STMT* stmt) {
int ret = 0;
STscStmt* pStmt = (STscStmt*)stmt;
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
if (pStmt->isInsert) {
- ret = insertStmtExecute(pStmt);
+ if (pStmt->last != STMT_ADD_BATCH) {
+ tscError("0x%"PRIx64" exec status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->last = STMT_EXECUTE;
+
+ if (pStmt->multiTbInsert) {
+ ret = insertBatchStmtExecute(pStmt);
+ } else {
+ ret = insertStmtExecute(pStmt);
+ }
} else { // normal stmt query
char* sql = normalStmtBuildSql(pStmt);
if (sql == NULL) {
@@ -1073,7 +1702,7 @@ int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
}
if (idx<0 || idx>=pBlock->numOfParams) {
- tscError("param %d: out of range", idx);
+ tscError("0x%"PRIx64" param %d: out of range", pStmt->pSql->self, idx);
abort();
}
diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c
index 777a136a6e..b9ef986810 100644
--- a/src/client/src/tscProfile.c
+++ b/src/client/src/tscProfile.c
@@ -54,14 +54,14 @@ void tscAddIntoSqlList(SSqlObj *pSql) {
pSql->next = pObj->sqlList;
if (pObj->sqlList) pObj->sqlList->prev = pSql;
pObj->sqlList = pSql;
- pSql->queryId = queryId++;
+ pSql->queryId = atomic_fetch_add_32(&queryId, 1);
pthread_mutex_unlock(&pObj->mutex);
pSql->stime = taosGetTimestampMs();
pSql->listed = 1;
- tscDebug("0x%"PRIx64" added into sqlList", pSql->self);
+ tscDebug("0x%"PRIx64" added into sqlList, queryId:%u", pSql->self, pSql->queryId);
}
void tscSaveSlowQueryFpCb(void *param, TAOS_RES *result, int code) {
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index aeddbda1d6..4b92f69bd9 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -21,19 +21,19 @@
#endif // __APPLE__
#include "os.h"
-#include "ttype.h"
-#include "texpr.h"
#include "taos.h"
#include "taosmsg.h"
#include "tcompare.h"
+#include "texpr.h"
#include "tname.h"
#include "tscLog.h"
#include "tscUtil.h"
#include "tschemautil.h"
#include "tsclient.h"
-#include "tstoken.h"
#include "tstrbuild.h"
+#include "ttoken.h"
#include "ttokendef.h"
+#include "ttype.h"
#include "qUtil.h"
#include "qPlan.h"
@@ -65,7 +65,7 @@ static char* getAccountId(SSqlObj* pSql);
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
static char* cloneCurrentDBName(SSqlObj* pSql);
-static bool hasSpecifyDB(SStrToken* pTableName);
+static int32_t getDelimiterIndex(SStrToken* pTableName);
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
@@ -247,6 +247,38 @@ static int32_t handlePassword(SSqlCmd* pCmd, SStrToken* pPwd) {
return TSDB_CODE_SUCCESS;
}
+// validate the out put field type for "UNION ALL" subclause
+static int32_t normalizeVarDataTypeLength(SSqlCmd* pCmd) {
+ const char* msg1 = "columns in select clause not identical";
+
+ int32_t diffSize = 0;
+
+ // if there is only one element, the limit of clause is the limit of global result.
+ SQueryInfo* pQueryInfo1 = pCmd->pQueryInfo;
+ SQueryInfo* pSibling = pQueryInfo1->sibling;
+
+ while(pSibling != NULL) {
+ int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pSibling->fieldsInfo, &diffSize);
+ if (ret != 0) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
+
+ pSibling = pSibling->sibling;
+ }
+
+ if (diffSize) {
+ pQueryInfo1 = pCmd->pQueryInfo;
+ pSibling = pQueryInfo1->sibling;
+
+ while(pSibling->sibling != NULL) {
+ tscFieldInfoSetSize(&pQueryInfo1->fieldsInfo, &pSibling->fieldsInfo);
+ pSibling = pSibling->sibling;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (pInfo == NULL || pSql == NULL) {
return TSDB_CODE_TSC_APP_ERROR;
@@ -429,17 +461,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_DESCRIBE_TABLE: {
const char* msg1 = "invalid table name";
- const char* msg2 = "table name too long";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
-
- if (!tscValidateTableNameLength(pToken->n)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
- }
-
// additional msg has been attached already
code = tscSetTableFullName(&pTableMetaInfo->name, pToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
@@ -448,19 +474,15 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return tscGetTableMeta(pSql, pTableMetaInfo);
}
+ case TSDB_SQL_SHOW_CREATE_STABLE:
case TSDB_SQL_SHOW_CREATE_TABLE: {
const char* msg1 = "invalid table name";
- const char* msg2 = "table name is too long";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if (!tscValidateTableNameLength(pToken->n)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
- }
-
code = tscSetTableFullName(&pTableMetaInfo->name, pToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -479,8 +501,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (pToken->n > TSDB_DB_NAME_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
-
- return tscSetTableFullName(&pTableMetaInfo->name, pToken, pSql);
+ return tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pToken);
}
case TSDB_SQL_CFG_DNODE: {
const char* msg2 = "invalid configure options or values, such as resetlog / debugFlag 135 / balance 'vnode:2-dnode:2' / monitor 1 ";
@@ -618,8 +639,6 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_SELECT: {
- const char* msg1 = "columns in select clause not identical";
-
code = loadAllTableMeta(pSql, pInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -648,6 +667,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
}
+ if ((code = normalizeVarDataTypeLength(pCmd)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
// restore the clause index
pCmd->clauseIndex = 0;
@@ -655,17 +678,6 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pCmd->active = pCmd->pQueryInfo;
pCmd->command = pCmd->pQueryInfo->command;
- // if there is only one element, the limit of clause is the limit of global result.
- // validate the select node for "UNION ALL" subclause
- SQueryInfo* pQueryInfo1 = pCmd->pQueryInfo;
- while(pQueryInfo1->sibling != NULL) {
- int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo1->sibling->fieldsInfo);
- if (ret != 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
- }
- pQueryInfo1 = pQueryInfo1->sibling;
- }
-
pCmd->parseFinished = 1;
return TSDB_CODE_SUCCESS; // do not build query message here
}
@@ -992,11 +1004,13 @@ int32_t tscSetTableFullName(SName* pName, SStrToken* pTableName, SSqlObj* pSql)
const char* msg1 = "name too long";
const char* msg2 = "acctId too long";
const char* msg3 = "no acctId";
+ const char* msg4 = "db name too long";
+ const char* msg5 = "table name too long";
SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS;
-
- if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path
+ int32_t idx = getDelimiterIndex(pTableName);
+ if (idx != -1) { // db has been specified in sql string so we ignore current db path
char* acctId = getAccountId(pSql);
if (acctId == NULL || strlen(acctId) <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
@@ -1006,7 +1020,14 @@ int32_t tscSetTableFullName(SName* pName, SStrToken* pTableName, SSqlObj* pSql)
if (code != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
-
+ if (idx >= TSDB_DB_NAME_LEN) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+
+ if (pTableName->n - 1 - idx >= TSDB_TABLE_NAME_LEN) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ }
+
char name[TSDB_TABLE_FNAME_LEN] = {0};
strncpy(name, pTableName->z, pTableName->n);
@@ -1348,14 +1369,13 @@ static char* cloneCurrentDBName(SSqlObj* pSql) {
}
/* length limitation, strstr cannot be applied */
-static bool hasSpecifyDB(SStrToken* pTableName) {
+static int32_t getDelimiterIndex(SStrToken* pTableName) {
for (uint32_t i = 0; i < pTableName->n; ++i) {
if (pTableName->z[i] == TS_PATH_DELIMITER[0]) {
- return true;
+ return i;
}
}
-
- return false;
+ return -1;
}
int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* xlen) {
@@ -1611,11 +1631,27 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
return false;
}
+static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
+
+ if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) {
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelNodeList, bool isSTable, bool joinQuery,
bool timeWindowQuery) {
assert(pSelNodeList != NULL && pCmd != NULL);
const char* msg1 = "too many items in selection clause";
+
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "not support query expression";
const char* msg4 = "only support distinct one tag";
@@ -1680,7 +1716,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
// there is only one user-defined column in the final result field, add the timestamp column.
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
- if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
+ if ((numOfSrcCols <= 0 || !hasNoneUserDefineExpr(pQueryInfo)) && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
addPrimaryTsColIntoResult(pQueryInfo, pCmd);
}
@@ -4620,7 +4656,7 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
}
} else {
SStrToken token = {.z = pRight->value.pz, .n = pRight->value.nLen, .type = TK_ID};
- int32_t len = tSQLGetToken(pRight->value.pz, &token.type);
+ int32_t len = tGetToken(pRight->value.pz, &token.type);
if ((token.type != TK_INTEGER && token.type != TK_FLOAT) || len != pRight->value.nLen) {
return TSDB_CODE_TSC_INVALID_SQL;
@@ -5507,13 +5543,13 @@ int32_t validateLocalConfig(SMiscInfo* pOptions) {
}
int32_t validateColumnName(char* name) {
- bool ret = isKeyWord(name, (int32_t)strlen(name));
+ bool ret = taosIsKeyWordToken(name, (int32_t)strlen(name));
if (ret) {
return TSDB_CODE_TSC_INVALID_SQL;
}
SStrToken token = {.z = name};
- token.n = tSQLGetToken(name, &token.type);
+ token.n = tGetToken(name, &token.type);
if (token.type != TK_STRING && token.type != TK_ID) {
return TSDB_CODE_TSC_INVALID_SQL;
@@ -5524,7 +5560,7 @@ int32_t validateColumnName(char* name) {
strntolower(token.z, token.z, token.n);
token.n = (uint32_t)strtrim(token.z);
- int32_t k = tSQLGetToken(token.z, &token.type);
+ int32_t k = tGetToken(token.z, &token.type);
if (k != token.n) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -5606,7 +5642,6 @@ int32_t validateLimitNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlN
*/
// assert(allVgroupInfoRetrieved(pQueryInfo));
-
// No tables included. No results generated. Query results are empty.
if (pTableMetaInfo->vgroupList->numOfVgroups == 0) {
tscDebug("0x%"PRIx64" no table in super table, no output result", pSql->self);
@@ -7142,6 +7177,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SArray* tableNameList = NULL;
SArray* pVgroupList = NULL;
SArray* plist = NULL;
+ STableMeta* pTableMeta = NULL;
pCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
@@ -7174,7 +7210,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
char buf[80 * 1024] = {0};
assert(maxSize < 80 * 1024);
- STableMeta* pTableMeta = (STableMeta*)buf;
+ pTableMeta = calloc(1, maxSize);
plist = taosArrayInit(4, POINTER_BYTES);
pVgroupList = taosArrayInit(4, POINTER_BYTES);
@@ -7189,7 +7225,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (pTableMeta->id.uid > 0) {
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
- code = tscCreateTableMetaFromCChildMeta(pTableMeta, name);
+ code = tscCreateTableMetaFromCChildMeta(pTableMeta, name, buf);
// create the child table meta from super table failed, try load it from mnode
if (code != TSDB_CODE_SUCCESS) {
@@ -7222,7 +7258,6 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
_end:
-
if (plist != NULL) {
taosArrayDestroyEx(plist, freeElem);
}
@@ -7235,6 +7270,8 @@ _end:
taosArrayDestroy(tableNameList);
}
+ tfree(pTableMeta);
+
return code;
}
@@ -7733,4 +7770,3 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
return false;
}
-
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index dbe5c8c8fd..37ea6ff8b9 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -2082,14 +2082,14 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
}
assert(parent->signature == parent && (int64_t)pSql->param == parent->self);
-
+
SSqlRes* pRes = &pSql->res;
-
+
// NOTE: the order of several table must be preserved.
SSTableVgroupRspMsg *pStableVgroup = (SSTableVgroupRspMsg *)pRes->pRsp;
pStableVgroup->numOfTables = htonl(pStableVgroup->numOfTables);
char *pMsg = pRes->pRsp + sizeof(SSTableVgroupRspMsg);
-
+
SSqlCmd* pCmd = &parent->cmd;
for(int32_t i = 0; i < pStableVgroup->numOfTables; ++i) {
STableMetaInfo *pInfo = tscGetTableMetaInfoFromCmd(pCmd, i);
@@ -2503,10 +2503,23 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
assert(tIsValidName(&pTableMetaInfo->name));
- tfree(pTableMetaInfo->pTableMeta);
-
uint32_t size = tscGetTableMetaMaxSize();
- pTableMetaInfo->pTableMeta = calloc(1, size);
+ if (pTableMetaInfo->pTableMeta == NULL) {
+ pTableMetaInfo->pTableMeta = calloc(1, size);
+ pTableMetaInfo->tableMetaSize = size;
+ } else if (pTableMetaInfo->tableMetaSize < size) {
+ char *tmp = realloc(pTableMetaInfo->pTableMeta, size);
+ if (tmp == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ pTableMetaInfo->pTableMeta = (STableMeta *)tmp;
+ memset(pTableMetaInfo->pTableMeta, 0, size);
+ pTableMetaInfo->tableMetaSize = size;
+ } else {
+ //uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta);
+ memset(pTableMetaInfo->pTableMeta, 0, size);
+ pTableMetaInfo->tableMetaSize = size;
+ }
pTableMetaInfo->pTableMeta->tableType = -1;
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
@@ -2518,10 +2531,13 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
// TODO resize the tableMeta
+ char buf[80*1024] = {0};
+ assert(size < 80*1024);
+
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
if (pMeta->tableType == TSDB_CHILD_TABLE) {
- int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name);
+ int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name, buf);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@@ -2600,7 +2616,7 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, SQueryInfo* pQueryInfo) {
tscFreeSqlObj(pNew);
return code;
}
-
+
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i);
STableMeta* pTableMeta = tscTableMetaDup(pMInfo->pTableMeta);
@@ -2698,6 +2714,7 @@ void tscInitMsgsFp() {
tscProcessMsgRsp[TSDB_SQL_ALTER_DB] = tscProcessAlterDbMsgRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_TABLE] = tscProcessShowCreateRsp;
+ tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_STABLE] = tscProcessShowCreateRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_DATABASE] = tscProcessShowCreateRsp;
tscKeepConn[TSDB_SQL_SHOW] = 1;
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index 99613b1e77..d8c930d341 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -461,6 +461,7 @@ static bool needToFetchNewBlock(SSqlObj* pSql) {
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
+ pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 5df75d0cd2..15100bfb4d 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -55,9 +55,9 @@ static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) {
}
while (tsBufNextPos(pTSBuf)) {
- STSElem el1 = tsBufGetElem(pTSBuf);
+ el1 = tsBufGetElem(pTSBuf);
- int32_t res = tVariantCompare(el1.tag, tag1);
+ res = tVariantCompare(el1.tag, tag1);
if (res != 0) { // it is a record with new tag
return;
}
@@ -624,7 +624,13 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
// set the tag column id for executor to extract correct tag value
+#ifndef _TD_NINGSI_60
pExpr->base.param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)};
+#else
+ pExpr->base.param[0].i64 = colId;
+ pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT;
+ pExpr->base.param[0].nLen = sizeof(int64_t);
+#endif
pExpr->base.numOfParams = 1;
}
@@ -2842,7 +2848,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
tscDebug("0x%"PRIx64" sub:%p retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d",
pParentSql->self, pSql, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
- if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
+ if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0) && !(tscGetQueryInfo(&pParentSql->cmd)->distinctTag)) {
tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 871b105e91..a727731536 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -1349,7 +1349,8 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
tfree(pTableMetaInfo->pTableMeta);
}
- pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
+ pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
+ pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pDataBlock->pTableMeta);
}
/*
@@ -1566,67 +1567,73 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
STableDataBlocks* pOneTableBlock = *p;
while(pOneTableBlock) {
- // the maximum expanded size in byte when a row-wise data is converted to SDataRow format
- int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
- STableDataBlocks* dataBuf = NULL;
-
- int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
- INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
- if (ret != TSDB_CODE_SUCCESS) {
- tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret);
- taosHashCleanup(pVnodeDataBlockHashList);
- tscDestroyBlockArrayList(pVnodeDataBlockList);
- return ret;
- }
-
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
- int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
-
- if (dataBuf->nAllocSize < destSize) {
- while (dataBuf->nAllocSize < destSize) {
- dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5);
- }
-
- char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize);
- if (tmp != NULL) {
- dataBuf->pData = tmp;
- memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
- } else { // failed to allocate memory, free already allocated memory and return error code
- tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize);
-
+ if (pBlocks->numOfRows > 0) {
+ // the maximum expanded size in byte when a row-wise data is converted to SDataRow format
+ int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
+ STableDataBlocks* dataBuf = NULL;
+
+ int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
+ INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
+ if (ret != TSDB_CODE_SUCCESS) {
+ tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret);
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
- tfree(dataBuf->pData);
-
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ return ret;
}
+
+ int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
+
+ if (dataBuf->nAllocSize < destSize) {
+ while (dataBuf->nAllocSize < destSize) {
+ dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5);
+ }
+
+ char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize);
+ if (tmp != NULL) {
+ dataBuf->pData = tmp;
+ memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
+ } else { // failed to allocate memory, free already allocated memory and return error code
+ tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize);
+
+ taosHashCleanup(pVnodeDataBlockHashList);
+ tscDestroyBlockArrayList(pVnodeDataBlockList);
+ tfree(dataBuf->pData);
+
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ }
+
+ tscSortRemoveDataBlockDupRows(pOneTableBlock);
+ char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
+
+ tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName),
+ pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
+
+ int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
+
+ pBlocks->tid = htonl(pBlocks->tid);
+ pBlocks->uid = htobe64(pBlocks->uid);
+ pBlocks->sversion = htonl(pBlocks->sversion);
+ pBlocks->numOfRows = htons(pBlocks->numOfRows);
+ pBlocks->schemaLen = 0;
+
+ // erase the empty space reserved for binary data
+ int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->submitSchema);
+ assert(finalLen <= len);
+
+ dataBuf->size += (finalLen + sizeof(SSubmitBlk));
+ assert(dataBuf->size <= dataBuf->nAllocSize);
+
+ // the length does not include the SSubmitBlk structure
+ pBlocks->dataLen = htonl(finalLen);
+ dataBuf->numOfTables += 1;
+
+ pBlocks->numOfRows = 0;
+ }else {
+ tscDebug("0x%"PRIx64" table %s data block is empty", pSql->self, pOneTableBlock->tableName.tname);
}
-
- tscSortRemoveDataBlockDupRows(pOneTableBlock);
- char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
-
- tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName),
- pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
-
- int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
-
- pBlocks->tid = htonl(pBlocks->tid);
- pBlocks->uid = htobe64(pBlocks->uid);
- pBlocks->sversion = htonl(pBlocks->sversion);
- pBlocks->numOfRows = htons(pBlocks->numOfRows);
- pBlocks->schemaLen = 0;
-
- // erase the empty space reserved for binary data
- int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->submitSchema);
- assert(finalLen <= len);
-
- dataBuf->size += (finalLen + sizeof(SSubmitBlk));
- assert(dataBuf->size <= dataBuf->nAllocSize);
-
- // the length does not include the SSubmitBlk structure
- pBlocks->dataLen = htonl(finalLen);
- dataBuf->numOfTables += 1;
-
+
p = taosHashIterate(pCmd->pTableBlockHashList, p);
if (p == NULL) {
break;
@@ -1748,7 +1755,7 @@ int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) {
return pInfo->pExpr->base.offset;
}
-int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) {
+int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize) {
assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
if (pFieldInfo1->numOfOutput != pFieldInfo2->numOfOutput) {
@@ -1760,15 +1767,37 @@ int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFi
TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
if (pField1->type != pField2->type ||
- pField1->bytes != pField2->bytes ||
strcasecmp(pField1->name, pField2->name) != 0) {
return 1;
}
+
+ if (pField1->bytes != pField2->bytes) {
+ *diffSize = 1;
+
+ if (pField2->bytes > pField1->bytes) {
+ assert(IS_VAR_DATA_TYPE(pField1->type));
+ pField1->bytes = pField2->bytes;
+ }
+ }
}
return 0;
}
+int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) {
+ assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
+
+ for (int32_t i = 0; i < pFieldInfo1->numOfOutput; ++i) {
+ TAOS_FIELD* pField1 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo1, i);
+ TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
+
+ pField2->bytes = pField1->bytes;
+ }
+
+ return 0;
+}
+
+
int32_t tscGetResRowLength(SArray* pExprList) {
size_t num = taosArrayGetSize(pExprList);
if (num == 0) {
@@ -2246,7 +2275,7 @@ void tscColumnListDestroy(SArray* pColumnList) {
static int32_t validateQuoteToken(SStrToken* pToken) {
tscDequoteAndTrimToken(pToken);
- int32_t k = tSQLGetToken(pToken->z, &pToken->type);
+ int32_t k = tGetToken(pToken->z, &pToken->type);
if (pToken->type == TK_STRING) {
return tscValidateName(pToken);
@@ -2314,7 +2343,7 @@ int32_t tscValidateName(SStrToken* pToken) {
tscStrToLower(pToken->z, pToken->n);
//pToken->n = (uint32_t)strtrim(pToken->z);
- int len = tSQLGetToken(pToken->z, &pToken->type);
+ int len = tGetToken(pToken->z, &pToken->type);
// single token, validate it
if (len == pToken->n) {
@@ -2340,7 +2369,7 @@ int32_t tscValidateName(SStrToken* pToken) {
pToken->n = (uint32_t)strtrim(pToken->z);
}
- pToken->n = tSQLGetToken(pToken->z, &pToken->type);
+ pToken->n = tGetToken(pToken->z, &pToken->type);
if (pToken->z[pToken->n] != TS_PATH_DELIMITER[0]) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -2357,7 +2386,7 @@ int32_t tscValidateName(SStrToken* pToken) {
pToken->z = sep + 1;
pToken->n = (uint32_t)(oldLen - (sep - pStr) - 1);
- int32_t len = tSQLGetToken(pToken->z, &pToken->type);
+ int32_t len = tGetToken(pToken->z, &pToken->type);
if (len != pToken->n || (pToken->type != TK_STRING && pToken->type != TK_ID)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -2893,6 +2922,11 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
}
pTableMetaInfo->pTableMeta = pTableMeta;
+ if (pTableMetaInfo->pTableMeta == NULL) {
+ pTableMetaInfo->tableMetaSize = 0;
+ } else {
+ pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta);
+ }
if (vgroupList != NULL) {
pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList);
@@ -3168,6 +3202,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
+
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, 0);
if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) {
@@ -3377,16 +3412,21 @@ void tscDoQuery(SSqlObj* pSql) {
return;
}
- if (pCmd->command == TSDB_SQL_SELECT) {
- tscAddIntoSqlList(pSql);
- }
-
if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) {
tscImportDataFromFile(pSql);
} else {
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
uint16_t type = pQueryInfo->type;
+ if ((pCmd->command == TSDB_SQL_SELECT) && (!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_SUBQUERY)) && (!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_STABLE_SUBQUERY))) {
+ tscAddIntoSqlList(pSql);
+ }
+
+ if (TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_INSERT)) { // multi-vnodes insertion
+ tscHandleMultivnodeInsert(pSql);
+ return;
+ }
+
if (QUERY_IS_JOIN_QUERY(type)) {
if (!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_SUBQUERY)) {
tscHandleMasterJoinQuery(pSql);
@@ -3648,7 +3688,13 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) {
//backup the total number of result first
int64_t num = pRes->numOfTotal + pRes->numOfClauseTotal;
+
+
+ // DON't free final since it may be recoreded and used later in APP
+ TAOS_FIELD* finalBk = pRes->final;
+ pRes->final = NULL;
tscFreeSqlResult(pSql);
+ pRes->final = finalBk;
pRes->numOfTotal = num;
@@ -3881,11 +3927,9 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
return cMeta;
}
-int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
- assert(pChild != NULL);
-
- uint32_t size = tscGetTableMetaMaxSize();
- STableMeta* p = calloc(1, size);
+int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf) {
+ assert(pChild != NULL && buf != NULL);
+ STableMeta* p = buf;
taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1);
if (p->id.uid > 0) { // tableMeta exists, build child table meta and return
@@ -3896,13 +3940,9 @@ int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
int32_t total = pChild->tableInfo.numOfColumns + pChild->tableInfo.numOfTags;
memcpy(pChild->schema, p->schema, sizeof(SSchema) *total);
-
- tfree(p);
return TSDB_CODE_SUCCESS;
} else { // super table has been removed, current tableMeta is also expired. remove it here
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
-
- tfree(p);
return -1;
}
}
@@ -4257,7 +4297,7 @@ int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t lengt
len = (int32_t)strtrim(tblName);
SStrToken sToken = {.n = len, .type = TK_ID, .z = tblName};
- tSQLGetToken(tblName, &sToken.type);
+ tGetToken(tblName, &sToken.type);
// Check if the table name available or not
if (tscValidateName(&sToken) != TSDB_CODE_SUCCESS) {
diff --git a/src/client/tests/timeParseTest.cpp b/src/client/tests/timeParseTest.cpp
index d7325430cd..692398e3b7 100644
--- a/src/client/tests/timeParseTest.cpp
+++ b/src/client/tests/timeParseTest.cpp
@@ -4,7 +4,7 @@
#include
#include "taos.h"
-#include "tstoken.h"
+#include "ttoken.h"
#include "tutil.h"
int main(int argc, char** argv) {
@@ -98,7 +98,7 @@ TEST(testCase, parse_time) {
taosParseTime(t41, &time, strlen(t41), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999);
- int64_t k = timezone;
+// int64_t k = timezone;
char t42[] = "1997-1-1T0:0:0.999999999Z";
taosParseTime(t42, &time, strlen(t42), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999 - timezone * MILLISECOND_PER_SECOND);
@@ -163,7 +163,7 @@ TEST(testCase, parse_time) {
taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, -28800 * MILLISECOND_PER_SECOND);
- char* t = "2021-01-08T02:11:40.000+00:00";
+ char t[] = "2021-01-08T02:11:40.000+00:00";
taosParseTime(t, &time, strlen(t), TSDB_TIME_PRECISION_MILLI, 0);
printf("%ld\n", time);
}
diff --git a/src/common/inc/tcmdtype.h b/src/common/inc/tcmdtype.h
index be16e80124..adf210cfeb 100644
--- a/src/common/inc/tcmdtype.h
+++ b/src/common/inc/tcmdtype.h
@@ -80,6 +80,7 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_TABLE_JOIN_RETRIEVE, "join-retrieve" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_TABLE, "show-create-table")
+ TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_STABLE, "show-create-stable")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database")
/*
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index e8c0760997..88d5b85010 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -15,10 +15,7 @@
#ifndef _TD_DATA_FORMAT_H_
#define _TD_DATA_FORMAT_H_
-#include
-#include
-#include
-
+#include "os.h"
#include "talgo.h"
#include "ttype.h"
#include "tutil.h"
diff --git a/src/common/inc/texpr.h b/src/common/inc/texpr.h
index a0854ce81b..275dd12fd7 100644
--- a/src/common/inc/texpr.h
+++ b/src/common/inc/texpr.h
@@ -87,10 +87,9 @@ tExprNode* exprTreeFromBinary(const void* data, size_t size);
tExprNode* exprTreeFromTableName(const char* tbnameCond);
tExprNode* exprdup(tExprNode* pTree);
-bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
+void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
-typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight,
- int32_t rightType, void *output, int32_t order);
+bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
char *(*cb)(void *, const char*, int32_t));
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 26475834d5..2f4aa4c2b2 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -44,6 +44,7 @@ extern int32_t tsDnodeId;
// common
extern int tsRpcTimer;
extern int tsRpcMaxTime;
+extern int tsRpcForceTcp; // all commands go to tcp protocol if this is enabled
extern int32_t tsMaxConnections;
extern int32_t tsMaxShellConns;
extern int32_t tsShellActivityTimer;
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index 3c39716528..ec2d76147a 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -18,7 +18,7 @@
#include "os.h"
#include "taosmsg.h"
-#include "tstoken.h"
+#include "ttoken.h"
#include "tvariant.h"
typedef struct SDataStatis {
diff --git a/src/common/inc/tvariant.h b/src/common/inc/tvariant.h
index f8f715c6ca..21b7fd8223 100644
--- a/src/common/inc/tvariant.h
+++ b/src/common/inc/tvariant.h
@@ -16,8 +16,8 @@
#ifndef TDENGINE_TVARIANT_H
#define TDENGINE_TVARIANT_H
-#include "tstoken.h"
#include "tarray.h"
+#include "ttoken.h"
#ifdef __cplusplus
extern "C" {
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index 1524f15b7d..db97c3a5af 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -48,6 +48,7 @@ int32_t tsDnodeId = 0;
// common
int32_t tsRpcTimer = 1000;
int32_t tsRpcMaxTime = 600; // seconds;
+int32_t tsRpcForceTcp = 0; //disable this, means query, show command use udp protocol as default
int32_t tsMaxShellConns = 50000;
int32_t tsMaxConnections = 5000;
int32_t tsShellActivityTimer = 3; // second
@@ -625,6 +626,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_MS;
taosInitConfigOption(cfg);
+ cfg.option = "rpcForceTcp";
+ cfg.ptr = &tsRpcForceTcp;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
+ cfg.minValue = 0;
+ cfg.maxValue = 1;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
cfg.option = "rpcMaxTime";
cfg.ptr = &tsRpcMaxTime;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -921,7 +932,7 @@ static void doInitGlobalConfig(void) {
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = -1;
- cfg.maxValue = 10000000000.0f;
+ cfg.maxValue = 100000000.0f;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index f1ddc60637..dc868d8057 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -2,7 +2,7 @@
#include "tutil.h"
#include "tname.h"
-#include "tstoken.h"
+#include "ttoken.h"
#include "tvariant.h"
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c
index c872d8731b..9988450c30 100644
--- a/src/common/src/tvariant.c
+++ b/src/common/src/tvariant.c
@@ -14,14 +14,14 @@
*/
#include "os.h"
-#include "tvariant.h"
#include "hash.h"
#include "taos.h"
#include "taosdef.h"
-#include "tstoken.h"
+#include "ttoken.h"
#include "ttokendef.h"
-#include "tutil.h"
#include "ttype.h"
+#include "tutil.h"
+#include "tvariant.h"
void tVariantCreate(tVariant *pVar, SStrToken *token) {
int32_t ret = 0;
@@ -49,7 +49,7 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {
ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, true);
if (ret != 0) {
SStrToken t = {0};
- tSQLGetToken(token->z, &t.type);
+ tGetToken(token->z, &t.type);
if (t.type == TK_MINUS) { // it is a signed number which is greater than INT64_MAX or less than INT64_MIN
pVar->nType = -1; // -1 means error type
return;
@@ -460,7 +460,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
*result = (int64_t) pVariant->dKey;
} else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
SStrToken token = {.z = pVariant->pz, .n = pVariant->nLen};
- /*int32_t n = */tSQLGetToken(pVariant->pz, &token.type);
+ /*int32_t n = */tGetToken(pVariant->pz, &token.type);
if (token.type == TK_NULL) {
if (releaseVariantPtr) {
@@ -495,10 +495,10 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
wchar_t *endPtr = NULL;
SStrToken token = {0};
- token.n = tSQLGetToken(pVariant->pz, &token.type);
+ token.n = tGetToken(pVariant->pz, &token.type);
if (token.type == TK_MINUS || token.type == TK_PLUS) {
- token.n = tSQLGetToken(pVariant->pz + token.n, &token.type);
+ token.n = tGetToken(pVariant->pz + token.n, &token.type);
}
if (token.type == TK_FLOAT) {
diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt
index de4b8f6bfb..61e976cb18 100644
--- a/src/connector/jdbc/CMakeLists.txt
+++ b/src/connector/jdbc/CMakeLists.txt
@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
- COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.28-dist.jar ${LIBRARY_OUTPUT_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.29.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml
index a31796ffde..968a9bf470 100755
--- a/src/connector/jdbc/deploy-pom.xml
+++ b/src/connector/jdbc/deploy-pom.xml
@@ -5,7 +5,7 @@
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.28
+ 2.0.29
jar
JDBCDriver
diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml
index 3400a82e73..ef353d1d19 100755
--- a/src/connector/jdbc/pom.xml
+++ b/src/connector/jdbc/pom.xml
@@ -3,7 +3,7 @@
4.0.0
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.28
+ 2.0.29
jar
JDBCDriver
https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc
@@ -122,6 +122,7 @@
**/FailOverTest.java
**/InvalidResultSetPointerTest.java
**/RestfulConnectionTest.java
+ **/TD4144Test.java
true
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java
index 4b5b88d93b..f8ea9af423 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java
@@ -84,10 +84,12 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
}
@Override
+ @Deprecated
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
-
+ }
+
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@@ -171,6 +173,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
}
@Override
+ @Deprecated
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
return getUnicodeStream(findColumn(columnLabel));
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
index c8ab9fb15a..02fee74eb5 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
@@ -49,7 +49,7 @@ public class TSDBConnection extends AbstractConnection {
this.databaseMetaData.setConnection(this);
}
- public TSDBJNIConnector getConnection() {
+ public TSDBJNIConnector getConnector() {
return this.connector;
}
@@ -58,7 +58,7 @@ public class TSDBConnection extends AbstractConnection {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
- return new TSDBStatement(this, this.connector);
+ return new TSDBStatement(this);
}
public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException {
@@ -74,14 +74,18 @@ public class TSDBConnection extends AbstractConnection {
}
public PreparedStatement prepareStatement(String sql) throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
- return new TSDBPreparedStatement(this, this.connector, sql);
+ }
+
+ return new TSDBPreparedStatement(this, sql);
}
public void close() throws SQLException {
- if (isClosed)
+ if (isClosed) {
return;
+ }
+
this.connector.closeConnection();
this.isClosed = true;
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java
index 37073e243f..f38555ce8a 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java
@@ -30,6 +30,7 @@ public abstract class TSDBConstants {
public static final int JNI_FETCH_END = -6;
public static final int JNI_OUT_OF_MEMORY = -7;
// TSDB Data Types
+ public static final int TSDB_DATA_TYPE_NULL = 0;
public static final int TSDB_DATA_TYPE_BOOL = 1;
public static final int TSDB_DATA_TYPE_TINYINT = 2;
public static final int TSDB_DATA_TYPE_SMALLINT = 3;
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
index bbd8519a03..55533bd28c 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
@@ -104,7 +104,7 @@ public class TSDBDriver extends AbstractDriver {
static {
try {
- java.sql.DriverManager.registerDriver(new TSDBDriver());
+ DriverManager.registerDriver(new TSDBDriver());
} catch (SQLException e) {
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_CANNOT_REGISTER_JNI_DRIVER, e);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
index 5e3ffffa4f..d6934b8e46 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
@@ -18,6 +18,7 @@ package com.taosdata.jdbc;
import com.taosdata.jdbc.utils.TaosInfo;
+import java.nio.ByteBuffer;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
@@ -29,10 +30,13 @@ public class TSDBJNIConnector {
private static volatile Boolean isInitialized = false;
private TaosInfo taosInfo = TaosInfo.getInstance();
+
// Connection pointer used in C
private long taos = TSDBConstants.JNI_NULL_POINTER;
+
// result set status in current connection
- private boolean isResultsetClosed = true;
+ private boolean isResultsetClosed;
+
private int affectedRows = -1;
static {
@@ -75,7 +79,6 @@ public class TSDBJNIConnector {
public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException {
if (this.taos != TSDBConstants.JNI_NULL_POINTER) {
-// this.closeConnectionImp(this.taos);
closeConnection();
this.taos = TSDBConstants.JNI_NULL_POINTER;
}
@@ -97,12 +100,6 @@ public class TSDBJNIConnector {
* @throws SQLException
*/
public long executeQuery(String sql) throws SQLException {
- // close previous result set if the user forgets to invoke the
- // free method to close previous result set.
-// if (!this.isResultsetClosed) {
-// freeResultSet(taosResultSetPointer);
-// }
-
Long pSql = 0l;
try {
pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos);
@@ -135,6 +132,7 @@ public class TSDBJNIConnector {
// Try retrieving result set for the executed SQL using the current connection pointer.
pSql = this.getResultSetImp(this.taos, pSql);
+ // if pSql == 0L that means resultset is closed
isResultsetClosed = (pSql == TSDBConstants.JNI_NULL_POINTER);
return pSql;
@@ -169,37 +167,14 @@ public class TSDBJNIConnector {
private native long isUpdateQueryImp(long connection, long pSql);
/**
- * Free resultset operation from C to release resultset pointer by JNI
+ * Free result set operation from C to release result set pointer by JNI
*/
public int freeResultSet(long pSql) {
- int res = TSDBConstants.JNI_SUCCESS;
-// if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
-// throw new RuntimeException("Invalid result set pointer");
-// }
-
-// if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
- res = this.freeResultSetImp(this.taos, pSql);
-// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER;
-// }
-
+ int res = this.freeResultSetImp(this.taos, pSql);
isResultsetClosed = true;
return res;
}
- /**
- * Close the open result set which is associated to the current connection. If the result set is already
- * closed, return 0 for success.
- */
-// public int freeResultSet() {
-// int resCode = TSDBConstants.JNI_SUCCESS;
-// if (!isResultsetClosed) {
-// resCode = this.freeResultSetImp(this.taos, this.taosResultSetPointer);
-// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER;
-// isResultsetClosed = true;
-// }
-// return resCode;
-// }
-
private native int freeResultSetImp(long connection, long result);
/**
@@ -246,6 +221,7 @@ public class TSDBJNIConnector {
*/
public void closeConnection() throws SQLException {
int code = this.closeConnectionImp(this.taos);
+
if (code < 0) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
} else if (code == 0) {
@@ -253,6 +229,7 @@ public class TSDBJNIConnector {
} else {
throw new SQLException("Undefined error code returned by TDengine when closing a connection");
}
+
// invoke closeConnectionImpl only here
taosInfo.connect_close_increment();
}
@@ -289,7 +266,7 @@ public class TSDBJNIConnector {
private native void unsubscribeImp(long subscription, boolean isKeep);
/**
- * Validate if a create table sql statement is correct without actually creating that table
+ * Validate if a create table SQL statement is correct without actually creating that table
*/
public boolean validateCreateTableSql(String sql) {
int res = validateCreateTableSqlImp(taos, sql.getBytes());
@@ -297,4 +274,66 @@ public class TSDBJNIConnector {
}
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
+
+ public long prepareStmt(String sql) throws SQLException {
+ Long stmt = 0L;
+ try {
+ stmt = prepareStmtImp(sql.getBytes(), this.taos);
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
+ }
+
+ if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
+ }
+
+ if (stmt == TSDBConstants.JNI_SQL_NULL) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
+ }
+
+ if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
+ }
+
+ return stmt;
+ }
+
+ private native long prepareStmtImp(byte[] sql, long con);
+
+ public void setBindTableName(long stmt, String tableName) throws SQLException {
+ int code = setBindTableNameImp(stmt, tableName, this.taos);
+ if (code != TSDBConstants.JNI_SUCCESS) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to set table name");
+ }
+ }
+
+ private native int setBindTableNameImp(long stmt, String name, long conn);
+
+ public void bindColumnDataArray(long stmt, ByteBuffer colDataList, ByteBuffer lengthList, ByteBuffer isNullList, int type, int bytes, int numOfRows,int columnIndex) throws SQLException {
+ int code = bindColDataImp(stmt, colDataList.array(), lengthList.array(), isNullList.array(), type, bytes, numOfRows, columnIndex, this.taos);
+ if (code != TSDBConstants.JNI_SUCCESS) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to bind column data");
+ }
+ }
+
+ private native int bindColDataImp(long stmt, byte[] colDataList, byte[] lengthList, byte[] isNullList, int type, int bytes, int numOfRows, int columnIndex, long conn);
+
+ public void executeBatch(long stmt) throws SQLException {
+ int code = executeBatchImp(stmt, this.taos);
+ if (code != TSDBConstants.JNI_SUCCESS) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to execute batch bind");
+ }
+ }
+
+ private native int executeBatchImp(long stmt, long con);
+
+ public void closeBatch(long stmt) throws SQLException {
+ int code = closeStmt(stmt, this.taos);
+ if (code != TSDBConstants.JNI_SUCCESS) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to close batch bind");
+ }
+ }
+
+ private native int closeStmt(long stmt, long con);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
index e545bbc8f2..71e07252a3 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
@@ -14,36 +14,44 @@
*****************************************************************************/
package com.taosdata.jdbc;
+import com.taosdata.jdbc.utils.Utils;
+
import java.io.InputStream;
import java.io.Reader;
+import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
import java.net.URL;
-import java.nio.charset.Charset;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
import java.sql.*;
import java.util.ArrayList;
import java.util.Calendar;
+import java.util.Collections;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/*
- * TDengine only supports a subset of the standard SQL, thus this implemetation of the
+ * TDengine only supports a subset of the standard SQL, thus this implementation of the
* standard JDBC API contains more or less some adjustments customized for certain
* compatibility needs.
*/
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
-
private String rawSql;
private Object[] parameters;
private boolean isPrepared;
-
+
+ private ArrayList colData;
+ private String tableName;
+ private long nativeStmtHandle = 0;
+
private volatile TSDBParameterMetaData parameterMetaData;
- TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) {
- super(connection, connecter);
+ TSDBPreparedStatement(TSDBConnection connection, String sql) {
+ super(connection);
init(sql);
+ int parameterCnt = 0;
if (sql.contains("?")) {
- int parameterCnt = 0;
for (int i = 0; i < sql.length(); i++) {
if ('?' == sql.charAt(i)) {
parameterCnt++;
@@ -52,6 +60,12 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
parameters = new Object[parameterCnt];
this.isPrepared = true;
}
+
+ if (parameterCnt > 1) {
+ // the table name is also a parameter, so ignore it.
+ this.colData = new ArrayList(parameterCnt - 1);
+ this.colData.addAll(Collections.nCopies(parameterCnt - 1, null));
+ }
}
private void init(String sql) {
@@ -126,28 +140,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
* @return a string of the native sql statement for TSDB
*/
private String getNativeSql(String rawSql) throws SQLException {
- String sql = rawSql;
- for (int i = 0; i < parameters.length; ++i) {
- Object para = parameters[i];
- if (para != null) {
- String paraStr;
- if (para instanceof byte[]) {
- paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
- } else {
- paraStr = para.toString();
- }
- // if para is timestamp or String or byte[] need to translate ' character
- if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
- paraStr = paraStr.replaceAll("'", "\\\\\\\\'");
- paraStr = "'" + paraStr + "'";
- }
- sql = sql.replaceFirst("[?]", paraStr);
- } else {
- sql = sql.replaceFirst("[?]", "NULL");
- }
- }
- clearParameters();
- return sql;
+ return Utils.getNativeSql(rawSql, this.parameters);
}
@Override
@@ -275,15 +268,19 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- setObject(parameterIndex,x);
+ setObject(parameterIndex, x);
}
@Override
public void setObject(int parameterIndex, Object x) throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- if (parameterIndex < 1 && parameterIndex >= parameters.length)
+ }
+
+ if (parameterIndex < 1 && parameterIndex >= parameters.length) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
+ }
+
parameters[parameterIndex - 1] = x;
}
@@ -320,9 +317,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override
public void setRef(int parameterIndex, Ref x) throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-
+ }
+
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@@ -535,4 +533,276 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
+
+ ///////////////////////////////////////////////////////////////////////
+ // NOTE: the following APIs are not JDBC compatible
+ // set the bind table name
+ private static class ColumnInfo {
+ @SuppressWarnings("rawtypes")
+ private ArrayList data;
+ private int type;
+ private int bytes;
+ private boolean typeIsSet;
+
+ public ColumnInfo() {
+ this.typeIsSet = false;
+ }
+
+ public void setType(int type) throws SQLException {
+ if (this.isTypeSet()) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type has been set");
+ }
+
+ this.typeIsSet = true;
+ this.type = type;
+ }
+
+ public boolean isTypeSet() {
+ return this.typeIsSet;
+ }
+ };
+
+ public void setTableName(String name) {
+ this.tableName = name;
+ }
+
+ public void setValueImpl(int columnIndex, ArrayList list, int type, int bytes) throws SQLException {
+ ColumnInfo col = (ColumnInfo) this.colData.get(columnIndex);
+ if (col == null) {
+ ColumnInfo p = new ColumnInfo();
+ p.setType(type);
+ p.bytes = bytes;
+ p.data = (ArrayList>) list.clone();
+ this.colData.set(columnIndex, p);
+ } else {
+ if (col.type != type) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type mismatch");
+ }
+ col.data.addAll(list);
+ }
+ }
+
+ public void setInt(int columnIndex, ArrayList list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_INT, Integer.BYTES);
+ }
+
+ public void setFloat(int columnIndex, ArrayList list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_FLOAT, Float.BYTES);
+ }
+
+ public void setTimestamp(int columnIndex, ArrayList list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP, Long.BYTES);
+ }
+
+ public void setLong(int columnIndex, ArrayList list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BIGINT, Long.BYTES);
+ }
+
+ public void setDouble(int columnIndex, ArrayList list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_DOUBLE, Double.BYTES);
+ }
+
+ public void setBoolean(int columnIndex, ArrayList list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BOOL, Byte.BYTES);
+ }
+
+ public void setByte(int columnIndex, ArrayList list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TINYINT, Byte.BYTES);
+ }
+
+ public void setShort(int columnIndex, ArrayList list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_SMALLINT, Short.BYTES);
+ }
+
+ public void setString(int columnIndex, ArrayList list, int size) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BINARY, size);
+ }
+
+ // note: expand the required space for each NChar character
+ public void setNString(int columnIndex, ArrayList list, int size) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_NCHAR, size * Integer.BYTES);
+ }
+
+ public void columnDataAddBatch() throws SQLException {
+ // pass the data block to native code
+ if (rawSql == null) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "sql statement not set yet");
+ }
+
+ // table name is not set yet, abort
+ if (this.tableName == null) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "table name not set yet");
+ }
+
+ int numOfCols = this.colData.size();
+ if (numOfCols == 0) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
+ }
+
+ TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
+ this.nativeStmtHandle = connector.prepareStmt(rawSql);
+ connector.setBindTableName(this.nativeStmtHandle, this.tableName);
+
+ ColumnInfo colInfo = (ColumnInfo) this.colData.get(0);
+ if (colInfo == null) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
+ }
+
+ int rows = colInfo.data.size();
+ for (int i = 0; i < numOfCols; ++i) {
+ ColumnInfo col1 = this.colData.get(i);
+ if (col1 == null || !col1.isTypeSet()) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
+ }
+
+ if (rows != col1.data.size()) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "the rows in column data not identical");
+ }
+
+ ByteBuffer colDataList = ByteBuffer.allocate(rows * col1.bytes);
+ colDataList.order(ByteOrder.LITTLE_ENDIAN);
+
+ ByteBuffer lengthList = ByteBuffer.allocate(rows * Integer.BYTES);
+ lengthList.order(ByteOrder.LITTLE_ENDIAN);
+
+ ByteBuffer isNullList = ByteBuffer.allocate(rows * Byte.BYTES);
+ isNullList.order(ByteOrder.LITTLE_ENDIAN);
+
+ switch (col1.type) {
+ case TSDBConstants.TSDB_DATA_TYPE_INT: {
+ for (int j = 0; j < rows; ++j) {
+ Integer val = (Integer) col1.data.get(j);
+ colDataList.putInt(val == null? Integer.MIN_VALUE:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_TINYINT: {
+ for (int j = 0; j < rows; ++j) {
+ Byte val = (Byte) col1.data.get(j);
+ colDataList.put(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_BOOL: {
+ for (int j = 0; j < rows; ++j) {
+ Boolean val = (Boolean) col1.data.get(j);
+ if (val == null) {
+ colDataList.put((byte) 0);
+ } else {
+ colDataList.put((byte) (val? 1:0));
+ }
+
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: {
+ for (int j = 0; j < rows; ++j) {
+ Short val = (Short) col1.data.get(j);
+ colDataList.putShort(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
+ case TSDBConstants.TSDB_DATA_TYPE_BIGINT: {
+ for (int j = 0; j < rows; ++j) {
+ Long val = (Long) col1.data.get(j);
+ colDataList.putLong(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_FLOAT: {
+ for (int j = 0; j < rows; ++j) {
+ Float val = (Float) col1.data.get(j);
+ colDataList.putFloat(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
+ for (int j = 0; j < rows; ++j) {
+ Double val = (Double) col1.data.get(j);
+ colDataList.putDouble(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
+ case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
+ String charset = TaosGlobalConfig.getCharset();
+ for (int j = 0; j < rows; ++j) {
+ String val = (String) col1.data.get(j);
+
+ colDataList.position(j * col1.bytes); // seek to the correct position
+ if (val != null) {
+ byte[] b = null;
+ try {
+ if (col1.type == TSDBConstants.TSDB_DATA_TYPE_BINARY) {
+ b = val.getBytes();
+ } else {
+ b = val.getBytes(charset);
+ }
+ } catch (UnsupportedEncodingException e) {
+ e.printStackTrace();
+ }
+
+ if (val.length() > col1.bytes) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "string data too long");
+ }
+
+ colDataList.put(b);
+ lengthList.putInt(b.length);
+ isNullList.put((byte) 0);
+ } else {
+ lengthList.putInt(0);
+ isNullList.put((byte) 1);
+ }
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_UTINYINT:
+ case TSDBConstants.TSDB_DATA_TYPE_USMALLINT:
+ case TSDBConstants.TSDB_DATA_TYPE_UINT:
+ case TSDBConstants.TSDB_DATA_TYPE_UBIGINT: {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "not support data types");
+ }
+ };
+
+ connector.bindColumnDataArray(this.nativeStmtHandle, colDataList, lengthList, isNullList, col1.type, col1.bytes, rows, i);
+ }
+ }
+
+ public void columnDataExecuteBatch() throws SQLException {
+ TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
+ connector.executeBatch(this.nativeStmtHandle);
+ this.columnDataClearBatch();
+ }
+
+ public void columnDataClearBatch() {
+ int size = this.colData.size();
+ this.colData.clear();
+
+ this.colData.addAll(Collections.nCopies(size, null));
+ this.tableName = null; // clear the table name
+ }
+
+ public void columnDataCloseBatch() throws SQLException {
+ TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
+ connector.closeBatch(this.nativeStmtHandle);
+
+ this.nativeStmtHandle = 0L;
+ this.tableName = null;
+ }
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java
index 2576a25f0d..aba29d602b 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java
@@ -109,6 +109,8 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
public void close() throws SQLException {
if (isClosed)
return;
+ if (this.statement == null)
+ return;
if (this.jniConnector != null) {
int code = this.jniConnector.freeResultSet(this.resultSetPointer);
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
@@ -461,12 +463,13 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
}
public boolean isClosed() throws SQLException {
- if (isClosed)
- return true;
- if (jniConnector != null) {
- isClosed = jniConnector.isResultsetClosed();
- }
return isClosed;
+// if (isClosed)
+// return true;
+// if (jniConnector != null) {
+// isClosed = jniConnector.isResultsetClosed();
+// }
+// return isClosed;
}
public String getNString(int columnIndex) throws SQLException {
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
index ce5290de66..7b3be5d263 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
@@ -29,6 +29,8 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import com.taosdata.jdbc.utils.NullType;
+
public class TSDBResultSetBlockData {
private int numOfRows = 0;
private int rowIndex = 0;
@@ -164,59 +166,7 @@ public class TSDBResultSetBlockData {
}
}
- private static class NullType {
- private static final byte NULL_BOOL_VAL = 0x2;
- private static final String NULL_STR = "null";
-
- public String toString() {
- return NullType.NULL_STR;
- }
-
- public static boolean isBooleanNull(byte val) {
- return val == NullType.NULL_BOOL_VAL;
- }
-
- private static boolean isTinyIntNull(byte val) {
- return val == Byte.MIN_VALUE;
- }
-
- private static boolean isSmallIntNull(short val) {
- return val == Short.MIN_VALUE;
- }
-
- private static boolean isIntNull(int val) {
- return val == Integer.MIN_VALUE;
- }
-
- private static boolean isBigIntNull(long val) {
- return val == Long.MIN_VALUE;
- }
-
- private static boolean isFloatNull(float val) {
- return Float.isNaN(val);
- }
-
- private static boolean isDoubleNull(double val) {
- return Double.isNaN(val);
- }
-
- private static boolean isBinaryNull(byte[] val, int length) {
- if (length != Byte.BYTES) {
- return false;
- }
-
- return val[0] == 0xFF;
- }
-
- private static boolean isNcharNull(byte[] val, int length) {
- if (length != Integer.BYTES) {
- return false;
- }
-
- return (val[0] & val[1] & val[2] & val[3]) == 0xFF;
- }
-
- }
+
/**
* The original type may not be a string type, but will be converted to by
@@ -488,8 +438,8 @@ public class TSDBResultSetBlockData {
}
try {
- String ss = TaosGlobalConfig.getCharset();
- return new String(dest, ss);
+ String charset = TaosGlobalConfig.getCharset();
+ return new String(dest, charset);
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
index 34470fbc4e..618e896a6d 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
@@ -84,7 +84,8 @@ public class TSDBResultSetRowData {
data.set(col, value);
}
- public int getInt(int col, int srcType) throws SQLException {
+ @SuppressWarnings("deprecation")
+ public int getInt(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch (srcType) {
@@ -128,7 +129,7 @@ public class TSDBResultSetRowData {
long value = (long) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
- return new Long(value).intValue();
+ return Long.valueOf(value).intValue();
}
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
index fb20a621b0..d8ba67576d 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
@@ -19,8 +19,6 @@ import java.sql.ResultSet;
import java.sql.SQLException;
public class TSDBStatement extends AbstractStatement {
-
- private TSDBJNIConnector connector;
/**
* Status of current statement
*/
@@ -29,29 +27,26 @@ public class TSDBStatement extends AbstractStatement {
private TSDBConnection connection;
private TSDBResultSet resultSet;
- public void setConnection(TSDBConnection connection) {
+ TSDBStatement(TSDBConnection connection) {
this.connection = connection;
}
- TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) {
- this.connection = connection;
- this.connector = connector;
- }
-
public ResultSet executeQuery(String sql) throws SQLException {
// check if closed
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
+ }
+
//TODO: 如果在executeQuery方法中执行insert语句,那么先执行了SQL,再通过pSql来检查是否为一个insert语句,但这个insert SQL已经执行成功了
// execute query
- long pSql = this.connector.executeQuery(sql);
+ long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
- if (this.connector.isUpdateQuery(pSql)) {
- this.connector.freeResultSet(pSql);
+ if (this.connection.getConnector().isUpdateQuery(pSql)) {
+ this.connection.getConnector().freeResultSet(pSql);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY);
}
- TSDBResultSet res = new TSDBResultSet(this, this.connector, pSql);
+ TSDBResultSet res = new TSDBResultSet(this, this.connection.getConnector(), pSql);
res.setBatchFetch(this.connection.getBatchFetch());
return res;
}
@@ -60,14 +55,14 @@ public class TSDBStatement extends AbstractStatement {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- long pSql = this.connector.executeQuery(sql);
+ long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
- if (!this.connector.isUpdateQuery(pSql)) {
- this.connector.freeResultSet(pSql);
+ if (!this.connection.getConnector().isUpdateQuery(pSql)) {
+ this.connection.getConnector().freeResultSet(pSql);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE);
}
- int affectedRows = this.connector.getAffectedRows(pSql);
- this.connector.freeResultSet(pSql);
+ int affectedRows = this.connection.getConnector().getAffectedRows(pSql);
+ this.connection.getConnector().freeResultSet(pSql);
return affectedRows;
}
@@ -81,30 +76,29 @@ public class TSDBStatement extends AbstractStatement {
public boolean execute(String sql) throws SQLException {
// check if closed
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
+ }
+
// execute query
- long pSql = this.connector.executeQuery(sql);
+ long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
- if (this.connector.isUpdateQuery(pSql)) {
- this.affectedRows = this.connector.getAffectedRows(pSql);
- this.connector.freeResultSet(pSql);
+ if (this.connection.getConnector().isUpdateQuery(pSql)) {
+ this.affectedRows = this.connection.getConnector().getAffectedRows(pSql);
+ this.connection.getConnector().freeResultSet(pSql);
return false;
}
- this.resultSet = new TSDBResultSet(this, this.connector, pSql);
+ this.resultSet = new TSDBResultSet(this, this.connection.getConnector(), pSql);
this.resultSet.setBatchFetch(this.connection.getBatchFetch());
return true;
}
public ResultSet getResultSet() throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-// long resultSetPointer = connector.getResultSet();
-// TSDBResultSet resSet = null;
-// if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
-// resSet = new TSDBResultSet(connector, resultSetPointer);
-// }
+ }
+
return this.resultSet;
}
@@ -115,12 +109,20 @@ public class TSDBStatement extends AbstractStatement {
}
public Connection getConnection() throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- if (this.connector == null)
+ }
+
+ if (this.connection.getConnector() == null) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
+ }
+
return this.connection;
}
+
+ public void setConnection(TSDBConnection connection) {
+ this.connection = connection;
+ }
public boolean isClosed() throws SQLException {
return isClosed;
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
index 6efe13561d..a94cfa6e07 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
@@ -17,7 +17,7 @@ public class RestfulDriver extends AbstractDriver {
static {
try {
- java.sql.DriverManager.registerDriver(new RestfulDriver());
+ DriverManager.registerDriver(new RestfulDriver());
} catch (SQLException e) {
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_URL_NOT_SET, e);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java
index f82955ca9d..f58e3f8cd2 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java
@@ -2,12 +2,12 @@ package com.taosdata.jdbc.rs;
import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;
+import com.taosdata.jdbc.utils.Utils;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
-import java.nio.charset.Charset;
import java.sql.*;
import java.util.Calendar;
@@ -21,6 +21,7 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
public RestfulPreparedStatement(RestfulConnection conn, String database, String sql) {
super(conn, database);
this.rawSql = sql;
+
if (sql.contains("?")) {
int parameterCnt = 0;
for (int i = 0; i < sql.length(); i++) {
@@ -58,29 +59,14 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
return executeUpdate(sql);
}
- private String getNativeSql(String rawSql) throws SQLException {
- String sql = rawSql;
- for (int i = 0; i < parameters.length; ++i) {
- Object para = parameters[i];
- if (para != null) {
- String paraStr;
- if (para instanceof byte[]) {
- paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
- } else {
- paraStr = para.toString();
- }
- // if para is timestamp or String or byte[] need to translate ' character
- if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
- paraStr = paraStr.replaceAll("'", "\\\\\\\\'");
- paraStr = "'" + paraStr + "'";
- }
- sql = sql.replaceFirst("[?]", paraStr);
- } else {
- sql = sql.replaceFirst("[?]", "NULL");
- }
- }
- clearParameters();
- return sql;
+ /****
+ * 将rawSql转换成一条可执行的sql语句,使用属性parameters中的变脸进行替换
+ * 对于insert into ?.? (?,?,?) using ?.? (?,?,?) tags(?, ?, ?) values(?, ?, ?)
+ * @param rawSql,可能是insert、select或其他,使用?做占位符
+ * @return
+ */
+ private String getNativeSql(String rawSql) {
+ return Utils.getNativeSql(rawSql, this.parameters);
}
@Override
@@ -220,8 +206,8 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-
- setObject(parameterIndex,x);
+
+ setObject(parameterIndex, x);
}
@Override
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
index db635f5f79..530b433d42 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
@@ -6,11 +6,13 @@ import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import com.google.common.primitives.Shorts;
import com.taosdata.jdbc.*;
+import com.taosdata.jdbc.utils.Utils;
import java.math.BigDecimal;
import java.sql.*;
import java.time.Instant;
import java.time.ZoneOffset;
+import java.time.format.DateTimeParseException;
import java.util.ArrayList;
import java.util.Calendar;
@@ -18,14 +20,13 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
private volatile boolean isClosed;
private int pos = -1;
-
private final String database;
private final Statement statement;
// data
- private final ArrayList> resultSet;
+ private final ArrayList> resultSet = new ArrayList<>();
// meta
- private ArrayList columnNames;
- private ArrayList columns;
+ private ArrayList columnNames = new ArrayList<>();
+ private ArrayList columns = new ArrayList<>();
private RestfulResultSetMetaData metaData;
/**
@@ -37,10 +38,46 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
this.database = database;
this.statement = statement;
- // column metadata
+ // get column metadata
JSONArray columnMeta = resultJson.getJSONArray("column_meta");
- columnNames = new ArrayList<>();
- columns = new ArrayList<>();
+ // get row data
+ JSONArray data = resultJson.getJSONArray("data");
+ if (data == null || data.isEmpty()) {
+ columnNames.clear();
+ columns.clear();
+ this.resultSet.clear();
+ return;
+ }
+ // get head
+ JSONArray head = resultJson.getJSONArray("head");
+ // get rows
+ Integer rows = resultJson.getInteger("rows");
+ // parse column_meta
+ if (columnMeta != null) {
+ parseColumnMeta_new(columnMeta);
+ } else {
+ parseColumnMeta_old(head, data, rows);
+ }
+ this.metaData = new RestfulResultSetMetaData(this.database, columns, this);
+ // parse row data
+ resultSet.clear();
+ for (int rowIndex = 0; rowIndex < data.size(); rowIndex++) {
+ ArrayList row = new ArrayList();
+ JSONArray jsonRow = data.getJSONArray(rowIndex);
+ for (int colIndex = 0; colIndex < this.metaData.getColumnCount(); colIndex++) {
+ row.add(parseColumnData(jsonRow, colIndex, columns.get(colIndex).taos_type));
+ }
+ resultSet.add(row);
+ }
+ }
+
+ /***
+ * use this method after TDengine-2.0.18.0 to parse column meta, restful add column_meta in resultSet
+ * @Param columnMeta
+ */
+ private void parseColumnMeta_new(JSONArray columnMeta) throws SQLException {
+ columnNames.clear();
+ columns.clear();
for (int colIndex = 0; colIndex < columnMeta.size(); colIndex++) {
JSONArray col = columnMeta.getJSONArray(colIndex);
String col_name = col.getString(0);
@@ -50,23 +87,55 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
columnNames.add(col_name);
columns.add(new Field(col_name, col_type, col_length, "", taos_type));
}
- this.metaData = new RestfulResultSetMetaData(this.database, columns, this);
+ }
- // row data
- JSONArray data = resultJson.getJSONArray("data");
- resultSet = new ArrayList<>();
- for (int rowIndex = 0; rowIndex < data.size(); rowIndex++) {
- ArrayList row = new ArrayList();
- JSONArray jsonRow = data.getJSONArray(rowIndex);
- for (int colIndex = 0; colIndex < jsonRow.size(); colIndex++) {
- row.add(parseColumnData(jsonRow, colIndex, columns.get(colIndex).taos_type));
+ /**
+ * use this method before TDengine-2.0.18.0 to parse column meta
+ */
+ private void parseColumnMeta_old(JSONArray head, JSONArray data, int rows) {
+ columnNames.clear();
+ columns.clear();
+ for (int colIndex = 0; colIndex < head.size(); colIndex++) {
+ String col_name = head.getString(colIndex);
+ columnNames.add(col_name);
+
+ int col_type = Types.NULL;
+ int col_length = 0;
+ int taos_type = TSDBConstants.TSDB_DATA_TYPE_NULL;
+
+ JSONArray row0Json = data.getJSONArray(0);
+ if (colIndex < row0Json.size()) {
+ Object value = row0Json.get(colIndex);
+ if (value instanceof Boolean) {
+ col_type = Types.BOOLEAN;
+ col_length = 1;
+ taos_type = TSDBConstants.TSDB_DATA_TYPE_BOOL;
+ }
+ if (value instanceof Byte || value instanceof Short || value instanceof Integer || value instanceof Long) {
+ col_type = Types.BIGINT;
+ col_length = 8;
+ taos_type = TSDBConstants.TSDB_DATA_TYPE_BIGINT;
+ }
+ if (value instanceof Float || value instanceof Double || value instanceof BigDecimal) {
+ col_type = Types.DOUBLE;
+ col_length = 8;
+ taos_type = TSDBConstants.TSDB_DATA_TYPE_DOUBLE;
+ }
+ if (value instanceof String) {
+ col_type = Types.NCHAR;
+ col_length = ((String) value).length();
+ taos_type = TSDBConstants.TSDB_DATA_TYPE_NCHAR;
+ }
}
- resultSet.add(row);
+ columns.add(new Field(col_name, col_type, col_length, "", taos_type));
}
}
+
private Object parseColumnData(JSONArray row, int colIndex, int taosType) throws SQLException {
switch (taosType) {
+ case TSDBConstants.TSDB_DATA_TYPE_NULL:
+ return null;
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
return row.getBoolean(colIndex);
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
@@ -290,8 +359,10 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return 0;
}
wasNull = false;
- if (value instanceof Float || value instanceof Double)
+ if (value instanceof Float)
return (float) value;
+ if (value instanceof Double)
+ return new Float((Double) value);
return Float.parseFloat(value.toString());
}
@@ -329,6 +400,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return Shorts.toByteArray((short) value);
if (value instanceof Byte)
return new byte[]{(byte) value};
+ if (value instanceof Timestamp) {
+ return Utils.formatTimestamp((Timestamp) value).getBytes();
+ }
return value.toString().getBytes();
}
@@ -342,7 +416,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return null;
if (value instanceof Timestamp)
return new Date(((Timestamp) value).getTime());
- return Date.valueOf(value.toString());
+ Date date = null;
+ date = Utils.parseDate(value.toString());
+ return date;
}
@Override
@@ -354,7 +430,13 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return null;
if (value instanceof Timestamp)
return new Time(((Timestamp) value).getTime());
- return Time.valueOf(value.toString());
+ Time time = null;
+ try {
+ time = Utils.parseTime(value.toString());
+ } catch (DateTimeParseException e) {
+ time = null;
+ }
+ return time;
}
@Override
@@ -366,14 +448,20 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return null;
if (value instanceof Timestamp)
return (Timestamp) value;
-// if (value instanceof Long) {
-// if (1_0000_0000_0000_0L > (long) value)
-// return Timestamp.from(Instant.ofEpochMilli((long) value));
-// long epochSec = (long) value / 1000_000L;
-// long nanoAdjustment = (long) ((long) value % 1000_000L * 1000);
-// return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
-// }
- return Timestamp.valueOf(value.toString());
+ if (value instanceof Long) {
+ if (1_0000_0000_0000_0L > (long) value)
+ return Timestamp.from(Instant.ofEpochMilli((long) value));
+ long epochSec = (long) value / 1000_000L;
+ long nanoAdjustment = (long) value % 1000_000L * 1000;
+ return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
+ }
+ Timestamp ret;
+ try {
+ ret = Utils.parseTimestamp(value.toString());
+ } catch (Exception e) {
+ ret = null;
+ }
+ return ret;
}
@Override
@@ -415,7 +503,13 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return new BigDecimal(Double.valueOf(value.toString()));
if (value instanceof Timestamp)
return new BigDecimal(((Timestamp) value).getTime());
- return new BigDecimal(value.toString());
+ BigDecimal ret;
+ try {
+ ret = new BigDecimal(value.toString());
+ } catch (Exception e) {
+ ret = null;
+ }
+ return ret;
}
@Override
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
index e9cc3a009f..fbc3a50a27 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
@@ -136,21 +136,21 @@ public class RestfulStatement extends AbstractStatement {
throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc"));
}
this.resultSet = null;
- this.affectedRows = checkJsonResultSet(jsonObject);
+ this.affectedRows = getAffectedRows(jsonObject);
return this.affectedRows;
}
- private int checkJsonResultSet(JSONObject jsonObject) {
+ private int getAffectedRows(JSONObject jsonObject) throws SQLException {
// create ... SQLs should return 0 , and Restful result is this:
// {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1}
JSONArray head = jsonObject.getJSONArray("head");
+ if (head.size() != 1 || !"affected_rows".equals(head.getString(0)))
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
JSONArray data = jsonObject.getJSONArray("data");
- int rows = Integer.parseInt(jsonObject.getString("rows"));
- if (head.size() == 1 && "affected_rows".equals(head.getString(0))
- && data.size() == 1 && data.getJSONArray(0).getInteger(0) == 0 && rows == 1) {
- return 0;
- }
- return rows;
+ if (data != null)
+ return data.getJSONArray(0).getInteger(0);
+
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
@Override
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java
new file mode 100755
index 0000000000..0e05aeeee7
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java
@@ -0,0 +1,91 @@
+package com.taosdata.jdbc.utils;
+
+public class NullType {
+ private static final byte NULL_BOOL_VAL = 0x2;
+ private static final String NULL_STR = "null";
+
+ public String toString() {
+ return NullType.NULL_STR;
+ }
+
+ public static boolean isBooleanNull(byte val) {
+ return val == NullType.NULL_BOOL_VAL;
+ }
+
+ public static boolean isTinyIntNull(byte val) {
+ return val == Byte.MIN_VALUE;
+ }
+
+ public static boolean isSmallIntNull(short val) {
+ return val == Short.MIN_VALUE;
+ }
+
+ public static boolean isIntNull(int val) {
+ return val == Integer.MIN_VALUE;
+ }
+
+ public static boolean isBigIntNull(long val) {
+ return val == Long.MIN_VALUE;
+ }
+
+ public static boolean isFloatNull(float val) {
+ return Float.isNaN(val);
+ }
+
+ public static boolean isDoubleNull(double val) {
+ return Double.isNaN(val);
+ }
+
+ public static boolean isBinaryNull(byte[] val, int length) {
+ if (length != Byte.BYTES) {
+ return false;
+ }
+
+ return val[0] == 0xFF;
+ }
+
+ public static boolean isNcharNull(byte[] val, int length) {
+ if (length != Integer.BYTES) {
+ return false;
+ }
+
+ return (val[0] & val[1] & val[2] & val[3]) == 0xFF;
+ }
+
+ public static byte getBooleanNull() {
+ return NullType.NULL_BOOL_VAL;
+ }
+
+ public static byte getTinyintNull() {
+ return Byte.MIN_VALUE;
+ }
+
+ public static int getIntNull() {
+ return Integer.MIN_VALUE;
+ }
+
+ public static short getSmallIntNull() {
+ return Short.MIN_VALUE;
+ }
+
+ public static long getBigIntNull() {
+ return Long.MIN_VALUE;
+ }
+
+ public static int getFloatNull() {
+ return 0x7FF00000;
+ }
+
+ public static long getDoubleNull() {
+ return 0x7FFFFF0000000000L;
+ }
+
+ public static byte getBinaryNull() {
+ return (byte) 0xFF;
+ }
+
+ public static byte[] getNcharNull() {
+ return new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF};
+ }
+
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java
deleted file mode 100644
index 04a11a2beb..0000000000
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java
+++ /dev/null
@@ -1,12 +0,0 @@
-package com.taosdata.jdbc.utils;
-
-import java.time.format.DateTimeFormatter;
-import java.time.format.DateTimeFormatterBuilder;
-
-public class UtcTimestampUtil {
- public static final DateTimeFormatter formatter = new DateTimeFormatterBuilder()
- .appendPattern("yyyy-MM-ddTHH:mm:ss.SSS+")
-// .appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true)
- .toFormatter();
-
-}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java
new file mode 100644
index 0000000000..eeb936a1d0
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java
@@ -0,0 +1,188 @@
+package com.taosdata.jdbc.utils;
+
+import com.google.common.collect.Range;
+import com.google.common.collect.RangeSet;
+import com.google.common.collect.TreeRangeSet;
+
+import java.nio.charset.Charset;
+import java.sql.Date;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.format.DateTimeFormatter;
+import java.time.format.DateTimeFormatterBuilder;
+import java.time.format.DateTimeParseException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+public class Utils {
+
+ private static Pattern ptn = Pattern.compile(".*?'");
+
+ private static final DateTimeFormatter formatter = new DateTimeFormatterBuilder()
+ .appendPattern("yyyy-MM-dd HH:mm:ss.SSS").toFormatter();
+ private static final DateTimeFormatter formatter2 = new DateTimeFormatterBuilder()
+ .appendPattern("yyyy-MM-dd HH:mm:ss.SSSSSS").toFormatter();
+
+ public static Time parseTime(String timestampStr) throws DateTimeParseException {
+ LocalTime time;
+ try {
+ time = LocalTime.parse(timestampStr, formatter);
+ } catch (DateTimeParseException e) {
+ time = LocalTime.parse(timestampStr, formatter2);
+ }
+ return Time.valueOf(time);
+ }
+
+ public static Date parseDate(String timestampStr) throws DateTimeParseException {
+ LocalDate date;
+ try {
+ date = LocalDate.parse(timestampStr, formatter);
+ } catch (DateTimeParseException e) {
+ date = LocalDate.parse(timestampStr, formatter2);
+ }
+ return Date.valueOf(date);
+ }
+
+ public static Timestamp parseTimestamp(String timeStampStr) {
+ LocalDateTime dateTime;
+ try {
+ dateTime = LocalDateTime.parse(timeStampStr, formatter);
+ } catch (DateTimeParseException e) {
+ dateTime = LocalDateTime.parse(timeStampStr, formatter2);
+ }
+ return Timestamp.valueOf(dateTime);
+ }
+
+ public static String escapeSingleQuota(String origin) {
+ Matcher m = ptn.matcher(origin);
+ StringBuffer sb = new StringBuffer();
+ int end = 0;
+ while (m.find()) {
+ end = m.end();
+ String seg = origin.substring(m.start(), end);
+ int len = seg.length();
+ if (len == 1) {
+ if ('\'' == seg.charAt(0)) {
+ sb.append("\\'");
+ } else {
+ sb.append(seg);
+ }
+ } else { // len > 1
+ sb.append(seg.substring(0, seg.length() - 2));
+ char lastcSec = seg.charAt(seg.length() - 2);
+ if (lastcSec == '\\') {
+ sb.append("\\'");
+ } else {
+ sb.append(lastcSec);
+ sb.append("\\'");
+ }
+ }
+ }
+
+ if (end < origin.length()) {
+ sb.append(origin.substring(end));
+ }
+ return sb.toString();
+ }
+
+ public static String getNativeSql(String rawSql, Object[] parameters) {
+ // toLowerCase
+ String preparedSql = rawSql.trim().toLowerCase();
+
+ String[] clause = new String[0];
+ if (SqlSyntaxValidator.isInsertSql(preparedSql)) {
+ // insert or import
+ clause = new String[]{"values\\s*\\(.*?\\)", "tags\\s*\\(.*?\\)"};
+ }
+ if (SqlSyntaxValidator.isSelectSql(preparedSql)) {
+ // select
+ clause = new String[]{"where\\s*.*"};
+ }
+ Map placeholderPositions = new HashMap<>();
+ RangeSet clauseRangeSet = TreeRangeSet.create();
+ findPlaceholderPosition(preparedSql, placeholderPositions);
+ findClauseRangeSet(preparedSql, clause, clauseRangeSet);
+
+ return transformSql(rawSql, parameters, placeholderPositions, clauseRangeSet);
+ }
+
+ private static void findClauseRangeSet(String preparedSql, String[] regexArr, RangeSet clauseRangeSet) {
+ clauseRangeSet.clear();
+ for (String regex : regexArr) {
+ Matcher matcher = Pattern.compile(regex).matcher(preparedSql);
+ while (matcher.find()) {
+ int start = matcher.start();
+ int end = matcher.end();
+ clauseRangeSet.add(Range.closed(start, end));
+ }
+ }
+ }
+
+ private static void findPlaceholderPosition(String preparedSql, Map placeholderPosition) {
+ placeholderPosition.clear();
+ Matcher matcher = Pattern.compile("\\?").matcher(preparedSql);
+ int index = 0;
+ while (matcher.find()) {
+ int pos = matcher.start();
+ placeholderPosition.put(index, pos);
+ index++;
+ }
+ }
+
+ /***
+ *
+ * @param rawSql
+ * @param paramArr
+ * @param placeholderPosition
+ * @param clauseRangeSet
+ * @return
+ */
+ private static String transformSql(String rawSql, Object[] paramArr, Map placeholderPosition, RangeSet clauseRangeSet) {
+ String[] sqlArr = rawSql.split("\\?");
+
+ return IntStream.range(0, sqlArr.length).mapToObj(index -> {
+ if (index == paramArr.length)
+ return sqlArr[index];
+
+ Object para = paramArr[index];
+ String paraStr;
+ if (para != null) {
+ if (para instanceof byte[]) {
+ paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
+ } else {
+ paraStr = para.toString();
+ }
+ // if para is timestamp or String or byte[] need to translate ' character
+ if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
+ paraStr = Utils.escapeSingleQuota(paraStr);
+
+ Integer pos = placeholderPosition.get(index);
+ boolean contains = clauseRangeSet.contains(pos);
+ if (contains) {
+ paraStr = "'" + paraStr + "'";
+ }
+ }
+ } else {
+ paraStr = "NULL";
+ }
+ return sqlArr[index] + paraStr;
+ }).collect(Collectors.joining());
+ }
+
+
+ public static String formatTimestamp(Timestamp timestamp) {
+ int nanos = timestamp.getNanos();
+ if (nanos % 1000000l != 0)
+ return timestamp.toLocalDateTime().format(formatter2);
+ return timestamp.toLocalDateTime().format(formatter);
+ }
+
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
index 11c3de3052..24c73fdd5c 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
@@ -1,6 +1,7 @@
package com.taosdata.jdbc;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -12,69 +13,76 @@ import java.util.Properties;
import java.util.concurrent.TimeUnit;
public class SubscribeTest {
+
Connection connection;
Statement statement;
String dbName = "test";
String tName = "t0";
String host = "127.0.0.1";
String topic = "test";
-
- @Before
- public void createDatabase() {
- try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
- Properties properties = new Properties();
- properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
- properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
- properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
-
- statement = connection.createStatement();
- statement.execute("drop database if exists " + dbName);
- statement.execute("create database if not exists " + dbName);
- statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
- long ts = System.currentTimeMillis();
- for (int i = 0; i < 2; i++) {
- ts += i;
- String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")";
- statement.executeUpdate(sql);
- }
-
- } catch (ClassNotFoundException | SQLException e) {
- return;
- }
- }
+ private long ts;
@Test
public void subscribe() {
try {
String rawSql = "select * from " + dbName + "." + tName + ";";
- System.out.println(rawSql);
-// TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);
+ TSDBConnection conn = connection.unwrap(TSDBConnection.class);
+ TSDBSubscribe subscribe = conn.subscribe(topic, rawSql, false);
-// int a = 0;
-// while (true) {
-// TimeUnit.MILLISECONDS.sleep(1000);
-// TSDBResultSet resSet = subscribe.consume();
-// while (resSet.next()) {
-// for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
-// System.out.printf(i + ": " + resSet.getString(i) + "\t");
-// }
-// System.out.println("\n======" + a + "==========");
-// }
-// a++;
-// if (a >= 2) {
-// break;
-// }
-// resSet.close();
-// }
-//
-// subscribe.close(true);
- } catch (Exception e) {
- e.printStackTrace();
+ for (int j = 0; j < 10; j++) {
+ TimeUnit.SECONDS.sleep(1);
+ TSDBResultSet resSet = subscribe.consume();
+
+ int rowCnt = 0;
+ while (resSet.next()) {
+ if (rowCnt == 0) {
+ long cur_ts = resSet.getTimestamp(1).getTime();
+ int k = resSet.getInt(2);
+ int v = resSet.getInt(3);
+ Assert.assertEquals(ts, cur_ts);
+ Assert.assertEquals(100, k);
+ Assert.assertEquals(1, v);
+ }
+ if (rowCnt == 1) {
+ long cur_ts = resSet.getTimestamp(1).getTime();
+ int k = resSet.getInt(2);
+ int v = resSet.getInt(3);
+ Assert.assertEquals(ts + 1, cur_ts);
+ Assert.assertEquals(101, k);
+ Assert.assertEquals(2, v);
+
+ }
+ rowCnt++;
+ }
+ if (j == 0)
+ Assert.assertEquals(2, rowCnt);
+ resSet.close();
+ }
+ subscribe.close(true);
+
+
+ } catch (SQLException | InterruptedException throwables) {
+ throwables.printStackTrace();
}
}
+ @Before
+ public void createDatabase() throws SQLException {
+ Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
+
+ statement = connection.createStatement();
+ statement.execute("drop database if exists " + dbName);
+ statement.execute("create database if not exists " + dbName);
+ statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
+ ts = System.currentTimeMillis();
+ statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 100, 1)");
+ statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + (ts + 1) + ", 101, 2)");
+ }
+
@After
public void close() {
try {
@@ -86,6 +94,5 @@ public class SubscribeTest {
} catch (SQLException e) {
e.printStackTrace();
}
-
}
}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
index dc6fd4c501..8804cc5da0 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
@@ -8,6 +8,8 @@ import org.junit.Test;
import java.io.IOException;
import java.io.Serializable;
import java.sql.*;
+import java.util.ArrayList;
+import java.util.Random;
public class TSDBPreparedStatementTest {
private static final String host = "127.0.0.1";
@@ -97,6 +99,118 @@ public class TSDBPreparedStatementTest {
Assert.assertEquals(1, result);
}
+ @Test
+ public void executeTest() throws SQLException {
+ Statement stmt = conn.createStatement();
+
+ int numOfRows = 1000;
+
+ for (int loop = 0; loop < 10; loop++){
+ stmt.execute("drop table if exists weather_test");
+ stmt.execute("create table weather_test(ts timestamp, f1 nchar(4), f2 float, f3 double, f4 timestamp, f5 int, f6 bool, f7 binary(10))");
+
+ TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? values(?, ?, ?, ?, ?, ?, ?, ?)");
+ Random r = new Random();
+ s.setTableName("weather_test");
+
+ ArrayList ts = new ArrayList();
+ for(int i = 0; i < numOfRows; i++) {
+ ts.add(System.currentTimeMillis() + i);
+ }
+ s.setTimestamp(0, ts);
+
+ int random = 10 + r.nextInt(5);
+ ArrayList s2 = new ArrayList();
+ for(int i = 0; i < numOfRows; i++) {
+ if(i % random == 0) {
+ s2.add(null);
+ }else{
+ s2.add("分支" + i % 4);
+ }
+ }
+ s.setNString(1, s2, 4);
+
+ random = 10 + r.nextInt(5);
+ ArrayList s3 = new ArrayList();
+ for(int i = 0; i < numOfRows; i++) {
+ if(i % random == 0) {
+ s3.add(null);
+ }else{
+ s3.add(r.nextFloat());
+ }
+ }
+ s.setFloat(2, s3);
+
+ random = 10 + r.nextInt(5);
+ ArrayList s4 = new ArrayList();
+ for(int i = 0; i < numOfRows; i++) {
+ if(i % random == 0) {
+ s4.add(null);
+ }else{
+ s4.add(r.nextDouble());
+ }
+ }
+ s.setDouble(3, s4);
+
+ random = 10 + r.nextInt(5);
+ ArrayList ts2 = new ArrayList();
+ for(int i = 0; i < numOfRows; i++) {
+ if(i % random == 0) {
+ ts2.add(null);
+ }else{
+ ts2.add(System.currentTimeMillis() + i);
+ }
+ }
+ s.setTimestamp(4, ts2);
+
+ random = 10 + r.nextInt(5);
+ ArrayList vals = new ArrayList<>();
+ for(int i = 0; i < numOfRows; i++) {
+ if(i % random == 0) {
+ vals.add(null);
+ }else{
+ vals.add(r.nextInt());
+ }
+ }
+ s.setInt(5, vals);
+
+ random = 10 + r.nextInt(5);
+ ArrayList sb = new ArrayList<>();
+ for(int i = 0; i < numOfRows; i++) {
+ if(i % random == 0) {
+ sb.add(null);
+ }else{
+ sb.add(i % 2 == 0 ? true : false);
+ }
+ }
+ s.setBoolean(6, sb);
+
+ random = 10 + r.nextInt(5);
+ ArrayList s5 = new ArrayList();
+ for(int i = 0; i < numOfRows; i++) {
+ if(i % random == 0) {
+ s5.add(null);
+ }else{
+ s5.add("test" + i % 10);
+ }
+ }
+ s.setString(7, s5, 10);
+
+ s.columnDataAddBatch();
+ s.columnDataExecuteBatch();
+ s.columnDataCloseBatch();
+
+ String sql = "select * from weather_test";
+ PreparedStatement statement = conn.prepareStatement(sql);
+ ResultSet rs = statement.executeQuery();
+ int rows = 0;
+ while(rs.next()) {
+ rows++;
+ }
+ Assert.assertEquals(numOfRows, rows);
+ }
+ }
+
@Test
public void setBoolean() throws SQLException {
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java
index c5c6f7bca5..f304fd6874 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java
@@ -3,7 +3,6 @@ package com.taosdata.jdbc;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import com.google.common.primitives.Shorts;
-import com.taosdata.jdbc.rs.RestfulResultSet;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
@@ -177,7 +176,8 @@ public class TSDBResultSetTest {
rs.getAsciiStream("f1");
}
- @Test(expected = SQLFeatureNotSupportedException.class)
+ @SuppressWarnings("deprecation")
+ @Test(expected = SQLFeatureNotSupportedException.class)
public void getUnicodeStream() throws SQLException {
rs.getUnicodeStream("f1");
}
@@ -326,7 +326,7 @@ public class TSDBResultSetTest {
@Test(expected = SQLFeatureNotSupportedException.class)
public void getRow() throws SQLException {
- int row = rs.getRow();
+ rs.getRow();
}
@Test(expected = SQLFeatureNotSupportedException.class)
@@ -405,12 +405,12 @@ public class TSDBResultSetTest {
@Test(expected = SQLFeatureNotSupportedException.class)
public void updateByte() throws SQLException {
- rs.updateByte(1, new Byte("0"));
+ rs.updateByte(1, (byte) 0);
}
@Test(expected = SQLFeatureNotSupportedException.class)
public void updateShort() throws SQLException {
- rs.updateShort(1, new Short("0"));
+ rs.updateShort(1, (short) 0);
}
@Test(expected = SQLFeatureNotSupportedException.class)
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java
new file mode 100644
index 0000000000..4b4e83719f
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java
@@ -0,0 +1,401 @@
+package com.taosdata.jdbc.cases;
+
+import org.junit.*;
+
+import java.sql.*;
+
+public class InsertSpecialCharacterJniTest {
+
+ private static final String host = "127.0.0.1";
+ private static Connection conn;
+ private static String dbName = "spec_char_test";
+ private static String tbname1 = "test";
+ private static String tbname2 = "weather";
+ private static String special_character_str_1 = "$asd$$fsfsf$";
+ private static String special_character_str_2 = "\\\\asdfsfsf\\\\";
+ private static String special_character_str_3 = "\\\\asdfsfsf\\";
+ private static String special_character_str_4 = "?asd??fsf?sf?";
+ private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
+
+ @Test
+ public void testCase01() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_1.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from ?";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, tbname1);
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_1, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+
+ @Test
+ public void testCase02() throws SQLException {
+ //TODO:
+ // Expected :\asdfsfsf\\
+ // Actual :\asdfsfsf\
+
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_2.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ //TODO: bug to be fixed
+// Assert.assertEquals(special_character_str_2, f1);
+            Assert.assertEquals(special_character_str_2.substring(1, special_character_str_2.length() - 1), f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase03() throws SQLException {
+ //TODO:
+ // TDengine ERROR (216): Syntax error in SQL
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_3.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_3, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase04() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_4.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase05() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_5.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase06() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, special_character_str_4);
+ pstmt.setTimestamp(3, new Timestamp(now));
+ pstmt.setBytes(4, special_character_str_4.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query t1
+ final String query = "select * from t1";
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase07() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, ?, ?) ; ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_4.getBytes());
+ pstmt.setString(3, special_character_str_4);
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertEquals(special_character_str_4, f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase08() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?) ? ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, special_character_str_5);
+ pstmt.setTimestamp(3, new Timestamp(now));
+ pstmt.setBytes(4, special_character_str_5.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ }
+
+ @Test
+ public void testCase09() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into ?.t? using " + tbname2 + " tags(?) values(?, ?, ?) t? using weather tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+ // query t1
+ String query = "select * from t?";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setInt(1, 1);
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ // query t2
+ query = "select * from t2";
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ byte[] f1 = rs.getBytes(2);
+ Assert.assertNull(f1);
+ String f2 = new String(rs.getBytes(3));
+ Assert.assertEquals(special_character_str_5, f2);
+ }
+ }
+
+ @Test
+ public void testCase10() throws SQLException {
+ final long now = System.currentTimeMillis();
+
+ // insert
+ final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, tbname2);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+ //query t1
+ String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(4, new Timestamp(0));
+ pstmt.setString(5, "f1");
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ byte[] f2 = rs.getBytes(3);
+ Assert.assertNull(f2);
+ }
+ // query t2
+ query = "select * from t? where ts < ? and ts >= ? and ? is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setInt(1, 2);
+ pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(3, new Timestamp(0));
+ pstmt.setString(4, "f2");
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ byte[] f1 = rs.getBytes(2);
+ Assert.assertNull(f1);
+ String f2 = new String(rs.getBytes(3));
+ Assert.assertEquals(special_character_str_5, f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase11() throws SQLException {
+        final String specialCharacterStr = "?#sd@$f(((s[P)){]}f?s[]{}%vs^a&d*jhg)(j))(f@~!?$";
+        final long now = System.currentTimeMillis();
+
+        final String sql = "insert into t? using " + tbname2 + " values(?, ?, 'abc?abc') ";
+        try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+            pstmt.setInt(1, 1);
+            pstmt.setTimestamp(2, new Timestamp(now));
+            pstmt.setBytes(3, specialCharacterStr.getBytes());
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ }
+
+
+ @Test
+ public void testCase12() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setString(2, special_character_str_4);
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals("HelloTDengine", f1);
+ String f2 = rs.getString(3);
+ Assert.assertEquals(special_character_str_4, f2);
+ }
+ }
+
+ @Before
+ public void before() throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists " + tbname1 + "");
+ stmt.execute("create table " + tbname1 + "(ts timestamp,f1 binary(64),f2 nchar(64))");
+ stmt.execute("drop table if exists " + tbname2);
+ stmt.execute("create table " + tbname2 + "(ts timestamp, f1 binary(64), f2 nchar(64)) tags(loc nchar(64))");
+ }
+ }
+
+ @BeforeClass
+ public static void beforeClass() throws SQLException {
+ String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+ conn = DriverManager.getConnection(url);
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop database if exists " + dbName);
+ stmt.execute("create database if not exists " + dbName);
+ stmt.execute("use " + dbName);
+ }
+ }
+
+ @AfterClass
+ public static void afterClass() throws SQLException {
+ if (conn != null)
+ conn.close();
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java
new file mode 100644
index 0000000000..fa6cbd22b5
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java
@@ -0,0 +1,400 @@
+package com.taosdata.jdbc.cases;
+
+import org.junit.*;
+
+import java.sql.*;
+
+public class InsertSpecialCharacterRestfulTest {
+
+ private static final String host = "127.0.0.1";
+ private static Connection conn;
+ private static String dbName = "spec_char_test";
+ private static String tbname1 = "test";
+ private static String tbname2 = "weather";
+ private static String special_character_str_1 = "$asd$$fsfsf$";
+ private static String special_character_str_2 = "\\\\asdfsfsf\\\\";
+ private static String special_character_str_3 = "\\\\asdfsfsf\\";
+ private static String special_character_str_4 = "?asd??fsf?sf?";
+ private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
+
+ @Test
+ public void testCase01() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_1.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from ?";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, tbname1);
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_1, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+
+ @Test
+ public void testCase02() throws SQLException {
+ //TODO:
+        // Expected :\asdfsfsf\\
+        // Actual   :\asdfsfsf\
+
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_2.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ //TODO: bug to be fixed
+// Assert.assertEquals(special_character_str_2, f1);
+            Assert.assertEquals(special_character_str_2.substring(1, special_character_str_2.length() - 1), f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase03() throws SQLException {
+ //TODO:
+ // TDengine ERROR (216): Syntax error in SQL
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_3.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_3, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase04() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_4.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase05() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_5.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase06() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, special_character_str_4);
+ pstmt.setTimestamp(3, new Timestamp(now));
+ pstmt.setBytes(4, special_character_str_4.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query t1
+ final String query = "select * from t1";
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase07() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, ?, ?) ; ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_4.getBytes());
+ pstmt.setString(3, special_character_str_4);
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertEquals(special_character_str_4, f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase08() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?) ? ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, special_character_str_5);
+ pstmt.setTimestamp(3, new Timestamp(now));
+ pstmt.setBytes(4, special_character_str_5.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ }
+
+ @Test
+ public void testCase09() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into ?.t? using " + tbname2 + " tags(?) values(?, ?, ?) t? using weather tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+ // query t1
+ String query = "select * from t?";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setInt(1, 1);
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ // query t2
+ query = "select * from t2";
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ byte[] f1 = rs.getBytes(2);
+ Assert.assertNull(f1);
+ String f2 = new String(rs.getBytes(3));
+ Assert.assertEquals(special_character_str_5, f2);
+ }
+ }
+
+ @Test
+ public void testCase10() throws SQLException {
+ final long now = System.currentTimeMillis();
+
+ // insert
+ final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, tbname2);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+ //query t1
+ String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(4, new Timestamp(0));
+ pstmt.setString(5, "f1");
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ byte[] f2 = rs.getBytes(3);
+ Assert.assertNull(f2);
+ }
+ // query t2
+ query = "select * from t? where ts < ? and ts >= ? and ? is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setInt(1, 2);
+ pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(3, new Timestamp(0));
+ pstmt.setString(4, "f2");
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ byte[] f1 = rs.getBytes(2);
+ Assert.assertNull(f1);
+ String f2 = new String(rs.getBytes(3));
+ Assert.assertEquals(special_character_str_5, f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase11() throws SQLException {
+        final String specialCharacterStr = "?#sd@$f(((s[P)){]}f?s[]{}%vs^a&d*jhg)(j))(f@~!?$";
+        final long now = System.currentTimeMillis();
+
+        final String sql = "insert into t? using " + tbname2 + " values(?, ?, 'abc?abc') ";
+        try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+            pstmt.setInt(1, 1);
+            pstmt.setTimestamp(2, new Timestamp(now));
+            pstmt.setBytes(3, specialCharacterStr.getBytes());
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ }
+
+ @Test
+ public void testCase12() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setString(2, special_character_str_4);
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals("HelloTDengine", f1);
+ String f2 = rs.getString(3);
+ Assert.assertEquals(special_character_str_4, f2);
+ }
+ }
+
+ @Before
+ public void before() throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists " + tbname1 + "");
+ stmt.execute("create table " + tbname1 + "(ts timestamp,f1 binary(64),f2 nchar(64))");
+ stmt.execute("drop table if exists " + tbname2);
+ stmt.execute("create table " + tbname2 + "(ts timestamp, f1 binary(64), f2 nchar(64)) tags(loc nchar(64))");
+ }
+ }
+
+ @BeforeClass
+ public static void beforeClass() throws SQLException {
+ String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
+ conn = DriverManager.getConnection(url);
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop database if exists " + dbName);
+ stmt.execute("create database if not exists " + dbName);
+ stmt.execute("use " + dbName);
+ }
+ }
+
+ @AfterClass
+ public static void afterClass() throws SQLException {
+ if (conn != null)
+ conn.close();
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD4144Test.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD4144Test.java
new file mode 100644
index 0000000000..6f29f64111
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD4144Test.java
@@ -0,0 +1,105 @@
+package com.taosdata.jdbc.cases;
+
+import com.taosdata.jdbc.TSDBConnection;
+import com.taosdata.jdbc.TSDBDriver;
+import com.taosdata.jdbc.TSDBResultSet;
+import com.taosdata.jdbc.TSDBSubscribe;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.sql.DriverManager;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
+public class TD4144Test {
+
+ private static TSDBConnection connection;
+ private static final String host = "127.0.0.1";
+
+ private static final String topic = "topic-meter-current-bg-10";
+ private static final String sql = "select * from meters where current > 10";
+ private static final String sql2 = "select * from meters where ts >= '2020-08-15 12:20:00.000'";
+
+
+ @Test
+ public void test() throws SQLException {
+ TSDBSubscribe subscribe = null;
+ TSDBResultSet res = null;
+ boolean hasNext = false;
+
+ try {
+ subscribe = connection.subscribe(topic, sql, false);
+ int count = 0;
+ while (true) {
+                // wait 1 second to avoid calling consume too frequently and stressing the server
+ TimeUnit.SECONDS.sleep(1);
+ if (res == null) {
+                    // consume subscribed data
+ res = subscribe.consume();
+ hasNext = res.next();
+ }
+
+ if (res == null) {
+ continue;
+ }
+ ResultSetMetaData metaData = res.getMetaData();
+ int number = 0;
+ while (hasNext) {
+ int columnCount = metaData.getColumnCount();
+ for (int i = 1; i <= columnCount; i++) {
+ System.out.print(metaData.getColumnLabel(i) + ": " + res.getString(i) + "\t");
+ }
+ System.out.println();
+ count++;
+ number++;
+ hasNext = res.next();
+ if (!hasNext) {
+ res.close();
+ res = null;
+ System.out.println("rows: " + count);
+ }
+ if (hasNext == true && number >= 10) {
+ System.out.println("batch" + number);
+ break;
+ }
+ }
+
+ }
+
+ } catch (SQLException | InterruptedException throwables) {
+ throwables.printStackTrace();
+ } finally {
+ if (subscribe != null)
+ subscribe.close(true);
+ }
+ }
+
+ @BeforeClass
+ public static void beforeClass() throws SQLException {
+ Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+ connection = (DriverManager.getConnection(url, properties)).unwrap(TSDBConnection.class);
+ try (Statement stmt = connection.createStatement()) {
+ stmt.execute("drop database if exists power");
+ stmt.execute("create database if not exists power");
+ stmt.execute("use power");
+ stmt.execute("create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int)");
+ stmt.execute("create table d1001 using meters tags(\"Beijing.Chaoyang\", 2)");
+ stmt.execute("create table d1002 using meters tags(\"Beijing.Haidian\", 2)");
+ stmt.execute("insert into d1001 values(\"2020-08-15 12:00:00.000\", 12, 220, 1),(\"2020-08-15 12:10:00.000\", 12.3, 220, 2),(\"2020-08-15 12:20:00.000\", 12.2, 220, 1)");
+ stmt.execute("insert into d1002 values(\"2020-08-15 12:00:00.000\", 9.9, 220, 1),(\"2020-08-15 12:10:00.000\", 10.3, 220, 1),(\"2020-08-15 12:20:00.000\", 11.2, 220, 1)");
+ }
+ }
+
+ @AfterClass
+ public static void afterClass() throws SQLException {
+ if (connection != null)
+ connection.close();
+ }
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD4174Test.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD4174Test.java
new file mode 100644
index 0000000000..2704d4cfa5
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD4174Test.java
@@ -0,0 +1,62 @@
+package com.taosdata.jdbc.cases;
+
+import com.alibaba.fastjson.JSONObject;
+import com.taosdata.jdbc.TSDBDriver;
+import org.junit.*;
+
+import java.sql.*;
+import java.util.Properties;
+
+public class TD4174Test {
+ private Connection conn;
+ private static final String host = "127.0.0.1";
+
+ @Test
+ public void test() {
+ long ts = System.currentTimeMillis();
+ try (PreparedStatement pstmt = conn.prepareStatement("insert into weather values(" + ts + ", ?)")) {
+ JSONObject value = new JSONObject();
+ value.put("name", "John Smith");
+ value.put("age", 20);
+ Assert.assertEquals("{\"name\":\"John Smith\",\"age\":20}",value.toJSONString());
+ pstmt.setString(1, value.toJSONString());
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ public static void main(String[] args) {
+ JSONObject value = new JSONObject();
+ value.put("name", "John Smith");
+ value.put("age", 20);
+ System.out.println(value.toJSONString());
+ }
+
+ @Before
+ public void before() throws SQLException {
+ String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+ Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+
+ conn = DriverManager.getConnection(url, properties);
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop database if exists td4174");
+ stmt.execute("create database if not exists td4174");
+ stmt.execute("use td4174");
+ stmt.execute("create table weather(ts timestamp, text binary(64))");
+ }
+ }
+
+ @After
+ public void after() throws SQLException {
+ if (conn != null)
+ conn.close();
+
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java
index ed4f979ef3..5c83b5a9da 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java
@@ -13,6 +13,7 @@ import java.util.Properties;
public class TwoTypeTimestampPercisionInRestfulTest {
private static final String host = "127.0.0.1";
+
private static final String ms_timestamp_db = "ms_precision_test";
private static final String us_timestamp_db = "us_precision_test";
private static final long timestamp1 = System.currentTimeMillis();
@@ -94,7 +95,8 @@ public class TwoTypeTimestampPercisionInRestfulTest {
try (Statement stmt = conn3.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
rs.next();
- long ts = rs.getTimestamp(1).getTime();
+ Timestamp actual = rs.getTimestamp(1);
+ long ts = actual == null ? 0 : actual.getTime();
Assert.assertEquals(timestamp1, ts);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
@@ -110,7 +112,7 @@ public class TwoTypeTimestampPercisionInRestfulTest {
rs.next();
Timestamp timestamp = rs.getTimestamp(1);
- long ts = timestamp.getTime();
+ long ts = timestamp == null ? 0 : timestamp.getTime();
Assert.assertEquals(timestamp1, ts);
int nanos = timestamp.getNanos();
Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberJniTest.java
index d1816a3e7c..fb23c0e64a 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberJniTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberJniTest.java
@@ -9,19 +9,19 @@ import java.util.Properties;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class UnsignedNumberJniTest {
+
private static final String host = "127.0.0.1";
private static Connection conn;
+ private static long ts;
@Test
public void testCase001() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select * from us_table");
ResultSetMetaData meta = rs.getMetaData();
+ assertResultSetMetaData(meta);
while (rs.next()) {
- for (int i = 1; i <= meta.getColumnCount(); i++) {
- System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
- }
- System.out.println();
+ Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
Assert.assertEquals("127", rs.getString(2));
Assert.assertEquals("32767", rs.getString(3));
Assert.assertEquals("2147483647", rs.getString(4));
@@ -37,13 +37,10 @@ public class UnsignedNumberJniTest {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select * from us_table");
ResultSetMetaData meta = rs.getMetaData();
+ assertResultSetMetaData(meta);
+
while (rs.next()) {
- System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
- System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
- System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
- System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
- System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
- System.out.println();
+ Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals(32767, rs.getShort(3));
Assert.assertEquals(2147483647, rs.getInt(4));
@@ -61,16 +58,14 @@ public class UnsignedNumberJniTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 127, 32767,2147483647, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
+ assertResultSetMetaData(meta);
while (rs.next()) {
- System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
- System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
- System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
- System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
- System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
- System.out.println();
+ Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals(32767, rs.getShort(3));
Assert.assertEquals(2147483647, rs.getInt(4));
+ Assert.assertEquals("18446744073709551614", rs.getString(5));
+ rs.getLong(5);
}
}
}
@@ -82,15 +77,15 @@ public class UnsignedNumberJniTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 127, 32767,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
+ assertResultSetMetaData(meta);
+
while (rs.next()) {
- System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
- System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
- System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
- System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
- System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
- System.out.println();
+ Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals(32767, rs.getShort(3));
+ Assert.assertEquals("4294967294", rs.getString(4));
+ Assert.assertEquals("18446744073709551614", rs.getString(5));
+ rs.getInt(4);
}
}
}
@@ -102,15 +97,15 @@ public class UnsignedNumberJniTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 127, 65534,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
- while (rs.next()) {
- System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
- System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
- System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
- System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
- System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
- System.out.println();
+ assertResultSetMetaData(meta);
+ while (rs.next()) {
+ Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals(127, rs.getByte(2));
+ Assert.assertEquals("65534", rs.getString(3));
+ Assert.assertEquals("4294967294", rs.getString(4));
+ Assert.assertEquals("18446744073709551614", rs.getString(5));
+ rs.getShort(3);
}
}
}
@@ -122,37 +117,27 @@ public class UnsignedNumberJniTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 254, 65534,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
- while (rs.next()) {
- System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
- System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
- System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
- System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
- System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
- System.out.println();
- }
- }
- }
+ assertResultSetMetaData(meta);
- @Test
- public void testCase007() throws SQLException {
- try (Statement stmt = conn.createStatement()) {
- long now = System.currentTimeMillis();
- stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 254, 65534,4294967294, 18446744073709551614)");
- ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
- ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
- for (int i = 1; i <= meta.getColumnCount(); i++) {
- System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
- }
- System.out.println();
+ Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals("254", rs.getString(2));
Assert.assertEquals("65534", rs.getString(3));
Assert.assertEquals("4294967294", rs.getString(4));
Assert.assertEquals("18446744073709551614", rs.getString(5));
+ rs.getByte(2);
}
}
}
+ private void assertResultSetMetaData(ResultSetMetaData meta) throws SQLException {
+ Assert.assertEquals(5, meta.getColumnCount());
+ Assert.assertEquals("ts", meta.getColumnLabel(1));
+ Assert.assertEquals("f1", meta.getColumnLabel(2));
+ Assert.assertEquals("f2", meta.getColumnLabel(3));
+ Assert.assertEquals("f3", meta.getColumnLabel(4));
+ Assert.assertEquals("f4", meta.getColumnLabel(5));
+ }
@BeforeClass
public static void beforeClass() {
@@ -160,20 +145,19 @@ public class UnsignedNumberJniTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ ts = System.currentTimeMillis();
try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
conn = DriverManager.getConnection(url, properties);
-
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists unsign_jni");
stmt.execute("create database if not exists unsign_jni");
stmt.execute("use unsign_jni");
stmt.execute("create table us_table(ts timestamp, f1 tinyint unsigned, f2 smallint unsigned, f3 int unsigned, f4 bigint unsigned)");
- stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(now, 127, 32767,2147483647, 9223372036854775807)");
+ stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + ts + ", 127, 32767,2147483647, 9223372036854775807)");
stmt.close();
- } catch (ClassNotFoundException | SQLException e) {
+ } catch (SQLException e) {
e.printStackTrace();
}
}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberRestfulTest.java
index 4ae2f36fe9..a659a490cb 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberRestfulTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberRestfulTest.java
@@ -13,17 +13,20 @@ public class UnsignedNumberRestfulTest {
private static final String host = "127.0.0.1";
private static Connection conn;
+ private static long ts;
@Test
public void testCase001() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select * from us_table");
ResultSetMetaData meta = rs.getMetaData();
+ assertResultSetMetaData(meta);
while (rs.next()) {
- for (int i = 1; i <= meta.getColumnCount(); i++) {
- System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
- }
- System.out.println();
+ Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
+ Assert.assertEquals("127", rs.getString(2));
+ Assert.assertEquals("32767", rs.getString(3));
+ Assert.assertEquals("2147483647", rs.getString(4));
+ Assert.assertEquals("9223372036854775807", rs.getString(5));
}
} catch (SQLException e) {
e.printStackTrace();
@@ -35,13 +38,14 @@ public class UnsignedNumberRestfulTest {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select * from us_table");
ResultSetMetaData meta = rs.getMetaData();
+ assertResultSetMetaData(meta);
+
while (rs.next()) {
- System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
- System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
- System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
- System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
- System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
- System.out.println();
+ Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
+ Assert.assertEquals(127, rs.getByte(2));
+ Assert.assertEquals(32767, rs.getShort(3));
+ Assert.assertEquals(2147483647, rs.getInt(4));
+ Assert.assertEquals(9223372036854775807l, rs.getLong(5));
}
} catch (SQLException e) {
e.printStackTrace();
@@ -55,13 +59,14 @@ public class UnsignedNumberRestfulTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 127, 32767,2147483647, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
+ assertResultSetMetaData(meta);
while (rs.next()) {
- System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
- System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
- System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
- System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
- System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
- System.out.println();
+ Assert.assertEquals(now, rs.getTimestamp(1).getTime());
+ Assert.assertEquals(127, rs.getByte(2));
+ Assert.assertEquals(32767, rs.getShort(3));
+ Assert.assertEquals(2147483647, rs.getInt(4));
+ Assert.assertEquals("18446744073709551614", rs.getString(5));
+ rs.getLong(5);
}
}
}
@@ -73,13 +78,15 @@ public class UnsignedNumberRestfulTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 127, 32767,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
+ assertResultSetMetaData(meta);
+
while (rs.next()) {
- System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
- System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
- System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
- System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
- System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
- System.out.println();
+ Assert.assertEquals(now, rs.getTimestamp(1).getTime());
+ Assert.assertEquals(127, rs.getByte(2));
+ Assert.assertEquals(32767, rs.getShort(3));
+ Assert.assertEquals("4294967294", rs.getString(4));
+ Assert.assertEquals("18446744073709551614", rs.getString(5));
+ rs.getInt(4);
}
}
}
@@ -91,13 +98,15 @@ public class UnsignedNumberRestfulTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 127, 65534,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
+ assertResultSetMetaData(meta);
+
while (rs.next()) {
- System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
- System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
- System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
- System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
- System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
- System.out.println();
+ Assert.assertEquals(now, rs.getTimestamp(1).getTime());
+ Assert.assertEquals(127, rs.getByte(2));
+ Assert.assertEquals("65534", rs.getString(3));
+ Assert.assertEquals("4294967294", rs.getString(4));
+ Assert.assertEquals("18446744073709551614", rs.getString(5));
+ rs.getShort(3);
}
}
}
@@ -109,57 +118,47 @@ public class UnsignedNumberRestfulTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 254, 65534,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
- while (rs.next()) {
- System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
- System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
- System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
- System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
- System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
- System.out.println();
- }
- }
- }
+ assertResultSetMetaData(meta);
- @Test
- public void testCase007() throws SQLException {
- try (Statement stmt = conn.createStatement()) {
- long now = System.currentTimeMillis();
- stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 254, 65534,4294967294, 18446744073709551614)");
- ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
- ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
- for (int i = 1; i <= meta.getColumnCount(); i++) {
- System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
- }
- System.out.println();
+ Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals("254", rs.getString(2));
Assert.assertEquals("65534", rs.getString(3));
Assert.assertEquals("4294967294", rs.getString(4));
Assert.assertEquals("18446744073709551614", rs.getString(5));
+ rs.getByte(2);
}
}
}
+ private void assertResultSetMetaData(ResultSetMetaData meta) throws SQLException {
+ Assert.assertEquals(5, meta.getColumnCount());
+ Assert.assertEquals("ts", meta.getColumnLabel(1));
+ Assert.assertEquals("f1", meta.getColumnLabel(2));
+ Assert.assertEquals("f2", meta.getColumnLabel(3));
+ Assert.assertEquals("f3", meta.getColumnLabel(4));
+ Assert.assertEquals("f4", meta.getColumnLabel(5));
+ }
+
@BeforeClass
public static void beforeClass() {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ ts = System.currentTimeMillis();
try {
- Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
conn = DriverManager.getConnection(url, properties);
-
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists unsign_restful");
stmt.execute("create database if not exists unsign_restful");
stmt.execute("use unsign_restful");
stmt.execute("create table us_table(ts timestamp, f1 tinyint unsigned, f2 smallint unsigned, f3 int unsigned, f4 bigint unsigned)");
- stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(now, 127, 32767,2147483647, 9223372036854775807)");
+ stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + ts + ", 127, 32767,2147483647, 9223372036854775807)");
stmt.close();
- } catch (ClassNotFoundException | SQLException e) {
+ } catch (SQLException e) {
e.printStackTrace();
}
}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java
index 40956a601f..ee457ff412 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java
@@ -6,7 +6,6 @@ import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
-import java.io.Serializable;
import java.sql.*;
public class RestfulPreparedStatementTest {
@@ -371,7 +370,6 @@ public class RestfulPreparedStatementTest {
pstmt_insert.setSQLXML(1, null);
}
-
@BeforeClass
public static void beforeClass() {
try {
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java
index 9bfe9a04ff..81e762c5ca 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java
@@ -18,7 +18,6 @@ import java.text.SimpleDateFormat;
public class RestfulResultSetTest {
private static final String host = "127.0.0.1";
-
private static Connection conn;
private static Statement stmt;
private static ResultSet rs;
@@ -95,7 +94,8 @@ public class RestfulResultSetTest {
@Test
public void getBigDecimal() throws SQLException {
BigDecimal f1 = rs.getBigDecimal("f1");
- Assert.assertEquals(1609430400000l, f1.longValue());
+ long actual = (f1 == null) ? 0 : f1.longValue();
+ Assert.assertEquals(1609430400000l, actual);
BigDecimal f2 = rs.getBigDecimal("f2");
Assert.assertEquals(1, f2.intValue());
@@ -119,7 +119,7 @@ public class RestfulResultSetTest {
@Test
public void getBytes() throws SQLException {
byte[] f1 = rs.getBytes("f1");
- Assert.assertEquals("2021-01-01 00:00:00.0", new String(f1));
+ Assert.assertEquals("2021-01-01 00:00:00.000", new String(f1));
byte[] f2 = rs.getBytes("f2");
Assert.assertEquals(1, Ints.fromByteArray(f2));
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java
new file mode 100644
index 0000000000..c861ef2966
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java
@@ -0,0 +1,24 @@
+package com.taosdata.jdbc.utils;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class UtilsTest {
+
+ @Test
+ public void escapeSingleQuota() {
+ String s = "'''''a\\'";
+ String news = Utils.escapeSingleQuota(s);
+ Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
+
+ s = "\'''''a\\'";
+ news = Utils.escapeSingleQuota(s);
+ Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
+
+ s = "\'\'\'\''a\\'";
+ news = Utils.escapeSingleQuota(s);
+ Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
+ }
+}
\ No newline at end of file
diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js
index 43a08a800a..f3961e3787 100644
--- a/src/connector/nodejs/nodetaos/cinterface.js
+++ b/src/connector/nodejs/nodetaos/cinterface.js
@@ -9,7 +9,7 @@ const ffi = require('ffi-napi');
const ArrayType = require('ref-array-napi');
const Struct = require('ref-struct-napi');
const FieldTypes = require('./constants');
-const errors = require ('./error');
+const errors = require('./error');
const TaosObjects = require('./taosobjects');
const { NULL_POINTER } = require('ref-napi');
@@ -22,7 +22,7 @@ function convertMicrosecondsToDatetime(time) {
return new TaosObjects.TaosTimestamp(time * 0.001, true);
}
-function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
timestampConverter = convertMillisecondsToDatetime;
if (micro == true) {
timestampConverter = convertMicrosecondsToDatetime;
@@ -44,14 +44,14 @@ function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro=false
}
return res;
}
-function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = new Array(data.length);
for (let i = 0; i < data.length; i++) {
if (data[i] == 0) {
res[i] = false;
}
- else if (data[i] == 1){
+ else if (data[i] == 1) {
res[i] = true;
}
else if (data[i] == FieldTypes.C_BOOL_NULL) {
@@ -60,29 +60,29 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
}
return res;
}
-function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
while (currOffset < data.length) {
- let d = data.readIntLE(currOffset,1);
+ let d = data.readIntLE(currOffset, 1);
res.push(d == FieldTypes.C_TINYINT_NULL ? null : d);
currOffset += nbytes;
}
return res;
}
-function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
while (currOffset < data.length) {
- let d = data.readIntLE(currOffset,2);
+ let d = data.readIntLE(currOffset, 2);
res.push(d == FieldTypes.C_SMALLINT_NULL ? null : d);
currOffset += nbytes;
}
return res;
}
-function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -93,7 +93,7 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
}
return res;
}
-function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -104,7 +104,7 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
}
return res;
}
-function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -115,7 +115,7 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
}
return res;
}
-function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -126,7 +126,7 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
}
return res;
}
-function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -142,7 +142,7 @@ function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
}
return res;
}
-function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let dataEntry = data.slice(0, nbytes); //one entry in a row under a column;
@@ -153,23 +153,23 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
// Object with all the relevant converters from pblock data to javascript readable data
let convertFunctions = {
- [FieldTypes.C_BOOL] : convertBool,
- [FieldTypes.C_TINYINT] : convertTinyint,
- [FieldTypes.C_SMALLINT] : convertSmallint,
- [FieldTypes.C_INT] : convertInt,
- [FieldTypes.C_BIGINT] : convertBigint,
- [FieldTypes.C_FLOAT] : convertFloat,
- [FieldTypes.C_DOUBLE] : convertDouble,
- [FieldTypes.C_BINARY] : convertBinary,
- [FieldTypes.C_TIMESTAMP] : convertTimestamp,
- [FieldTypes.C_NCHAR] : convertNchar
+ [FieldTypes.C_BOOL]: convertBool,
+ [FieldTypes.C_TINYINT]: convertTinyint,
+ [FieldTypes.C_SMALLINT]: convertSmallint,
+ [FieldTypes.C_INT]: convertInt,
+ [FieldTypes.C_BIGINT]: convertBigint,
+ [FieldTypes.C_FLOAT]: convertFloat,
+ [FieldTypes.C_DOUBLE]: convertDouble,
+ [FieldTypes.C_BINARY]: convertBinary,
+ [FieldTypes.C_TIMESTAMP]: convertTimestamp,
+ [FieldTypes.C_NCHAR]: convertNchar
}
// Define TaosField structure
var char_arr = ArrayType(ref.types.char);
var TaosField = Struct({
- 'name': char_arr,
- });
+ 'name': char_arr,
+});
TaosField.fields.name.type.size = 65;
TaosField.defineProperty('type', ref.types.char);
TaosField.defineProperty('bytes', ref.types.short);
@@ -183,7 +183,7 @@ TaosField.defineProperty('bytes', ref.types.short);
* @classdesc The CTaosInterface is the interface through which Node.JS communicates data back and forth with TDengine. It is not advised to
* access this class directly and use it unless you understand what these functions do.
*/
-function CTaosInterface (config = null, pass = false) {
+function CTaosInterface(config = null, pass = false) {
ref.types.char_ptr = ref.refType(ref.types.char);
ref.types.void_ptr = ref.refType(ref.types.void);
ref.types.void_ptr2 = ref.refType(ref.types.void_ptr);
@@ -196,64 +196,65 @@ function CTaosInterface (config = null, pass = false) {
taoslibname = 'libtaos';
}
this.libtaos = ffi.Library(taoslibname, {
- 'taos_options': [ ref.types.int, [ ref.types.int , ref.types.void_ptr ] ],
- 'taos_init': [ ref.types.void, [ ] ],
+ 'taos_options': [ref.types.int, [ref.types.int, ref.types.void_ptr]],
+ 'taos_init': [ref.types.void, []],
//TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port)
- 'taos_connect': [ ref.types.void_ptr, [ ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int ] ],
+ 'taos_connect': [ref.types.void_ptr, [ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int]],
//void taos_close(TAOS *taos)
- 'taos_close': [ ref.types.void, [ ref.types.void_ptr ] ],
- //int *taos_fetch_lengths(TAOS_RES *taos);
- 'taos_fetch_lengths': [ ref.types.void_ptr, [ ref.types.void_ptr ] ],
+ 'taos_close': [ref.types.void, [ref.types.void_ptr]],
+ //int *taos_fetch_lengths(TAOS_RES *res);
+ 'taos_fetch_lengths': [ref.types.void_ptr, [ref.types.void_ptr]],
//int taos_query(TAOS *taos, char *sqlstr)
- 'taos_query': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.char_ptr ] ],
- //int taos_affected_rows(TAOS *taos)
- 'taos_affected_rows': [ ref.types.int, [ ref.types.void_ptr] ],
+ 'taos_query': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr]],
+ //int taos_affected_rows(TAOS_RES *res)
+ 'taos_affected_rows': [ref.types.int, [ref.types.void_ptr]],
//int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)
- 'taos_fetch_block': [ ref.types.int, [ ref.types.void_ptr, ref.types.void_ptr] ],
+ 'taos_fetch_block': [ref.types.int, [ref.types.void_ptr, ref.types.void_ptr]],
//int taos_num_fields(TAOS_RES *res);
- 'taos_num_fields': [ ref.types.int, [ ref.types.void_ptr] ],
+ 'taos_num_fields': [ref.types.int, [ref.types.void_ptr]],
//TAOS_ROW taos_fetch_row(TAOS_RES *res)
//TAOS_ROW is void **, but we set the return type as a reference instead to get the row
- 'taos_fetch_row': [ ref.refType(ref.types.void_ptr2), [ ref.types.void_ptr ] ],
+ 'taos_fetch_row': [ref.refType(ref.types.void_ptr2), [ref.types.void_ptr]],
+ 'taos_print_row': [ref.types.int, [ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]],
//int taos_result_precision(TAOS_RES *res)
- 'taos_result_precision': [ ref.types.int, [ ref.types.void_ptr ] ],
+ 'taos_result_precision': [ref.types.int, [ref.types.void_ptr]],
//void taos_free_result(TAOS_RES *res)
- 'taos_free_result': [ ref.types.void, [ ref.types.void_ptr] ],
+ 'taos_free_result': [ref.types.void, [ref.types.void_ptr]],
//int taos_field_count(TAOS *taos)
- 'taos_field_count': [ ref.types.int, [ ref.types.void_ptr ] ],
+ 'taos_field_count': [ref.types.int, [ref.types.void_ptr]],
//TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)
- 'taos_fetch_fields': [ ref.refType(TaosField), [ ref.types.void_ptr ] ],
+ 'taos_fetch_fields': [ref.refType(TaosField), [ref.types.void_ptr]],
//int taos_errno(TAOS *taos)
- 'taos_errno': [ ref.types.int, [ ref.types.void_ptr] ],
+ 'taos_errno': [ref.types.int, [ref.types.void_ptr]],
//char *taos_errstr(TAOS *taos)
- 'taos_errstr': [ ref.types.char_ptr, [ ref.types.void_ptr] ],
+ 'taos_errstr': [ref.types.char_ptr, [ref.types.void_ptr]],
//void taos_stop_query(TAOS_RES *res);
- 'taos_stop_query': [ ref.types.void, [ ref.types.void_ptr] ],
+ 'taos_stop_query': [ref.types.void, [ref.types.void_ptr]],
//char *taos_get_server_info(TAOS *taos);
- 'taos_get_server_info': [ ref.types.char_ptr, [ ref.types.void_ptr ] ],
+ 'taos_get_server_info': [ref.types.char_ptr, [ref.types.void_ptr]],
//char *taos_get_client_info();
- 'taos_get_client_info': [ ref.types.char_ptr, [ ] ],
+ 'taos_get_client_info': [ref.types.char_ptr, []],
// ASYNC
// void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param)
- 'taos_query_a': [ ref.types.void, [ ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr ] ],
+ 'taos_query_a': [ref.types.void, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr]],
// void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
- 'taos_fetch_rows_a': [ ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr ]],
+ 'taos_fetch_rows_a': [ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr]],
// Subscription
//TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)
- 'taos_subscribe': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int] ],
+ 'taos_subscribe': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]],
// TAOS_RES *taos_consume(TAOS_SUB *tsub)
- 'taos_consume': [ ref.types.void_ptr, [ref.types.void_ptr] ],
+ 'taos_consume': [ref.types.void_ptr, [ref.types.void_ptr]],
//void taos_unsubscribe(TAOS_SUB *tsub);
- 'taos_unsubscribe': [ ref.types.void, [ ref.types.void_ptr ] ],
+ 'taos_unsubscribe': [ref.types.void, [ref.types.void_ptr]],
// Continuous Query
//TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
// int64_t stime, void *param, void (*callback)(void *));
- 'taos_open_stream': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr ] ],
+ 'taos_open_stream': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr]],
//void taos_close_stream(TAOS_STREAM *tstr);
- 'taos_close_stream': [ ref.types.void, [ ref.types.void_ptr ] ]
+ 'taos_close_stream': [ref.types.void, [ref.types.void_ptr]]
});
if (pass == false) {
@@ -264,7 +265,7 @@ function CTaosInterface (config = null, pass = false) {
try {
this._config = ref.allocCString(config);
}
- catch(err){
+ catch (err) {
throw "Attribute Error: config is expected as a str";
}
}
@@ -276,38 +277,38 @@ function CTaosInterface (config = null, pass = false) {
return this;
}
CTaosInterface.prototype.config = function config() {
- return this._config;
- }
-CTaosInterface.prototype.connect = function connect(host=null, user="root", password="taosdata", db=null, port=0) {
- let _host,_user,_password,_db,_port;
- try {
+ return this._config;
+}
+CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) {
+ let _host, _user, _password, _db, _port;
+ try {
_host = host != null ? ref.allocCString(host) : ref.alloc(ref.types.char_ptr, ref.NULL);
}
- catch(err) {
+ catch (err) {
throw "Attribute Error: host is expected as a str";
}
try {
_user = ref.allocCString(user)
}
- catch(err) {
+ catch (err) {
throw "Attribute Error: user is expected as a str";
}
try {
_password = ref.allocCString(password);
}
- catch(err) {
+ catch (err) {
throw "Attribute Error: password is expected as a str";
}
try {
_db = db != null ? ref.allocCString(db) : ref.alloc(ref.types.char_ptr, ref.NULL);
}
- catch(err) {
+ catch (err) {
throw "Attribute Error: db is expected as a str";
}
try {
_port = ref.alloc(ref.types.int, port);
}
- catch(err) {
+ catch (err) {
throw TypeError("port is expected as an int")
}
let connection = this.libtaos.taos_connect(_host, _user, _password, _db, _port);
@@ -324,10 +325,10 @@ CTaosInterface.prototype.close = function close(connection) {
console.log("Connection is closed");
}
CTaosInterface.prototype.query = function query(connection, sql) {
- return this.libtaos.taos_query(connection, ref.allocCString(sql));
+ return this.libtaos.taos_query(connection, ref.allocCString(sql));
}
-CTaosInterface.prototype.affectedRows = function affectedRows(connection) {
- return this.libtaos.taos_affected_rows(connection);
+CTaosInterface.prototype.affectedRows = function affectedRows(result) {
+ return this.libtaos.taos_affected_rows(result);
}
CTaosInterface.prototype.useResult = function useResult(result) {
@@ -337,8 +338,8 @@ CTaosInterface.prototype.useResult = function useResult(result) {
pfields = ref.reinterpret(pfields, this.fieldsCount(result) * 68, 0);
for (let i = 0; i < pfields.length; i += 68) {
//0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
- fields.push( {
- name: ref.readCString(ref.reinterpret(pfields,65,i)),
+ fields.push({
+ name: ref.readCString(ref.reinterpret(pfields, 65, i)),
type: pfields[i + 65],
bytes: pfields[i + 66]
})
@@ -347,11 +348,10 @@ CTaosInterface.prototype.useResult = function useResult(result) {
return fields;
}
CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
- //let pblock = ref.ref(ref.ref(ref.NULL)); // equal to our raw data
- let pblock = this.libtaos.taos_fetch_row(result);
- let num_of_rows = 1;
- if (ref.isNull(pblock) == true) {
- return {block:null, num_of_rows:0};
+ let pblock = ref.NULL_POINTER;
+ let num_of_rows = this.libtaos.taos_fetch_block(result, pblock);
+ if (ref.isNull(pblock.deref()) == true) {
+ return { block: null, num_of_rows: 0 };
}
var fieldL = this.libtaos.taos_fetch_lengths(result);
@@ -359,10 +359,10 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
let isMicro = (this.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO);
var fieldlens = [];
-
+
if (ref.isNull(fieldL) == false) {
- for (let i = 0; i < fields.length; i ++) {
- let plen = ref.reinterpret(fieldL, 4, i*4);
+ for (let i = 0; i < fields.length; i++) {
+ let plen = ref.reinterpret(fieldL, 4, i * 4);
let len = plen.readInt32LE(0);
fieldlens.push(len);
}
@@ -370,21 +370,23 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
let blocks = new Array(fields.length);
blocks.fill(null);
- //num_of_rows = Math.abs(num_of_rows);
+ num_of_rows = Math.abs(num_of_rows);
let offset = 0;
+ let ptr = pblock.deref();
+
for (let i = 0; i < fields.length; i++) {
- pdata = ref.reinterpret(pblock,8,i*8);
- if(ref.isNull(pdata.readPointer())){
- blocks[i] = new Array();
- }else{
- pdata = ref.ref(pdata.readPointer());
- if (!convertFunctions[fields[i]['type']] ) {
- throw new errors.DatabaseError("Invalid data type returned from database");
- }
- blocks[i] = convertFunctions[fields[i]['type']](pdata, 1, fieldlens[i], offset, isMicro);
- }
+ pdata = ref.reinterpret(ptr, 8, i * 8);
+ if (ref.isNull(pdata.readPointer())) {
+ blocks[i] = new Array();
+ } else {
+ pdata = ref.ref(pdata.readPointer());
+ if (!convertFunctions[fields[i]['type']]) {
+ throw new errors.DatabaseError("Invalid data type returned from database");
+ }
+ blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, isMicro);
+ }
}
- return {blocks: blocks, num_of_rows:Math.abs(num_of_rows)}
+ return { blocks: blocks, num_of_rows }
}
CTaosInterface.prototype.fetchRow = function fetchRow(result, fields) {
let row = this.libtaos.taos_fetch_row(result);
@@ -414,7 +416,7 @@ CTaosInterface.prototype.errStr = function errStr(result) {
// Async
CTaosInterface.prototype.query_a = function query_a(connection, sql, callback, param = ref.ref(ref.NULL)) {
// void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int), void *param)
- callback = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.int ], callback);
+ callback = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], callback);
this.libtaos.taos_query_a(connection, ref.allocCString(sql), callback, param);
return param;
}
@@ -439,46 +441,46 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
var fieldL = cti.libtaos.taos_fetch_lengths(result);
var fieldlens = [];
if (ref.isNull(fieldL) == false) {
-
- for (let i = 0; i < fields.length; i ++) {
- let plen = ref.reinterpret(fieldL, 8, i*8);
- let len = ref.get(plen,0,ref.types.int32);
+
+ for (let i = 0; i < fields.length; i++) {
+ let plen = ref.reinterpret(fieldL, 8, i * 8);
+ let len = ref.get(plen, 0, ref.types.int32);
fieldlens.push(len);
}
}
- if (numOfRows2 > 0){
+ if (numOfRows2 > 0) {
for (let i = 0; i < fields.length; i++) {
- if(ref.isNull(pdata.readPointer())){
- blocks[i] = new Array();
- }else{
- if (!convertFunctions[fields[i]['type']] ) {
- throw new errors.DatabaseError("Invalid data type returned from database");
- }
- let prow = ref.reinterpret(row,8,i*8);
- prow = prow.readPointer();
- prow = ref.ref(prow);
- blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
- //offset += fields[i]['bytes'] * numOfRows2;
- }
+ if (ref.isNull(pdata.readPointer())) {
+ blocks[i] = new Array();
+ } else {
+ if (!convertFunctions[fields[i]['type']]) {
+ throw new errors.DatabaseError("Invalid data type returned from database");
+ }
+ let prow = ref.reinterpret(row, 8, i * 8);
+ prow = prow.readPointer();
+ prow = ref.ref(prow);
+ blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
+ //offset += fields[i]['bytes'] * numOfRows2;
+ }
}
}
callback(param2, result2, numOfRows2, blocks);
}
- asyncCallbackWrapper = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.int ], asyncCallbackWrapper);
+ asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], asyncCallbackWrapper);
this.libtaos.taos_fetch_rows_a(result, asyncCallbackWrapper, param);
return param;
}
// Fetch field meta data by result handle
-CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) {
+CTaosInterface.prototype.fetchFields_a = function fetchFields_a(result) {
let pfields = this.fetchFields(result);
let pfieldscount = this.numFields(result);
let fields = [];
if (ref.isNull(pfields) == false) {
- pfields = ref.reinterpret(pfields, 68 * pfieldscount , 0);
+ pfields = ref.reinterpret(pfields, 68 * pfieldscount, 0);
for (let i = 0; i < pfields.length; i += 68) {
//0 - 64 = name //65 = type, 66 - 67 = bytes
- fields.push( {
- name: ref.readCString(ref.reinterpret(pfields,65,i)),
+ fields.push({
+ name: ref.readCString(ref.reinterpret(pfields, 65, i)),
type: pfields[i + 65],
bytes: pfields[i + 66]
})
@@ -488,7 +490,7 @@ CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) {
}
// Stop a query by result handle
CTaosInterface.prototype.stopQuery = function stopQuery(result) {
- if (result != null){
+ if (result != null) {
this.libtaos.taos_stop_query(result);
}
else {
@@ -509,13 +511,13 @@ CTaosInterface.prototype.subscribe = function subscribe(connection, restart, top
try {
sql = sql != null ? ref.allocCString(sql) : ref.alloc(ref.types.char_ptr, ref.NULL);
}
- catch(err) {
+ catch (err) {
throw "Attribute Error: sql is expected as a str";
}
try {
topic = topic != null ? ref.allocCString(topic) : ref.alloc(ref.types.char_ptr, ref.NULL);
}
- catch(err) {
+ catch (err) {
throw TypeError("topic is expected as a str");
}
@@ -539,8 +541,8 @@ CTaosInterface.prototype.consume = function consume(subscription) {
pfields = ref.reinterpret(pfields, this.numFields(result) * 68, 0);
for (let i = 0; i < pfields.length; i += 68) {
//0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
- fields.push( {
- name: ref.readCString(ref.reinterpret(pfields,64,i)),
+ fields.push({
+ name: ref.readCString(ref.reinterpret(pfields, 64, i)),
bytes: pfields[i + 64],
type: pfields[i + 66]
})
@@ -548,7 +550,7 @@ CTaosInterface.prototype.consume = function consume(subscription) {
}
let data = [];
- while(true) {
+ while (true) {
let { blocks, num_of_rows } = this.fetchBlock(result, fields);
if (num_of_rows == 0) {
break;
@@ -559,7 +561,7 @@ CTaosInterface.prototype.consume = function consume(subscription) {
for (let j = 0; j < fields.length; j++) {
rowBlock[j] = blocks[j][i];
}
- data[data.length-1] = (rowBlock);
+ data[data.length - 1] = (rowBlock);
}
}
return { data: data, fields: fields, result: result };
@@ -570,11 +572,11 @@ CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) {
}
// Continuous Query
-CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime,stoppingCallback, param = ref.ref(ref.NULL)) {
+CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime, stoppingCallback, param = ref.ref(ref.NULL)) {
try {
sql = ref.allocCString(sql);
}
- catch(err) {
+ catch (err) {
throw "Attribute Error: sql string is expected as a str";
}
var cti = this;
@@ -587,7 +589,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
let offset = 0;
if (numOfRows2 > 0) {
for (let i = 0; i < fields.length; i++) {
- if (!convertFunctions[fields[i]['type']] ) {
+ if (!convertFunctions[fields[i]['type']]) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro);
@@ -596,8 +598,8 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
}
callback(param2, result2, blocks, fields);
}
- asyncCallbackWrapper = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2) ], asyncCallbackWrapper);
- asyncStoppingCallbackWrapper = ffi.Callback( ref.types.void, [ ref.types.void_ptr ], stoppingCallback);
+ asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2)], asyncCallbackWrapper);
+ asyncStoppingCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr], stoppingCallback);
let streamHandle = this.libtaos.taos_open_stream(connection, sql, asyncCallbackWrapper, stime, param, asyncStoppingCallbackWrapper);
if (ref.isNull(streamHandle)) {
throw new errors.TDError('Failed to open a stream with TDengine');
diff --git a/src/connector/nodejs/nodetaos/cursor.js b/src/connector/nodejs/nodetaos/cursor.js
index e18e6c2500..f879d89d48 100644
--- a/src/connector/nodejs/nodetaos/cursor.js
+++ b/src/connector/nodejs/nodetaos/cursor.js
@@ -1,7 +1,7 @@
const ref = require('ref-napi');
require('./globalfunc.js')
const CTaosInterface = require('./cinterface')
-const errors = require ('./error')
+const errors = require('./error')
const TaosQuery = require('./taosquery')
const { PerformanceObserver, performance } = require('perf_hooks');
module.exports = TDengineCursor;
@@ -22,7 +22,7 @@ module.exports = TDengineCursor;
* @property {fields} - Array of the field objects in order from left to right of the latest data retrieved
* @since 1.0.0
*/
-function TDengineCursor(connection=null) {
+function TDengineCursor(connection = null) {
//All parameters are store for sync queries only.
this._rowcount = -1;
this._connection = null;
@@ -91,7 +91,7 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
return null;
}
- if (typeof options == 'function') {
+ if (typeof options == 'function') {
callback = options;
}
if (typeof options != 'object') options = {}
@@ -144,10 +144,10 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
}
TDengineCursor.prototype._createAffectedResponse = function (num, time) {
- return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)";
+ return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)";
}
TDengineCursor.prototype._createSetResponse = function (num, time) {
- return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)";
+ return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)";
}
TDengineCursor.prototype.executemany = function executemany() {
@@ -176,27 +176,22 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
throw new errors.OperationalError("Invalid use of fetchall, either result or fields from query are null. First execute a query first");
}
- let data = [];
+ let num_of_rows = this._chandle.affectedRows(this._result);
+ let data = new Array(num_of_rows);
+
this._rowcount = 0;
- //let nodetime = 0;
+
let time = 0;
const obs = new PerformanceObserver((items) => {
time += items.getEntries()[0].duration;
performance.clearMarks();
});
- /*
- const obs2 = new PerformanceObserver((items) => {
- nodetime += items.getEntries()[0].duration;
- performance.clearMarks();
- });
- obs2.observe({ entryTypes: ['measure'] });
- performance.mark('nodea');
- */
obs.observe({ entryTypes: ['measure'] });
performance.mark('A');
- while(true) {
-
+ while (true) {
let blockAndRows = this._chandle.fetchBlock(this._result, this._fields);
+ // console.log(blockAndRows);
+ // break;
let block = blockAndRows.blocks;
let num_of_rows = blockAndRows.num_of_rows;
if (num_of_rows == 0) {
@@ -205,22 +200,24 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
this._rowcount += num_of_rows;
let numoffields = this._fields.length;
for (let i = 0; i < num_of_rows; i++) {
- data.push([]);
-
+ // data.push([]);
+
let rowBlock = new Array(numoffields);
for (let j = 0; j < numoffields; j++) {
rowBlock[j] = block[j][i];
}
- data[data.length-1] = (rowBlock);
+ data[this._rowcount - num_of_rows + i] = (rowBlock);
+ // data.push(rowBlock);
}
}
+
performance.mark('B');
performance.measure('query', 'A', 'B');
let response = this._createSetResponse(this._rowcount, time)
console.log(response);
- // this._connection._clearResultSet();
+ // this._connection._clearResultSet();
let fields = this.fields;
this._reset_result();
this.data = data;
@@ -239,12 +236,12 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
* @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query
* @since 1.0.0
*/
-TDengineCursor.prototype.execute_a = function execute_a (operation, options, callback, param) {
+TDengineCursor.prototype.execute_a = function execute_a(operation, options, callback, param) {
if (operation == undefined) {
throw new errors.ProgrammingError('No operation passed as argument');
return null;
}
- if (typeof options == 'function') {
+ if (typeof options == 'function') {
//we expect the parameter after callback to be param
param = callback;
callback = options;
@@ -265,14 +262,14 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
}
if (resCode >= 0) {
-// let fieldCount = cr._chandle.numFields(res2);
-// if (fieldCount == 0) {
-// //cr._chandle.freeResult(res2);
-// return res2;
-// }
-// else {
-// return res2;
-// }
+ // let fieldCount = cr._chandle.numFields(res2);
+ // if (fieldCount == 0) {
+ // //cr._chandle.freeResult(res2);
+ // return res2;
+ // }
+ // else {
+ // return res2;
+ // }
return res2;
}
@@ -317,7 +314,7 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
* })
*/
TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callback, param = {}) {
- if (typeof options == 'function') {
+ if (typeof options == 'function') {
//we expect the parameter after callback to be param
param = callback;
callback = options;
@@ -360,17 +357,17 @@ TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callb
for (let k = 0; k < fields.length; k++) {
rowBlock[k] = block[k][j];
}
- data[data.length-1] = rowBlock;
+ data[data.length - 1] = rowBlock;
}
}
cr._chandle.freeResult(result2); // free result, avoid seg faults and mem leaks!
- callback(param2, result2, numOfRows2, {data:data,fields:fields});
+ callback(param2, result2, numOfRows2, { data: data, fields: fields });
}
}
ref.writeObject(buf, 0, param);
param = this._chandle.fetch_rows_a(result, asyncCallbackWrapper, buf); //returned param
- return {param:param,result:result};
+ return { param: param, result: result };
}
/**
* Stop a query given the result handle.
@@ -428,7 +425,7 @@ TDengineCursor.prototype.subscribe = function subscribe(config) {
*/
TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) {
while (true) {
- let { data, fields, result} = this._chandle.consume(subscription);
+ let { data, fields, result } = this._chandle.consume(subscription);
callback(data, fields, result);
}
}
@@ -450,30 +447,30 @@ TDengineCursor.prototype.unsubscribe = function unsubscribe(subscription) {
* @return {Buffer} A buffer pointing to the stream handle
* @since 1.3.0
*/
- TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) {
- let buf = ref.alloc('Object');
- ref.writeObject(buf, 0, param);
+TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) {
+ let buf = ref.alloc('Object');
+ ref.writeObject(buf, 0, param);
- let asyncCallbackWrapper = function (param2, result2, blocks, fields) {
- let data = [];
- let num_of_rows = blocks[0].length;
- for (let j = 0; j < num_of_rows; j++) {
- data.push([]);
- let rowBlock = new Array(fields.length);
- for (let k = 0; k < fields.length; k++) {
- rowBlock[k] = blocks[k][j];
- }
- data[data.length-1] = rowBlock;
- }
- callback(param2, result2, blocks, fields);
- }
- return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf);
- }
- /**
- * Close a stream
- * @param {Buffer} - A buffer pointing to the handle of the stream to be closed
- * @since 1.3.0
- */
- TDengineCursor.prototype.closeStream = function closeStream(stream) {
- this._chandle.closeStream(stream);
- }
+ let asyncCallbackWrapper = function (param2, result2, blocks, fields) {
+ let data = [];
+ let num_of_rows = blocks[0].length;
+ for (let j = 0; j < num_of_rows; j++) {
+ data.push([]);
+ let rowBlock = new Array(fields.length);
+ for (let k = 0; k < fields.length; k++) {
+ rowBlock[k] = blocks[k][j];
+ }
+ data[data.length - 1] = rowBlock;
+ }
+ callback(param2, result2, blocks, fields);
+ }
+ return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf);
+}
+/**
+ * Close a stream
+ * @param {Buffer} - A buffer pointing to the handle of the stream to be closed
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.closeStream = function closeStream(stream) {
+ this._chandle.closeStream(stream);
+}
diff --git a/src/connector/nodejs/package-lock.json b/src/connector/nodejs/package-lock.json
deleted file mode 100644
index 9ca174ccd1..0000000000
--- a/src/connector/nodejs/package-lock.json
+++ /dev/null
@@ -1,285 +0,0 @@
-{
- "name": "td2.0-connector",
- "version": "2.0.6",
- "lockfileVersion": 1,
- "requires": true,
- "dependencies": {
- "array-index": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/array-index/-/array-index-1.0.0.tgz",
- "integrity": "sha1-7FanSe4QPk4Ix5C5w1PfFgVbl/k=",
- "requires": {
- "debug": "^2.2.0",
- "es6-symbol": "^3.0.2"
- },
- "dependencies": {
- "debug": {
- "version": "2.6.9",
- "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
- "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
- "requires": {
- "ms": "2.0.0"
- }
- },
- "ms": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
- "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
- }
- }
- },
- "d": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz",
- "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==",
- "requires": {
- "es5-ext": "^0.10.50",
- "type": "^1.0.1"
- }
- },
- "debug": {
- "version": "4.3.1",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
- "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
- "requires": {
- "ms": "2.1.2"
- }
- },
- "es5-ext": {
- "version": "0.10.53",
- "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz",
- "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==",
- "requires": {
- "es6-iterator": "~2.0.3",
- "es6-symbol": "~3.1.3",
- "next-tick": "~1.0.0"
- }
- },
- "es6-iterator": {
- "version": "2.0.3",
- "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz",
- "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=",
- "requires": {
- "d": "1",
- "es5-ext": "^0.10.35",
- "es6-symbol": "^3.1.1"
- }
- },
- "es6-symbol": {
- "version": "3.1.3",
- "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz",
- "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==",
- "requires": {
- "d": "^1.0.1",
- "ext": "^1.1.2"
- }
- },
- "ext": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz",
- "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==",
- "requires": {
- "type": "^2.0.0"
- },
- "dependencies": {
- "type": {
- "version": "2.1.0",
- "resolved": "https://registry.npmjs.org/type/-/type-2.1.0.tgz",
- "integrity": "sha512-G9absDWvhAWCV2gmF1zKud3OyC61nZDwWvBL2DApaVFogI07CprggiQAOOjvp2NRjYWFzPyu7vwtDrQFq8jeSA=="
- }
- }
- },
- "ffi-napi": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/ffi-napi/-/ffi-napi-3.1.0.tgz",
- "integrity": "sha512-EsHO+sP2p/nUC/3l/l8m9niee1BLm4asUFDzkkBGR4kYVgp2KqdAYUomZhkKtzim4Fq7mcYHjpUaIHsMqs+E1g==",
- "requires": {
- "debug": "^4.1.1",
- "get-uv-event-loop-napi-h": "^1.0.5",
- "node-addon-api": "^2.0.0",
- "node-gyp-build": "^4.2.1",
- "ref-napi": "^2.0.1",
- "ref-struct-di": "^1.1.0"
- },
- "dependencies": {
- "ref-napi": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-2.1.2.tgz",
- "integrity": "sha512-aFl+vrIuLWUXMUTQGAwGAuSNLX3Ub5W3iVP8b7KyFFZUdn4+i4U1TXXTop0kCTUfGNu8glBGVz4lowkwMcPVVA==",
- "requires": {
- "debug": "^4.1.1",
- "get-symbol-from-current-process-h": "^1.0.2",
- "node-addon-api": "^2.0.0",
- "node-gyp-build": "^4.2.1"
- }
- }
- }
- },
- "get-symbol-from-current-process-h": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/get-symbol-from-current-process-h/-/get-symbol-from-current-process-h-1.0.2.tgz",
- "integrity": "sha512-syloC6fsCt62ELLrr1VKBM1ggOpMdetX9hTrdW77UQdcApPHLmf7CI7OKcN1c9kYuNxKcDe4iJ4FY9sX3aw2xw=="
- },
- "get-uv-event-loop-napi-h": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/get-uv-event-loop-napi-h/-/get-uv-event-loop-napi-h-1.0.6.tgz",
- "integrity": "sha512-t5c9VNR84nRoF+eLiz6wFrEp1SE2Acg0wS+Ysa2zF0eROes+LzOfuTaVHxGy8AbS8rq7FHEJzjnCZo1BupwdJg==",
- "requires": {
- "get-symbol-from-current-process-h": "^1.0.1"
- }
- },
- "ms": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
- "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
- },
- "next-tick": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz",
- "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw="
- },
- "node-addon-api": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-2.0.2.tgz",
- "integrity": "sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA=="
- },
- "node-gyp-build": {
- "version": "4.2.3",
- "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.2.3.tgz",
- "integrity": "sha512-MN6ZpzmfNCRM+3t57PTJHgHyw/h4OWnZ6mR8P5j/uZtqQr46RRuDE/P+g3n0YR/AiYXeWixZZzaip77gdICfRg=="
- },
- "ref-array-napi": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/ref-array-napi/-/ref-array-napi-1.2.1.tgz",
- "integrity": "sha512-jQp2WWSucmxkqVfoNfm7yDlDeGu3liAbzqfwjNybL80ooLOCnCZpAK2woDInY+lxNOK/VlIVSqeDEYb4gVPuNQ==",
- "requires": {
- "array-index": "1",
- "debug": "2",
- "ref-napi": "^1.4.2"
- },
- "dependencies": {
- "debug": {
- "version": "2.6.9",
- "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
- "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
- "requires": {
- "ms": "2.0.0"
- }
- },
- "ms": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
- "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
- },
- "ref-napi": {
- "version": "1.5.2",
- "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz",
- "integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==",
- "requires": {
- "debug": "^3.1.0",
- "node-addon-api": "^2.0.0",
- "node-gyp-build": "^4.2.1"
- },
- "dependencies": {
- "debug": {
- "version": "3.2.7",
- "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
- "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
- "requires": {
- "ms": "^2.1.1"
- }
- },
- "ms": {
- "version": "2.1.3",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
- "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
- }
- }
- }
- }
- },
- "ref-napi": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-3.0.1.tgz",
- "integrity": "sha512-W3rcb0E+tlO9u9ySFnX5vifInwwPGToOfFgTZUHJBNiOBsW0NNvgHz2zJN7ctABo/2yIlgdPQUvuqqfORIF4LA==",
- "requires": {
- "debug": "^4.1.1",
- "get-symbol-from-current-process-h": "^1.0.2",
- "node-addon-api": "^2.0.0",
- "node-gyp-build": "^4.2.1"
- }
- },
- "ref-struct-di": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/ref-struct-di/-/ref-struct-di-1.1.1.tgz",
- "integrity": "sha512-2Xyn/0Qgz89VT+++WP0sTosdm9oeowLP23wRJYhG4BFdMUrLj3jhwHZNEytYNYgtPKLNTP3KJX4HEgBvM1/Y2g==",
- "requires": {
- "debug": "^3.1.0"
- },
- "dependencies": {
- "debug": {
- "version": "3.2.7",
- "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
- "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
- "requires": {
- "ms": "^2.1.1"
- }
- }
- }
- },
- "ref-struct-napi": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/ref-struct-napi/-/ref-struct-napi-1.1.1.tgz",
- "integrity": "sha512-YgS5/d7+kT5zgtySYI5ieH0hREdv+DabgDvoczxsui0f9VLm0rrDcWEj4DHKehsH+tJnVMsLwuyctWgvdEcVRw==",
- "requires": {
- "debug": "2",
- "ref-napi": "^1.4.2"
- },
- "dependencies": {
- "debug": {
- "version": "2.6.9",
- "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
- "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
- "requires": {
- "ms": "2.0.0"
- }
- },
- "ms": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
- "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
- },
- "ref-napi": {
- "version": "1.5.2",
- "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz",
- "integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==",
- "requires": {
- "debug": "^3.1.0",
- "node-addon-api": "^2.0.0",
- "node-gyp-build": "^4.2.1"
- },
- "dependencies": {
- "debug": {
- "version": "3.2.7",
- "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
- "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
- "requires": {
- "ms": "^2.1.1"
- }
- },
- "ms": {
- "version": "2.1.3",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
- "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
- }
- }
- }
- }
- },
- "type": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz",
- "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg=="
- }
- }
-}
diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json
index b39ce2c17d..d21b62108b 100644
--- a/src/connector/nodejs/package.json
+++ b/src/connector/nodejs/package.json
@@ -1,6 +1,6 @@
{
"name": "td2.0-connector",
- "version": "2.0.6",
+ "version": "2.0.7",
"description": "A Node.js connector for TDengine.",
"main": "tdengine.js",
"directories": {
diff --git a/src/connector/odbc/examples/c/main.c b/src/connector/odbc/examples/c/main.c
index e36c75688e..3ad32dbe53 100644
--- a/src/connector/odbc/examples/c/main.c
+++ b/src/connector/odbc/examples/c/main.c
@@ -18,8 +18,8 @@
#define CHK_TEST(statement) \
do { \
D("testing: %s", #statement); \
- int r = (statement); \
- if (r) { \
+ int _r = (statement); \
+ if (_r) { \
D("testing failed: %s", #statement); \
return 1; \
} \
@@ -181,7 +181,7 @@ static int do_statement(SQLHSTMT stmt, const char *statement) {
r = traverse_cols(stmt, cols);
char buf[4096];
while (1) {
- SQLRETURN r = SQLFetch(stmt);
+ r = SQLFetch(stmt);
if (r==SQL_NO_DATA) break;
CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "");
for (size_t i=0; itsdb_params = tsdb_params;
for (int i=0; i name (mondatory)
- > type_code (mondatory)
- > display_size
- > internal_size
- > precision
- > scale
- > null_ok
-
- This attribute will be None for operations that do not return rows or
- if the cursor has not had an operation invoked via the .execute*() method yet.
-
- .rowcount:This read-only attribute specifies the number of rows that the last
- .execute*() produced (for DQL statements like SELECT) or affected
- """
-
- def __init__(self, connection=None):
- self._description = []
- self._rowcount = -1
- self._connection = None
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
- self._logfile = ""
-
- if connection is not None:
- self._connection = connection
-
- def __iter__(self):
- return self
-
- def next(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetch iterator")
-
- if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
- if self._block_rows == 0:
- raise StopIteration
- self._block = list(map(tuple, zip(*block)))
- self._block_iter = 0
-
- data = self._block[self._block_iter]
- self._block_iter += 1
-
- return data
-
- @property
- def description(self):
- """Return the description of the object.
- """
- return self._description
-
- @property
- def rowcount(self):
- """Return the rowcount of the object
- """
- return self._rowcount
-
- @property
- def affected_rows(self):
- """Return the affected_rows of the object
- """
- return self._affected_rows
-
- def callproc(self, procname, *args):
- """Call a stored database procedure with the given name.
-
- Void functionality since no stored procedures.
- """
- pass
-
- def log(self, logfile):
- self._logfile = logfile
-
- def close(self):
- """Close the cursor.
- """
- if self._connection is None:
- return False
-
- self._reset_result()
- self._connection = None
-
- return True
-
- def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
- if not operation:
- return None
-
- if not self._connection:
- # TODO : change the exception raised here
- raise ProgrammingError("Cursor is not connected")
-
- self._reset_result()
-
- stmt = operation
- if params is not None:
- pass
-
- # global querySeqNum
- # querySeqNum += 1
- # localSeqNum = querySeqNum # avoid raice condition
- # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
- self._result = CTaosInterface.query(self._connection._conn, stmt)
- # print(" << Query ({}) Exec Done".format(localSeqNum))
- if (self._logfile):
- with open(self._logfile, "a") as logfile:
- logfile.write("%s;\n" % operation)
-
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(
- self._result)
- return self._handle_result()
- else:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
-
- def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
- pass
-
- def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
- pass
-
- def fetchmany(self):
- pass
-
- def istype(self, col, dataType):
- if (dataType.upper() == "BOOL"):
- if (self._description[col][1] == FieldType.C_BOOL):
- return True
- if (dataType.upper() == "TINYINT"):
- if (self._description[col][1] == FieldType.C_TINYINT):
- return True
- if (dataType.upper() == "TINYINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
- return True
- if (dataType.upper() == "SMALLINT"):
- if (self._description[col][1] == FieldType.C_SMALLINT):
- return True
- if (dataType.upper() == "SMALLINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
- return True
- if (dataType.upper() == "INT"):
- if (self._description[col][1] == FieldType.C_INT):
- return True
- if (dataType.upper() == "INT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
- return True
- if (dataType.upper() == "BIGINT"):
- if (self._description[col][1] == FieldType.C_BIGINT):
- return True
- if (dataType.upper() == "BIGINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
- return True
- if (dataType.upper() == "FLOAT"):
- if (self._description[col][1] == FieldType.C_FLOAT):
- return True
- if (dataType.upper() == "DOUBLE"):
- if (self._description[col][1] == FieldType.C_DOUBLE):
- return True
- if (dataType.upper() == "BINARY"):
- if (self._description[col][1] == FieldType.C_BINARY):
- return True
- if (dataType.upper() == "TIMESTAMP"):
- if (self._description[col][1] == FieldType.C_TIMESTAMP):
- return True
- if (dataType.upper() == "NCHAR"):
- if (self._description[col][1] == FieldType.C_NCHAR):
- return True
-
- return False
-
- def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def fetchall(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def nextset(self):
- """
- """
- pass
-
- def setinputsize(self, sizes):
- pass
-
- def setutputsize(self, size, column=None):
- pass
-
- def _reset_result(self):
- """Reset the result to unused version.
- """
- self._description = []
- self._rowcount = -1
- if self._result is not None:
- CTaosInterface.freeResult(self._result)
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
-
- def _handle_result(self):
- """Handle the return result from query.
- """
- self._description = []
- for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
-
- return self._result
diff --git a/src/connector/python/linux/python3/LICENSE b/src/connector/python/linux/python3/LICENSE
deleted file mode 100644
index 79a9d73086..0000000000
--- a/src/connector/python/linux/python3/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
- Copyright (c) 2019 TAOS Data, Inc.
-
-This program is free software: you can use, redistribute, and/or modify
-it under the terms of the GNU Affero General Public License, version 3
-or later ("AGPL"), as published by the Free Software Foundation.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
diff --git a/src/connector/python/linux/python3/README.md b/src/connector/python/linux/python3/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/linux/python3/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/linux/python3/setup.py b/src/connector/python/linux/python3/setup.py
deleted file mode 100644
index 296e79b973..0000000000
--- a/src/connector/python/linux/python3/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.7",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 3",
- "Operating System :: Linux",
- ],
-)
diff --git a/src/connector/python/linux/python3/taos/__init__.py b/src/connector/python/linux/python3/taos/__init__.py
deleted file mode 100644
index 9732635738..0000000000
--- a/src/connector/python/linux/python3/taos/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
-
-# Globals
-threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
-
-
-def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
-
- Current supporting keyword parameters:
- @dsn: Data source name as string
- @user: Username as string(optional)
- @password: Password as string(optional)
- @host: Hostname(optional)
- @database: Database name(optional)
-
- @rtype: TDengineConnector
- """
- return TDengineConnection(*args, **kwargs)
diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/linux/python3/taos/cinterface.py
deleted file mode 100644
index 4367947341..0000000000
--- a/src/connector/python/linux/python3/taos/cinterface.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
-
-
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
-
-
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
-
-
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
-
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
-
-
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
-
-
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
-
-
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
-
-
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
-
-
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
-
-
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
-
-
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
-
-
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
-
-
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
-
-
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
-
-
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
-
- return res
-
-
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
-
-# C interface class
-
-
-class CTaosInterface(object):
-
- libtaos = ctypes.CDLL('libtaos.so')
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
-
- def __init__(self, config=None):
- '''
- Function to initialize the class
- @host : str, hostname to connect
- @user : str, username to connect to server
- @password : str, password to connect to server
- @db : str, default db to use when log in
- @config : str, config directory
-
- @rtype : None
- '''
- if config is None:
- self._config = ctypes.c_char_p(None)
- else:
- try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
- except AttributeError:
- raise AttributeError("config is expected as a str")
-
- if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
-
- CTaosInterface.libtaos.taos_init()
-
- @property
- def config(self):
- """ Get current config
- """
- return self._config
-
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
- Function to connect to server
-
- @rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
- @rtype: 0 on success and -1 on failure
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
- @restart boolean,
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
- """
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
-
-
-if __name__ == '__main__':
- cinter = CTaosInterface()
- conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
-
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
-
- fields = CTaosInterface.useResult(result)
-
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
-
- print(data)
-
- cinter.freeResult(result)
- cinter.close(conn)
diff --git a/src/connector/python/linux/python3/taos/connection.py b/src/connector/python/linux/python3/taos/connection.py
deleted file mode 100644
index f6c395342c..0000000000
--- a/src/connector/python/linux/python3/taos/connection.py
+++ /dev/null
@@ -1,95 +0,0 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
-
-
-class TDengineConnection(object):
- """ TDengine connection object
- """
-
- def __init__(self, *args, **kwargs):
- self._conn = None
- self._host = None
- self._user = "root"
- self._password = "taosdata"
- self._database = None
- self._port = 0
- self._config = None
- self._chandle = None
-
- self.config(**kwargs)
-
- def config(self, **kwargs):
- # host
- if 'host' in kwargs:
- self._host = kwargs['host']
-
- # user
- if 'user' in kwargs:
- self._user = kwargs['user']
-
- # password
- if 'password' in kwargs:
- self._password = kwargs['password']
-
- # database
- if 'database' in kwargs:
- self._database = kwargs['database']
-
- # port
- if 'port' in kwargs:
- self._port = kwargs['port']
-
- # config
- if 'config' in kwargs:
- self._config = kwargs['config']
-
- self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
-
- def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
-
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
- if self._conn is None:
- return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
-
- def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
-
- def commit(self):
- """Commit any pending transaction to the database.
-
- Since TDengine do not support transactions, the implement is void functionality.
- """
- pass
-
- def rollback(self):
- """Void functionality
- """
- pass
-
- def clear_result_set(self):
- """Clear unused result set on this connection.
- """
- pass
-
-
-if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
- conn.close()
- print("Hello world")
diff --git a/src/connector/python/linux/python3/taos/constants.py b/src/connector/python/linux/python3/taos/constants.py
deleted file mode 100644
index 93466f5184..0000000000
--- a/src/connector/python/linux/python3/taos/constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Constants in TDengine python
-"""
-
-from .dbapi import *
-
-
-class FieldType(object):
- """TDengine Field Types
- """
- # type_code
- C_NULL = 0
- C_BOOL = 1
- C_TINYINT = 2
- C_SMALLINT = 3
- C_INT = 4
- C_BIGINT = 5
- C_FLOAT = 6
- C_DOUBLE = 7
- C_BINARY = 8
- C_TIMESTAMP = 9
- C_NCHAR = 10
- C_TINYINT_UNSIGNED = 11
- C_SMALLINT_UNSIGNED = 12
- C_INT_UNSIGNED = 13
- C_BIGINT_UNSIGNED = 14
- # NULL value definition
- # NOTE: These values should change according to C definition in tsdb.h
- C_BOOL_NULL = 0x02
- C_TINYINT_NULL = -128
- C_TINYINT_UNSIGNED_NULL = 255
- C_SMALLINT_NULL = -32768
- C_SMALLINT_UNSIGNED_NULL = 65535
- C_INT_NULL = -2147483648
- C_INT_UNSIGNED_NULL = 4294967295
- C_BIGINT_NULL = -9223372036854775808
- C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
- # Timestamp precision definition
- C_TIMESTAMP_MILLI = 0
- C_TIMESTAMP_MICRO = 1
diff --git a/src/connector/python/linux/python3/taos/dbapi.py b/src/connector/python/linux/python3/taos/dbapi.py
deleted file mode 100644
index 594681ada9..0000000000
--- a/src/connector/python/linux/python3/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
- def __com__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DataFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
diff --git a/src/connector/python/linux/python3/taos/error.py b/src/connector/python/linux/python3/taos/error.py
deleted file mode 100644
index c584badce8..0000000000
--- a/src/connector/python/linux/python3/taos/error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Python exceptions
-"""
-
-
-class Error(Exception):
- def __init__(self, msg=None, errno=None):
- self.msg = msg
- self._full_msg = self.msg
- self.errno = errno
-
- def __str__(self):
- return self._full_msg
-
-
-class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
- pass
-
-
-class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
- pass
-
-
-class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
- pass
-
-
-class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
- pass
-
-
-class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
- pass
-
-
-class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
- pass
-
-
-class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
- pass
-
-
-class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
- pass
-
-
-class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
- pass
diff --git a/src/connector/python/linux/python3/taos/subscription.py b/src/connector/python/linux/python3/taos/subscription.py
deleted file mode 100644
index 270d9de092..0000000000
--- a/src/connector/python/linux/python3/taos/subscription.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-
-
-class TDengineSubscription(object):
- """TDengine subscription object
- """
-
- def __init__(self, sub):
- self._sub = sub
-
- def consume(self):
- """Consume rows of a subscription
- """
- if self._sub is None:
- raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
-
- def close(self, keepProgress=True):
- """Close the Subscription.
- """
- if self._sub is None:
- return False
-
- CTaosInterface.unsubscribe(self._sub, keepProgress)
- return True
-
-
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(True, "test", "select * from meters;", 1000)
-
- for i in range(0, 10):
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
diff --git a/src/connector/python/osx/python3/LICENSE b/src/connector/python/osx/python3/LICENSE
deleted file mode 100644
index 79a9d73086..0000000000
--- a/src/connector/python/osx/python3/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
- Copyright (c) 2019 TAOS Data, Inc.
-
-This program is free software: you can use, redistribute, and/or modify
-it under the terms of the GNU Affero General Public License, version 3
-or later ("AGPL"), as published by the Free Software Foundation.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
diff --git a/src/connector/python/osx/python3/README.md b/src/connector/python/osx/python3/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/osx/python3/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/osx/python3/setup.py b/src/connector/python/osx/python3/setup.py
deleted file mode 100644
index 9bce1a976f..0000000000
--- a/src/connector/python/osx/python3/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.7",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 3",
- "Operating System :: MacOS X",
- ],
-)
diff --git a/src/connector/python/osx/python3/taos/__init__.py b/src/connector/python/osx/python3/taos/__init__.py
deleted file mode 100644
index 9732635738..0000000000
--- a/src/connector/python/osx/python3/taos/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
-
-# Globals
-threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
-
-
-def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
-
- Current supporting keyword parameters:
- @dsn: Data source name as string
- @user: Username as string(optional)
- @password: Password as string(optional)
- @host: Hostname(optional)
- @database: Database name(optional)
-
- @rtype: TDengineConnector
- """
- return TDengineConnection(*args, **kwargs)
diff --git a/src/connector/python/osx/python3/taos/cinterface.py b/src/connector/python/osx/python3/taos/cinterface.py
deleted file mode 100644
index dca9bd42e8..0000000000
--- a/src/connector/python/osx/python3/taos/cinterface.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
-
-
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
-
-
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
-
-
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
-
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
-
-
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
-
-
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
-
-
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
-
-
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
-
-
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
-
-
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
-
-
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
-
-
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
-
-
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
-
-
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
-
-
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
-
- return res
-
-
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
-
-# C interface class
-
-
-class CTaosInterface(object):
-
- libtaos = ctypes.CDLL('libtaos.dylib')
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
-
- def __init__(self, config=None):
- '''
- Function to initialize the class
- @host : str, hostname to connect
- @user : str, username to connect to server
- @password : str, password to connect to server
- @db : str, default db to use when log in
- @config : str, config directory
-
- @rtype : None
- '''
- if config is None:
- self._config = ctypes.c_char_p(None)
- else:
- try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
- except AttributeError:
- raise AttributeError("config is expected as a str")
-
- if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
-
- CTaosInterface.libtaos.taos_init()
-
- @property
- def config(self):
- """ Get current config
- """
- return self._config
-
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
- Function to connect to server
-
- @rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
- @rtype: 0 on success and -1 on failure
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
- @restart boolean,
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
- """
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
-
-
-if __name__ == '__main__':
- cinter = CTaosInterface()
- conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
-
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
-
- fields = CTaosInterface.useResult(result)
-
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
-
- print(data)
-
- cinter.freeResult(result)
- cinter.close(conn)
diff --git a/src/connector/python/osx/python3/taos/connection.py b/src/connector/python/osx/python3/taos/connection.py
deleted file mode 100644
index f6c395342c..0000000000
--- a/src/connector/python/osx/python3/taos/connection.py
+++ /dev/null
@@ -1,95 +0,0 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
-
-
-class TDengineConnection(object):
- """ TDengine connection object
- """
-
- def __init__(self, *args, **kwargs):
- self._conn = None
- self._host = None
- self._user = "root"
- self._password = "taosdata"
- self._database = None
- self._port = 0
- self._config = None
- self._chandle = None
-
- self.config(**kwargs)
-
- def config(self, **kwargs):
- # host
- if 'host' in kwargs:
- self._host = kwargs['host']
-
- # user
- if 'user' in kwargs:
- self._user = kwargs['user']
-
- # password
- if 'password' in kwargs:
- self._password = kwargs['password']
-
- # database
- if 'database' in kwargs:
- self._database = kwargs['database']
-
- # port
- if 'port' in kwargs:
- self._port = kwargs['port']
-
- # config
- if 'config' in kwargs:
- self._config = kwargs['config']
-
- self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
-
- def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
-
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
- if self._conn is None:
- return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
-
- def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
-
- def commit(self):
- """Commit any pending transaction to the database.
-
- Since TDengine do not support transactions, the implement is void functionality.
- """
- pass
-
- def rollback(self):
- """Void functionality
- """
- pass
-
- def clear_result_set(self):
- """Clear unused result set on this connection.
- """
- pass
-
-
-if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
- conn.close()
- print("Hello world")
diff --git a/src/connector/python/osx/python3/taos/constants.py b/src/connector/python/osx/python3/taos/constants.py
deleted file mode 100644
index 93466f5184..0000000000
--- a/src/connector/python/osx/python3/taos/constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Constants in TDengine python
-"""
-
-from .dbapi import *
-
-
-class FieldType(object):
- """TDengine Field Types
- """
- # type_code
- C_NULL = 0
- C_BOOL = 1
- C_TINYINT = 2
- C_SMALLINT = 3
- C_INT = 4
- C_BIGINT = 5
- C_FLOAT = 6
- C_DOUBLE = 7
- C_BINARY = 8
- C_TIMESTAMP = 9
- C_NCHAR = 10
- C_TINYINT_UNSIGNED = 11
- C_SMALLINT_UNSIGNED = 12
- C_INT_UNSIGNED = 13
- C_BIGINT_UNSIGNED = 14
- # NULL value definition
- # NOTE: These values should change according to C definition in tsdb.h
- C_BOOL_NULL = 0x02
- C_TINYINT_NULL = -128
- C_TINYINT_UNSIGNED_NULL = 255
- C_SMALLINT_NULL = -32768
- C_SMALLINT_UNSIGNED_NULL = 65535
- C_INT_NULL = -2147483648
- C_INT_UNSIGNED_NULL = 4294967295
- C_BIGINT_NULL = -9223372036854775808
- C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
- # Timestamp precision definition
- C_TIMESTAMP_MILLI = 0
- C_TIMESTAMP_MICRO = 1
diff --git a/src/connector/python/osx/python3/taos/cursor.py b/src/connector/python/osx/python3/taos/cursor.py
deleted file mode 100644
index 32dc0ea3c3..0000000000
--- a/src/connector/python/osx/python3/taos/cursor.py
+++ /dev/null
@@ -1,280 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-from .constants import FieldType
-
-# querySeqNum = 0
-
-
-class TDengineCursor(object):
- """Database cursor which is used to manage the context of a fetch operation.
-
- Attributes:
- .description: Read-only attribute consists of 7-item sequences:
-
- > name (mondatory)
- > type_code (mondatory)
- > display_size
- > internal_size
- > precision
- > scale
- > null_ok
-
- This attribute will be None for operations that do not return rows or
- if the cursor has not had an operation invoked via the .execute*() method yet.
-
- .rowcount:This read-only attribute specifies the number of rows that the last
- .execute*() produced (for DQL statements like SELECT) or affected
- """
-
- def __init__(self, connection=None):
- self._description = []
- self._rowcount = -1
- self._connection = None
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
- self._logfile = ""
-
- if connection is not None:
- self._connection = connection
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetch iterator")
-
- if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
- if self._block_rows == 0:
- raise StopIteration
- self._block = list(map(tuple, zip(*block)))
- self._block_iter = 0
-
- data = self._block[self._block_iter]
- self._block_iter += 1
-
- return data
-
- @property
- def description(self):
- """Return the description of the object.
- """
- return self._description
-
- @property
- def rowcount(self):
- """Return the rowcount of the object
- """
- return self._rowcount
-
- @property
- def affected_rows(self):
- """Return the rowcount of insertion
- """
- return self._affected_rows
-
- def callproc(self, procname, *args):
- """Call a stored database procedure with the given name.
-
- Void functionality since no stored procedures.
- """
- pass
-
- def log(self, logfile):
- self._logfile = logfile
-
- def close(self):
- """Close the cursor.
- """
- if self._connection is None:
- return False
-
- self._reset_result()
- self._connection = None
-
- return True
-
- def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
- if not operation:
- return None
-
- if not self._connection:
- # TODO : change the exception raised here
- raise ProgrammingError("Cursor is not connected")
-
- self._reset_result()
-
- stmt = operation
- if params is not None:
- pass
-
- # global querySeqNum
- # querySeqNum += 1
- # localSeqNum = querySeqNum # avoid raice condition
- # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
- self._result = CTaosInterface.query(self._connection._conn, stmt)
- # print(" << Query ({}) Exec Done".format(localSeqNum))
- if (self._logfile):
- with open(self._logfile, "a") as logfile:
- logfile.write("%s;\n" % operation)
-
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(
- self._result)
- return self._handle_result()
- else:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
-
- def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
- pass
-
- def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
- pass
-
- def fetchmany(self):
- pass
-
- def istype(self, col, dataType):
- if (dataType.upper() == "BOOL"):
- if (self._description[col][1] == FieldType.C_BOOL):
- return True
- if (dataType.upper() == "TINYINT"):
- if (self._description[col][1] == FieldType.C_TINYINT):
- return True
- if (dataType.upper() == "TINYINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
- return True
- if (dataType.upper() == "SMALLINT"):
- if (self._description[col][1] == FieldType.C_SMALLINT):
- return True
- if (dataType.upper() == "SMALLINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
- return True
- if (dataType.upper() == "INT"):
- if (self._description[col][1] == FieldType.C_INT):
- return True
- if (dataType.upper() == "INT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
- return True
- if (dataType.upper() == "BIGINT"):
- if (self._description[col][1] == FieldType.C_BIGINT):
- return True
- if (dataType.upper() == "BIGINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
- return True
- if (dataType.upper() == "FLOAT"):
- if (self._description[col][1] == FieldType.C_FLOAT):
- return True
- if (dataType.upper() == "DOUBLE"):
- if (self._description[col][1] == FieldType.C_DOUBLE):
- return True
- if (dataType.upper() == "BINARY"):
- if (self._description[col][1] == FieldType.C_BINARY):
- return True
- if (dataType.upper() == "TIMESTAMP"):
- if (self._description[col][1] == FieldType.C_TIMESTAMP):
- return True
- if (dataType.upper() == "NCHAR"):
- if (self._description[col][1] == FieldType.C_NCHAR):
- return True
-
- return False
-
- def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def fetchall(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def nextset(self):
- """
- """
- pass
-
- def setinputsize(self, sizes):
- pass
-
- def setutputsize(self, size, column=None):
- pass
-
- def _reset_result(self):
- """Reset the result to unused version.
- """
- self._description = []
- self._rowcount = -1
- if self._result is not None:
- CTaosInterface.freeResult(self._result)
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
-
- def _handle_result(self):
- """Handle the return result from query.
- """
- self._description = []
- for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
-
- return self._result
diff --git a/src/connector/python/osx/python3/taos/dbapi.py b/src/connector/python/osx/python3/taos/dbapi.py
deleted file mode 100644
index 594681ada9..0000000000
--- a/src/connector/python/osx/python3/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
- def __com__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DataFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
diff --git a/src/connector/python/osx/python3/taos/error.py b/src/connector/python/osx/python3/taos/error.py
deleted file mode 100644
index c584badce8..0000000000
--- a/src/connector/python/osx/python3/taos/error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Python exceptions
-"""
-
-
-class Error(Exception):
- def __init__(self, msg=None, errno=None):
- self.msg = msg
- self._full_msg = self.msg
- self.errno = errno
-
- def __str__(self):
- return self._full_msg
-
-
-class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
- pass
-
-
-class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
- pass
-
-
-class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
- pass
-
-
-class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
- pass
-
-
-class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
- pass
-
-
-class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
- pass
-
-
-class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
- pass
-
-
-class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
- pass
-
-
-class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
- pass
diff --git a/src/connector/python/osx/python3/taos/subscription.py b/src/connector/python/osx/python3/taos/subscription.py
deleted file mode 100644
index 270d9de092..0000000000
--- a/src/connector/python/osx/python3/taos/subscription.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-
-
-class TDengineSubscription(object):
- """TDengine subscription object
- """
-
- def __init__(self, sub):
- self._sub = sub
-
- def consume(self):
- """Consume rows of a subscription
- """
- if self._sub is None:
- raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
-
- def close(self, keepProgress=True):
- """Close the Subscription.
- """
- if self._sub is None:
- return False
-
- CTaosInterface.unsubscribe(self._sub, keepProgress)
- return True
-
-
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(True, "test", "select * from meters;", 1000)
-
- for i in range(0, 10):
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
diff --git a/src/connector/python/setup.py b/src/connector/python/setup.py
new file mode 100644
index 0000000000..901e8396c0
--- /dev/null
+++ b/src/connector/python/setup.py
@@ -0,0 +1,34 @@
+import setuptools
+
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+
+setuptools.setup(
+ name="taos",
+ version="2.0.10",
+ author="Taosdata Inc.",
+ author_email="support@taosdata.com",
+ description="TDengine python client package",
+ long_description=long_description,
+ long_description_content_type="text/markdown",
+ url="https://github.com/taosdata/TDengine/tree/develop/src/connector/python",
+ packages=setuptools.find_packages(),
+ classifiers=[
+ "Environment :: Console",
+ "Environment :: MacOS X",
+ "Environment :: Win32 (MS Windows)",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
+ "Operating System :: MacOS",
+ "Programming Language :: Python :: 2.7",
+ "Operating System :: Linux",
+ "Operating System :: POSIX :: Linux",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: Microsoft :: Windows :: Windows 10",
+ ],
+)
diff --git a/src/connector/python/linux/python2/taos/__init__.py b/src/connector/python/taos/__init__.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/__init__.py
rename to src/connector/python/taos/__init__.py
diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/taos/cinterface.py
similarity index 70%
rename from src/connector/python/linux/python2/taos/cinterface.py
rename to src/connector/python/taos/cinterface.py
index 4367947341..b8824327b0 100644
--- a/src/connector/python/linux/python2/taos/cinterface.py
+++ b/src/connector/python/taos/cinterface.py
@@ -3,6 +3,7 @@ from .constants import FieldType
from .error import *
import math
import datetime
+import platform
def _convert_millisecond_to_datetime(milli):
@@ -20,40 +21,28 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
if micro:
_timestamp_converter = _convert_microsecond_to_datetime
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C bool row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_byte))[
+ :abs(num_of_rows)]]
def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C tinyint row to python row
"""
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
+ return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
def _crow_tinyint_unsigned_to_python(
@@ -63,92 +52,56 @@ def _crow_tinyint_unsigned_to_python(
micro=False):
"""Function to convert C tinyint row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_ubyte))[
+ :abs(num_of_rows)]]
def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C smallint row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_short))[
+ :abs(num_of_rows)]]
def _crow_smallint_unsigned_to_python(
data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C smallint row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_ushort))[
+ :abs(num_of_rows)]]
def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C int row to python row
"""
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
+ return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C int row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_uint))[
+ :abs(num_of_rows)]]
def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C bigint row to python row
"""
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
+ return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bigint_unsigned_to_python(
@@ -158,52 +111,33 @@ def _crow_bigint_unsigned_to_python(
micro=False):
"""Function to convert C bigint row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_uint64))[
+ :abs(num_of_rows)]]
def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C float row to python row
"""
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
+ return [None if math.isnan(ele) else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C double row to python row
"""
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
+ return [None if math.isnan(ele) else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C binary row to python row
"""
assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
+ return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
+ 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
@@ -230,30 +164,17 @@ def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
"""
assert(nbytes is not None)
res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
+ for i in range(abs(num_of_rows)):
+ try:
+ rbyte = ctypes.cast(
+ data + nbytes * i,
+ ctypes.POINTER(
+ ctypes.c_short))[
+ :1].pop()
+ tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
+ res.append(tmpstr.value.decode()[0:rbyte])
+ except ValueError:
+ res.append(None)
return res
@@ -262,20 +183,12 @@ def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
"""
assert(nbytes is not None)
res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
+ for i in range(abs(num_of_rows)):
+ try:
+ tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
+ res.append(tmpstr.value.decode())
+ except ValueError:
+ res.append(None)
return res
@@ -324,14 +237,38 @@ class TaosField(ctypes.Structure):
# C interface class
+def _load_taos_linux():
+ return ctypes.CDLL('libtaos.so')
+
+
+def _load_taos_darwin():
+    return ctypes.CDLL('libtaos.dylib')
+
+
+def _load_taos_windows():
+ return ctypes.windll.LoadLibrary('taos')
+
+
+def _load_taos():
+ load_func = {
+ 'Linux': _load_taos_linux,
+ 'Darwin': _load_taos_darwin,
+ 'Windows': _load_taos_windows,
+ }
+ try:
+ return load_func[platform.system()]()
+ except:
+        raise SystemExit('unsupported platform to TDengine connector')
+
+
class CTaosInterface(object):
- libtaos = ctypes.CDLL('libtaos.so')
+ libtaos = _load_taos()
libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
libtaos.taos_init.restype = None
libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
+ # libtaos.taos_use_result.restype = ctypes.c_void_p
libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
libtaos.taos_errstr.restype = ctypes.c_char_p
libtaos.taos_subscribe.restype = ctypes.c_void_p
@@ -432,7 +369,7 @@ class CTaosInterface(object):
'''Close the TDengine handle
'''
CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
+ # print('connection is closed')
@staticmethod
def query(connection, sql):
diff --git a/src/connector/python/linux/python2/taos/connection.py b/src/connector/python/taos/connection.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/connection.py
rename to src/connector/python/taos/connection.py
diff --git a/src/connector/python/linux/python2/taos/constants.py b/src/connector/python/taos/constants.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/constants.py
rename to src/connector/python/taos/constants.py
diff --git a/src/connector/python/linux/python3/taos/cursor.py b/src/connector/python/taos/cursor.py
similarity index 98%
rename from src/connector/python/linux/python3/taos/cursor.py
rename to src/connector/python/taos/cursor.py
index 32dc0ea3c3..d443ec95d0 100644
--- a/src/connector/python/linux/python3/taos/cursor.py
+++ b/src/connector/python/taos/cursor.py
@@ -45,6 +45,12 @@ class TDengineCursor(object):
return self
def __next__(self):
+ return self._taos_next()
+
+ def next(self):
+ return self._taos_next()
+
+ def _taos_next(self):
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetch iterator")
diff --git a/src/connector/python/linux/python2/taos/dbapi.py b/src/connector/python/taos/dbapi.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/dbapi.py
rename to src/connector/python/taos/dbapi.py
diff --git a/src/connector/python/linux/python2/taos/error.py b/src/connector/python/taos/error.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/error.py
rename to src/connector/python/taos/error.py
diff --git a/src/connector/python/linux/python2/taos/subscription.py b/src/connector/python/taos/subscription.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/subscription.py
rename to src/connector/python/taos/subscription.py
diff --git a/src/connector/python/windows/python2/LICENSE b/src/connector/python/windows/python2/LICENSE
deleted file mode 100644
index 79a9d73086..0000000000
--- a/src/connector/python/windows/python2/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
- Copyright (c) 2019 TAOS Data, Inc.
-
-This program is free software: you can use, redistribute, and/or modify
-it under the terms of the GNU Affero General Public License, version 3
-or later ("AGPL"), as published by the Free Software Foundation.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
diff --git a/src/connector/python/windows/python2/README.md b/src/connector/python/windows/python2/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/windows/python2/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/windows/python2/setup.py b/src/connector/python/windows/python2/setup.py
deleted file mode 100644
index 47d374fe67..0000000000
--- a/src/connector/python/windows/python2/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.7",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 2",
- "Operating System :: Windows",
- ],
-)
diff --git a/src/connector/python/windows/python2/taos/__init__.py b/src/connector/python/windows/python2/taos/__init__.py
deleted file mode 100644
index 9732635738..0000000000
--- a/src/connector/python/windows/python2/taos/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
-
-# Globals
-threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
-
-
-def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
-
- Current supporting keyword parameters:
- @dsn: Data source name as string
- @user: Username as string(optional)
- @password: Password as string(optional)
- @host: Hostname(optional)
- @database: Database name(optional)
-
- @rtype: TDengineConnector
- """
- return TDengineConnection(*args, **kwargs)
diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py
deleted file mode 100644
index ec72474df9..0000000000
--- a/src/connector/python/windows/python2/taos/cinterface.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
-
-
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
-
-
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
-
-
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
-
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
-
-
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
-
-
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
-
-
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
-
-
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
-
-
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
-
-
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
-
-
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
-
-
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
-
-
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
-
-
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
-
-
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
-
- return res
-
-
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
-
-# C interface class
-
-
-class CTaosInterface(object):
-
- libtaos = ctypes.windll.LoadLibrary('taos')
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
-
- def __init__(self, config=None):
- '''
- Function to initialize the class
- @host : str, hostname to connect
- @user : str, username to connect to server
- @password : str, password to connect to server
- @db : str, default db to use when log in
- @config : str, config directory
-
- @rtype : None
- '''
- if config is None:
- self._config = ctypes.c_char_p(None)
- else:
- try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
- except AttributeError:
- raise AttributeError("config is expected as a str")
-
- if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
-
- CTaosInterface.libtaos.taos_init()
-
- @property
- def config(self):
- """ Get current config
- """
- return self._config
-
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
- Function to connect to server
-
- @rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
- @rtype: 0 on success and -1 on failure
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
- @restart boolean,
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
- """
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
-
-
-if __name__ == '__main__':
- cinter = CTaosInterface()
- conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
-
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
-
- fields = CTaosInterface.useResult(result)
-
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
-
- print(data)
-
- cinter.freeResult(result)
- cinter.close(conn)
diff --git a/src/connector/python/windows/python2/taos/connection.py b/src/connector/python/windows/python2/taos/connection.py
deleted file mode 100644
index 5729d01c6d..0000000000
--- a/src/connector/python/windows/python2/taos/connection.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
-
-
-class TDengineConnection(object):
- """ TDengine connection object
- """
-
- def __init__(self, *args, **kwargs):
- self._conn = None
- self._host = None
- self._user = "root"
- self._password = "taosdata"
- self._database = None
- self._port = 0
- self._config = None
- self._chandle = None
-
- if len(kwargs) > 0:
- self.config(**kwargs)
-
- def config(self, **kwargs):
- # host
- if 'host' in kwargs:
- self._host = kwargs['host']
-
- # user
- if 'user' in kwargs:
- self._user = kwargs['user']
-
- # password
- if 'password' in kwargs:
- self._password = kwargs['password']
-
- # database
- if 'database' in kwargs:
- self._database = kwargs['database']
-
- # port
- if 'port' in kwargs:
- self._port = kwargs['port']
-
- # config
- if 'config' in kwargs:
- self._config = kwargs['config']
-
- self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
-
- def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
-
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
- if self._conn is None:
- return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
-
- def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
-
- def commit(self):
- """Commit any pending transaction to the database.
-
- Since TDengine do not support transactions, the implement is void functionality.
- """
- pass
-
- def rollback(self):
- """Void functionality
- """
- pass
-
- def clear_result_set(self):
- """Clear unused result set on this connection.
- """
- pass
-
-
-if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
- conn.close()
- print("Hello world")
diff --git a/src/connector/python/windows/python2/taos/constants.py b/src/connector/python/windows/python2/taos/constants.py
deleted file mode 100644
index 8a8011c3e3..0000000000
--- a/src/connector/python/windows/python2/taos/constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Constants in TDengine python
-"""
-
-from .dbapi import *
-
-
-class FieldType(object):
- """TDengine Field Types
- """
- # type_code
- C_NULL = 0
- C_BOOL = 1
- C_TINYINT = 2
- C_SMALLINT = 3
- C_INT = 4
- C_BIGINT = 5
- C_FLOAT = 6
- C_DOUBLE = 7
- C_BINARY = 8
- C_TIMESTAMP = 9
- C_NCHAR = 10
- C_TINYINT_UNSIGNED = 11
- C_SMALLINT_UNSIGNED = 12
- C_INT_UNSIGNED = 13
- C_BIGINT_UNSIGNED = 14
- # NULL value definition
- # NOTE: These values should change according to C definition in tsdb.h
- C_BOOL_NULL = 0x02
- C_TINYINT_NULL = -128
- C_TINYINT_UNSIGNED_NULL = 255
- C_SMALLINT_NULL = -32768
- C_SMALLINT_UNSIGNED_NULL = 65535
- C_INT_NULL = -2147483648
- C_INT_UNSIGNED_NULL = 4294967295
- C_BIGINT_NULL = -9223372036854775808
- C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
- # Time precision definition
- C_TIMESTAMP_MILLI = 0
- C_TIMESTAMP_MICRO = 1
diff --git a/src/connector/python/windows/python2/taos/cursor.py b/src/connector/python/windows/python2/taos/cursor.py
deleted file mode 100644
index 5f4666b593..0000000000
--- a/src/connector/python/windows/python2/taos/cursor.py
+++ /dev/null
@@ -1,220 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-from .constants import FieldType
-
-# querySeqNum = 0
-
-
-class TDengineCursor(object):
- """Database cursor which is used to manage the context of a fetch operation.
-
- Attributes:
- .description: Read-only attribute consists of 7-item sequences:
-
- > name (mondatory)
- > type_code (mondatory)
- > display_size
- > internal_size
- > precision
- > scale
- > null_ok
-
- This attribute will be None for operations that do not return rows or
- if the cursor has not had an operation invoked via the .execute*() method yet.
-
- .rowcount:This read-only attribute specifies the number of rows that the last
- .execute*() produced (for DQL statements like SELECT) or affected
- """
-
- def __init__(self, connection=None):
- self._description = []
- self._rowcount = -1
- self._connection = None
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
- self._logfile = ""
-
- if connection is not None:
- self._connection = connection
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetch iterator")
-
- if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
- if self._block_rows == 0:
- raise StopIteration
- self._block = list(map(tuple, zip(*block)))
- self._block_iter = 0
-
- data = self._block[self._block_iter]
- self._block_iter += 1
-
- return data
-
- @property
- def description(self):
- """Return the description of the object.
- """
- return self._description
-
- @property
- def rowcount(self):
- """Return the rowcount of the object
- """
- return self._rowcount
-
- @property
- def affected_rows(self):
- """Return the affected_rows of the object
- """
- return self._affected_rows
-
- def callproc(self, procname, *args):
- """Call a stored database procedure with the given name.
-
- Void functionality since no stored procedures.
- """
- pass
-
- def close(self):
- """Close the cursor.
- """
- if self._connection is None:
- return False
-
- self._reset_result()
- self._connection = None
-
- return True
-
- def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
- if not operation:
- return None
-
- if not self._connection:
- # TODO : change the exception raised here
- raise ProgrammingError("Cursor is not connected")
-
- self._reset_result()
-
- stmt = operation
- if params is not None:
- pass
-
- self._result = CTaosInterface.query(self._connection._conn, stmt)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(self._result)
- return self._handle_result()
- else:
- raise ProgrammingError(CTaosInterface.errStr(self._result), errno)
-
- def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
- pass
-
- def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
- pass
-
- def fetchmany(self):
- pass
-
- def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def fetchall(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
-
- return list(map(tuple, zip(*buffer)))
-
- def nextset(self):
- """
- """
- pass
-
- def setinputsize(self, sizes):
- pass
-
- def setutputsize(self, size, column=None):
- pass
-
- def _reset_result(self):
- """Reset the result to unused version.
- """
- self._description = []
- self._rowcount = -1
- if self._result is not None:
- CTaosInterface.freeResult(self._result)
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
-
- def _handle_result(self):
- """Handle the return result from query.
- """
- self._description = []
- for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
-
- return self._result
diff --git a/src/connector/python/windows/python2/taos/dbapi.py b/src/connector/python/windows/python2/taos/dbapi.py
deleted file mode 100644
index 594681ada9..0000000000
--- a/src/connector/python/windows/python2/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
- def __com__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DataFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
diff --git a/src/connector/python/windows/python2/taos/error.py b/src/connector/python/windows/python2/taos/error.py
deleted file mode 100644
index c584badce8..0000000000
--- a/src/connector/python/windows/python2/taos/error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Python exceptions
-"""
-
-
-class Error(Exception):
- def __init__(self, msg=None, errno=None):
- self.msg = msg
- self._full_msg = self.msg
- self.errno = errno
-
- def __str__(self):
- return self._full_msg
-
-
-class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
- pass
-
-
-class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
- pass
-
-
-class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
- pass
-
-
-class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
- pass
-
-
-class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
- pass
-
-
-class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
- pass
-
-
-class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
- pass
-
-
-class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
- pass
-
-
-class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
- pass
diff --git a/src/connector/python/windows/python2/taos/subscription.py b/src/connector/python/windows/python2/taos/subscription.py
deleted file mode 100644
index 270d9de092..0000000000
--- a/src/connector/python/windows/python2/taos/subscription.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-
-
-class TDengineSubscription(object):
- """TDengine subscription object
- """
-
- def __init__(self, sub):
- self._sub = sub
-
- def consume(self):
- """Consume rows of a subscription
- """
- if self._sub is None:
- raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
-
- def close(self, keepProgress=True):
- """Close the Subscription.
- """
- if self._sub is None:
- return False
-
- CTaosInterface.unsubscribe(self._sub, keepProgress)
- return True
-
-
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(True, "test", "select * from meters;", 1000)
-
- for i in range(0, 10):
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
diff --git a/src/connector/python/windows/python3/LICENSE b/src/connector/python/windows/python3/LICENSE
deleted file mode 100644
index 2d032e65d8..0000000000
--- a/src/connector/python/windows/python3/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
- Copyright (c) 2019 TAOS Data, Inc.
-
-This program is free software: you can use, redistribute, and/or modify
-it under the terms of the GNU Affero General Public License, version 3
-or later ("AGPL"), as published by the Free Software Foundation.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
diff --git a/src/connector/python/windows/python3/README.md b/src/connector/python/windows/python3/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/windows/python3/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/windows/python3/setup.py b/src/connector/python/windows/python3/setup.py
deleted file mode 100644
index cdcec62a21..0000000000
--- a/src/connector/python/windows/python3/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.7",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 3",
- "Operating System :: Windows",
- ],
-)
diff --git a/src/connector/python/windows/python3/taos/__init__.py b/src/connector/python/windows/python3/taos/__init__.py
deleted file mode 100644
index b57e25fd2c..0000000000
--- a/src/connector/python/windows/python3/taos/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
-
-# Globals
-threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
-
-
-def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
-
- Current supporting keyword parameters:
- @dsn: Data source name as string
- @user: Username as string(optional)
- @password: Password as string(optional)
- @host: Hostname(optional)
- @database: Database name(optional)
-
- @rtype: TDengineConnector
- """
- return TDengineConnection(*args, **kwargs)
diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py
deleted file mode 100644
index ec72474df9..0000000000
--- a/src/connector/python/windows/python3/taos/cinterface.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
-
-
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
-
-
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
-
-
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
-
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
-
-
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
-
-
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
-
-
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
-
-
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
-
-
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
-
-
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
-
-
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
-
-
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
-
-
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
-
-
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
-
-
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
-
- return res
-
-
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
-
-# C interface class
-
-
-class CTaosInterface(object):
-
- libtaos = ctypes.windll.LoadLibrary('taos')
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
-
- def __init__(self, config=None):
- '''
- Function to initialize the class
- @host : str, hostname to connect
- @user : str, username to connect to server
- @password : str, password to connect to server
- @db : str, default db to use when log in
- @config : str, config directory
-
- @rtype : None
- '''
- if config is None:
- self._config = ctypes.c_char_p(None)
- else:
- try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
- except AttributeError:
- raise AttributeError("config is expected as a str")
-
- if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
-
- CTaosInterface.libtaos.taos_init()
-
- @property
- def config(self):
- """ Get current config
- """
- return self._config
-
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
- Function to connect to server
-
- @rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
- @rtype: 0 on success and -1 on failure
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
- @restart boolean,
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
- """
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
-
-
-if __name__ == '__main__':
- cinter = CTaosInterface()
- conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
-
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
-
- fields = CTaosInterface.useResult(result)
-
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
-
- print(data)
-
- cinter.freeResult(result)
- cinter.close(conn)
diff --git a/src/connector/python/windows/python3/taos/connection.py b/src/connector/python/windows/python3/taos/connection.py
deleted file mode 100644
index 5729d01c6d..0000000000
--- a/src/connector/python/windows/python3/taos/connection.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
-
-
-class TDengineConnection(object):
- """ TDengine connection object
- """
-
- def __init__(self, *args, **kwargs):
- self._conn = None
- self._host = None
- self._user = "root"
- self._password = "taosdata"
- self._database = None
- self._port = 0
- self._config = None
- self._chandle = None
-
- if len(kwargs) > 0:
- self.config(**kwargs)
-
- def config(self, **kwargs):
- # host
- if 'host' in kwargs:
- self._host = kwargs['host']
-
- # user
- if 'user' in kwargs:
- self._user = kwargs['user']
-
- # password
- if 'password' in kwargs:
- self._password = kwargs['password']
-
- # database
- if 'database' in kwargs:
- self._database = kwargs['database']
-
- # port
- if 'port' in kwargs:
- self._port = kwargs['port']
-
- # config
- if 'config' in kwargs:
- self._config = kwargs['config']
-
- self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
-
- def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
-
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
- if self._conn is None:
- return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
-
- def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
-
- def commit(self):
- """Commit any pending transaction to the database.
-
- Since TDengine do not support transactions, the implement is void functionality.
- """
- pass
-
- def rollback(self):
- """Void functionality
- """
- pass
-
- def clear_result_set(self):
- """Clear unused result set on this connection.
- """
- pass
-
-
-if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
- conn.close()
- print("Hello world")
diff --git a/src/connector/python/windows/python3/taos/constants.py b/src/connector/python/windows/python3/taos/constants.py
deleted file mode 100644
index 49fc17b2fb..0000000000
--- a/src/connector/python/windows/python3/taos/constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Constants in TDengine python
-"""
-
-from .dbapi import *
-
-
-class FieldType(object):
- """TDengine Field Types
- """
- # type_code
- C_NULL = 0
- C_BOOL = 1
- C_TINYINT = 2
- C_SMALLINT = 3
- C_INT = 4
- C_BIGINT = 5
- C_FLOAT = 6
- C_DOUBLE = 7
- C_BINARY = 8
- C_TIMESTAMP = 9
- C_NCHAR = 10
- C_TINYINT_UNSIGNED = 11
- C_SMALLINT_UNSIGNED = 12
- C_INT_UNSIGNED = 13
- C_BIGINT_UNSIGNED = 14
- # NULL value definition
- # NOTE: These values should change according to C definition in tsdb.h
- C_BOOL_NULL = 0x02
- C_TINYINT_NULL = -128
- C_TINYINT_UNSIGNED_NULL = 255
- C_SMALLINT_NULL = -32768
- C_SMALLINT_UNSIGNED_NULL = 65535
- C_INT_NULL = -2147483648
- C_INT_UNSIGNED_NULL = 4294967295
- C_BIGINT_NULL = -9223372036854775808
- C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
- # Timestamp precision definition
- C_TIMESTAMP_MILLI = 0
- C_TIMESTAMP_MICRO = 1
diff --git a/src/connector/python/windows/python3/taos/cursor.py b/src/connector/python/windows/python3/taos/cursor.py
deleted file mode 100644
index 136cd42fe4..0000000000
--- a/src/connector/python/windows/python3/taos/cursor.py
+++ /dev/null
@@ -1,220 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-from .constants import FieldType
-
-# querySeqNum = 0
-
-
-class TDengineCursor(object):
- """Database cursor which is used to manage the context of a fetch operation.
-
- Attributes:
- .description: Read-only attribute consists of 7-item sequences:
-
- > name (mondatory)
- > type_code (mondatory)
- > display_size
- > internal_size
- > precision
- > scale
- > null_ok
-
- This attribute will be None for operations that do not return rows or
- if the cursor has not had an operation invoked via the .execute*() method yet.
-
- .rowcount:This read-only attribute specifies the number of rows that the last
- .execute*() produced (for DQL statements like SELECT) or affected
- """
-
- def __init__(self, connection=None):
- self._description = []
- self._rowcount = -1
- self._connection = None
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
- self._logfile = ""
-
- if connection is not None:
- self._connection = connection
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetch iterator")
-
- if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
- if self._block_rows == 0:
- raise StopIteration
- self._block = list(map(tuple, zip(*block)))
- self._block_iter = 0
-
- data = self._block[self._block_iter]
- self._block_iter += 1
-
- return data
-
- @property
- def description(self):
- """Return the description of the object.
- """
- return self._description
-
- @property
- def rowcount(self):
- """Return the rowcount of the object
- """
- return self._rowcount
-
- @property
- def affected_rows(self):
- """Return the affected_rows of the object
- """
- return self._affected_rows
-
- def callproc(self, procname, *args):
- """Call a stored database procedure with the given name.
-
- Void functionality since no stored procedures.
- """
- pass
-
- def close(self):
- """Close the cursor.
- """
- if self._connection is None:
- return False
-
- self._reset_result()
- self._connection = None
-
- return True
-
- def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
- if not operation:
- return None
-
- if not self._connection:
- # TODO : change the exception raised here
- raise ProgrammingError("Cursor is not connected")
-
- self._reset_result()
-
- stmt = operation
- if params is not None:
- pass
-
- self._result = CTaosInterface.query(self._connection._conn, stmt)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(self._result)
- return self._handle_result()
- else:
- raise ProgrammingError(CTaosInterface.errStr(self._result), errno)
-
- def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
- pass
-
- def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
- pass
-
- def fetchmany(self):
- pass
-
- def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def fetchall(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
-
- return list(map(tuple, zip(*buffer)))
-
- def nextset(self):
- """
- """
- pass
-
- def setinputsize(self, sizes):
- pass
-
- def setutputsize(self, size, column=None):
- pass
-
- def _reset_result(self):
- """Reset the result to unused version.
- """
- self._description = []
- self._rowcount = -1
- if self._result is not None:
- CTaosInterface.freeResult(self._result)
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
-
- def _handle_result(self):
- """Handle the return result from query.
- """
- self._description = []
- for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
-
- return self._result
diff --git a/src/connector/python/windows/python3/taos/dbapi.py b/src/connector/python/windows/python3/taos/dbapi.py
deleted file mode 100644
index a29621f7a3..0000000000
--- a/src/connector/python/windows/python3/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
- def __com__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DataFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
diff --git a/src/connector/python/windows/python3/taos/error.py b/src/connector/python/windows/python3/taos/error.py
deleted file mode 100644
index 238b293a0b..0000000000
--- a/src/connector/python/windows/python3/taos/error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Python exceptions
-"""
-
-
-class Error(Exception):
- def __init__(self, msg=None, errno=None):
- self.msg = msg
- self._full_msg = self.msg
- self.errno = errno
-
- def __str__(self):
- return self._full_msg
-
-
-class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
- pass
-
-
-class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
- pass
-
-
-class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
- pass
-
-
-class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
- pass
-
-
-class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
- pass
-
-
-class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
- pass
-
-
-class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
- pass
-
-
-class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
- pass
-
-
-class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
- pass
diff --git a/src/connector/python/windows/python3/taos/subscription.py b/src/connector/python/windows/python3/taos/subscription.py
deleted file mode 100644
index 270d9de092..0000000000
--- a/src/connector/python/windows/python3/taos/subscription.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-
-
-class TDengineSubscription(object):
- """TDengine subscription object
- """
-
- def __init__(self, sub):
- self._sub = sub
-
- def consume(self):
- """Consume rows of a subscription
- """
- if self._sub is None:
- raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
-
- def close(self, keepProgress=True):
- """Close the Subscription.
- """
- if self._sub is None:
- return False
-
- CTaosInterface.unsubscribe(self._sub, keepProgress)
- return True
-
-
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(True, "test", "select * from meters;", 1000)
-
- for i in range(0, 10):
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
diff --git a/src/dnode/src/dnodeCfg.c b/src/dnode/src/dnodeCfg.c
index c573d709f5..586adacc98 100644
--- a/src/dnode/src/dnodeCfg.c
+++ b/src/dnode/src/dnodeCfg.c
@@ -158,7 +158,7 @@ static int32_t dnodeWriteCfg() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
diff --git a/src/dnode/src/dnodeEps.c b/src/dnode/src/dnodeEps.c
index 9554651776..9b15353647 100644
--- a/src/dnode/src/dnodeEps.c
+++ b/src/dnode/src/dnodeEps.c
@@ -277,7 +277,7 @@ static int32_t dnodeWriteEps() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
diff --git a/src/dnode/src/dnodeMInfos.c b/src/dnode/src/dnodeMInfos.c
index 0dca116d84..611c30b843 100644
--- a/src/dnode/src/dnodeMInfos.c
+++ b/src/dnode/src/dnodeMInfos.c
@@ -286,7 +286,7 @@ static int32_t dnodeWriteMInfos() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
diff --git a/src/inc/taos.h b/src/inc/taos.h
index cd8e116053..6dd695b320 100644
--- a/src/inc/taos.h
+++ b/src/inc/taos.h
@@ -82,6 +82,7 @@ typedef struct TAOS_BIND {
uintptr_t buffer_length; // unused
uintptr_t *length;
int * is_null;
+
int is_unsigned; // unused
int * error; // unused
union {
@@ -99,12 +100,25 @@ typedef struct TAOS_BIND {
unsigned int allocated;
} TAOS_BIND;
+typedef struct TAOS_MULTI_BIND {
+ int buffer_type;
+ void *buffer;
+ uintptr_t buffer_length;
+ int32_t *length;
+ char *is_null;
+ int num;
+} TAOS_MULTI_BIND;
+
+
TAOS_STMT *taos_stmt_init(TAOS *taos);
int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
+int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name);
int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes);
int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind);
+int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind);
+int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx);
int taos_stmt_add_batch(TAOS_STMT *stmt);
int taos_stmt_execute(TAOS_STMT *stmt);
TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt);
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index e9170860a6..e596ee67ec 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -22,7 +22,6 @@ extern "C" {
#include
#include
-#include "osDef.h"
#include "taos.h"
#define TSDB__packed
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index eff4eecbc1..ce6f7c4f22 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -218,6 +218,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended")
#define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied")
#define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing")
+#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state")
// tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID")
diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h
index e9f95660f7..ef3f8ed1fb 100644
--- a/src/inc/ttokendef.h
+++ b/src/inc/ttokendef.h
@@ -79,12 +79,12 @@
#define TK_DOT 60
#define TK_CREATE 61
#define TK_TABLE 62
-#define TK_DATABASE 63
-#define TK_TABLES 64
-#define TK_STABLES 65
-#define TK_VGROUPS 66
-#define TK_DROP 67
-#define TK_STABLE 68
+#define TK_STABLE 63
+#define TK_DATABASE 64
+#define TK_TABLES 65
+#define TK_STABLES 66
+#define TK_VGROUPS 67
+#define TK_DROP 68
#define TK_TOPIC 69
#define TK_DNODE 70
#define TK_USER 71
diff --git a/src/inc/ttype.h b/src/inc/ttype.h
index 662a23bfdb..9949f31c59 100644
--- a/src/inc/ttype.h
+++ b/src/inc/ttype.h
@@ -5,6 +5,8 @@
extern "C" {
#endif
+#include
+#include
#include "taosdef.h"
// ----------------- For variable data types such as TSDB_DATA_TYPE_BINARY and TSDB_DATA_TYPE_NCHAR
diff --git a/src/kit/shell/src/shellCheck.c b/src/kit/shell/src/shellCheck.c
index b88244ea01..4ff5dc36fc 100644
--- a/src/kit/shell/src/shellCheck.c
+++ b/src/kit/shell/src/shellCheck.c
@@ -142,21 +142,21 @@ static void *shellCheckThreadFp(void *arg) {
taos_free_result(pSql);
}
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
return NULL;
}
-static void shellRunCheckThreads(TAOS *con, SShellArguments *args) {
+static void shellRunCheckThreads(TAOS *con, SShellArguments *_args) {
pthread_attr_t thattr;
- ShellThreadObj *threadObj = (ShellThreadObj *)calloc(args->threadNum, sizeof(ShellThreadObj));
- for (int t = 0; t < args->threadNum; ++t) {
+ ShellThreadObj *threadObj = (ShellThreadObj *)calloc(_args->threadNum, sizeof(ShellThreadObj));
+ for (int t = 0; t < _args->threadNum; ++t) {
ShellThreadObj *pThread = threadObj + t;
pThread->threadIndex = t;
- pThread->totalThreads = args->threadNum;
+ pThread->totalThreads = _args->threadNum;
pThread->taos = con;
- pThread->db = args->database;
+ pThread->db = _args->database;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
@@ -167,31 +167,31 @@ static void shellRunCheckThreads(TAOS *con, SShellArguments *args) {
}
}
- for (int t = 0; t < args->threadNum; ++t) {
+ for (int t = 0; t < _args->threadNum; ++t) {
pthread_join(threadObj[t].threadID, NULL);
}
- for (int t = 0; t < args->threadNum; ++t) {
+ for (int t = 0; t < _args->threadNum; ++t) {
taos_close(threadObj[t].taos);
}
free(threadObj);
}
-void shellCheck(TAOS *con, SShellArguments *args) {
+void shellCheck(TAOS *con, SShellArguments *_args) {
int64_t start = taosGetTimestampMs();
- if (shellUseDb(con, args->database) != 0) {
+ if (shellUseDb(con, _args->database) != 0) {
shellFreeTbnames();
return;
}
- if (shellShowTables(con, args->database) != 0) {
+ if (shellShowTables(con, _args->database) != 0) {
shellFreeTbnames();
return;
}
- fprintf(stdout, "total %d tables will be checked by %d threads\n", tbNum, args->threadNum);
- shellRunCheckThreads(con, args);
+ fprintf(stdout, "total %d tables will be checked by %d threads\n", tbNum, _args->threadNum);
+ shellRunCheckThreads(con, _args);
int64_t end = taosGetTimestampMs();
fprintf(stdout, "total %d tables checked, failed:%d, time spent %.2f seconds\n", checkedNum, errorNum,
diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c
index 0eb1248fad..d4176fca91 100644
--- a/src/kit/shell/src/shellEngine.c
+++ b/src/kit/shell/src/shellEngine.c
@@ -56,24 +56,24 @@ extern TAOS *taos_connect_auth(const char *ip, const char *user, const char *aut
/*
* FUNCTION: Initialize the shell.
*/
-TAOS *shellInit(SShellArguments *args) {
+TAOS *shellInit(SShellArguments *_args) {
printf("\n");
printf(CLIENT_VERSION, tsOsName, taos_get_client_info());
fflush(stdout);
// set options before initializing
- if (args->timezone != NULL) {
- taos_options(TSDB_OPTION_TIMEZONE, args->timezone);
+ if (_args->timezone != NULL) {
+ taos_options(TSDB_OPTION_TIMEZONE, _args->timezone);
}
- if (args->is_use_passwd) {
- if (args->password == NULL) args->password = getpass("Enter password: ");
+ if (_args->is_use_passwd) {
+ if (_args->password == NULL) _args->password = getpass("Enter password: ");
} else {
- args->password = TSDB_DEFAULT_PASS;
+ _args->password = TSDB_DEFAULT_PASS;
}
- if (args->user == NULL) {
- args->user = TSDB_DEFAULT_USER;
+ if (_args->user == NULL) {
+ _args->user = TSDB_DEFAULT_USER;
}
if (taos_init()) {
@@ -84,10 +84,10 @@ TAOS *shellInit(SShellArguments *args) {
// Connect to the database.
TAOS *con = NULL;
- if (args->auth == NULL) {
- con = taos_connect(args->host, args->user, args->password, args->database, args->port);
+ if (_args->auth == NULL) {
+ con = taos_connect(_args->host, _args->user, _args->password, _args->database, _args->port);
} else {
- con = taos_connect_auth(args->host, args->user, args->auth, args->database, args->port);
+ con = taos_connect_auth(_args->host, _args->user, _args->auth, _args->database, _args->port);
}
if (con == NULL) {
@@ -100,14 +100,14 @@ TAOS *shellInit(SShellArguments *args) {
read_history();
// Check if it is temperory run
- if (args->commands != NULL || args->file[0] != 0) {
- if (args->commands != NULL) {
- printf("%s%s\n", PROMPT_HEADER, args->commands);
- shellRunCommand(con, args->commands);
+ if (_args->commands != NULL || _args->file[0] != 0) {
+ if (_args->commands != NULL) {
+ printf("%s%s\n", PROMPT_HEADER, _args->commands);
+ shellRunCommand(con, _args->commands);
}
- if (args->file[0] != 0) {
- source_file(con, args->file);
+ if (_args->file[0] != 0) {
+ source_file(con, _args->file);
}
taos_close(con);
@@ -116,14 +116,14 @@ TAOS *shellInit(SShellArguments *args) {
}
#ifndef WINDOWS
- if (args->dir[0] != 0) {
- source_dir(con, args);
+ if (_args->dir[0] != 0) {
+ source_dir(con, _args);
taos_close(con);
exit(EXIT_SUCCESS);
}
- if (args->check != 0) {
- shellCheck(con, args);
+ if (_args->check != 0) {
+ shellCheck(con, _args);
taos_close(con);
exit(EXIT_SUCCESS);
}
diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c
index af61995c61..5de50a3aaf 100644
--- a/src/kit/shell/src/shellImport.c
+++ b/src/kit/shell/src/shellImport.c
@@ -233,15 +233,15 @@ void* shellImportThreadFp(void *arg)
return NULL;
}
-static void shellRunImportThreads(SShellArguments* args)
+static void shellRunImportThreads(SShellArguments* _args)
{
pthread_attr_t thattr;
- ShellThreadObj *threadObj = (ShellThreadObj *)calloc(args->threadNum, sizeof(ShellThreadObj));
- for (int t = 0; t < args->threadNum; ++t) {
+ ShellThreadObj *threadObj = (ShellThreadObj *)calloc(_args->threadNum, sizeof(ShellThreadObj));
+ for (int t = 0; t < _args->threadNum; ++t) {
ShellThreadObj *pThread = threadObj + t;
pThread->threadIndex = t;
- pThread->totalThreads = args->threadNum;
- pThread->taos = taos_connect(args->host, args->user, args->password, args->database, tsDnodeShellPort);
+ pThread->totalThreads = _args->threadNum;
+ pThread->taos = taos_connect(_args->host, _args->user, _args->password, _args->database, tsDnodeShellPort);
if (pThread->taos == NULL) {
fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, "null taos"/*taos_errstr(pThread->taos)*/);
exit(0);
@@ -256,18 +256,18 @@ static void shellRunImportThreads(SShellArguments* args)
}
}
- for (int t = 0; t < args->threadNum; ++t) {
+ for (int t = 0; t < _args->threadNum; ++t) {
pthread_join(threadObj[t].threadID, NULL);
}
- for (int t = 0; t < args->threadNum; ++t) {
+ for (int t = 0; t < _args->threadNum; ++t) {
taos_close(threadObj[t].taos);
}
free(threadObj);
}
-void source_dir(TAOS* con, SShellArguments* args) {
- shellGetDirectoryFileList(args->dir);
+void source_dir(TAOS* con, SShellArguments* _args) {
+ shellGetDirectoryFileList(_args->dir);
int64_t start = taosGetTimestampMs();
if (shellTablesSQLFile[0] != 0) {
@@ -276,7 +276,7 @@ void source_dir(TAOS* con, SShellArguments* args) {
fprintf(stdout, "import %s finished, time spent %.2f seconds\n", shellTablesSQLFile, (end - start) / 1000.0);
}
- shellRunImportThreads(args);
+ shellRunImportThreads(_args);
int64_t end = taosGetTimestampMs();
- fprintf(stdout, "import %s finished, time spent %.2f seconds\n", args->dir, (end - start) / 1000.0);
+ fprintf(stdout, "import %s finished, time spent %.2f seconds\n", _args->dir, (end - start) / 1000.0);
}
diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c
index 37050c416c..4eead252fd 100644
--- a/src/kit/shell/src/shellLinux.c
+++ b/src/kit/shell/src/shellLinux.c
@@ -415,7 +415,7 @@ void set_terminal_mode() {
}
}
-void get_history_path(char *history) { snprintf(history, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), HISTORY_FILE); }
+void get_history_path(char *_history) { snprintf(_history, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), HISTORY_FILE); }
void clearScreen(int ecmd_pos, int cursor_pos) {
struct winsize w;
diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt
index 4e38a8842e..5f75be0e19 100644
--- a/src/kit/taosdemo/CMakeLists.txt
+++ b/src/kit/taosdemo/CMakeLists.txt
@@ -10,7 +10,11 @@ IF (GIT_FOUND)
COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1)
- STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1)
+ IF ("${TAOSDEMO_COMMIT_SHA1}" STREQUAL "")
+ MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1})
+ ELSE ()
+ STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1)
+ ENDIF ()
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
diff --git a/src/kit/taosdemo/async-sub.json b/src/kit/taosdemo/async-sub.json
new file mode 100644
index 0000000000..a30a1be45c
--- /dev/null
+++ b/src/kit/taosdemo/async-sub.json
@@ -0,0 +1,41 @@
+{
+ "filetype": "subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "test",
+ "specified_table_query": {
+ "concurrent": 1,
+ "mode": "async",
+ "interval": 1000,
+ "restart": "yes",
+ "keepProgress": "yes",
+ "resubAfterConsume": 10,
+ "sqls": [
+ {
+ "sql": "select col1 from meters where col1 > 1;",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select col2 from meters where col2 > 1;",
+ "result": "./subscribe_res2.txt"
+ }
+ ]
+ },
+ "super_table_query": {
+ "stblname": "meters",
+ "threads": 1,
+ "mode": "sync",
+ "interval": 1000,
+ "restart": "yes",
+ "keepProgress": "yes",
+ "sqls": [
+ {
+ "sql": "select col1 from xxxx where col1 > 10;",
+ "result": "./subscribe_res1.txt"
+ }
+ ]
+ }
+}
diff --git a/src/kit/taosdemo/subscribe.json b/src/kit/taosdemo/subscribe.json
index fd33a2e2e2..18846c8116 100644
--- a/src/kit/taosdemo/subscribe.json
+++ b/src/kit/taosdemo/subscribe.json
@@ -5,13 +5,33 @@
"port": 6030,
"user": "root",
"password": "taosdata",
- "databases": "dbx",
- "specified_table_query":
- {"concurrent":1, "mode":"sync", "interval":5000, "restart":"yes", "keepProgress":"yes",
- "sqls": [{"sql": "select avg(col1) from stb01 where col1 > 1;", "result": "./subscribe_res0.txt"}]
- },
- "super_table_query":
- {"stblname": "stb", "threads":1, "mode":"sync", "interval":10000, "restart":"yes", "keepProgress":"yes",
- "sqls": [{"sql": "select col1 from xxxx where col1 > 10;", "result": "./subscribe_res1.txt"}]
- }
+ "databases": "test",
+ "specified_table_query": {
+ "concurrent": 1,
+ "mode": "sync",
+ "interval": 1000,
+ "restart": "yes",
+ "keepProgress": "yes",
+ "resubAfterConsume": 10,
+ "sqls": [
+ {
+ "sql": "select avg(col1) from meters where col1 > 1;",
+ "result": "./subscribe_res0.txt"
+ }
+ ]
+ },
+ "super_table_query": {
+ "stblname": "meters",
+ "threads": 1,
+ "mode": "sync",
+ "interval": 1000,
+ "restart": "yes",
+ "keepProgress": "yes",
+ "sqls": [
+ {
+ "sql": "select col1 from xxxx where col1 > 10;",
+ "result": "./subscribe_res1.txt"
+ }
+ ]
+ }
}
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index e6d7fb6a09..ebc1074cba 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -68,20 +68,19 @@ enum TEST_MODE {
INVAID_TEST
};
-enum QUERY_MODE {
- SYNC_QUERY_MODE, // 0
- ASYNC_QUERY_MODE, // 1
- INVALID_MODE
-};
+#define MAX_RECORDS_PER_REQ 32766
+
+#define HEAD_BUFF_LEN 1024*24 // 16*1024 + (192+32)*2 + insert into ..
#define MAX_SQL_SIZE 65536
#define BUFFER_SIZE (65536*2)
+#define COND_BUF_LEN BUFFER_SIZE - 30
#define MAX_USERNAME_SIZE 64
#define MAX_PASSWORD_SIZE 64
#define MAX_DB_NAME_SIZE 64
#define MAX_HOSTNAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
-#define MAX_DATA_SIZE (16*1024)
+#define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space
#define MAX_NUM_DATATYPE 10
#define OPT_ABORT 1 /* –abort */
#define STRING_LEN 60000
@@ -117,17 +116,30 @@ typedef enum TALBE_EXISTS_EN {
TBL_EXISTS_BUTT
} TALBE_EXISTS_EN;
-enum MODE {
- SYNC,
- ASYNC,
+enum enumSYNC_MODE {
+ SYNC_MODE,
+ ASYNC_MODE,
MODE_BUT
};
-typedef enum enum_INSERT_MODE {
+enum enum_TAOS_INTERFACE {
+ TAOSC_IFACE,
+ REST_IFACE,
+ STMT_IFACE,
+ INTERFACE_BUT
+};
+
+typedef enum enumQUERY_CLASS {
+ SPECIFIED_CLASS,
+ STABLE_CLASS,
+ CLASS_BUT
+} QUERY_CLASS;
+
+typedef enum enum_PROGRESSIVE_OR_INTERLACE {
PROGRESSIVE_INSERT_MODE,
INTERLACE_INSERT_MODE,
INVALID_INSERT_MODE
-} INSERT_MODE;
+} PROG_OR_INTERLACE_MODE;
typedef enum enumQUERY_TYPE {
NO_INSERT_TYPE,
@@ -188,9 +200,10 @@ typedef struct {
/* Used by main to communicate with parse_opt. */
typedef struct SArguments_S {
char * metaFile;
- int test_mode;
+ uint32_t test_mode;
char * host;
uint16_t port;
+ uint16_t iface;
char * user;
char * password;
char * database;
@@ -205,31 +218,31 @@ typedef struct SArguments_S {
bool verbose_print;
bool performance_print;
char * output_file;
- int query_mode;
+ bool async_mode;
char * datatype[MAX_NUM_DATATYPE + 1];
- int len_of_binary;
- int num_of_CPR;
- int num_of_threads;
- int64_t insert_interval;
+ uint32_t len_of_binary;
+ uint32_t num_of_CPR;
+ uint32_t num_of_threads;
+ uint64_t insert_interval;
int64_t query_times;
- int64_t interlace_rows;
- int64_t num_of_RPR; // num_of_records_per_req
- int64_t max_sql_len;
+ uint64_t interlace_rows;
+ uint64_t num_of_RPR; // num_of_records_per_req
+ uint64_t max_sql_len;
int64_t num_of_tables;
int64_t num_of_DPT;
int abort;
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
- int method_of_delete;
+ uint32_t method_of_delete;
char ** arg_list;
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
} SArguments;
typedef struct SColumn_S {
- char field[TSDB_COL_NAME_LEN + 1];
- char dataType[MAX_TB_NAME_SIZE];
- int dataLen;
+ char field[TSDB_COL_NAME_LEN + 1];
+ char dataType[MAX_TB_NAME_SIZE];
+ uint32_t dataLen;
char note[128];
} StrColumn;
@@ -237,21 +250,21 @@ typedef struct SSuperTable_S {
char sTblName[MAX_TB_NAME_SIZE+1];
int64_t childTblCount;
bool childTblExists; // 0: no, 1: yes
- int64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
- int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
+ uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
+ uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
char childTblPrefix[MAX_TB_NAME_SIZE];
char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample
- char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful
+ uint16_t insertMode; // 0: taosc, 1: rest, 2: stmt
int64_t childTblLimit;
- int64_t childTblOffset;
+ uint64_t childTblOffset;
// int multiThreadWriteOneTbl; // 0: no, 1: yes
- int64_t interlaceRows; //
+ uint64_t interlaceRows; //
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
- int64_t maxSqlLen; //
+ uint64_t maxSqlLen; //
- int64_t insertInterval; // insert interval, will override global insert interval
+ uint64_t insertInterval; // insert interval, will override global insert interval
int64_t insertRows;
int64_t timeStampStep;
char startTimestamp[MAX_TB_NAME_SIZE];
@@ -259,34 +272,34 @@ typedef struct SSuperTable_S {
char sampleFile[MAX_FILE_NAME_LEN+1];
char tagsFile[MAX_FILE_NAME_LEN+1];
- int columnCount;
+ uint32_t columnCount;
StrColumn columns[MAX_COLUMN_COUNT];
- int tagCount;
+ uint32_t tagCount;
StrColumn tags[MAX_TAG_COUNT];
char* childTblName;
char* colsOfCreateChildTable;
- int64_t lenOfOneRow;
- int64_t lenOfTagOfOneRow;
+ uint64_t lenOfOneRow;
+ uint64_t lenOfTagOfOneRow;
char* sampleDataBuf;
//int sampleRowCount;
//int sampleUsePos;
- int tagSource; // 0: rand, 1: tag sample
+ uint32_t tagSource; // 0: rand, 1: tag sample
char* tagDataBuf;
- int tagSampleCount;
- int tagUsePos;
+ uint32_t tagSampleCount;
+ uint32_t tagUsePos;
// statistics
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
} SSuperTable;
typedef struct {
char name[TSDB_DB_NAME_LEN + 1];
char create_time[32];
- int32_t ntables;
+ int64_t ntables;
int32_t vgroups;
int16_t replica;
int16_t quorum;
@@ -307,8 +320,8 @@ typedef struct {
typedef struct SDbCfg_S {
// int maxtablesPerVnode;
- int minRows;
- int maxRows;
+ uint32_t minRows; // 0 means default
+ uint32_t maxRows; // 0 means default
int comp;
int walLevel;
int cacheLast;
@@ -327,13 +340,15 @@ typedef struct SDataBase_S {
char dbName[MAX_DB_NAME_SIZE];
bool drop; // 0: use exists, 1: if exists, drop then new create
SDbCfg dbCfg;
- int64_t superTblCount;
+ uint64_t superTblCount;
SSuperTable superTbls[MAX_SUPER_TABLE_COUNT];
} SDataBase;
typedef struct SDbs_S {
char cfgDir[MAX_FILE_NAME_LEN+1];
char host[MAX_HOSTNAME_SIZE];
+ struct sockaddr_in serv_addr;
+
uint16_t port;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
@@ -341,106 +356,112 @@ typedef struct SDbs_S {
bool use_metric;
bool insert_only;
bool do_aggreFunc;
- bool queryMode;
+ bool asyncMode;
- int threadCount;
- int threadCountByCreateTbl;
- int dbCount;
+ uint32_t threadCount;
+ uint32_t threadCountByCreateTbl;
+ uint32_t dbCount;
SDataBase db[MAX_DB_COUNT];
// statistics
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
} SDbs;
typedef struct SpecifiedQueryInfo_S {
- int64_t queryInterval; // 0: unlimit > 0 loop/s
- int64_t concurrent;
- int64_t sqlCount;
- int mode; // 0: sync, 1: async
- int64_t subscribeInterval; // ms
- int64_t queryTimes;
+ uint64_t queryInterval; // 0: unlimit > 0 loop/s
+ uint64_t concurrent;
+ uint64_t sqlCount;
+ uint32_t asyncMode; // 0: sync, 1: async
+ uint64_t subscribeInterval; // ms
+ uint64_t queryTimes;
int subscribeRestart;
int subscribeKeepProgress;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
+ int resubAfterConsume[MAX_QUERY_SQL_COUNT];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
- int64_t totalQueried;
+ uint64_t totalQueried;
} SpecifiedQueryInfo;
typedef struct SuperQueryInfo_S {
char sTblName[MAX_TB_NAME_SIZE+1];
- int64_t queryInterval; // 0: unlimit > 0 loop/s
- int threadCnt;
- int mode; // 0: sync, 1: async
- int64_t subscribeInterval; // ms
+ uint64_t queryInterval; // 0: unlimit > 0 loop/s
+ uint32_t threadCnt;
+ uint32_t asyncMode; // 0: sync, 1: async
+ uint64_t subscribeInterval; // ms
int subscribeRestart;
int subscribeKeepProgress;
- int64_t queryTimes;
+ uint64_t queryTimes;
int64_t childTblCount;
char childTblPrefix[MAX_TB_NAME_SIZE];
- int64_t sqlCount;
+ uint64_t sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
+ int resubAfterConsume[MAX_QUERY_SQL_COUNT];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
char* childTblName;
- int64_t totalQueried;
+ uint64_t totalQueried;
} SuperQueryInfo;
typedef struct SQueryMetaInfo_S {
char cfgDir[MAX_FILE_NAME_LEN+1];
char host[MAX_HOSTNAME_SIZE];
uint16_t port;
+ struct sockaddr_in serv_addr;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
char dbName[MAX_DB_NAME_SIZE+1];
- char queryMode[MAX_TB_NAME_SIZE]; // taosc, restful
+ char queryMode[MAX_TB_NAME_SIZE]; // taosc, rest
SpecifiedQueryInfo specifiedQueryInfo;
- SuperQueryInfo superQueryInfo;
- int64_t totalQueried;
+ SuperQueryInfo superQueryInfo;
+ uint64_t totalQueried;
} SQueryMetaInfo;
typedef struct SThreadInfo_S {
- TAOS *taos;
- int threadID;
- char db_name[MAX_DB_NAME_SIZE+1];
- uint32_t time_precision;
- char fp[4096];
- char tb_prefix[MAX_TB_NAME_SIZE];
- int64_t start_table_from;
- int64_t end_table_to;
- int64_t ntables;
- int64_t data_of_rate;
- int64_t start_time;
- char* cols;
- bool use_metric;
+ TAOS * taos;
+ TAOS_STMT *stmt;
+ int threadID;
+ char db_name[MAX_DB_NAME_SIZE+1];
+ uint32_t time_precision;
+ char fp[4096];
+ char tb_prefix[MAX_TB_NAME_SIZE];
+ uint64_t start_table_from;
+ uint64_t end_table_to;
+ int64_t ntables;
+ uint64_t data_of_rate;
+ int64_t start_time;
+ char* cols;
+ bool use_metric;
SSuperTable* superTblInfo;
+ char *buffer; // sql cmd buffer
// for async insert
- tsem_t lock_sem;
- int64_t counter;
+ tsem_t lock_sem;
+ int64_t counter;
uint64_t st;
uint64_t et;
- int64_t lastTs;
+ uint64_t lastTs;
// sample data
- int64_t samplePos;
+ int64_t samplePos;
// statistics
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
// insert delay statistics
- int64_t cntDelay;
- int64_t totalDelay;
- int64_t avgDelay;
- int64_t maxDelay;
- int64_t minDelay;
+ uint64_t cntDelay;
+ uint64_t totalDelay;
+ uint64_t avgDelay;
+ uint64_t maxDelay;
+ uint64_t minDelay;
+
+ // seq of query or subscribe
+ uint64_t querySeq; // sequence number of sql command
- // query
- int64_t querySeq; // sequence number of sql command
} threadInfo;
#ifdef WINDOWS
@@ -519,6 +540,8 @@ static int taosRandom()
static int createDatabasesAndStables();
static void createChildTables();
static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet);
+static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
+ char* sqlstr, char *resultFile);
/* ************ Global variables ************ */
@@ -534,6 +557,7 @@ SArguments g_args = {
0, // test_mode
"127.0.0.1", // host
6030, // port
+ TAOSC_IFACE, // iface
"root", // user
#ifdef _TD_POWER_
"powerdb", // password
@@ -566,7 +590,7 @@ SArguments g_args = {
1, // query_times
0, // interlace_rows;
30000, // num_of_RPR
- 1024000, // max_sql_len
+ (1024*1024), // max_sql_len
10000, // num_of_tables
10000, // num_of_DPT
0, // abort
@@ -579,7 +603,7 @@ SArguments g_args = {
static SDbs g_Dbs;
-static int g_totalChildTables = 0;
+static int64_t g_totalChildTables = 0;
static SQueryMetaInfo g_queryInfo;
static FILE * g_fpOfInsertResult = NULL;
@@ -650,6 +674,8 @@ static void printHelp() {
"The host to connect to TDengine. Default is localhost.");
printf("%s%s%s%s\n", indent, "-p", indent,
"The TCP/IP port number to use for the connection. Default is 0.");
+ printf("%s%s%s%s\n", indent, "-I", indent,
+ "The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'.");
printf("%s%s%s%s\n", indent, "-d", indent,
"Destination database. Default is 'test'.");
printf("%s%s%s%s\n", indent, "-a", indent,
@@ -663,11 +689,11 @@ static void printHelp() {
printf("%s%s%s%s\n", indent, "-q", indent,
"Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC.");
printf("%s%s%s%s\n", indent, "-b", indent,
- "The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP.");
+ "The data_type of columns, default: INT,INT,INT,INT.");
printf("%s%s%s%s\n", indent, "-w", indent,
"The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
printf("%s%s%s%s\n", indent, "-l", indent,
- "The number of columns per record. Default is 10.");
+ "The number of columns per record. Default is 4.");
printf("%s%s%s%s\n", indent, "-T", indent,
"The number of threads. Default is 10.");
printf("%s%s%s%s\n", indent, "-i", indent,
@@ -722,8 +748,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
errorPrint("%s", "\n\t-c need a valid path following!\n");
exit(EXIT_FAILURE);
}
- tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
-
+ tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
} else if (strcmp(argv[i], "-h") == 0) {
if (argc == i+1) {
printHelp();
@@ -739,6 +764,23 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
exit(EXIT_FAILURE);
}
arguments->port = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-I") == 0) {
+ if (argc == i+1) {
+ printHelp();
+ errorPrint("%s", "\n\t-I need a valid string following!\n");
+ exit(EXIT_FAILURE);
+ }
+ ++i;
+ if (0 == strcasecmp(argv[i], "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp(argv[i], "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp(argv[i], "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorPrint("%s", "\n\t-I need a valid string following!\n");
+ exit(EXIT_FAILURE);
+ }
} else if (strcmp(argv[i], "-u") == 0) {
if (argc == i+1) {
printHelp();
@@ -768,16 +810,16 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
arguments->sqlFile = argv[++i];
} else if (strcmp(argv[i], "-q") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, 1: ASYNC. Default is SYNC.\n");
exit(EXIT_FAILURE);
}
- arguments->query_mode = atoi(argv[++i]);
+ arguments->async_mode = atoi(argv[++i]);
} else if (strcmp(argv[i], "-T") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-T need a number following!\n");
exit(EXIT_FAILURE);
@@ -792,24 +834,24 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
arguments->insert_interval = atoi(argv[++i]);
} else if (strcmp(argv[i], "-qt") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-qt need a number following!\n");
exit(EXIT_FAILURE);
}
arguments->query_times = atoi(argv[++i]);
} else if (strcmp(argv[i], "-B") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-B need a number following!\n");
exit(EXIT_FAILURE);
}
arguments->interlace_rows = atoi(argv[++i]);
} else if (strcmp(argv[i], "-r") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-r need a number following!\n");
exit(EXIT_FAILURE);
@@ -847,6 +889,11 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
arguments->num_of_CPR = atoi(argv[++i]);
} else if (strcmp(argv[i], "-b") == 0) {
+ if (argc == i+1) {
+ printHelp();
+ errorPrint("%s", "\n\t-b need valid string following!\n");
+ exit(EXIT_FAILURE);
+ }
sptr = arguments->datatype;
++i;
if (strstr(argv[i], ",") == NULL) {
@@ -964,9 +1011,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
} else if (strcmp(argv[i], "-D") == 0) {
arguments->method_of_delete = atoi(argv[++i]);
- if (arguments->method_of_delete < 0
- || arguments->method_of_delete > 3) {
- arguments->method_of_delete = 0;
+ if (arguments->method_of_delete > 3) {
+ errorPrint("%s", "\n\t-D need a valud (0~3) number following!\n");
+ exit(EXIT_FAILURE);
}
} else if ((strcmp(argv[i], "--version") == 0) ||
(strcmp(argv[i], "-V") == 0)){
@@ -991,7 +1038,8 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
arguments->port );
printf("# User: %s\n", arguments->user);
printf("# Password: %s\n", arguments->password);
- printf("# Use metric: %s\n", arguments->use_metric ? "true" : "false");
+ printf("# Use metric: %s\n",
+ arguments->use_metric ? "true" : "false");
if (*(arguments->datatype)) {
printf("# Specified data type: ");
for (int i = 0; i < MAX_NUM_DATATYPE; i++)
@@ -1001,11 +1049,11 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
break;
printf("\n");
}
- printf("# Insertion interval: %"PRId64"\n",
+ printf("# Insertion interval: %"PRIu64"\n",
arguments->insert_interval);
- printf("# Number of records per req: %"PRId64"\n",
+ printf("# Number of records per req: %"PRIu64"\n",
arguments->num_of_RPR);
- printf("# Max SQL length: %"PRId64"\n",
+ printf("# Max SQL length: %"PRIu64"\n",
arguments->max_sql_len);
printf("# Length of Binary: %d\n", arguments->len_of_binary);
printf("# Number of Threads: %d\n", arguments->num_of_threads);
@@ -1033,8 +1081,6 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
static bool getInfoFromJsonFile(char* file);
-//static int generateOneRowDataForStb(SSuperTable* stbInfo);
-//static int getDataIntoMemForStb(SSuperTable* stbInfo);
static void init_rand_data();
static void tmfclose(FILE *fp) {
if (NULL != fp) {
@@ -1053,7 +1099,7 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
TAOS_RES *res = NULL;
int32_t code = -1;
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < 5 /* retry */; i++) {
if (NULL != res) {
taos_free_result(res);
res = NULL;
@@ -1069,7 +1115,7 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
if (code != 0) {
if (!quiet) {
debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
- errorPrint("Failed to run %s, reason: %s\n", command, taos_errstr(res));
+ errorPrint("Failed to execute %s, reason: %s\n", command, taos_errstr(res));
}
taos_free_result(res);
//taos_close(taos);
@@ -1086,27 +1132,33 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
return 0;
}
-static void getResult(TAOS_RES *res, char* resultFileName) {
+static void appendResultBufToFile(char *resultBuf, char *resultFile)
+{
+  // No result file configured: nothing to append. Returning here avoids
+  // the NULL-stream fprintf the unconditional write below would perform.
+  if (resultFile[0] == 0) {
+    return;
+  }
+  FILE *fp = fopen(resultFile, "at");
+  if (fp == NULL) {
+    errorPrint(
+        "%s() LN%d, failed to open result file: %s, result will not save to file\n",
+        __func__, __LINE__, resultFile);
+    return;
+  }
+  fprintf(fp, "%s", resultBuf);
+  tmfclose(fp);
+}
+
+static void appendResultToFile(TAOS_RES *res, char* resultFile) {
TAOS_ROW row = NULL;
int num_rows = 0;
int num_fields = taos_field_count(res);
TAOS_FIELD *fields = taos_fetch_fields(res);
- FILE *fp = NULL;
- if (resultFileName[0] != 0) {
- fp = fopen(resultFileName, "at");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open result file: %s, result will not save to file\n",
- __func__, __LINE__, resultFileName);
- }
- }
-
char* databuf = (char*) calloc(1, 100*1024*1024);
if (databuf == NULL) {
errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
__func__, __LINE__);
- if (fp)
- fclose(fp);
return ;
}
@@ -1116,7 +1168,7 @@ static void getResult(TAOS_RES *res, char* resultFileName) {
// fetch the records row by row
while((row = taos_fetch_row(res))) {
if (totalLen >= 100*1024*1024 - 32000) {
- if (fp) fprintf(fp, "%s", databuf);
+ appendResultBufToFile(databuf, resultFile);
totalLen = 0;
memset(databuf, 0, 100*1024*1024);
}
@@ -1128,22 +1180,42 @@ static void getResult(TAOS_RES *res, char* resultFileName) {
totalLen += len;
}
- if (fp) fprintf(fp, "%s", databuf);
- tmfclose(fp);
+ verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n",
+ __func__, __LINE__, databuf, resultFile);
+ appendResultBufToFile(databuf, resultFile);
free(databuf);
}
-static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) {
- TAOS_RES *res = taos_query(taos, command);
- if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- return;
- }
+static void selectAndGetResult(
+ threadInfo *pThreadInfo, char *command)
+{
+ if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) {
+ TAOS_RES *res = taos_query(pThreadInfo->taos, command);
+ if (res == NULL || taos_errno(res) != 0) {
+ errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n",
+ __func__, __LINE__, command, taos_errstr(res));
+ taos_free_result(res);
+ return;
+ }
- getResult(res, resultFileName);
- taos_free_result(res);
+ if ((strlen(pThreadInfo->fp))) {
+ appendResultToFile(res, pThreadInfo->fp);
+ }
+ taos_free_result(res);
+
+ } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
+ int retCode = postProceSql(
+ g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port,
+ command,
+ pThreadInfo->fp);
+ if (0 != retCode) {
+ printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
+ }
+
+ } else {
+ errorPrint("%s() LN%d, unknown query mode: %s\n",
+ __func__, __LINE__, g_queryInfo.queryMode);
+ }
}
static int32_t rand_bool(){
@@ -1188,13 +1260,31 @@ static float rand_float(){
return randfloat[cursor];
}
+#if 0
+static const char charNum[] = "0123456789";
+
+static void nonrand_string(char *, int) __attribute__ ((unused)); // reserve for debugging purpose
+static void nonrand_string(char *str, int size)
+{
+ str[0] = 0;
+ if (size > 0) {
+ int n;
+ for (n = 0; n < size; n++) {
+ str[n] = charNum[n % 10];
+ }
+ str[n] = 0;
+ }
+}
+#endif
+
static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";
+
static void rand_string(char *str, int size) {
str[0] = 0;
if (size > 0) {
//--size;
int n;
- for (n = 0; n < size - 1; n++) {
+ for (n = 0; n < size; n++) {
int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1);
str[n] = charset[key];
}
@@ -1242,6 +1332,8 @@ static void init_rand_data() {
static int printfInsertMeta() {
SHOW_PARSE_RESULT_START();
+ printf("interface: \033[33m%s\033[0m\n",
+ (g_args.iface==TAOSC_IFACE)?"taosc":(g_args.iface==REST_IFACE)?"rest":"stmt");
printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port);
printf("user: \033[33m%s\033[0m\n", g_Dbs.user);
printf("password: \033[33m%s\033[0m\n", g_Dbs.password);
@@ -1249,11 +1341,11 @@ static int printfInsertMeta() {
printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl);
- printf("top insert interval: \033[33m%"PRId64"\033[0m\n",
+ printf("top insert interval: \033[33m%"PRIu64"\033[0m\n",
g_args.insert_interval);
- printf("number of records per req: \033[33m%"PRId64"\033[0m\n",
+ printf("number of records per req: \033[33m%"PRIu64"\033[0m\n",
g_args.num_of_RPR);
- printf("max sql length: \033[33m%"PRId64"\033[0m\n",
+ printf("max sql length: \033[33m%"PRIu64"\033[0m\n",
g_args.max_sql_len);
printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
@@ -1315,10 +1407,10 @@ static int printfInsertMeta() {
}
}
- printf(" super table count: \033[33m%"PRId64"\033[0m\n",
+ printf(" super table count: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTblCount);
- for (int64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- printf(" super table[\033[33m%"PRId64"\033[0m]:\n", j);
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j);
printf(" stbName: \033[33m%s\033[0m\n",
g_Dbs.db[i].superTbls[j].sTblName);
@@ -1346,13 +1438,14 @@ static int printfInsertMeta() {
printf(" dataSource: \033[33m%s\033[0m\n",
g_Dbs.db[i].superTbls[j].dataSource);
printf(" insertMode: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].insertMode);
+ (g_Dbs.db[i].superTbls[j].insertMode==TAOSC_IFACE)?"taosc":
+ (g_Dbs.db[i].superTbls[j].insertMode==REST_IFACE)?"rest":"stmt");
if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) {
printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblLimit);
}
- if (g_Dbs.db[i].superTbls[j].childTblOffset >= 0) {
- printf(" childTblOffset: \033[33m%"PRId64"\033[0m\n",
+ if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) {
+ printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblOffset);
}
printf(" insertRows: \033[33m%"PRId64"\033[0m\n",
@@ -1364,11 +1457,11 @@ static int printfInsertMeta() {
printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
}
*/
- printf(" interlaceRows: \033[33m%"PRId64"\033[0m\n",
+ printf(" interlaceRows: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
- printf(" stable insert interval: \033[33m%"PRId64"\033[0m\n",
+ printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].insertInterval);
}
@@ -1376,7 +1469,7 @@ static int printfInsertMeta() {
g_Dbs.db[i].superTbls[j].disorderRange);
printf(" disorderRatio: \033[33m%d\033[0m\n",
g_Dbs.db[i].superTbls[j].disorderRatio);
- printf(" maxSqlLen: \033[33m%"PRId64"\033[0m\n",
+ printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].maxSqlLen);
printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n",
g_Dbs.db[i].superTbls[j].timeStampStep);
@@ -1442,8 +1535,8 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
- fprintf(fp, "number of records per req: %"PRId64"\n", g_args.num_of_RPR);
- fprintf(fp, "max sql length: %"PRId64"\n", g_args.max_sql_len);
+ fprintf(fp, "number of records per req: %"PRIu64"\n", g_args.num_of_RPR);
+ fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len);
fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
for (int i = 0; i < g_Dbs.dbCount; i++) {
@@ -1500,9 +1593,9 @@ static void printfInsertMetaToFile(FILE* fp) {
}
}
- fprintf(fp, " super table count: %"PRId64"\n", g_Dbs.db[i].superTblCount);
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- fprintf(fp, " super table[%d]:\n", j);
+ fprintf(fp, " super table count: %"PRIu64"\n", g_Dbs.db[i].superTblCount);
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ fprintf(fp, " super table[%"PRIu64"]:\n", j);
fprintf(fp, " stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName);
@@ -1529,13 +1622,14 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " dataSource: %s\n",
g_Dbs.db[i].superTbls[j].dataSource);
fprintf(fp, " insertMode: %s\n",
- g_Dbs.db[i].superTbls[j].insertMode);
+ (g_Dbs.db[i].superTbls[j].insertMode==TAOSC_IFACE)?"taosc":
+ (g_Dbs.db[i].superTbls[j].insertMode==REST_IFACE)?"rest":"stmt");
fprintf(fp, " insertRows: %"PRId64"\n",
g_Dbs.db[i].superTbls[j].insertRows);
- fprintf(fp, " interlace rows: %"PRId64"\n",
+ fprintf(fp, " interlace rows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
- fprintf(fp, " stable insert interval: %"PRId64"\n",
+ fprintf(fp, " stable insert interval: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].insertInterval);
}
/*
@@ -1545,22 +1639,28 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " multiThreadWriteOneTbl: yes\n");
}
*/
- fprintf(fp, " interlaceRows: %"PRId64"\n",
+ fprintf(fp, " interlaceRows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
- fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange);
- fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio);
- fprintf(fp, " maxSqlLen: %"PRId64"\n",
+ fprintf(fp, " disorderRange: %d\n",
+ g_Dbs.db[i].superTbls[j].disorderRange);
+ fprintf(fp, " disorderRatio: %d\n",
+ g_Dbs.db[i].superTbls[j].disorderRatio);
+ fprintf(fp, " maxSqlLen: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].maxSqlLen);
fprintf(fp, " timeStampStep: %"PRId64"\n",
g_Dbs.db[i].superTbls[j].timeStampStep);
fprintf(fp, " startTimestamp: %s\n",
g_Dbs.db[i].superTbls[j].startTimestamp);
- fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat);
- fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile);
- fprintf(fp, " tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile);
+ fprintf(fp, " sampleFormat: %s\n",
+ g_Dbs.db[i].superTbls[j].sampleFormat);
+ fprintf(fp, " sampleFile: %s\n",
+ g_Dbs.db[i].superTbls[j].sampleFile);
+ fprintf(fp, " tagsFile: %s\n",
+ g_Dbs.db[i].superTbls[j].tagsFile);
- fprintf(fp, " columnCount: %d\n ", g_Dbs.db[i].superTbls[j].columnCount);
+ fprintf(fp, " columnCount: %d\n ",
+ g_Dbs.db[i].superTbls[j].columnCount);
for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
//printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
if ((0 == strncasecmp(
@@ -1572,7 +1672,8 @@ static void printfInsertMetaToFile(FILE* fp) {
g_Dbs.db[i].superTbls[j].columns[k].dataType,
g_Dbs.db[i].superTbls[j].columns[k].dataLen);
} else {
- fprintf(fp, "column[%d]:%s ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType);
+ fprintf(fp, "column[%d]:%s ",
+ k, g_Dbs.db[i].superTbls[j].columns[k].dataType);
}
}
fprintf(fp, "\n");
@@ -1609,64 +1710,68 @@ static void printfQueryMeta() {
printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName);
printf("\n");
- printf("specified table query info: \n");
- printf("query interval: \033[33m%"PRId64" ms\033[0m\n",
- g_queryInfo.specifiedQueryInfo.queryInterval);
- printf("top query times:\033[33m%"PRId64"\033[0m\n", g_args.query_times);
- printf("concurrent: \033[33m%"PRId64"\033[0m\n",
- g_queryInfo.specifiedQueryInfo.concurrent);
- printf("sqlCount: \033[33m%"PRId64"\033[0m\n",
+
+ if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) {
+ printf("specified table query info: \n");
+ printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.sqlCount);
- printf("specified tbl query times:\n");
- printf(" \033[33m%"PRId64"\033[0m\n",
+ if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) {
+ printf("specified tbl query times:\n");
+ printf(" \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.queryTimes);
-
- if (SUBSCRIBE_TEST == g_args.test_mode) {
- printf("mod: \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.mode);
- printf("interval: \033[33m%"PRId64"\033[0m\n",
+ printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.queryInterval);
+ printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
+ printf("concurrent: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.concurrent);
+ printf("mod: \033[33m%s\033[0m\n",
+ (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync");
+ printf("interval: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeInterval);
- printf("restart: \033[33m%d\033[0m\n",
+ printf("restart: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeRestart);
- printf("keepProgress: \033[33m%d\033[0m\n",
+ printf("keepProgress: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
- }
- for (int64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- printf(" sql[%"PRId64"]: \033[33m%s\033[0m\n",
- i, g_queryInfo.specifiedQueryInfo.sql[i]);
- }
- printf("\n");
- printf("super table query info:\n");
- printf("query interval: \033[33m%"PRId64"\033[0m\n",
- g_queryInfo.superQueryInfo.queryInterval);
- printf("threadCnt: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.threadCnt);
- printf("childTblCount: \033[33m%"PRId64"\033[0m\n",
- g_queryInfo.superQueryInfo.childTblCount);
- printf("stable name: \033[33m%s\033[0m\n",
- g_queryInfo.superQueryInfo.sTblName);
- printf("stb query times:\033[33m%"PRId64"\033[0m\n",
- g_queryInfo.superQueryInfo.queryTimes);
+ for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n",
+ i, g_queryInfo.specifiedQueryInfo.sql[i]);
+ }
+ printf("\n");
+ }
- if (SUBSCRIBE_TEST == g_args.test_mode) {
- printf("mod: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.mode);
- printf("interval: \033[33m%"PRId64"\033[0m\n",
- g_queryInfo.superQueryInfo.subscribeInterval);
- printf("restart: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.subscribeRestart);
- printf("keepProgress: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.subscribeKeepProgress);
- }
-
- printf("sqlCount: \033[33m%"PRId64"\033[0m\n",
+ printf("super table query info:\n");
+ printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.sqlCount);
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- printf(" sql[%d]: \033[33m%s\033[0m\n",
- i, g_queryInfo.superQueryInfo.sql[i]);
+
+ if (g_queryInfo.superQueryInfo.sqlCount > 0) {
+ printf("query interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.queryInterval);
+ printf("threadCnt: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.threadCnt);
+ printf("childTblCount: \033[33m%"PRId64"\033[0m\n",
+ g_queryInfo.superQueryInfo.childTblCount);
+ printf("stable name: \033[33m%s\033[0m\n",
+ g_queryInfo.superQueryInfo.sTblName);
+ printf("stb query times:\033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.queryTimes);
+
+ printf("mod: \033[33m%s\033[0m\n",
+ (g_queryInfo.superQueryInfo.asyncMode)?"async":"sync");
+ printf("interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeInterval);
+ printf("restart: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeRestart);
+ printf("keepProgress: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeKeepProgress);
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ printf(" sql[%d]: \033[33m%s\033[0m\n",
+ i, g_queryInfo.superQueryInfo.sql[i]);
+ }
+ printf("\n");
+ }
}
- printf("\n");
SHOW_PARSE_RESULT_END();
}
@@ -1826,7 +1931,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
formatTimestamp(dbInfos[count]->create_time,
*(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX],
TSDB_TIME_PRECISION_MILLI);
- dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
+ dbInfos[count]->ntables = *((int64_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
@@ -1877,7 +1982,7 @@ static void printfDbInfoForQueryToFile(
fprintf(fp, "================ database[%d] ================\n", index);
fprintf(fp, "name: %s\n", dbInfos->name);
fprintf(fp, "created_time: %s\n", dbInfos->create_time);
- fprintf(fp, "ntables: %d\n", dbInfos->ntables);
+ fprintf(fp, "ntables: %"PRId64"\n", dbInfos->ntables);
fprintf(fp, "vgroups: %d\n", dbInfos->vgroups);
fprintf(fp, "replica: %d\n", dbInfos->replica);
fprintf(fp, "quorum: %d\n", dbInfos->quorum);
@@ -1914,13 +2019,13 @@ static void printfQuerySystemInfo(TAOS * taos) {
// show variables
res = taos_query(taos, "show variables;");
- //getResult(res, filename);
+ //appendResultToFile(res, filename);
xDumpResultToFile(filename, res);
// show dnodes
res = taos_query(taos, "show dnodes;");
xDumpResultToFile(filename, res);
- //getResult(res, filename);
+ //appendResultToFile(res, filename);
// show databases
res = taos_query(taos, "show databases;");
@@ -1955,14 +2060,13 @@ static void printfQuerySystemInfo(TAOS * taos) {
free(dbInfos);
}
-static int postProceSql(char* host, uint16_t port, char* sqlstr)
+static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
+ char* sqlstr, char *resultFile)
{
char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s";
char *url = "/rest/sql";
- struct hostent *server;
- struct sockaddr_in serv_addr;
int bytes, sent, received, req_str_len, resp_len;
char *request_buf;
char response_buf[RESP_BUF_LEN];
@@ -2011,27 +2115,7 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr)
ERROR_EXIT("ERROR opening socket");
}
- server = gethostbyname(host);
- if (server == NULL) {
- free(request_buf);
- ERROR_EXIT("ERROR, no such host");
- }
-
- debugPrint("h_name: %s\nh_addretype: %s\nh_length: %d\n",
- server->h_name,
- (server->h_addrtype == AF_INET)?"ipv4":"ipv6",
- server->h_length);
-
- memset(&serv_addr, 0, sizeof(serv_addr));
- serv_addr.sin_family = AF_INET;
- serv_addr.sin_port = htons(rest_port);
-#ifdef WINDOWS
- serv_addr.sin_addr.s_addr = inet_addr(host);
-#else
- memcpy(&serv_addr.sin_addr.s_addr,server->h_addr,server->h_length);
-#endif
-
- int retConn = connect(sockfd,(struct sockaddr *)&serv_addr,sizeof(serv_addr));
+ int retConn = connect(sockfd, (struct sockaddr *)pServAddr, sizeof(struct sockaddr));
debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn);
if (retConn < 0) {
free(request_buf);
@@ -2113,6 +2197,10 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr)
response_buf[RESP_BUF_LEN - 1] = '\0';
printf("Response:\n%s\n", response_buf);
+ if (resultFile) {
+ appendResultBufToFile(response_buf, resultFile);
+ }
+
free(request_buf);
#ifdef WINDOWS
closesocket(sockfd);
@@ -2291,7 +2379,7 @@ static int calcRowLen(SSuperTable* superTbls) {
static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* dbName, char* sTblName, char** childTblNameOfSuperTbl,
- int64_t* childTblCountOfSuperTbl, int64_t limit, int64_t offset) {
+ int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) {
char command[BUFFER_SIZE] = "\0";
char limitBuf[100] = "\0";
@@ -2302,7 +2390,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* childTblName = *childTblNameOfSuperTbl;
if (offset >= 0) {
- snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRId64"",
+ snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"",
limit, offset);
}
@@ -2320,8 +2408,8 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
exit(-1);
}
- int childTblCount = (limit < 0)?10000:limit;
- int count = 0;
+ int64_t childTblCount = (limit < 0)?10000:limit;
+ int64_t count = 0;
if (childTblName == NULL) {
childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
if (NULL == childTblName) {
@@ -2372,7 +2460,7 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName,
childTblNameOfSuperTbl, childTblCountOfSuperTbl,
- -1, -1);
+ -1, 0);
}
static int getSuperTableFromServer(TAOS * taos, char* dbName,
@@ -2708,12 +2796,12 @@ static int createDatabasesAndStables() {
printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName);
}
- debugPrint("%s() LN%d supertbl count:%"PRId64"\n",
+ debugPrint("%s() LN%d supertbl count:%"PRIu64"\n",
__func__, __LINE__, g_Dbs.db[i].superTblCount);
int validStbCount = 0;
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName,
g_Dbs.db[i].superTbls[j].sTblName);
verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command);
@@ -2725,7 +2813,7 @@ static int createDatabasesAndStables() {
&g_Dbs.db[i].superTbls[j]);
if (0 != ret) {
- errorPrint("create super table %d failed!\n\n", j);
+ errorPrint("create super table %"PRIu64" failed!\n\n", j);
continue;
}
}
@@ -2753,7 +2841,7 @@ static void* createTable(void *sarg)
threadInfo *pThreadInfo = (threadInfo *)sarg;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- int64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t lastPrintTime = taosGetTimestampMs();
int buff_len;
buff_len = BUFFER_SIZE / 8;
@@ -2767,15 +2855,15 @@ static void* createTable(void *sarg)
int len = 0;
int batchNum = 0;
- verbosePrint("%s() LN%d: Creating table from %"PRId64" to %"PRId64"\n",
+ verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n",
__func__, __LINE__,
pThreadInfo->start_table_from, pThreadInfo->end_table_to);
- for (int64_t i = pThreadInfo->start_table_from;
+ for (uint64_t i = pThreadInfo->start_table_from;
i <= pThreadInfo->end_table_to; i++) {
if (0 == g_Dbs.use_metric) {
snprintf(buffer, buff_len,
- "create table if not exists %s.%s%"PRId64" %s;",
+ "create table if not exists %s.%s%"PRIu64" %s;",
pThreadInfo->db_name,
g_args.tb_prefix, i,
pThreadInfo->cols);
@@ -2806,7 +2894,7 @@ static void* createTable(void *sarg)
}
len += snprintf(buffer + len,
buff_len - len,
- "if not exists %s.%s%"PRId64" using %s.%s tags %s ",
+ "if not exists %s.%s%"PRIu64" using %s.%s tags %s ",
pThreadInfo->db_name, superTblInfo->childTblPrefix,
i, pThreadInfo->db_name,
superTblInfo->sTblName, tagsValBuf);
@@ -2828,9 +2916,9 @@ static void* createTable(void *sarg)
return NULL;
}
- int64_t currentPrintTime = taosGetTimestampMs();
+ uint64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] already create %"PRId64" - %"PRId64" tables\n",
+ printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n",
pThreadInfo->threadID, pThreadInfo->start_table_from, i);
lastPrintTime = currentPrintTime;
}
@@ -2848,11 +2936,11 @@ static void* createTable(void *sarg)
}
static int startMultiThreadCreateChildTable(
- char* cols, int threads, int64_t startFrom, int64_t ntables,
+ char* cols, int threads, uint64_t startFrom, int64_t ntables,
char* db_name, SSuperTable* superTblInfo) {
pthread_t *pids = malloc(threads * sizeof(pthread_t));
- threadInfo *infos = malloc(threads * sizeof(threadInfo));
+ threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
if ((NULL == pids) || (NULL == infos)) {
printf("malloc failed\n");
@@ -2872,7 +2960,7 @@ static int startMultiThreadCreateChildTable(
int64_t b = 0;
b = ntables % threads;
- for (int64_t i = 0; i < threads; i++) {
+ for (int i = 0; i < threads; i++) {
threadInfo *t_info = infos + i;
t_info->threadID = i;
tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
@@ -2898,7 +2986,7 @@ static int startMultiThreadCreateChildTable(
startFrom = t_info->end_table_to + 1;
t_info->use_metric = true;
t_info->cols = cols;
- t_info->minDelay = INT16_MAX;
+ t_info->minDelay = UINT64_MAX;
pthread_create(pids + i, NULL, createTable, t_info);
}
@@ -2925,7 +3013,7 @@ static void createChildTables() {
if (g_Dbs.use_metric) {
if (g_Dbs.db[i].superTblCount > 0) {
// with super table
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable)
|| (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) {
continue;
@@ -2933,10 +3021,10 @@ static void createChildTables() {
verbosePrint("%s() LN%d: %s\n", __func__, __LINE__,
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
- int startFrom = 0;
+ uint64_t startFrom = 0;
g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
- verbosePrint("%s() LN%d: create %d child tables from %d\n",
+ verbosePrint("%s() LN%d: create %"PRId64" child tables from %"PRIu64"\n",
__func__, __LINE__, g_totalChildTables, startFrom);
startMultiThreadCreateChildTable(
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable,
@@ -3046,10 +3134,12 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
return 0;
}
+#if 0
int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) {
// TODO
return 0;
}
+#endif
/*
Read 10000 lines at most. If more than 10000 lines, continue to read after using
@@ -3092,7 +3182,7 @@ static int readSampleFromCsvFileToMem(
}
if (readLen > superTblInfo->lenOfOneRow) {
- printf("sample row len[%d] overflow define schema len[%"PRId64"], so discard this row\n",
+ printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n",
(int32_t)readLen, superTblInfo->lenOfOneRow);
continue;
}
@@ -3335,7 +3425,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (threads2 && threads2->type == cJSON_Number) {
g_Dbs.threadCountByCreateTbl = threads2->valueint;
} else if (!threads2) {
- g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
+ g_Dbs.threadCountByCreateTbl = 1;
} else {
errorPrint("%s() LN%d, failed to read json, threads2 not found\n",
__func__, __LINE__);
@@ -3344,6 +3434,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval");
if (gInsertInterval && gInsertInterval->type == cJSON_Number) {
+ if (gInsertInterval->valueint <0) {
+ errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_args.insert_interval = gInsertInterval->valueint;
} else if (!gInsertInterval) {
g_args.insert_interval = 0;
@@ -3355,16 +3450,24 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
+ if (interlaceRows->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+
+ }
g_args.interlace_rows = interlaceRows->valueint;
// rows per table need be less than insert batch
if (g_args.interlace_rows > g_args.num_of_RPR) {
- printf("NOTICE: interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n",
+ printf("NOTICE: interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
g_args.interlace_rows, g_args.num_of_RPR);
- printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n",
+ printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
g_args.num_of_RPR);
- printf(" press Enter key to continue or Ctrl-C to stop.");
- (void)getchar();
+ if (!g_args.answer_yes) {
+ printf(" press Enter key to continue or Ctrl-C to stop.");
+ (void)getchar();
+ }
g_args.interlace_rows = g_args.num_of_RPR;
}
} else if (!interlaceRows) {
@@ -3377,9 +3480,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len");
if (maxSqlLen && maxSqlLen->type == cJSON_Number) {
+ if (maxSqlLen->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_args.max_sql_len = maxSqlLen->valueint;
} else if (!maxSqlLen) {
- g_args.max_sql_len = 1024000;
+ g_args.max_sql_len = (1024*1024);
} else {
errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
__func__, __LINE__);
@@ -3388,9 +3496,16 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req");
if (numRecPerReq && numRecPerReq->type == cJSON_Number) {
+ if (numRecPerReq->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) {
+ numRecPerReq->valueint = MAX_RECORDS_PER_REQ;
+ }
g_args.num_of_RPR = numRecPerReq->valueint;
} else if (!numRecPerReq) {
- g_args.num_of_RPR = INT64_MAX;
+ g_args.num_of_RPR = MAX_RECORDS_PER_REQ;
} else {
errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n",
__func__, __LINE__);
@@ -3550,7 +3665,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (minRows && minRows->type == cJSON_Number) {
g_Dbs.db[i].dbCfg.minRows = minRows->valueint;
} else if (!minRows) {
- g_Dbs.db[i].dbCfg.minRows = -1;
+ g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default
} else {
printf("ERROR: failed to read json, minRows not found\n");
goto PARSE_OVER;
@@ -3560,7 +3675,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (maxRows && maxRows->type == cJSON_Number) {
g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint;
} else if (!maxRows) {
- g_Dbs.db[i].dbCfg.maxRows = -1;
+ g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default
} else {
printf("ERROR: failed to read json, maxRows not found\n");
goto PARSE_OVER;
@@ -3705,7 +3820,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
- errorPrint("%s() LN%d, failed to read json, childtable_count not found\n",
+ errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
@@ -3724,15 +3839,24 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
- cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , restful
+ cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest, stmt
if (insertMode && insertMode->type == cJSON_String
&& insertMode->valuestring != NULL) {
- tstrncpy(g_Dbs.db[i].superTbls[j].insertMode,
- insertMode->valuestring, MAX_DB_NAME_SIZE);
+ if (0 == strcasecmp(insertMode->valuestring, "taosc")) {
+ g_Dbs.db[i].superTbls[j].insertMode = TAOSC_IFACE;
+ } else if (0 == strcasecmp(insertMode->valuestring, "rest")) {
+ g_Dbs.db[i].superTbls[j].insertMode = REST_IFACE;
+ } else if (0 == strcasecmp(insertMode->valuestring, "stmt")) {
+ g_Dbs.db[i].superTbls[j].insertMode = STMT_IFACE;
+ } else {
+ errorPrint("%s() LN%d, failed to read json, insert_mode %s not recognized\n",
+ __func__, __LINE__, insertMode->valuestring);
+ goto PARSE_OVER;
+ }
} else if (!insertMode) {
- tstrncpy(g_Dbs.db[i].superTbls[j].insertMode, "taosc", MAX_DB_NAME_SIZE);
+ g_Dbs.db[i].superTbls[j].insertMode = TAOSC_IFACE;
} else {
- printf("ERROR: failed to read json, insert_mode not found\n");
+ errorPrint("%s", "failed to read json, insert_mode not found\n");
goto PARSE_OVER;
}
@@ -3823,9 +3947,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
- cJSON* maxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len");
- if (maxSqlLen && maxSqlLen->type == cJSON_Number) {
- int32_t len = maxSqlLen->valueint;
+ cJSON* stbMaxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len");
+ if (stbMaxSqlLen && stbMaxSqlLen->type == cJSON_Number) {
+ int32_t len = stbMaxSqlLen->valueint;
if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
len = TSDB_MAX_ALLOWED_SQL_LEN;
} else if (len < 5) {
@@ -3835,7 +3959,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!maxSqlLen) {
g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len;
} else {
- errorPrint("%s() LN%d, failed to read json, maxSqlLen input mistake\n",
+ errorPrint("%s() LN%d, failed to read json, stbMaxSqlLen input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
@@ -3857,20 +3981,27 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
*/
- cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
- if (interlaceRows && interlaceRows->type == cJSON_Number) {
- g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint;
+ cJSON* stbInterlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
+ if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) {
+ if (stbInterlaceRows->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+ g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint;
// rows per table need be less than insert batch
if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) {
- printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n",
+ printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR);
- printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n",
+ printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
g_args.num_of_RPR);
- printf(" press Enter key to continue or Ctrl-C to stop.");
- (void)getchar();
+ if (!g_args.answer_yes) {
+ printf(" press Enter key to continue or Ctrl-C to stop.");
+ (void)getchar();
+ }
g_Dbs.db[i].superTbls[j].interlaceRows = g_args.num_of_RPR;
}
- } else if (!interlaceRows) {
+ } else if (!stbInterlaceRows) {
g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
} else {
errorPrint(
@@ -3907,6 +4038,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
if (insertRows && insertRows->type == cJSON_Number) {
+ if (insertRows->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
} else if (!insertRows) {
g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
@@ -3919,8 +4055,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* insertInterval = cJSON_GetObjectItem(stbInfo, "insert_interval");
if (insertInterval && insertInterval->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint;
+ if (insertInterval->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
} else if (!insertInterval) {
- verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRId64".\n",
+ verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRIu64".\n",
__func__, __LINE__, g_args.insert_interval);
g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval;
} else {
@@ -4001,6 +4142,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times");
if (gQueryTimes && gQueryTimes->type == cJSON_Number) {
+ if (gQueryTimes->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_args.query_times = gQueryTimes->valueint;
} else if (!gQueryTimes) {
g_args.query_times = 1;
@@ -4028,10 +4174,10 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
- // super_table_query
+ // specified_table_query
cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query");
if (!specifiedQuery) {
- g_queryInfo.specifiedQueryInfo.concurrent = 0;
+ g_queryInfo.specifiedQueryInfo.concurrent = 1;
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (specifiedQuery->type != cJSON_Object) {
printf("ERROR: failed to read json, super_table_query not found\n");
@@ -4047,6 +4193,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery,
"query_times");
if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
+ if (specifiedQueryTimes->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ __func__, __LINE__, specifiedQueryTimes->valueint);
+ goto PARSE_OVER;
+
+ }
g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint;
} else if (!specifiedQueryTimes) {
g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times;
@@ -4058,31 +4210,32 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent");
if (concurrent && concurrent->type == cJSON_Number) {
- g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint;
- if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
- errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n",
- __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount,
+ if (concurrent->valueint <= 0) {
+ errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount,
g_queryInfo.specifiedQueryInfo.concurrent);
goto PARSE_OVER;
}
+ g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint;
} else if (!concurrent) {
g_queryInfo.specifiedQueryInfo.concurrent = 1;
}
- cJSON* mode = cJSON_GetObjectItem(specifiedQuery, "mode");
- if (mode && mode->type == cJSON_String
- && mode->valuestring != NULL) {
- if (0 == strcmp("sync", mode->valuestring)) {
- g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE;
- } else if (0 == strcmp("async", mode->valuestring)) {
- g_queryInfo.specifiedQueryInfo.mode = ASYNC_QUERY_MODE;
+ cJSON* specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode");
+ if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String
+ && specifiedAsyncMode->valuestring != NULL) {
+ if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE;
+ } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE;
} else {
- errorPrint("%s() LN%d, failed to read json, query mode input error\n",
+ errorPrint("%s() LN%d, failed to read json, async mode input error\n",
__func__, __LINE__);
goto PARSE_OVER;
}
} else {
- g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE;
+ g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE;
}
cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval");
@@ -4152,6 +4305,18 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
}
tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
+ cJSON* resubAfterConsume =
+ cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume");
+ if (resubAfterConsume
+ && resubAfterConsume->type == cJSON_Number) {
+ g_queryInfo.specifiedQueryInfo.resubAfterConsume[j]
+ = resubAfterConsume->valueint;
+ } else if (!resubAfterConsume) {
+ //printf("failed to read json, subscribe interval no found\n");
+ //goto PARSE_OVER;
+ g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = 1;
+ }
+
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) {
tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
@@ -4165,10 +4330,10 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
}
}
- // sub_table_query
+ // super_table_query
cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query");
if (!superQuery) {
- g_queryInfo.superQueryInfo.threadCnt = 0;
+ g_queryInfo.superQueryInfo.threadCnt = 1;
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (superQuery->type != cJSON_Object) {
printf("ERROR: failed to read json, sub_table_query not found\n");
@@ -4184,6 +4349,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times");
if (superQueryTimes && superQueryTimes->type == cJSON_Number) {
+ if (superQueryTimes->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ __func__, __LINE__, superQueryTimes->valueint);
+ goto PARSE_OVER;
+ }
g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint;
} else if (!superQueryTimes) {
g_queryInfo.superQueryInfo.queryTimes = g_args.query_times;
@@ -4195,6 +4365,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* threads = cJSON_GetObjectItem(superQuery, "threads");
if (threads && threads->type == cJSON_Number) {
+ if (threads->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, threads input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+
+ }
g_queryInfo.superQueryInfo.threadCnt = threads->valueint;
} else if (!threads) {
g_queryInfo.superQueryInfo.threadCnt = 1;
@@ -4218,26 +4394,31 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
- cJSON* submode = cJSON_GetObjectItem(superQuery, "mode");
- if (submode && submode->type == cJSON_String
- && submode->valuestring != NULL) {
- if (0 == strcmp("sync", submode->valuestring)) {
- g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE;
- } else if (0 == strcmp("async", submode->valuestring)) {
- g_queryInfo.superQueryInfo.mode = ASYNC_QUERY_MODE;
+ cJSON* superAsyncMode = cJSON_GetObjectItem(superQuery, "mode");
+ if (superAsyncMode && superAsyncMode->type == cJSON_String
+ && superAsyncMode->valuestring != NULL) {
+ if (0 == strcmp("sync", superAsyncMode->valuestring)) {
+ g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE;
+ } else if (0 == strcmp("async", superAsyncMode->valuestring)) {
+ g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE;
} else {
- errorPrint("%s() LN%d, failed to read json, query mode input error\n",
+ errorPrint("%s() LN%d, failed to read json, async mode input error\n",
__func__, __LINE__);
goto PARSE_OVER;
}
} else {
- g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE;
+ g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE;
}
- cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval");
- if (subinterval && subinterval->type == cJSON_Number) {
- g_queryInfo.superQueryInfo.subscribeInterval = subinterval->valueint;
- } else if (!subinterval) {
+ cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval");
+ if (superInterval && superInterval->type == cJSON_Number) {
+ if (superInterval->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, interval input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+ g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint;
+ } else if (!superInterval) {
//printf("failed to read json, subscribe interval no found\n");
//goto PARSE_OVER;
g_queryInfo.superQueryInfo.subscribeInterval = 10000;
@@ -4305,6 +4486,18 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
MAX_QUERY_SQL_LENGTH);
+ cJSON* superResubAfterConsume =
+ cJSON_GetObjectItem(sql, "resubAfterConsume");
+ if (superResubAfterConsume
+ && superResubAfterConsume->type == cJSON_Number) {
+ g_queryInfo.superQueryInfo.resubAfterConsume[j] =
+ superResubAfterConsume->valueint;
+ } else if (!superResubAfterConsume) {
+ //printf("failed to read json, subscribe interval no found\n");
+ //goto PARSE_OVER;
+ g_queryInfo.superQueryInfo.resubAfterConsume[j] = 1;
+ }
+
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (result != NULL && result->type == cJSON_String
&& result->valuestring != NULL){
@@ -4404,7 +4597,7 @@ static void prepareSampleData() {
static void postFreeResource() {
tmfclose(g_fpOfInsertResult);
for (int i = 0; i < g_Dbs.dbCount; i++) {
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) {
free(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL;
@@ -4452,16 +4645,22 @@ static int getRowDataFromSample(
return dataLen;
}
-static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo) {
+static int64_t generateStbRowData(
+ SSuperTable* stbInfo,
+ char* recBuf, int64_t timestamp
+ ) {
int64_t dataLen = 0;
char *pstr = recBuf;
int64_t maxLen = MAX_DATA_SIZE;
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp);
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "(%" PRId64 ",", timestamp);
for (int i = 0; i < stbInfo->columnCount; i++) {
- if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6))
- || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) {
+ if ((0 == strncasecmp(stbInfo->columns[i].dataType,
+ "BINARY", strlen("BINARY")))
+ || (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "NCHAR", strlen("NCHAR")))) {
if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
errorPrint( "binary or nchar length overflow, max size:%u\n",
(uint32_t)TSDB_MAX_BINARY_LEN);
@@ -4474,47 +4673,47 @@ static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stb
return -1;
}
rand_string(buf, stbInfo->columns[i].dataLen);
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\', ", buf);
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf);
tmfree(buf);
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "int", 3)) {
+ "INT", 3)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d, ", rand_int());
+ "%d,", rand_int());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "bigint", 6)) {
+ "BIGINT", 6)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%"PRId64", ", rand_bigint());
+ "%"PRId64",", rand_bigint());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "float", 5)) {
+ "FLOAT", 5)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%f, ", rand_float());
+ "%f,", rand_float());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "double", 6)) {
+ "DOUBLE", 6)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%f, ", rand_double());
+ "%f,", rand_double());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "smallint", 8)) {
+ "SMALLINT", 8)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d, ", rand_smallint());
+ "%d,", rand_smallint());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "tinyint", strlen("tinyint"))) {
+ "TINYINT", strlen("TINYINT"))) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d, ", rand_tinyint());
+ "%d,", rand_tinyint());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "bool", strlen("bool"))) {
+ "BOOL", strlen("BOOL"))) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d, ", rand_bool());
+ "%d,", rand_bool());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "timestamp", strlen("timestamp"))) {
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%"PRId64", ", rand_bigint());
+ "%"PRId64",", rand_bigint());
} else {
errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType);
return -1;
}
}
- dataLen -= 2;
+ dataLen -= 1;
dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")");
verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf);
@@ -4523,7 +4722,7 @@ static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stb
}
static int64_t generateData(char *recBuf, char **data_type,
- int num_of_cols, int64_t timestamp, int lenOfBinary) {
+ int64_t timestamp, int lenOfBinary) {
memset(recBuf, 0, MAX_DATA_SIZE);
char *pstr = recBuf;
pstr += sprintf(pstr, "(%" PRId64, timestamp);
@@ -4541,31 +4740,31 @@ static int64_t generateData(char *recBuf, char **data_type,
}
for (int i = 0; i < c; i++) {
- if (strcasecmp(data_type[i % c], "tinyint") == 0) {
- pstr += sprintf(pstr, ", %d", rand_tinyint() );
- } else if (strcasecmp(data_type[i % c], "smallint") == 0) {
- pstr += sprintf(pstr, ", %d", rand_smallint());
- } else if (strcasecmp(data_type[i % c], "int") == 0) {
- pstr += sprintf(pstr, ", %d", rand_int());
- } else if (strcasecmp(data_type[i % c], "bigint") == 0) {
- pstr += sprintf(pstr, ", %" PRId64, rand_bigint());
- } else if (strcasecmp(data_type[i % c], "float") == 0) {
- pstr += sprintf(pstr, ", %10.4f", rand_float());
- } else if (strcasecmp(data_type[i % c], "double") == 0) {
+ if (strcasecmp(data_type[i % c], "TINYINT") == 0) {
+ pstr += sprintf(pstr, ",%d", rand_tinyint() );
+ } else if (strcasecmp(data_type[i % c], "SMALLINT") == 0) {
+ pstr += sprintf(pstr, ",%d", rand_smallint());
+ } else if (strcasecmp(data_type[i % c], "INT") == 0) {
+ pstr += sprintf(pstr, ",%d", rand_int());
+ } else if (strcasecmp(data_type[i % c], "BIGINT") == 0) {
+ pstr += sprintf(pstr, ",%" PRId64, rand_bigint());
+ } else if (strcasecmp(data_type[i % c], "FLOAT") == 0) {
+ pstr += sprintf(pstr, ",%10.4f", rand_float());
+ } else if (strcasecmp(data_type[i % c], "DOUBLE") == 0) {
double t = rand_double();
- pstr += sprintf(pstr, ", %20.8f", t);
- } else if (strcasecmp(data_type[i % c], "bool") == 0) {
- bool b = taosRandom() & 1;
- pstr += sprintf(pstr, ", %s", b ? "true" : "false");
- } else if (strcasecmp(data_type[i % c], "binary") == 0) {
+ pstr += sprintf(pstr, ",%20.8f", t);
+ } else if (strcasecmp(data_type[i % c], "BOOL") == 0) {
+ bool b = rand_bool() & 1;
+ pstr += sprintf(pstr, ",%s", b ? "true" : "false");
+ } else if (strcasecmp(data_type[i % c], "BINARY") == 0) {
char *s = malloc(lenOfBinary);
rand_string(s, lenOfBinary);
- pstr += sprintf(pstr, ", \"%s\"", s);
+ pstr += sprintf(pstr, ",\"%s\"", s);
free(s);
- } else if (strcasecmp(data_type[i % c], "nchar") == 0) {
+ } else if (strcasecmp(data_type[i % c], "NCHAR") == 0) {
char *s = malloc(lenOfBinary);
rand_string(s, lenOfBinary);
- pstr += sprintf(pstr, ", \"%s\"", s);
+ pstr += sprintf(pstr, ",\"%s\"", s);
free(s);
}
@@ -4588,7 +4787,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) {
sampleDataBuf = calloc(
superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1);
if (sampleDataBuf == NULL) {
- errorPrint("%s() LN%d, Failed to calloc %"PRId64" Bytes, reason:%s\n",
+ errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__,
superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE,
strerror(errno));
@@ -4609,44 +4808,59 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) {
return 0;
}
-static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, int k)
+static int64_t execInsert(threadInfo *pThreadInfo, uint64_t k)
{
int affectedRows;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
- __func__, __LINE__, buffer);
+ __func__, __LINE__, pThreadInfo->buffer);
if (superTblInfo) {
- if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) {
- affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false);
- } else {
- if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) {
+ if (superTblInfo->insertMode == TAOSC_IFACE) {
+ affectedRows = queryDbExec(
+ pThreadInfo->taos,
+ pThreadInfo->buffer, INSERT_TYPE, false);
+ } else if (superTblInfo->insertMode == REST_IFACE) {
+ if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port,
+ pThreadInfo->buffer, NULL /* not set result file */)) {
affectedRows = -1;
printf("========restful return fail, threadID[%d]\n",
pThreadInfo->threadID);
} else {
affectedRows = k;
}
+ } else if (superTblInfo->insertMode == STMT_IFACE) {
+ debugPrint("%s() LN%d, stmt=%p", __func__, __LINE__, pThreadInfo->stmt);
+ if (0 != taos_stmt_execute(pThreadInfo->stmt)) {
+ errorPrint("%s() LN%d, failied to execute insert statement\n",
+ __func__, __LINE__);
+ exit(-1);
+ }
+
+ affectedRows = k;
+ } else {
+ errorPrint("%s() LN%d: unknown insert mode: %d\n",
+ __func__, __LINE__, superTblInfo->insertMode);
+ affectedRows = 0;
}
} else {
- affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false);
+ affectedRows = queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, INSERT_TYPE, false);
}
return affectedRows;
}
-static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableSeq)
+static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq)
{
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
if (superTblInfo) {
- if ((superTblInfo->childTblOffset >= 0)
- && (superTblInfo->childTblLimit > 0)) {
+ if (superTblInfo->childTblLimit > 0) {
snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
superTblInfo->childTblName +
(tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN);
} else {
- verbosePrint("[%d] %s() LN%d: from=%"PRId64" count=%"PRId64" seq=%"PRId64"\n",
+ verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRId64" seq=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
pThreadInfo->start_table_from,
pThreadInfo->ntables, tableSeq);
@@ -4654,29 +4868,21 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableS
superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN);
}
} else {
- snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRId64"",
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"",
g_args.tb_prefix, tableSeq);
}
}
-static int64_t generateDataTail(
- SSuperTable* superTblInfo,
- int64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows,
- int64_t startFrom, int64_t startTime, int64_t *pSamplePos, int64_t *dataLen) {
- int64_t len = 0;
- int ncols_per_record = 1; // count first col ts
+static int64_t generateDataTailWithoutStb(
+ uint64_t batch, char* buffer,
+ int64_t remainderBufLen, int64_t insertRows,
+ uint64_t startFrom, int64_t startTime,
+ /* int64_t *pSamplePos, */int64_t *dataLen) {
+ uint64_t len = 0;
char *pstr = buffer;
- if (superTblInfo == NULL) {
- int datatypeSeq = 0;
- while(g_args.datatype[datatypeSeq]) {
- datatypeSeq ++;
- ncols_per_record ++;
- }
- }
-
- verbosePrint("%s() LN%d batch=%"PRId64"\n", __func__, __LINE__, batch);
+ verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch);
int64_t k = 0;
for (k = 0; k < batch;) {
@@ -4685,75 +4891,36 @@ static int64_t generateDataTail(
int64_t retLen = 0;
- if (superTblInfo) {
- if (0 == strncasecmp(superTblInfo->dataSource,
- "sample", strlen("sample"))) {
- retLen = getRowDataFromSample(
- data,
- remainderBufLen,
- startTime + superTblInfo->timeStampStep * k,
- superTblInfo,
- pSamplePos);
- } else if (0 == strncasecmp(superTblInfo->dataSource,
- "rand", strlen("rand"))) {
+ char **data_type = g_args.datatype;
+ int lenOfBinary = g_args.len_of_binary;
- int64_t randTail = superTblInfo->timeStampStep * k;
- if (superTblInfo->disorderRatio > 0) {
- int rand_num = taosRandom() % 100;
- if(rand_num < superTblInfo->disorderRatio) {
- randTail = (randTail + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1);
- debugPrint("rand data generated, back %"PRId64"\n", randTail);
- }
- }
+ int64_t randTail = DEFAULT_TIMESTAMP_STEP * k;
- int64_t d = startTime
- + randTail;
- retLen = generateRowData(
- data,
- d,
- superTblInfo);
- }
-
- if (retLen > remainderBufLen) {
- break;
- }
-
- pstr += snprintf(pstr , retLen + 1, "%s", data);
- k++;
- len += retLen;
- remainderBufLen -= retLen;
- } else {
- char **data_type = g_args.datatype;
- int lenOfBinary = g_args.len_of_binary;
-
- int64_t randTail = DEFAULT_TIMESTAMP_STEP * k;
-
- if (g_args.disorderRatio != 0) {
+ if (g_args.disorderRatio != 0) {
int rand_num = taosRandom() % 100;
if (rand_num < g_args.disorderRatio) {
- randTail = (randTail + (taosRandom() % g_args.disorderRange + 1)) * (-1);
+ randTail = (randTail +
+ (taosRandom() % g_args.disorderRange + 1)) * (-1);
debugPrint("rand data generated, back %"PRId64"\n", randTail);
}
- } else {
+ } else {
randTail = DEFAULT_TIMESTAMP_STEP * k;
- }
-
- retLen = generateData(data, data_type,
- ncols_per_record,
- startTime + randTail,
- lenOfBinary);
-
- if (len > remainderBufLen)
- break;
-
- pstr += sprintf(pstr, "%s", data);
- k++;
- len += retLen;
- remainderBufLen -= retLen;
}
- verbosePrint("%s() LN%d len=%"PRId64" k=%"PRId64" \nbuffer=%s\n",
+ retLen = generateData(data, data_type,
+ startTime + randTail,
+ lenOfBinary);
+
+ if (len > remainderBufLen)
+ break;
+
+ pstr += sprintf(pstr, "%s", data);
+ k++;
+ len += retLen;
+ remainderBufLen -= retLen;
+
+ verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n",
__func__, __LINE__, len, k, buffer);
startFrom ++;
@@ -4767,17 +4934,107 @@ static int64_t generateDataTail(
return k;
}
-static int generateSQLHead(char *tableName, int32_t tableSeq,
- threadInfo* pThreadInfo, SSuperTable* superTblInfo,
+static int64_t generateStbDataTail(
+ SSuperTable* superTblInfo,
+ uint64_t batch, char* buffer,
+ int64_t remainderBufLen, int64_t insertRows,
+ uint64_t startFrom, int64_t startTime,
+ int64_t *pSamplePos, int64_t *dataLen) {
+ uint64_t len = 0;
+
+ char *pstr = buffer;
+
+ verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch);
+
+ int64_t k = 0;
+ for (k = 0; k < batch;) {
+ char data[MAX_DATA_SIZE];
+ memset(data, 0, MAX_DATA_SIZE);
+
+ int64_t retLen = 0;
+
+ if (0 == strncasecmp(superTblInfo->dataSource,
+ "sample", strlen("sample"))) {
+ retLen = getRowDataFromSample(
+ data,
+ remainderBufLen,
+ startTime + superTblInfo->timeStampStep * k,
+ superTblInfo,
+ pSamplePos);
+ } else if (0 == strncasecmp(superTblInfo->dataSource,
+ "rand", strlen("rand"))) {
+ int64_t randTail = superTblInfo->timeStampStep * k;
+ if (superTblInfo->disorderRatio > 0) {
+ int rand_num = taosRandom() % 100;
+ if(rand_num < superTblInfo->disorderRatio) {
+ randTail = (randTail +
+ (taosRandom() % superTblInfo->disorderRange + 1)) * (-1);
+ debugPrint("rand data generated, back %"PRId64"\n", randTail);
+ }
+ }
+
+ int64_t d = startTime + randTail;
+ retLen = generateStbRowData(superTblInfo, data, d);
+ }
+
+ if (retLen > remainderBufLen) {
+ break;
+ }
+
+ pstr += snprintf(pstr , retLen + 1, "%s", data);
+ k++;
+ len += retLen;
+ remainderBufLen -= retLen;
+
+ verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n",
+ __func__, __LINE__, len, k, buffer);
+
+ startFrom ++;
+
+ if (startFrom >= insertRows) {
+ break;
+ }
+ }
+
+ *dataLen = len;
+ return k;
+}
+
+
+static int generateSQLHeadWithoutStb(char *tableName,
+ char *dbName,
char *buffer, int remainderBufLen)
{
int len;
-#define HEAD_BUFF_LEN 1024*24 // 16*1024 + (192+32)*2 + insert into ..
char headBuf[HEAD_BUFF_LEN];
- if (superTblInfo) {
- if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
+ len = snprintf(
+ headBuf,
+ HEAD_BUFF_LEN,
+ "%s.%s values",
+ dbName,
+ tableName);
+
+ if (len > remainderBufLen)
+ return -1;
+
+ tstrncpy(buffer, headBuf, len + 1);
+
+ return len;
+}
+
+static int generateStbSQLHead(
+ SSuperTable* superTblInfo,
+ char *tableName, int32_t tableSeq,
+ char *dbName,
+ char *buffer, int remainderBufLen)
+{
+ int len;
+
+ char headBuf[HEAD_BUFF_LEN];
+
+ if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
char* tagsValBuf = NULL;
if (0 == superTblInfo->tagSource) {
tagsValBuf = generateTagVaulesForStb(superTblInfo, tableSeq);
@@ -4796,9 +5053,9 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
headBuf,
HEAD_BUFF_LEN,
"%s.%s using %s.%s tags %s values",
- pThreadInfo->db_name,
+ dbName,
tableName,
- pThreadInfo->db_name,
+ dbName,
superTblInfo->sTblName,
tagsValBuf);
tmfree(tagsValBuf);
@@ -4807,22 +5064,14 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
headBuf,
HEAD_BUFF_LEN,
"%s.%s values",
- pThreadInfo->db_name,
+ dbName,
tableName);
} else {
len = snprintf(
headBuf,
HEAD_BUFF_LEN,
"%s.%s values",
- pThreadInfo->db_name,
- tableName);
- }
- } else {
- len = snprintf(
- headBuf,
- HEAD_BUFF_LEN,
- "%s.%s values",
- pThreadInfo->db_name,
+ dbName,
tableName);
}
@@ -4835,25 +5084,27 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
}
static int64_t generateInterlaceDataBuffer(
- char *tableName, int64_t batchPerTbl, int64_t i, int64_t batchPerTblTimes,
- int64_t tableSeq,
+ char *tableName, uint64_t batchPerTbl, uint64_t i, uint64_t batchPerTblTimes,
+ uint64_t tableSeq,
threadInfo *pThreadInfo, char *buffer,
int64_t insertRows,
int64_t startTime,
- int64_t *pRemainderBufLen)
+ uint64_t *pRemainderBufLen)
{
assert(buffer);
char *pstr = buffer;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo,
- superTblInfo, pstr, *pRemainderBufLen);
+ int headLen = generateStbSQLHead(
+ superTblInfo,
+ tableName, tableSeq, pThreadInfo->db_name,
+ pstr, *pRemainderBufLen);
if (headLen <= 0) {
return 0;
}
// generate data buffer
- verbosePrint("[%d] %s() LN%d i=%"PRId64" buffer:\n%s\n",
+ verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n",
pThreadInfo->threadID, __func__, __LINE__, i, buffer);
pstr += headLen;
@@ -4861,29 +5112,34 @@ static int64_t generateInterlaceDataBuffer(
int64_t dataLen = 0;
- verbosePrint("[%d] %s() LN%d i=%"PRId64" batchPerTblTimes=%"PRId64" batchPerTbl = %"PRId64"\n",
+ verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%"PRIu64" batchPerTbl = %"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
i, batchPerTblTimes, batchPerTbl);
+ int64_t k;
if (superTblInfo) {
if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
startTime = taosGetTimestamp(pThreadInfo->time_precision);
}
- } else {
- startTime = 1500000000000;
- }
- int64_t k = generateDataTail(
- superTblInfo,
- batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
- startTime,
- &(pThreadInfo->samplePos), &dataLen);
+ k = generateStbDataTail(
+ superTblInfo,
+ batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
+ startTime,
+ &(pThreadInfo->samplePos), &dataLen);
+ } else {
+ startTime = 1500000000000;
+ k = generateDataTailWithoutStb(
+ batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
+ startTime,
+ /* &(pThreadInfo->samplePos), */&dataLen);
+ }
if (k == batchPerTbl) {
pstr += dataLen;
*pRemainderBufLen -= dataLen;
} else {
- debugPrint("%s() LN%d, generated data tail: %"PRId64", not equal batch per table: %"PRId64"\n",
+ debugPrint("%s() LN%d, generated data tail: %"PRId64", not equal batch per table: %"PRIu64"\n",
__func__, __LINE__, k, batchPerTbl);
pstr -= headLen;
pstr[0] = '\0';
@@ -4893,35 +5149,33 @@ static int64_t generateInterlaceDataBuffer(
return k;
}
-static int generateProgressiveDataBuffer(
+static int64_t generateProgressiveDataBuffer(
char *tableName,
int64_t tableSeq,
threadInfo *pThreadInfo, char *buffer,
int64_t insertRows,
- int64_t startFrom, int64_t startTime, int64_t *pSamplePos,
+ uint64_t startFrom, int64_t startTime, int64_t *pSamplePos,
int64_t *pRemainderBufLen)
{
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- int ncols_per_record = 1; // count first col ts
-
- if (superTblInfo == NULL) {
- int datatypeSeq = 0;
- while(g_args.datatype[datatypeSeq]) {
- datatypeSeq ++;
- ncols_per_record ++;
- }
- }
-
assert(buffer != NULL);
char *pstr = buffer;
- int64_t k = 0;
-
memset(buffer, 0, *pRemainderBufLen);
- int64_t headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo,
+ int64_t headLen;
+
+ if (superTblInfo) {
+ headLen = generateStbSQLHead(
+ superTblInfo,
+ tableName, tableSeq, pThreadInfo->db_name,
buffer, *pRemainderBufLen);
+ } else {
+ headLen = generateSQLHeadWithoutStb(
+ tableName, pThreadInfo->db_name,
+ buffer, *pRemainderBufLen);
+ }
if (headLen <= 0) {
return 0;
@@ -4930,20 +5184,40 @@ static int generateProgressiveDataBuffer(
*pRemainderBufLen -= headLen;
int64_t dataLen;
- k = generateDataTail(superTblInfo,
- g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom,
+ int64_t k;
+
+ if (superTblInfo) {
+ k = generateStbDataTail(superTblInfo,
+ g_args.num_of_RPR, pstr, *pRemainderBufLen,
+ insertRows, startFrom,
startTime,
pSamplePos, &dataLen);
+ } else {
+ k = generateDataTailWithoutStb(
+ g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom,
+ startTime,
+ /*pSamplePos, */&dataLen);
+ }
return k;
}
+static void printStatPerThread(threadInfo *pThreadInfo)
+{
+ fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows,
+ (double)(pThreadInfo->totalAffectedRows / (pThreadInfo->totalDelay/1000.0)));
+}
+
+// sync write interlace data
static void* syncWriteInterlace(threadInfo *pThreadInfo) {
debugPrint("[%d] %s() LN%d: ### interlace write\n",
pThreadInfo->threadID, __func__, __LINE__);
int64_t insertRows;
- int64_t interlaceRows;
+ uint64_t interlaceRows;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
@@ -4967,21 +5241,18 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
if (interlaceRows > g_args.num_of_RPR)
interlaceRows = g_args.num_of_RPR;
- int insertMode;
+ int progOrInterlace;
if (interlaceRows > 0) {
- insertMode = INTERLACE_INSERT_MODE;
+ progOrInterlace= INTERLACE_INSERT_MODE;
} else {
- insertMode = PROGRESSIVE_INSERT_MODE;
+ progOrInterlace = PROGRESSIVE_INSERT_MODE;
}
- // TODO: prompt tbl count multple interlace rows and batch
- //
-
- int64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
- char* buffer = calloc(maxSqlLen, 1);
- if (NULL == buffer) {
- errorPrint( "%s() LN%d, Failed to alloc %"PRId64" Bytes, reason:%s\n",
+ uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+ pThreadInfo->buffer = calloc(maxSqlLen, 1);
+ if (NULL == pThreadInfo->buffer) {
+ errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__, maxSqlLen, strerror(errno));
return NULL;
}
@@ -4991,30 +5262,30 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->totalInsertRows = 0;
pThreadInfo->totalAffectedRows = 0;
- int64_t nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
+ int64_t nTimeStampStep =
+ superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
- int insert_interval =
+ uint64_t insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
- int64_t st = 0;
- int64_t et = 0xffffffff;
+ uint64_t st = 0;
+ uint64_t et = UINT64_MAX;
- int64_t lastPrintTime = taosGetTimestampMs();
- int64_t startTs = taosGetTimestampMs();
- int64_t endTs;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
- int64_t tableSeq = pThreadInfo->start_table_from;
+ uint64_t tableSeq = pThreadInfo->start_table_from;
- debugPrint("[%d] %s() LN%d: start_table_from=%"PRId64" ntables=%"PRId64" insertRows=%"PRId64"\n",
- pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from,
+ debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ pThreadInfo->start_table_from,
pThreadInfo->ntables, insertRows);
int64_t startTime = pThreadInfo->start_time;
- assert(pThreadInfo->ntables > 0);
+ uint64_t batchPerTbl = interlaceRows;
+ uint64_t batchPerTblTimes;
- int64_t batchPerTbl = interlaceRows;
-
- int64_t batchPerTblTimes;
if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
batchPerTblTimes =
g_args.num_of_RPR / interlaceRows;
@@ -5022,9 +5293,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
batchPerTblTimes = 1;
}
- int64_t generatedRecPerTbl = 0;
+ uint64_t generatedRecPerTbl = 0;
bool flagSleep = true;
- int64_t sleepTimeTotal = 0;
+ uint64_t sleepTimeTotal = 0;
char *strInsertInto = "insert into ";
int nInsertBufLen = strlen(strInsertInto);
@@ -5035,27 +5306,27 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
flagSleep = false;
}
// generate data
- memset(buffer, 0, maxSqlLen);
- int64_t remainderBufLen = maxSqlLen;
+ memset(pThreadInfo->buffer, 0, maxSqlLen);
+ uint64_t remainderBufLen = maxSqlLen;
- char *pstr = buffer;
+ char *pstr = pThreadInfo->buffer;
int len = snprintf(pstr, nInsertBufLen + 1, "%s", strInsertInto);
pstr += len;
remainderBufLen -= len;
- int64_t recOfBatch = 0;
+ uint64_t recOfBatch = 0;
- for (int64_t i = 0; i < batchPerTblTimes; i ++) {
+ for (uint64_t i = 0; i < batchPerTblTimes; i ++) {
getTableName(tableName, pThreadInfo, tableSeq);
if (0 == strlen(tableName)) {
errorPrint("[%d] %s() LN%d, getTableName return null\n",
pThreadInfo->threadID, __func__, __LINE__);
- free(buffer);
+ free(pThreadInfo->buffer);
return NULL;
}
- int64_t oldRemainderLen = remainderBufLen;
+ uint64_t oldRemainderLen = remainderBufLen;
int64_t generated = generateInterlaceDataBuffer(
tableName, batchPerTbl, i, batchPerTblTimes,
tableSeq,
@@ -5064,10 +5335,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
startTime,
&remainderBufLen);
- if (generated < 0) {
- debugPrint("[%d] %s() LN%d, generated data is %"PRId64"\n",
+ debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n",
pThreadInfo->threadID, __func__, __LINE__, generated);
- goto free_and_statistics_interlace;
+ if (generated < 0) {
+ errorPrint("[%d] %s() LN%d, generated records is %"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ goto free_of_interlace;
} else if (generated == 0) {
break;
}
@@ -5081,7 +5354,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->threadID, __func__, __LINE__,
batchPerTbl, recOfBatch);
- if (insertMode == INTERLACE_INSERT_MODE) {
+ if (progOrInterlace == INTERLACE_INSERT_MODE) {
if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) {
// turn to first table
tableSeq = pThreadInfo->start_table_from;
@@ -5111,41 +5384,48 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
break;
}
- verbosePrint("[%d] %s() LN%d recOfBatch=%"PRId64" totalInsertRows=%"PRId64"\n",
+ verbosePrint("[%d] %s() LN%d recOfBatch=%"PRIu64" totalInsertRows=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__, recOfBatch,
pThreadInfo->totalInsertRows);
verbosePrint("[%d] %s() LN%d, buffer=%s\n",
- pThreadInfo->threadID, __func__, __LINE__, buffer);
+ pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer);
startTs = taosGetTimestampMs();
- int64_t affectedRows = execInsert(pThreadInfo, buffer, recOfBatch);
+ if (recOfBatch == 0) {
+ errorPrint("[%d] %s() LN%d try inserting records of batch is %"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ recOfBatch);
+ errorPrint("%s\n", "\tPlease check if the batch or the buffer length is proper value!\n");
+ goto free_of_interlace;
+ }
+ int64_t affectedRows = execInsert(pThreadInfo, recOfBatch);
endTs = taosGetTimestampMs();
- int64_t delay = endTs - startTs;
- performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n",
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %"PRIu64"ms\n",
__func__, __LINE__, delay);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
pThreadInfo->cntDelay++;
pThreadInfo->totalDelay += delay;
- verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
- pThreadInfo->threadID,
- __func__, __LINE__, affectedRows);
- if ((affectedRows < 0) || (recOfBatch != affectedRows)) {
- errorPrint("[%d] %s() LN%d execInsert insert %"PRId64", affected rows: %"PRId64"\n%s\n",
+ if (recOfBatch != affectedRows) {
+ errorPrint("[%d] %s() LN%d execInsert insert %"PRIu64", affected rows: %"PRId64"\n%s\n",
pThreadInfo->threadID, __func__, __LINE__,
- recOfBatch, affectedRows, buffer);
- goto free_and_statistics_interlace;
+ recOfBatch, affectedRows, pThreadInfo->buffer);
+ goto free_of_interlace;
}
pThreadInfo->totalAffectedRows += affectedRows;
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n",
+ printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
pThreadInfo->threadID,
pThreadInfo->totalInsertRows,
pThreadInfo->totalAffectedRows);
@@ -5156,8 +5436,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
et = taosGetTimestampMs();
if (insert_interval > (et - st) ) {
- int sleepTime = insert_interval - (et -st);
- performancePrint("%s() LN%d sleep: %d ms for insert interval\n",
+ uint64_t sleepTime = insert_interval - (et -st);
+ performancePrint("%s() LN%d sleep: %"PRIu64" ms for insert interval\n",
__func__, __LINE__, sleepTime);
taosMsleep(sleepTime); // ms
sleepTimeTotal += insert_interval;
@@ -5165,44 +5445,37 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
}
-free_and_statistics_interlace:
- tmfree(buffer);
-
- printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n",
- pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows);
+free_of_interlace:
+ tmfree(pThreadInfo->buffer);
+ printStatPerThread(pThreadInfo);
return NULL;
}
-// sync insertion
-/*
- 1 thread: 100 tables * 2000 rows/s
- 1 thread: 10 tables * 20000 rows/s
- 6 thread: 300 tables * 2000 rows/s
-
- 2 taosinsertdata , 1 thread: 10 tables * 20000 rows/s
-*/
+// sync insertion progressive data
static void* syncWriteProgressive(threadInfo *pThreadInfo) {
debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__);
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+ uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+ int64_t timeStampStep =
+ superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
+ int64_t insertRows =
+ (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
+ verbosePrint("%s() LN%d insertRows=%"PRId64"\n",
+ __func__, __LINE__, insertRows);
- char* buffer = calloc(maxSqlLen, 1);
- if (NULL == buffer) {
- errorPrint( "Failed to alloc %d Bytes, reason:%s\n",
+ pThreadInfo->buffer = calloc(maxSqlLen, 1);
+ if (NULL == pThreadInfo->buffer) {
+ errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n",
maxSqlLen,
strerror(errno));
return NULL;
}
- int64_t lastPrintTime = taosGetTimestampMs();
- int64_t startTs = taosGetTimestampMs();
- int64_t endTs;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
- int64_t timeStampStep =
- superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
/* int insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
uint64_t st = 0;
@@ -5214,21 +5487,12 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->samplePos = 0;
- for (int64_t tableSeq =
- pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to;
- tableSeq ++) {
+ for (uint64_t tableSeq = pThreadInfo->start_table_from;
+ tableSeq <= pThreadInfo->end_table_to;
+ tableSeq ++) {
int64_t start_time = pThreadInfo->start_time;
- int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
- verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows);
-
- for (int64_t i = 0; i < insertRows;) {
- /*
- if (insert_interval) {
- st = taosGetTimestampMs();
- }
- */
-
+ for (uint64_t i = 0; i < insertRows;) {
char tableName[TSDB_TABLE_NAME_LEN];
getTableName(tableName, pThreadInfo, tableSeq);
verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n",
@@ -5236,7 +5500,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->threadID, tableSeq, tableName);
int64_t remainderBufLen = maxSqlLen;
- char *pstr = buffer;
+ char *pstr = pThreadInfo->buffer;
int nInsertBufLen = strlen("insert into ");
int len = snprintf(pstr, nInsertBufLen + 1, "%s", "insert into ");
@@ -5244,7 +5508,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pstr += len;
remainderBufLen -= len;
- int generated = generateProgressiveDataBuffer(
+ int64_t generated = generateProgressiveDataBuffer(
tableName, tableSeq, pThreadInfo, pstr, insertRows,
i, start_time,
&(pThreadInfo->samplePos),
@@ -5252,27 +5516,33 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
if (generated > 0)
i += generated;
else
- goto free_and_statistics_2;
+ goto free_of_progressive;
start_time += generated * timeStampStep;
pThreadInfo->totalInsertRows += generated;
startTs = taosGetTimestampMs();
- int64_t affectedRows = execInsert(pThreadInfo, buffer, generated);
+ int64_t affectedRows = execInsert(pThreadInfo, generated);
endTs = taosGetTimestampMs();
- int64_t delay = endTs - startTs;
+ uint64_t delay = endTs - startTs;
performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n",
__func__, __LINE__, delay);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
pThreadInfo->cntDelay++;
pThreadInfo->totalDelay += delay;
- if (affectedRows < 0)
- goto free_and_statistics_2;
+ if (affectedRows < 0) {
+ errorPrint("%s() LN%d, affected rows: %"PRId64"\n",
+ __func__, __LINE__, affectedRows);
+ goto free_of_progressive;
+ }
pThreadInfo->totalAffectedRows += affectedRows;
@@ -5287,37 +5557,20 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
if (i >= insertRows)
break;
-/*
- if (insert_interval) {
- et = taosGetTimestampMs();
-
- if (insert_interval > ((et - st)) ) {
- int sleep_time = insert_interval - (et -st);
- performancePrint("%s() LN%d sleep: %d ms for insert interval\n",
- __func__, __LINE__, sleep_time);
- taosMsleep(sleep_time); // ms
- }
- }
- */
} // num_of_DPT
- if (g_args.verbose_print) {
- if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo &&
+ if ((g_args.verbose_print) &&
+ (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo) &&
(0 == strncasecmp(
superTblInfo->dataSource, "sample", strlen("sample")))) {
verbosePrint("%s() LN%d samplePos=%"PRId64"\n",
__func__, __LINE__, pThreadInfo->samplePos);
- }
}
} // tableSeq
-free_and_statistics_2:
- tmfree(buffer);
-
- printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n",
- pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows);
+free_of_progressive:
+ tmfree(pThreadInfo->buffer);
+ printStatPerThread(pThreadInfo);
return NULL;
}
@@ -5383,10 +5636,12 @@ static void callBack(void *param, TAOS_RES *res, int code) {
int rand_num = taosRandom() % 100;
if (0 != pThreadInfo->superTblInfo->disorderRatio
&& rand_num < pThreadInfo->superTblInfo->disorderRatio) {
- int64_t d = pThreadInfo->lastTs - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1);
- generateRowData(data, d, pThreadInfo->superTblInfo);
+ int64_t d = pThreadInfo->lastTs
+ - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1);
+ generateStbRowData(pThreadInfo->superTblInfo, data, d);
} else {
- generateRowData(data, pThreadInfo->lastTs += 1000, pThreadInfo->superTblInfo);
+ generateStbRowData(pThreadInfo->superTblInfo,
+ data, pThreadInfo->lastTs += 1000);
}
pstr += sprintf(pstr, "%s", data);
pThreadInfo->counter++;
@@ -5425,18 +5680,35 @@ static void *asyncWrite(void *sarg) {
return NULL;
}
+static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *serv_addr)
+{
+ uint16_t rest_port = port + TSDB_PORT_HTTP;
+ struct hostent *server = gethostbyname(host);
+ if ((server == NULL) || (server->h_addr == NULL)) {
+ errorPrint("%s", "ERROR, no such host");
+ return -1;
+ }
+
+ debugPrint("h_name: %s\nh_addr=%p\nh_addretype: %s\nh_length: %d\n",
+ server->h_name,
+ server->h_addr,
+ (server->h_addrtype == AF_INET)?"ipv4":"ipv6",
+ server->h_length);
+
+ memset(serv_addr, 0, sizeof(struct sockaddr_in));
+ serv_addr->sin_family = AF_INET;
+ serv_addr->sin_port = htons(rest_port);
+#ifdef WINDOWS
+ serv_addr->sin_addr.s_addr = inet_addr(host);
+#else
+ memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length);
+#endif
+ return 0;
+}
+
static void startMultiThreadInsertData(int threads, char* db_name,
char* precision,SSuperTable* superTblInfo) {
- pthread_t *pids = malloc(threads * sizeof(pthread_t));
- assert(pids != NULL);
-
- threadInfo *infos = malloc(threads * sizeof(threadInfo));
- assert(infos != NULL);
-
- memset(pids, 0, threads * sizeof(pthread_t));
- memset(infos, 0, threads * sizeof(threadInfo));
-
//TAOS* taos;
//if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
// taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port);
@@ -5497,28 +5769,28 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
}
- TAOS* taos = taos_connect(
+ TAOS* taos0 = taos_connect(
g_Dbs.host, g_Dbs.user,
g_Dbs.password, db_name, g_Dbs.port);
- if (NULL == taos) {
+ if (NULL == taos0) {
errorPrint("%s() LN%d, connect to server fail , reason: %s\n",
__func__, __LINE__, taos_errstr(NULL));
exit(-1);
}
- int ntables = 0;
- int startFrom;
+ int64_t ntables = 0;
+ uint64_t startFrom;
if (superTblInfo) {
- int limit, offset;
+ int64_t limit;
+ uint64_t offset;
if ((NULL != g_args.sqlFile) && (superTblInfo->childTblExists == TBL_NO_EXISTS) &&
((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) {
printf("WARNING: offset and limit will not be used since the child tables not exists!\n");
}
- if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS)
- && (superTblInfo->childTblOffset >= 0)) {
+ if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) {
if ((superTblInfo->childTblLimit < 0)
|| ((superTblInfo->childTblOffset + superTblInfo->childTblLimit)
> (superTblInfo->childTblCount))) {
@@ -5559,13 +5831,13 @@ static void startMultiThreadInsertData(int threads, char* db_name,
limit * TSDB_TABLE_NAME_LEN);
if (superTblInfo->childTblName == NULL) {
errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
- taos_close(taos);
+ taos_close(taos0);
exit(-1);
}
int64_t childTblCount;
getChildNameOfSuperTableWithLimitAndOffset(
- taos,
+ taos0,
db_name, superTblInfo->sTblName,
&superTblInfo->childTblName, &childTblCount,
limit,
@@ -5575,19 +5847,35 @@ static void startMultiThreadInsertData(int threads, char* db_name,
startFrom = 0;
}
- taos_close(taos);
+ taos_close(taos0);
- int a = ntables / threads;
+ int64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
- int b = 0;
+ int64_t b = 0;
if (threads != 0) {
b = ntables % threads;
}
+ if ((superTblInfo)
+ && (superTblInfo->insertMode == REST_IFACE)) {
+ if (convertHostToServAddr(g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) {
+ exit(-1);
+ }
+ }
+
+ pthread_t *pids = malloc(threads * sizeof(pthread_t));
+ assert(pids != NULL);
+
+ threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
+ assert(infos != NULL);
+
+ memset(pids, 0, threads * sizeof(pthread_t));
+ memset(infos, 0, threads * sizeof(threadInfo));
+
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infos + i;
t_info->threadID = i;
@@ -5596,20 +5884,35 @@ static void startMultiThreadInsertData(int threads, char* db_name,
t_info->superTblInfo = superTblInfo;
t_info->start_time = start_time;
- t_info->minDelay = INT16_MAX;
+ t_info->minDelay = UINT64_MAX;
if ((NULL == superTblInfo) ||
- (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) {
+ (superTblInfo->insertMode != REST_IFACE)) {
//t_info->taos = taos;
t_info->taos = taos_connect(
g_Dbs.host, g_Dbs.user,
g_Dbs.password, db_name, g_Dbs.port);
if (NULL == t_info->taos) {
errorPrint(
- "connect to server fail from insert sub thread, reason: %s\n",
+ "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n",
+ __func__, __LINE__,
taos_errstr(NULL));
+ free(infos);
exit(-1);
}
+
+ if ((superTblInfo) && (superTblInfo->insertMode == STMT_IFACE)) {
+ t_info->stmt = taos_stmt_init(t_info->taos);
+ if (NULL == t_info->stmt) {
+ errorPrint(
+ "%s() LN%d, failed init stmt, reason: %s\n",
+ __func__, __LINE__,
+ taos_errstr(NULL));
+ free(pids);
+ free(infos);
+ exit(-1);
+ }
+ }
} else {
t_info->taos = NULL;
}
@@ -5628,10 +5931,10 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
*/
tsem_init(&(t_info->lock_sem), 0, 0);
- if (SYNC == g_Dbs.queryMode) {
- pthread_create(pids + i, NULL, syncWrite, t_info);
- } else {
+ if (ASYNC_MODE == g_Dbs.asyncMode) {
pthread_create(pids + i, NULL, asyncWrite, t_info);
+ } else {
+ pthread_create(pids + i, NULL, syncWrite, t_info);
}
}
@@ -5639,19 +5942,23 @@ static void startMultiThreadInsertData(int threads, char* db_name,
pthread_join(pids[i], NULL);
}
- int64_t totalDelay = 0;
- int64_t maxDelay = 0;
- int64_t minDelay = INT16_MAX;
- int64_t cntDelay = 1;
+ uint64_t totalDelay = 0;
+ uint64_t maxDelay = 0;
+ uint64_t minDelay = UINT64_MAX;
+ uint64_t cntDelay = 1;
double avgDelay = 0;
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infos + i;
tsem_destroy(&(t_info->lock_sem));
+
+ if (t_info->stmt) {
+ taos_stmt_close(t_info->stmt);
+ }
taos_close(t_info->taos);
- debugPrint("%s() LN%d, [%d] totalInsert=%"PRId64" totalAffected=%"PRId64"\n",
+ debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n",
__func__, __LINE__,
t_info->threadID, t_info->totalInsertRows,
t_info->totalAffectedRows);
@@ -5677,35 +5984,42 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int64_t t = end - start;
if (superTblInfo) {
- printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n",
+ fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
t / 1000.0, superTblInfo->totalInsertRows,
superTblInfo->totalAffectedRows,
threads, db_name, superTblInfo->sTblName,
(double)superTblInfo->totalInsertRows / (t / 1000.0));
- fprintf(g_fpOfInsertResult,
- "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n",
+
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
t / 1000.0, superTblInfo->totalInsertRows,
superTblInfo->totalAffectedRows,
threads, db_name, superTblInfo->sTblName,
(double)superTblInfo->totalInsertRows / (t / 1000.0));
+ }
} else {
- printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n",
+ fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
t / 1000.0, g_args.totalInsertRows,
g_args.totalAffectedRows,
threads, db_name,
(double)g_args.totalInsertRows / (t / 1000.0));
- fprintf(g_fpOfInsertResult,
- "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n",
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
t * 1000.0, g_args.totalInsertRows,
g_args.totalAffectedRows,
threads, db_name,
(double)g_args.totalInsertRows / (t / 1000.0));
+ }
}
- printf("insert delay, avg: %10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n",
+ fprintf(stderr, "insert delay, avg: %10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n",
avgDelay, maxDelay, minDelay);
- fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n",
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n",
avgDelay, maxDelay, minDelay);
+ }
//taos_close(taos);
@@ -5726,7 +6040,7 @@ static void *readTable(void *sarg) {
return NULL;
}
- int num_of_DPT;
+ int64_t num_of_DPT;
/* if (rinfo->superTblInfo) {
num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table;
} else {
@@ -5734,22 +6048,22 @@ static void *readTable(void *sarg) {
num_of_DPT = g_args.num_of_DPT;
// }
- int num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
- int totalData = num_of_DPT * num_of_tables;
+ int64_t num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
+ int64_t totalData = num_of_DPT * num_of_tables;
bool do_aggreFunc = g_Dbs.do_aggreFunc;
int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2;
if (!do_aggreFunc) {
printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
}
- printf("%d records:\n", totalData);
+ printf("%"PRId64" records:\n", totalData);
fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n");
for (int j = 0; j < n; j++) {
double totalT = 0;
- int count = 0;
- for (int i = 0; i < num_of_tables; i++) {
- sprintf(command, "select %s from %s%d where ts>= %" PRId64,
+ uint64_t count = 0;
+ for (int64_t i = 0; i < num_of_tables; i++) {
+ sprintf(command, "select %s from %s%"PRId64" where ts>= %" PRIu64,
aggreFunc[j], tb_prefix, i, sTime);
double t = taosGetTimestampMs();
@@ -5774,7 +6088,7 @@ static void *readTable(void *sarg) {
taos_free_result(pSql);
}
- fprintf(fp, "|%10s | %10d | %12.2f | %10.2f |\n",
+ fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n",
aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData,
(double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000);
printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT * 1000);
@@ -5796,31 +6110,31 @@ static void *readMetric(void *sarg) {
return NULL;
}
- int num_of_DPT = rinfo->superTblInfo->insertRows;
- int num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
- int totalData = num_of_DPT * num_of_tables;
+ int64_t num_of_DPT = rinfo->superTblInfo->insertRows;
+ int64_t num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
+ int64_t totalData = num_of_DPT * num_of_tables;
bool do_aggreFunc = g_Dbs.do_aggreFunc;
int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2;
if (!do_aggreFunc) {
printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
}
- printf("%d records:\n", totalData);
- fprintf(fp, "Querying On %d records:\n", totalData);
+ printf("%"PRId64" records:\n", totalData);
+ fprintf(fp, "Querying On %"PRId64" records:\n", totalData);
for (int j = 0; j < n; j++) {
- char condition[BUFFER_SIZE - 30] = "\0";
+ char condition[COND_BUF_LEN] = "\0";
char tempS[64] = "\0";
- int m = 10 < num_of_tables ? 10 : num_of_tables;
+ int64_t m = 10 < num_of_tables ? 10 : num_of_tables;
- for (int i = 1; i <= m; i++) {
+ for (int64_t i = 1; i <= m; i++) {
if (i == 1) {
- sprintf(tempS, "t1 = %d", i);
+ sprintf(tempS, "t1 = %"PRId64"", i);
} else {
- sprintf(tempS, " or t1 = %d ", i);
+ sprintf(tempS, " or t1 = %"PRId64" ", i);
}
- strcat(condition, tempS);
+ strncat(condition, tempS, COND_BUF_LEN - 1);
sprintf(command, "select %s from meters where %s", aggreFunc[j], condition);
@@ -5875,7 +6189,8 @@ static int insertTestProcess() {
return -1;
}
- printfInsertMetaToFile(g_fpOfInsertResult);
+ if (g_fpOfInsertResult)
+ printfInsertMetaToFile(g_fpOfInsertResult);
if (!g_args.answer_yes) {
printf("Press enter key to continue\n\n");
@@ -5886,7 +6201,8 @@ static int insertTestProcess() {
// create database and super tables
if(createDatabasesAndStables() != 0) {
- fclose(g_fpOfInsertResult);
+ if (g_fpOfInsertResult)
+ fclose(g_fpOfInsertResult);
return -1;
}
@@ -5902,11 +6218,13 @@ static int insertTestProcess() {
end = taosGetTimestampMs();
if (g_totalChildTables > 0) {
- printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
+ fprintf(stderr, "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n",
(end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
- fprintf(g_fpOfInsertResult,
- "Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n",
(end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
+ }
}
taosMsleep(1000);
@@ -5915,7 +6233,7 @@ static int insertTestProcess() {
for (int i = 0; i < g_Dbs.dbCount; i++) {
if (g_Dbs.use_metric) {
if (g_Dbs.db[i].superTblCount > 0) {
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j];
@@ -5979,14 +6297,20 @@ static void *specifiedTableQuery(void *sarg) {
return NULL;
}
- int64_t st = 0;
- int64_t et = 0;
+ uint64_t st = 0;
+ uint64_t et = 0;
- int queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
+ uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
- int totalQueried = 0;
- int64_t lastPrintTime = taosGetTimestampMs();
- int64_t startTs = taosGetTimestampMs();
+ uint64_t totalQueried = 0;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+
+ if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ sprintf(pThreadInfo->fp, "%s-%d",
+ g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ }
while(queryTimes --) {
if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) <
@@ -5996,44 +6320,22 @@ static void *specifiedTableQuery(void *sarg) {
st = taosGetTimestampMs();
- if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
- int64_t t1 = taosGetTimestampMs();
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
- sprintf(tmpFile, "%s-%d",
- g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
- pThreadInfo->threadID);
- }
- selectAndGetResult(pThreadInfo->taos,
- g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile);
- int64_t t2 = taosGetTimestampMs();
- printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %10.3f s\n",
- taosGetSelfPthreadId(), (t2 - t1)/1000.0);
- } else {
- int64_t t1 = taosGetTimestampMs();
- int retCode = postProceSql(g_queryInfo.host,
- g_queryInfo.port,
- g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]);
- if (0 != retCode) {
- printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
- return NULL;
- }
- int64_t t2 = taosGetTimestampMs();
- printf("=[restful] thread[%"PRId64"] complete one sql, Spent %10.3f s\n",
- taosGetSelfPthreadId(), (t2 - t1)/1000.0);
+ selectAndGetResult(pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]);
+
+ et = taosGetTimestampMs();
+ printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n",
+ taosGetSelfPthreadId(), g_queryInfo.queryMode, (et - st)/1000.0);
- }
totalQueried ++;
g_queryInfo.specifiedQueryInfo.totalQueried ++;
- et = taosGetTimestampMs();
-
- int64_t currentPrintTime = taosGetTimestampMs();
- int64_t endTs = taosGetTimestampMs();
+ uint64_t currentPrintTime = taosGetTimestampMs();
+ uint64_t endTs = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- debugPrint("%s() LN%d, endTs=%"PRId64"ms, startTs=%"PRId64"ms\n",
+ debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n",
__func__, __LINE__, endTs, startTs);
- printf("thread[%d] has currently completed queries: %d, QPS: %10.6f\n",
+ printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n",
pThreadInfo->threadID,
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
@@ -6059,14 +6361,14 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {
tstrncpy(outSql, inSql, pos - inSql + 1);
//printf("1: %s\n", outSql);
- strcat(outSql, subTblName);
+ strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1);
//printf("2: %s\n", outSql);
- strcat(outSql, pos+strlen(sourceString));
+ strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1);
//printf("3: %s\n", outSql);
}
static void *superTableQuery(void *sarg) {
- char sqlstr[1024];
+ char sqlstr[MAX_QUERY_SQL_LENGTH];
threadInfo *pThreadInfo = (threadInfo *)sarg;
if (pThreadInfo->taos == NULL) {
@@ -6085,14 +6387,14 @@ static void *superTableQuery(void *sarg) {
}
}
- int64_t st = 0;
- int64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval;
+ uint64_t st = 0;
+ uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval;
- int queryTimes = g_queryInfo.superQueryInfo.queryTimes;
- int totalQueried = 0;
- int64_t startTs = taosGetTimestampMs();
+ uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes;
+ uint64_t totalQueried = 0;
+ uint64_t startTs = taosGetTimestampMs();
- int64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t lastPrintTime = taosGetTimestampMs();
while(queryTimes --) {
if (g_queryInfo.superQueryInfo.queryInterval
&& (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) {
@@ -6105,13 +6407,12 @@ static void *superTableQuery(void *sarg) {
for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
memset(sqlstr,0,sizeof(sqlstr));
replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.superQueryInfo.result[j][0] != 0) {
- sprintf(tmpFile, "%s-%d",
+ sprintf(pThreadInfo->fp, "%s-%d",
g_queryInfo.superQueryInfo.result[j],
pThreadInfo->threadID);
}
- selectAndGetResult(pThreadInfo->taos, sqlstr, tmpFile);
+ selectAndGetResult(pThreadInfo, sqlstr);
totalQueried++;
g_queryInfo.superQueryInfo.totalQueried ++;
@@ -6119,7 +6420,7 @@ static void *superTableQuery(void *sarg) {
int64_t currentPrintTime = taosGetTimestampMs();
int64_t endTs = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] has currently completed queries: %d, QPS: %10.3f\n",
+ printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.3f\n",
pThreadInfo->threadID,
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
@@ -6128,7 +6429,7 @@ static void *superTableQuery(void *sarg) {
}
}
et = taosGetTimestampMs();
- printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRId64" - %"PRId64"] once queries duration:%.4fs\n\n",
+ printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRIu64" - %"PRIu64"] once queries duration:%.4fs\n\n",
taosGetSelfPthreadId(),
pThreadInfo->start_table_from,
pThreadInfo->end_table_to,
@@ -6169,15 +6470,23 @@ static int queryTestProcess() {
(void)getchar();
}
- printfQuerySystemInfo(taos);
+ if (g_args.debug_print || g_args.verbose_print) {
+ printfQuerySystemInfo(taos);
+ }
+
+ if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
+ if (convertHostToServAddr(
+ g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0)
+ exit(-1);
+ }
pthread_t *pids = NULL;
threadInfo *infos = NULL;
//==== create sub threads for query from specify table
int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
- int nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;
+ uint64_t nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;
- int64_t startTs = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
if ((nSqlCount > 0) && (nConcurrent > 0)) {
@@ -6189,32 +6498,33 @@ static int queryTestProcess() {
ERROR_EXIT("memory allocation failed for create threads\n");
}
- for (int i = 0; i < nConcurrent; i++) {
- for (int j = 0; j < nSqlCount; j++) {
- threadInfo *t_info = infos + i * nSqlCount + j;
- t_info->threadID = i * nSqlCount + j;
- t_info->querySeq = j;
+ for (uint64_t i = 0; i < nSqlCount; i++) {
+ for (int j = 0; j < nConcurrent; j++) {
+ uint64_t seq = i * nConcurrent + j;
+ threadInfo *t_info = infos + seq;
+ t_info->threadID = seq;
+ t_info->querySeq = i;
- if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
+ if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
- char sqlStr[MAX_TB_NAME_SIZE*2];
- sprintf(sqlStr, "use %s", g_queryInfo.dbName);
- verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
- if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
- taos_close(taos);
- free(infos);
- free(pids);
- errorPrint( "use database %s failed!\n\n",
- g_queryInfo.dbName);
- return -1;
- }
+ char sqlStr[MAX_TB_NAME_SIZE*2];
+ sprintf(sqlStr, "use %s", g_queryInfo.dbName);
+ verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
+ if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
+ taos_close(taos);
+ free(infos);
+ free(pids);
+ errorPrint( "use database %s failed!\n\n",
+ g_queryInfo.dbName);
+ return -1;
+ }
+ }
+
+ t_info->taos = NULL;// TODO: workaround to use separate taos connection;
+
+ pthread_create(pids + seq, NULL, specifiedTableQuery,
+ t_info);
}
-
- t_info->taos = NULL;// TODO: workaround to use separate taos connection;
-
- pthread_create(pids + i * nSqlCount + j, NULL, specifiedTableQuery,
- t_info);
- }
}
} else {
g_queryInfo.specifiedQueryInfo.concurrent = 0;
@@ -6237,21 +6547,21 @@ static int queryTestProcess() {
ERROR_EXIT("memory allocation failed for create threads\n");
}
- int ntables = g_queryInfo.superQueryInfo.childTblCount;
+ int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
int threads = g_queryInfo.superQueryInfo.threadCnt;
- int a = ntables / threads;
+ int64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
- int b = 0;
+ int64_t b = 0;
if (threads != 0) {
b = ntables % threads;
}
- int startFrom = 0;
+ uint64_t startFrom = 0;
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infosOfSub + i;
t_info->threadID = i;
@@ -6288,40 +6598,69 @@ static int queryTestProcess() {
tmfree((char*)infosOfSub);
// taos_close(taos);// TODO: workaround to use separate taos connection;
- int64_t endTs = taosGetTimestampMs();
+ uint64_t endTs = taosGetTimestampMs();
- int totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
+ uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
g_queryInfo.superQueryInfo.totalQueried;
- printf("==== completed total queries: %d, the QPS of all threads: %10.3f====\n",
+ fprintf(stderr, "==== completed total queries: %"PRIu64", the QPS of all threads: %10.3f====\n",
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
return 0;
}
-static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
+static void stable_sub_callback(
+ TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
if (res == NULL || taos_errno(res) != 0) {
errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
__func__, __LINE__, code, taos_errstr(res));
return;
}
- getResult(res, (char*)param);
- taos_free_result(res);
+ if (param)
+ appendResultToFile(res, ((threadInfo *)param)->fp);
+ // tao_unscribe() will free result.
}
-static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) {
+static void specified_sub_callback(
+ TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
+ if (res == NULL || taos_errno(res) != 0) {
+ errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
+ __func__, __LINE__, code, taos_errstr(res));
+ return;
+ }
+
+ if (param)
+ appendResultToFile(res, ((threadInfo *)param)->fp);
+ // tao_unscribe() will free result.
+}
+
+static TAOS_SUB* subscribeImpl(
+ QUERY_CLASS class,
+ threadInfo *pThreadInfo,
+ char *sql, char* topic, bool restart, uint64_t interval)
+{
TAOS_SUB* tsub = NULL;
- if (g_queryInfo.specifiedQueryInfo.mode) {
- tsub = taos_subscribe(taos,
- g_queryInfo.specifiedQueryInfo.subscribeRestart,
- topic, sql, subscribe_callback, (void*)resultFileName,
+ if ((SPECIFIED_CLASS == class)
+ && (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode)) {
+ tsub = taos_subscribe(
+ pThreadInfo->taos,
+ restart,
+ topic, sql, specified_sub_callback, (void*)pThreadInfo,
g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ } else if ((STABLE_CLASS == class)
+ && (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode)) {
+ tsub = taos_subscribe(
+ pThreadInfo->taos,
+ restart,
+ topic, sql, stable_sub_callback, (void*)pThreadInfo,
+ g_queryInfo.superQueryInfo.subscribeInterval);
} else {
- tsub = taos_subscribe(taos,
- g_queryInfo.specifiedQueryInfo.subscribeRestart,
- topic, sql, NULL, NULL, 0);
+ tsub = taos_subscribe(
+ pThreadInfo->taos,
+ restart,
+ topic, sql, NULL, NULL, interval);
}
if (tsub == NULL) {
@@ -6334,9 +6673,16 @@ static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultF
static void *superSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
- char subSqlstr[1024];
+ char subSqlstr[MAX_QUERY_SQL_LENGTH];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
+ if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) {
+ errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n",
+ pThreadInfo->ntables,
+ MAX_QUERY_SQL_COUNT);
+ exit(-1);
+ }
+
if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
@@ -6362,59 +6708,97 @@ static void *superSubscribe(void *sarg) {
return NULL;
}
- //int64_t st = 0;
- //int64_t et = 0;
- do {
- //if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < g_queryInfo.specifiedQueryInfo.queryInterval) {
- // taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval- (et - st)); // ms
- // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
- //}
-
- //st = taosGetTimestampMs();
- char topic[32] = {0};
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- sprintf(topic, "taosdemo-subscribe-%d", i);
- memset(subSqlstr,0,sizeof(subSqlstr));
- replaceChildTblName(g_queryInfo.superQueryInfo.sql[i], subSqlstr, i);
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
- sprintf(tmpFile, "%s-%d",
- g_queryInfo.superQueryInfo.result[i], pThreadInfo->threadID);
+ char topic[32] = {0};
+ for (uint64_t i = pThreadInfo->start_table_from;
+ i <= pThreadInfo->end_table_to; i++) {
+ verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n",
+ __func__, __LINE__,
+ pThreadInfo->threadID,
+ pThreadInfo->start_table_from,
+ pThreadInfo->end_table_to, i);
+ sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"",
+ i, pThreadInfo->querySeq);
+ memset(subSqlstr, 0, sizeof(subSqlstr));
+ replaceChildTblName(
+ g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq],
+ subSqlstr, i);
+ if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ sprintf(pThreadInfo->fp, "%s-%d",
+ g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
}
- tsub[i] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, tmpFile);
+
+ debugPrint("%s() LN%d, [%d] subSqlstr: %s\n",
+ __func__, __LINE__, pThreadInfo->threadID, subSqlstr);
+ tsub[i] = subscribeImpl(
+ STABLE_CLASS,
+ pThreadInfo, subSqlstr, topic,
+ g_queryInfo.superQueryInfo.subscribeRestart,
+ g_queryInfo.superQueryInfo.subscribeInterval);
if (NULL == tsub[i]) {
taos_close(pThreadInfo->taos);
return NULL;
}
- }
- //et = taosGetTimestampMs();
- //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
- } while(0);
+ }
// start loop to consume result
+ int consumed[MAX_QUERY_SQL_COUNT];
+ for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) {
+ consumed[i] = 0;
+ }
TAOS_RES* res = NULL;
- while(1) {
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- if (1 == g_queryInfo.superQueryInfo.mode) {
- continue;
- }
- res = taos_consume(tsub[i]);
- if (res) {
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
- sprintf(tmpFile, "%s-%d",
- g_queryInfo.superQueryInfo.result[i],
- pThreadInfo->threadID);
+ while(1) {
+ for (uint64_t i = pThreadInfo->start_table_from;
+ i <= pThreadInfo->end_table_to; i++) {
+ if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) {
+ continue;
+ }
+
+ res = taos_consume(tsub[i]);
+ if (res) {
+ if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ sprintf(pThreadInfo->fp, "%s-%d",
+ g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ appendResultToFile(res, pThreadInfo->fp);
+ }
+ if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ sprintf(pThreadInfo->fp, "%s-%d",
+ g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ appendResultToFile(res, pThreadInfo->fp);
+ }
+ consumed[i] ++;
+
+ if ((g_queryInfo.superQueryInfo.subscribeKeepProgress)
+ && (consumed[i] >=
+ g_queryInfo.superQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) {
+ printf("keepProgress:%d, resub super table query: %"PRIu64"\n",
+ g_queryInfo.superQueryInfo.subscribeKeepProgress,
+ pThreadInfo->querySeq);
+ taos_unsubscribe(tsub,
+ g_queryInfo.superQueryInfo.subscribeKeepProgress);
+ consumed[i]= 0;
+ tsub[i] = subscribeImpl(
+ STABLE_CLASS,
+ pThreadInfo, subSqlstr, topic,
+ g_queryInfo.superQueryInfo.subscribeRestart,
+ g_queryInfo.superQueryInfo.subscribeInterval
+ );
+ if (NULL == tsub[i]) {
+ taos_close(pThreadInfo->taos);
+ return NULL;
+ }
+ }
}
- getResult(res, tmpFile);
- }
}
}
taos_free_result(res);
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- taos_unsubscribe(tsub[i], g_queryInfo.superQueryInfo.subscribeKeepProgress);
+ for (uint64_t i = pThreadInfo->start_table_from;
+ i <= pThreadInfo->end_table_to; i++) {
+ taos_unsubscribe(tsub[i], 0);
}
taos_close(pThreadInfo->taos);
@@ -6423,7 +6807,7 @@ static void *superSubscribe(void *sarg) {
static void *specifiedSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
- TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
+ TAOS_SUB* tsub = NULL;
if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
@@ -6449,61 +6833,71 @@ static void *specifiedSubscribe(void *sarg) {
return NULL;
}
- //int64_t st = 0;
- //int64_t et = 0;
- do {
- //if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < g_queryInfo.specifiedQueryInfo.queryInterval) {
- // taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval- (et - st)); // ms
- // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
- //}
-
- //st = taosGetTimestampMs();
- char topic[32] = {0};
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- sprintf(topic, "taosdemo-subscribe-%d", i);
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
- sprintf(tmpFile, "%s-%d",
- g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID);
- }
- tsub[i] = subscribeImpl(pThreadInfo->taos,
- g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile);
- if (NULL == tsub[i]) {
- taos_close(pThreadInfo->taos);
- return NULL;
- }
- }
- //et = taosGetTimestampMs();
- //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
- } while(0);
+ char topic[32] = {0};
+ sprintf(topic, "taosdemo-subscribe-%"PRIu64"", pThreadInfo->querySeq);
+ if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ sprintf(pThreadInfo->fp, "%s-%d",
+ g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ }
+ tsub = subscribeImpl(
+ SPECIFIED_CLASS, pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
+ topic,
+ g_queryInfo.specifiedQueryInfo.subscribeRestart,
+ g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ if (NULL == tsub) {
+ taos_close(pThreadInfo->taos);
+ return NULL;
+ }
// start loop to consume result
TAOS_RES* res = NULL;
+
+ int consumed;
+
while(1) {
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- if (SYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) {
+ if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
continue;
}
- res = taos_consume(tsub[i]);
+ res = taos_consume(tsub);
if (res) {
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) {
- sprintf(tmpFile, "%s-%d",
- g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID);
- }
- getResult(res, tmpFile);
+ if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ sprintf(pThreadInfo->fp, "%s-%d",
+ g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ appendResultToFile(res, pThreadInfo->fp);
+ }
+
+ consumed ++;
+ if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress)
+ && (consumed >=
+ g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) {
+ printf("keepProgress:%d, resub specified query: %"PRIu64"\n",
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress,
+ pThreadInfo->querySeq);
+ consumed = 0;
+ taos_unsubscribe(tsub,
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
+ tsub = subscribeImpl(
+ SPECIFIED_CLASS,
+ pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
+ topic,
+ g_queryInfo.specifiedQueryInfo.subscribeRestart,
+ g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ if (NULL == tsub) {
+ taos_close(pThreadInfo->taos);
+ return NULL;
+ }
+ }
}
- }
}
taos_free_result(res);
-
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- taos_unsubscribe(tsub[i],
- g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
- }
-
+ taos_unsubscribe(tsub, 0);
taos_close(pThreadInfo->taos);
+
return NULL;
}
@@ -6541,88 +6935,126 @@ static int subscribeTestProcess() {
pthread_t *pids = NULL;
threadInfo *infos = NULL;
- //==== create sub threads for query from super table
- if ((g_queryInfo.specifiedQueryInfo.sqlCount <= 0) ||
- (g_queryInfo.specifiedQueryInfo.concurrent <= 0)) {
- errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n",
- __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount,
- g_queryInfo.specifiedQueryInfo.concurrent);
- exit(-1);
- }
- pids = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t));
- infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo));
- if ((NULL == pids) || (NULL == infos)) {
- errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
- exit(-1);
- }
+ pthread_t *pidsOfStable = NULL;
+ threadInfo *infosOfStable = NULL;
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) {
- threadInfo *t_info = infos + i;
- t_info->threadID = i;
- t_info->taos = NULL; // TODO: workaround to use separate taos connection;
- pthread_create(pids + i, NULL, specifiedSubscribe, t_info);
- }
+ //==== create threads for query for specified table
+ if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) {
+ debugPrint("%s() LN%d, sepcified query sqlCount %"PRIu64".\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount);
+ } else {
+ if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
+ errorPrint("%s() LN%d, sepcified query sqlCount %"PRIu64".\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount);
+ exit(-1);
+ }
- //==== create sub threads for query from sub table
- pthread_t *pidsOfSub = NULL;
- threadInfo *infosOfSub = NULL;
- if ((g_queryInfo.superQueryInfo.sqlCount > 0)
- && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
- pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt *
+ pids = malloc(
+ g_queryInfo.specifiedQueryInfo.sqlCount *
+ g_queryInfo.specifiedQueryInfo.concurrent *
sizeof(pthread_t));
- infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt *
+ infos = malloc(
+ g_queryInfo.specifiedQueryInfo.sqlCount *
+ g_queryInfo.specifiedQueryInfo.concurrent *
sizeof(threadInfo));
- if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
- errorPrint("%s() LN%d, malloc failed for create threads\n",
- __func__, __LINE__);
- // taos_close(taos);
- exit(-1);
+ if ((NULL == pids) || (NULL == infos)) {
+ errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
+ exit(-1);
}
- int ntables = g_queryInfo.superQueryInfo.childTblCount;
- int threads = g_queryInfo.superQueryInfo.threadCnt;
-
- int a = ntables / threads;
- if (a < 1) {
- threads = ntables;
- a = 1;
- }
-
- int b = 0;
- if (threads != 0) {
- b = ntables % threads;
- }
-
- int startFrom = 0;
- for (int i = 0; i < threads; i++) {
- threadInfo *t_info = infosOfSub + i;
- t_info->threadID = i;
-
- t_info->start_table_from = startFrom;
- t_info->ntables = iend_table_to = i < b ? startFrom + a : startFrom + a - 1;
- startFrom = t_info->end_table_to + 1;
- t_info->taos = NULL; // TODO: workaround to use separate taos connection;
- pthread_create(pidsOfSub + i, NULL, superSubscribe, t_info);
- }
-
- g_queryInfo.superQueryInfo.threadCnt = threads;
-
- for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) {
- pthread_join(pidsOfSub[i], NULL);
+ for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
+ uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
+ threadInfo *t_info = infos + seq;
+ t_info->threadID = seq;
+ t_info->querySeq = i;
+ t_info->taos = NULL; // TODO: workaround to use separate taos connection;
+ pthread_create(pids + seq, NULL, specifiedSubscribe, t_info);
+ }
}
}
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) {
- pthread_join(pids[i], NULL);
+ //==== create threads for super table query
+ if (g_queryInfo.superQueryInfo.sqlCount <= 0) {
+ printf("%s() LN%d, super table query sqlCount %"PRIu64".\n",
+ __func__, __LINE__,
+ g_queryInfo.superQueryInfo.sqlCount);
+ } else {
+ if ((g_queryInfo.superQueryInfo.sqlCount > 0)
+ && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
+ pidsOfStable = malloc(
+ g_queryInfo.superQueryInfo.sqlCount *
+ g_queryInfo.superQueryInfo.threadCnt *
+ sizeof(pthread_t));
+ infosOfStable = malloc(
+ g_queryInfo.superQueryInfo.sqlCount *
+ g_queryInfo.superQueryInfo.threadCnt *
+ sizeof(threadInfo));
+ if ((NULL == pidsOfStable) || (NULL == infosOfStable)) {
+ errorPrint("%s() LN%d, malloc failed for create threads\n",
+ __func__, __LINE__);
+ // taos_close(taos);
+ exit(-1);
+ }
+
+ int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
+ int threads = g_queryInfo.superQueryInfo.threadCnt;
+
+ int64_t a = ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
+
+ int64_t b = 0;
+ if (threads != 0) {
+ b = ntables % threads;
+ }
+
+ for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ uint64_t startFrom = 0;
+ for (int j = 0; j < threads; j++) {
+ uint64_t seq = i * threads + j;
+ threadInfo *t_info = infosOfStable + seq;
+ t_info->threadID = seq;
+ t_info->querySeq = i;
+
+ t_info->start_table_from = startFrom;
+ t_info->ntables = jend_table_to = jend_table_to + 1;
+ t_info->taos = NULL; // TODO: workaround to use separate taos connection;
+ pthread_create(pidsOfStable + seq,
+ NULL, superSubscribe, t_info);
+ }
+ }
+
+ g_queryInfo.superQueryInfo.threadCnt = threads;
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ for (int j = 0; j < threads; j++) {
+ uint64_t seq = i * threads + j;
+ pthread_join(pidsOfStable[seq], NULL);
+ }
+ }
+ }
+ }
+
+ for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
+ uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
+ pthread_join(pids[seq], NULL);
+ }
}
tmfree((char*)pids);
tmfree((char*)infos);
- tmfree((char*)pidsOfSub);
- tmfree((char*)infosOfSub);
+ tmfree((char*)pidsOfStable);
+ tmfree((char*)infosOfStable);
// taos_close(taos);
return 0;
}
@@ -6703,7 +7135,7 @@ static void setParaFromArg(){
g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
g_Dbs.threadCount = g_args.num_of_threads;
g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
- g_Dbs.queryMode = g_args.query_mode;
+ g_Dbs.asyncMode = g_args.async_mode;
g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL;
g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS;
@@ -6712,7 +7144,7 @@ static void setParaFromArg(){
tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix,
g_args.tb_prefix, MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
- tstrncpy(g_Dbs.db[0].superTbls[0].insertMode, "taosc", MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].insertMode = g_args.iface;
tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp,
"2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].timeStampStep = DEFAULT_TIMESTAMP_STEP;
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 96a1cd16f8..15db83297c 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -72,7 +72,8 @@ enum _show_db_index {
TSDB_SHOW_DB_WALLEVEL_INDEX,
TSDB_SHOW_DB_FSYNC_INDEX,
TSDB_SHOW_DB_COMP_INDEX,
- TSDB_SHOW_DB_PRECISION_INDEX,
+ TSDB_SHOW_DB_CACHELAST_INDEX,
+ TSDB_SHOW_DB_PRECISION_INDEX,
TSDB_SHOW_DB_UPDATE_INDEX,
TSDB_SHOW_DB_STATUS_INDEX,
TSDB_MAX_SHOW_DB
@@ -83,10 +84,10 @@ enum _show_tables_index {
TSDB_SHOW_TABLES_NAME_INDEX,
TSDB_SHOW_TABLES_CREATED_TIME_INDEX,
TSDB_SHOW_TABLES_COLUMNS_INDEX,
- TSDB_SHOW_TABLES_METRIC_INDEX,
- TSDB_SHOW_TABLES_UID_INDEX,
+ TSDB_SHOW_TABLES_METRIC_INDEX,
+ TSDB_SHOW_TABLES_UID_INDEX,
TSDB_SHOW_TABLES_TID_INDEX,
- TSDB_SHOW_TABLES_VGID_INDEX,
+ TSDB_SHOW_TABLES_VGID_INDEX,
TSDB_MAX_SHOW_TABLES
};
@@ -99,22 +100,24 @@ enum _describe_table_index {
TSDB_MAX_DESCRIBE_METRIC
};
+#define COL_NOTE_LEN 128
+
typedef struct {
char field[TSDB_COL_NAME_LEN + 1];
char type[16];
int length;
- char note[128];
+ char note[COL_NOTE_LEN];
} SColDes;
typedef struct {
- char name[TSDB_COL_NAME_LEN + 1];
+ char name[TSDB_TABLE_NAME_LEN];
SColDes cols[];
} STableDef;
extern char version[];
typedef struct {
- char name[TSDB_DB_NAME_LEN + 1];
+ char name[TSDB_DB_NAME_LEN];
char create_time[32];
int32_t ntables;
int32_t vgroups;
@@ -132,14 +135,15 @@ typedef struct {
int8_t wallevel;
int32_t fsync;
int8_t comp;
+ int8_t cachelast;
char precision[8]; // time resolution
int8_t update;
char status[16];
} SDbInfo;
typedef struct {
- char name[TSDB_TABLE_NAME_LEN + 1];
- char metric[TSDB_TABLE_NAME_LEN + 1];
+ char name[TSDB_TABLE_NAME_LEN];
+ char metric[TSDB_TABLE_NAME_LEN];
} STableRecord;
typedef struct {
@@ -151,7 +155,7 @@ typedef struct {
pthread_t threadID;
int32_t threadIndex;
int32_t totalThreads;
- char dbName[TSDB_TABLE_NAME_LEN + 1];
+ char dbName[TSDB_DB_NAME_LEN];
void *taosCon;
int64_t rowsOfDumpOut;
int64_t tablesOfDumpOut;
@@ -210,13 +214,13 @@ static struct argp_option options[] = {
{"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
// dump unit options
{"all-databases", 'A', 0, 0, "Dump all databases.", 2},
- {"databases", 'B', 0, 0, "Dump assigned databases", 2},
+ {"databases", 'D', 0, 0, "Dump assigned databases", 2},
// dump format options
{"schemaonly", 's', 0, 0, "Only dump schema.", 3},
- {"with-property", 'M', 0, 0, "Dump schema with properties.", 3},
+ {"without-property", 'N', 0, 0, "Dump schema without properties.", 3},
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
- {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
+ {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
@@ -337,15 +341,15 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'A':
arguments->all_databases = true;
break;
- case 'B':
+ case 'D':
arguments->databases = true;
break;
// dump format option
case 's':
arguments->schemaonly = true;
break;
- case 'M':
- arguments->with_property = true;
+ case 'N':
+ arguments->with_property = false;
break;
case 'S':
// parse time here.
@@ -354,23 +358,23 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'E':
arguments->end_time = atol(arg);
break;
- case 'N':
+ case 'B':
arguments->data_batch = atoi(arg);
if (arguments->data_batch >= INT16_MAX) {
arguments->data_batch = INT16_MAX - 1;
- }
+ }
break;
- case 'L':
+ case 'L':
{
int32_t len = atoi(arg);
if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
len = TSDB_MAX_ALLOWED_SQL_LEN;
} else if (len < TSDB_MAX_SQL_LEN) {
len = TSDB_MAX_SQL_LEN;
- }
+ }
arguments->max_sql_len = len;
break;
- }
+ }
case 't':
arguments->table_batch = atoi(arg);
break;
@@ -398,27 +402,27 @@ static resultStatistics g_resultStatistics = {0};
static FILE *g_fpOfResult = NULL;
static int g_numOfCores = 1;
-int taosDumpOut(struct arguments *arguments);
-int taosDumpIn(struct arguments *arguments);
-void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp);
-int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon);
-int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName);
-void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName);
-void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName);
-int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName);
-int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName);
-int taosCheckParam(struct arguments *arguments);
-void taosFreeDbInfos();
+static int taosDumpOut(struct arguments *arguments);
+static int taosDumpIn(struct arguments *arguments);
+static void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp);
+static int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon);
+static int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName);
+static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName);
+static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName);
+static int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName);
+static int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName);
+static int taosCheckParam(struct arguments *arguments);
+static void taosFreeDbInfos();
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName);
struct arguments g_args = {
// connection option
- NULL,
- "root",
+ NULL,
+ "root",
#ifdef _TD_POWER_
- "powerdb",
+ "powerdb",
#else
- "taosdata",
+ "taosdata",
#endif
0,
"",
@@ -432,8 +436,8 @@ struct arguments g_args = {
false,
false,
// dump format option
- false,
- false,
+ false, // schemeonly
+ true, // with_property
0,
INT64_MAX,
1,
@@ -523,7 +527,7 @@ int main(int argc, char *argv[]) {
/* Parse our arguments; every option seen by parse_opt will be
reflected in arguments. */
- if (argc > 1)
+ if (argc > 2)
parse_args(argc, argv, &g_args);
argp_parse(&argp, argc, argv, 0, 0, &g_args);
@@ -675,10 +679,10 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS
}
sprintf(tempCommand, "show tables like %s", table);
-
- result = taos_query(taosCon, tempCommand);
+
+ result = taos_query(taosCon, tempCommand);
int32_t code = taos_errno(result);
-
+
if (code != 0) {
fprintf(stderr, "failed to run command %s\n", tempCommand);
free(tempCommand);
@@ -705,12 +709,12 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS
free(tempCommand);
return 0;
}
-
+
sprintf(tempCommand, "show stables like %s", table);
-
- result = taos_query(taosCon, tempCommand);
+
+ result = taos_query(taosCon, tempCommand);
code = taos_errno(result);
-
+
if (code != 0) {
fprintf(stderr, "failed to run command %s\n", tempCommand);
free(tempCommand);
@@ -748,7 +752,7 @@ int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric
return -1;
}
}
-
+
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN);
tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
@@ -770,7 +774,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
}
sprintf(tmpCommand, "select tbname from %s", metric);
-
+
TAOS_RES *res = taos_query(taosCon, tmpCommand);
int32_t code = taos_errno(res);
if (code != 0) {
@@ -792,20 +796,20 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
}
TAOS_FIELD *fields = taos_fetch_fields(res);
-
+
int32_t numOfTable = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
+ while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes);
tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
-
- taosWrite(fd, &tableRecord, sizeof(STableRecord));
+
+ taosWrite(fd, &tableRecord, sizeof(STableRecord));
numOfTable++;
}
taos_free_result(res);
lseek(fd, 0, SEEK_SET);
-
+
int maxThreads = arguments->thread_num;
int tableOfPerFile ;
if (numOfTable <= arguments->thread_num) {
@@ -815,16 +819,16 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
tableOfPerFile = numOfTable / arguments->thread_num;
if (0 != numOfTable % arguments->thread_num) {
tableOfPerFile += 1;
- }
+ }
}
char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
if (NULL == tblBuf){
- fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
+ fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
close(fd);
return -1;
}
-
+
int32_t numOfThread = *totalNumOfThread;
int subFd = -1;
for (; numOfThread < maxThreads; numOfThread++) {
@@ -838,7 +842,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
(void)remove(tmpBuf);
}
sprintf(tmpBuf, ".select-tbname.tmp");
- (void)remove(tmpBuf);
+ (void)remove(tmpBuf);
free(tblBuf);
close(fd);
return -1;
@@ -856,11 +860,11 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
sprintf(tmpBuf, ".select-tbname.tmp");
(void)remove(tmpBuf);
-
+
if (fd >= 0) {
close(fd);
fd = -1;
- }
+ }
*totalNumOfThread = numOfThread;
@@ -884,7 +888,7 @@ int taosDumpOut(struct arguments *arguments) {
} else {
sprintf(tmpBuf, "dbs.sql");
}
-
+
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@@ -916,9 +920,9 @@ int taosDumpOut(struct arguments *arguments) {
taosDumpCharset(fp);
sprintf(command, "show databases");
- result = taos_query(taos, command);
+ result = taos_query(taos, command);
int32_t code = taos_errno(result);
-
+
if (code != 0) {
fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result));
goto _exit_failure;
@@ -955,15 +959,17 @@ int taosDumpOut(struct arguments *arguments) {
goto _exit_failure;
}
- strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
+ strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
if (arguments->with_property) {
dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
- dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
+ dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
- dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
+ dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
- strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
+ strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
+ fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
//dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
//dbInfos[count]->daysToKeep1;
//dbInfos[count]->daysToKeep2;
@@ -974,8 +980,10 @@ int taosDumpOut(struct arguments *arguments) {
dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+ dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
- strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+ strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+ fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
//dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
@@ -1007,9 +1015,9 @@ int taosDumpOut(struct arguments *arguments) {
g_resultStatistics.totalDatabasesOfDumpOut++;
sprintf(command, "use %s", dbInfos[0]->name);
-
- result = taos_query(taos, command);
- int32_t code = taos_errno(result);
+
+ result = taos_query(taos, command);
+ code = taos_errno(result);
if (code != 0) {
fprintf(stderr, "invalid database %s\n", dbInfos[0]->name);
goto _exit_failure;
@@ -1038,7 +1046,7 @@ int taosDumpOut(struct arguments *arguments) {
int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name);
if (0 == ret) {
superTblCnt++;
- }
+ }
}
retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd);
}
@@ -1050,7 +1058,7 @@ int taosDumpOut(struct arguments *arguments) {
goto _clean_tmp_file;
}
}
-
+
// TODO: save dump super table into result_output.txt
fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt);
g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
@@ -1076,7 +1084,7 @@ int taosDumpOut(struct arguments *arguments) {
taos_close(taos);
taos_free_result(result);
tfree(command);
- taosFreeDbInfos();
+ taosFreeDbInfos();
fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows);
return 0;
@@ -1090,15 +1098,17 @@ _exit_failure:
return -1;
}
-int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) {
+int taosGetTableDes(
+ char* dbName, char *table,
+ STableDef *tableDes, TAOS* taosCon, bool isSuperTable) {
TAOS_ROW row = NULL;
TAOS_RES* res = NULL;
int count = 0;
char sqlstr[COMMAND_SIZE];
sprintf(sqlstr, "describe %s.%s;", dbName, table);
-
- res = taos_query(taosCon, sqlstr);
+
+ res = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
@@ -1108,7 +1118,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
TAOS_FIELD *fields = taos_fetch_fields(res);
- tstrncpy(tableDes->name, table, TSDB_COL_NAME_LEN);
+ tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
while ((row = taos_fetch_row(res)) != NULL) {
strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
@@ -1128,23 +1138,23 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
if (isSuperTable) {
return count;
}
-
+
// if chidl-table have tag, using select tagName from table to get tagValue
for (int i = 0 ; i < count; i++) {
if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
sprintf(sqlstr, "select %s from %s.%s", tableDes->cols[i].field, dbName, table);
-
- res = taos_query(taosCon, sqlstr);
+
+ res = taos_query(taosCon, sqlstr);
code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
taos_free_result(res);
return -1;
}
-
- fields = taos_fetch_fields(res);
+
+ fields = taos_fetch_fields(res);
row = taos_fetch_row(res);
if (NULL == row) {
@@ -1159,7 +1169,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
res = NULL;
continue;
}
-
+
int32_t* length = taos_fetch_lengths(res);
//int32_t* length = taos_fetch_lengths(tmpResult);
@@ -1188,16 +1198,16 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
case TSDB_DATA_TYPE_BINARY: {
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
tableDes->cols[i].note[0] = '\'';
- char tbuf[COMMAND_SIZE];
- converStringToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE);
+ char tbuf[COL_NOTE_LEN];
+ converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf);
*(pstr++) = '\'';
break;
}
case TSDB_DATA_TYPE_NCHAR: {
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
- char tbuf[COMMAND_SIZE];
- convertNCharToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE);
+ char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' '
+ convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
sprintf(tableDes->cols[i].note, "\'%s\'", tbuf);
break;
}
@@ -1219,15 +1229,17 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
default:
break;
}
-
+
taos_free_result(res);
- res = NULL;
+ res = NULL;
}
return count;
}
-int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName) {
+int32_t taosDumpTable(
+ char *table, char *metric, struct arguments *arguments,
+ FILE *fp, TAOS* taosCon, char* dbName) {
int count = 0;
STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
@@ -1280,9 +1292,10 @@ void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
if (isDumpProperty) {
pstr += sprintf(pstr,
- "TABLES %d VGROUPS %d REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION '%s' UPDATE %d",
- dbInfo->ntables, dbInfo->vgroups, dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache,
- dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->wallevel, dbInfo->fsync, dbInfo->comp, dbInfo->precision, dbInfo->update);
+ "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
+ dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache,
+ dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->fsync, dbInfo->cachelast,
+ dbInfo->comp, dbInfo->precision, dbInfo->update);
}
pstr += sprintf(pstr, ";");
@@ -1293,8 +1306,8 @@ void* taosDumpOutWorkThreadFp(void *arg)
{
SThreadParaObj *pThread = (SThreadParaObj*)arg;
STableRecord tableRecord;
- int fd;
-
+ int fd;
+
char tmpBuf[TSDB_FILENAME_LEN*4] = {0};
sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex);
fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
@@ -1305,13 +1318,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
FILE *fp = NULL;
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
-
+
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
}
-
+
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@@ -1321,13 +1334,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
memset(tmpBuf, 0, TSDB_FILENAME_LEN);
sprintf(tmpBuf, "use %s", pThread->dbName);
-
- TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
+
+ TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
int32_t code = taos_errno(tmpResult);
if (code != 0) {
fprintf(stderr, "invalid database %s\n", pThread->dbName);
taos_free_result(tmpResult);
- fclose(fp);
+ fclose(fp);
close(fd);
return NULL;
}
@@ -1340,14 +1353,17 @@ void* taosDumpOutWorkThreadFp(void *arg)
ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
if (readLen <= 0) break;
- int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &g_args, fp, pThread->taosCon, pThread->dbName);
+ int ret = taosDumpTable(
+ tableRecord.name, tableRecord.metric, &g_args,
+ fp, pThread->taosCon, pThread->dbName);
if (ret >= 0) {
// TODO: sum table count and table rows by self
pThread->tablesOfDumpOut++;
pThread->rowsOfDumpOut += ret;
-
+
if (pThread->rowsOfDumpOut >= lastRowsPrint) {
- printf(" %"PRId64 " rows already be dumpout from database %s\n", pThread->rowsOfDumpOut, pThread->dbName);
+ printf(" %"PRId64 " rows already be dumpout from database %s\n",
+ pThread->rowsOfDumpOut, pThread->dbName);
lastRowsPrint += 5000000;
}
@@ -1355,15 +1371,18 @@ void* taosDumpOutWorkThreadFp(void *arg)
if (tablesInOneFile >= g_args.table_batch) {
fclose(fp);
tablesInOneFile = 0;
-
- memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
+
+ memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
+ sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql",
+ g_args.outpath, pThread->dbName,
+ pThread->threadIndex, fileNameIndex);
} else {
- sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex);
+ sprintf(tmpBuf, "%s.tables.%d-%d.sql",
+ pThread->dbName, pThread->threadIndex, fileNameIndex);
}
fileNameIndex++;
-
+
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@@ -1377,7 +1396,7 @@ void* taosDumpOutWorkThreadFp(void *arg)
taos_free_result(tmpResult);
close(fd);
- fclose(fp);
+ fclose(fp);
return NULL;
}
@@ -1385,15 +1404,16 @@ void* taosDumpOutWorkThreadFp(void *arg)
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName)
{
pthread_attr_t thattr;
- SThreadParaObj *threadObj = (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
+ SThreadParaObj *threadObj =
+ (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
for (int t = 0; t < numOfThread; ++t) {
SThreadParaObj *pThread = threadObj + t;
pThread->rowsOfDumpOut = 0;
pThread->tablesOfDumpOut = 0;
pThread->threadIndex = t;
pThread->totalThreads = numOfThread;
- tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN);
- pThread->taosCon = taosCon;
+ tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN);
+ pThread->taosCon = taosCon;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
@@ -1408,7 +1428,7 @@ static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, i
pthread_join(threadObj[t].threadID, NULL);
}
- // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt
+ // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt
int64_t totalRowsOfDumpOut = 0;
int64_t totalChildTblsOfDumpOut = 0;
for (int32_t t = 0; t < numOfThread; ++t) {
@@ -1449,7 +1469,7 @@ int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName) {
}
-int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
+int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
{
TAOS_ROW row;
int fd = -1;
@@ -1457,8 +1477,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
char sqlstr[TSDB_MAX_SQL_LEN] = {0};
sprintf(sqlstr, "show %s.stables", dbName);
-
- TAOS_RES* res = taos_query(taosCon, sqlstr);
+
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason: %s\n", sqlstr, taos_errstr(res));
@@ -1478,13 +1498,14 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
(void)remove(".stables.tmp");
exit(-1);
}
-
- while ((row = taos_fetch_row(res)) != NULL) {
+
+ while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
- strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
taosWrite(fd, &tableRecord, sizeof(STableRecord));
- }
-
+ }
+
taos_free_result(res);
(void)lseek(fd, 0, SEEK_SET);
@@ -1492,7 +1513,7 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
while (1) {
ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
if (readLen <= 0) break;
-
+
int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName);
if (0 == ret) {
superTblCnt++;
@@ -1505,8 +1526,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
close(fd);
(void)remove(".stables.tmp");
-
- return 0;
+
+ return 0;
}
@@ -1516,19 +1537,19 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
STableRecord tableRecord;
taosDumpCreateDbClause(dbInfo, arguments->with_property, fp);
-
+
fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfo->name);
g_resultStatistics.totalDatabasesOfDumpOut++;
char sqlstr[TSDB_MAX_SQL_LEN] = {0};
fprintf(fp, "USE %s;\n\n", dbInfo->name);
-
+
(void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp);
sprintf(sqlstr, "show %s.tables", dbInfo->name);
-
- TAOS_RES* res = taos_query(taosCon, sqlstr);
+
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
int code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
@@ -1547,15 +1568,17 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
}
TAOS_FIELD *fields = taos_fetch_fields(res);
-
+
int32_t numOfTable = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
+ while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
- tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
- tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
-
+ tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
+ fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
+
taosWrite(fd, &tableRecord, sizeof(STableRecord));
-
+
numOfTable++;
}
taos_free_result(res);
@@ -1570,7 +1593,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
tableOfPerFile = numOfTable / g_args.thread_num;
if (0 != numOfTable % g_args.thread_num) {
tableOfPerFile += 1;
- }
+ }
}
char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
@@ -1579,7 +1602,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
close(fd);
return -1;
}
-
+
int32_t numOfThread = 0;
int subFd = -1;
for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) {
@@ -1616,7 +1639,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
close(fd);
fd = -1;
}
-
+
taos_free_result(res);
// start multi threads to dumpout
@@ -1624,7 +1647,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
(void)remove(tmpBuf);
- }
+ }
free(tblBuf);
return 0;
@@ -1637,15 +1660,18 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha
char* pstr = sqlstr;
- pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", dbName, tableDes->name);
+ pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
+ dbName, tableDes->name);
for (; counter < numOfCols; counter++) {
if (tableDes->cols[counter].note[0] != '\0') break;
if (counter == 0) {
- pstr += sprintf(pstr, " (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
+ pstr += sprintf(pstr, " (%s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
- pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
+ pstr += sprintf(pstr, ", %s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
}
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
@@ -1658,9 +1684,11 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha
for (; counter < numOfCols; counter++) {
if (counter == count_temp) {
- pstr += sprintf(pstr, ") TAGS (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
+ pstr += sprintf(pstr, ") TAGS (%s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
- pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
+ pstr += sprintf(pstr, ", %s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
}
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
@@ -1687,7 +1715,8 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols
char *pstr = NULL;
pstr = tmpBuf;
- pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", dbName, tableDes->name, dbName, metric);
+ pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (",
+ dbName, tableDes->name, dbName, metric);
for (; counter < numOfCols; counter++) {
if (tableDes->cols[counter].note[0] != '\0') break;
@@ -1735,7 +1764,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
char *pstr = NULL;
TAOS_ROW row = NULL;
int numFields = 0;
-
+
if (arguments->schemaonly) {
return 0;
}
@@ -1750,11 +1779,11 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
pstr = tmpBuffer;
char sqlstr[1024] = {0};
- sprintf(sqlstr,
- "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
+ sprintf(sqlstr,
+ "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
dbName, tbname, arguments->start_time, arguments->end_time);
-
- TAOS_RES* tmpResult = taos_query(taosCon, sqlstr);
+
+ TAOS_RES* tmpResult = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(tmpResult);
if (code != 0) {
fprintf(stderr, "failed to run command %s, reason: %s\n", sqlstr, taos_errstr(tmpResult));
@@ -1774,7 +1803,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
while ((row = taos_fetch_row(tmpResult)) != NULL) {
pstr = tmpBuffer;
curr_sqlstr_len = 0;
-
+
int32_t* length = taos_fetch_lengths(tmpResult); // act len
if (count == 0) {
@@ -1829,7 +1858,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
//pstr = stpcpy(pstr, tbuf);
//*(pstr++) = '\'';
- pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
+ pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
break;
}
case TSDB_DATA_TYPE_NCHAR: {
@@ -1857,10 +1886,10 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") ");
- totalRows++;
+ totalRows++;
count++;
fprintf(fp, "%s", tmpBuffer);
-
+
if (totalRows >= lastRowsPrint) {
printf(" %"PRId64 " rows already be dumpout from %s.%s\n", totalRows, dbName, tbname);
lastRowsPrint += 5000000;
@@ -2206,7 +2235,7 @@ static FILE* taosOpenDumpInFile(char *fptr) {
}
char *fname = full_path.we_wordv[0];
-
+
FILE *f = fopen(fname, "r");
if (f == NULL) {
fprintf(stderr, "ERROR: failed to open file %s\n", fname);
@@ -2240,7 +2269,7 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c
line[--read_len] = '\0';
//if (read_len == 0 || isCommentLine(line)) { // line starts with #
- if (read_len == 0 ) {
+ if (read_len == 0 ) {
continue;
}
@@ -2259,8 +2288,8 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c
}
memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
- cmd_len = 0;
-
+ cmd_len = 0;
+
if (lineNo >= lastRowsPrint) {
printf(" %d lines already be executed from file %s\n", lineNo, fileName);
lastRowsPrint += 5000000;
@@ -2300,7 +2329,7 @@ static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
if (totalThreads > tsSqlFileNum) {
totalThreads = tsSqlFileNum;
}
-
+
SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj));
for (int32_t t = 0; t < totalThreads; ++t) {
pThread = threadObj + t;
@@ -2330,7 +2359,7 @@ static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
int taosDumpIn(struct arguments *arguments) {
assert(arguments->isDumpIn);
-
+
TAOS *taos = NULL;
FILE *fp = NULL;
@@ -2345,22 +2374,22 @@ int taosDumpIn(struct arguments *arguments) {
int32_t tsSqlFileNumOfTbls = tsSqlFileNum;
if (tsDbSqlFile[0] != 0) {
tsSqlFileNumOfTbls--;
-
+
fp = taosOpenDumpInFile(tsDbSqlFile);
if (NULL == fp) {
fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile);
return -1;
}
fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile);
-
+
taosLoadFileCharset(fp, tsfCharset);
-
+
taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile);
}
if (0 != tsSqlFileNumOfTbls) {
taosStartDumpInWorkThreads(taos, arguments);
- }
+ }
taos_close(taos);
taosFreeSQLFiles();
diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h
index ed1de1b87a..e052f34a33 100644
--- a/src/mnode/inc/mnodeDef.h
+++ b/src/mnode/inc/mnodeDef.h
@@ -249,7 +249,7 @@ typedef struct SAcctObj {
} SAcctObj;
typedef struct {
- char db[TSDB_DB_NAME_LEN];
+ char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
int8_t type;
int16_t numOfColumns;
int32_t index;
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index b513da29f4..3525bcac18 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -522,13 +522,13 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
pStatus->lastReboot = htonl(pStatus->lastReboot);
pStatus->numOfCores = htons(pStatus->numOfCores);
- uint32_t version = htonl(pStatus->version);
- if (version != tsVersion) {
+ uint32_t _version = htonl(pStatus->version);
+ if (_version != tsVersion) {
pDnode = mnodeGetDnodeByEp(pStatus->dnodeEp);
if (pDnode != NULL && pDnode->status != TAOS_DN_STATUS_READY) {
pDnode->offlineReason = TAOS_DN_OFF_VERSION_NOT_MATCH;
}
- mError("dnode:%d, status msg version:%d not equal with cluster:%d", pStatus->dnodeId, version, tsVersion);
+ mError("dnode:%d, status msg version:%d not equal with cluster:%d", pStatus->dnodeId, _version, tsVersion);
return TSDB_CODE_MND_INVALID_MSG_VERSION;
}
diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c
index 459d981138..cbf713af65 100644
--- a/src/mnode/src/mnodeProfile.c
+++ b/src/mnode/src/mnodeProfile.c
@@ -123,8 +123,9 @@ SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t po
if (/* pConn->ip != ip || */ pConn->port != port /* || strcmp(pConn->user, user) != 0 */) {
mDebug("connId:%d, incoming conn user:%s ip:%s:%u, not match exist conn user:%s ip:%s:%u", connId, user,
taosIpStr(ip), port, pConn->user, taosIpStr(pConn->ip), pConn->port);
- taosCacheRelease(tsMnodeConnCache, (void **)&pConn, false);
- return NULL;
+ pConn->port = port;
+ //taosCacheRelease(tsMnodeConnCache, (void **)&pConn, false);
+ //return NULL;
}
// mDebug("connId:%d, is incoming, user:%s ip:%s:%u", connId, pConn->user, taosIpStr(pConn->ip), pConn->port);
diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c
index 03772f2724..5fe22826b7 100644
--- a/src/mnode/src/mnodeShow.c
+++ b/src/mnode/src/mnodeShow.c
@@ -129,7 +129,7 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) {
SShowObj *pShow = calloc(1, showObjSize);
pShow->type = pShowMsg->type;
pShow->payloadLen = htons(pShowMsg->payloadLen);
- tstrncpy(pShow->db, pShowMsg->db, TSDB_DB_NAME_LEN);
+ tstrncpy(pShow->db, pShowMsg->db, TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN);
memcpy(pShow->payload, pShowMsg->payload, pShow->payloadLen);
pShow = mnodePutShowObj(pShow);
@@ -253,10 +253,6 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) {
int32_t connId = htonl(pHBMsg->connId);
SConnObj *pConn = mnodeAccquireConn(connId, connInfo.user, connInfo.clientIp, connInfo.clientPort);
- if (pConn == NULL) {
- pHBMsg->pid = htonl(pHBMsg->pid);
- pConn = mnodeCreateConn(connInfo.user, connInfo.clientIp, connInfo.clientPort, pHBMsg->pid, pHBMsg->appName);
- }
if (pConn == NULL) {
// do not close existing links, otherwise
diff --git a/src/mnode/src/mnodeUser.c b/src/mnode/src/mnodeUser.c
index 55ee39b6bc..e77c1b3e59 100644
--- a/src/mnode/src/mnodeUser.c
+++ b/src/mnode/src/mnodeUser.c
@@ -123,7 +123,7 @@ static void mnodePrintUserAuth() {
mnodeDecUserRef(pUser);
}
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
}
diff --git a/src/os/CMakeLists.txt b/src/os/CMakeLists.txt
index ab8b0f7678..4472c683c7 100644
--- a/src/os/CMakeLists.txt
+++ b/src/os/CMakeLists.txt
@@ -2,7 +2,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
IF (TD_LINUX)
- ADD_SUBDIRECTORY(src/linux)
+ ADD_SUBDIRECTORY(src/linux)
ELSEIF (TD_DARWIN)
ADD_SUBDIRECTORY(src/darwin)
ELSEIF (TD_WINDOWS)
diff --git a/src/os/inc/os.h b/src/os/inc/os.h
index c3e02b14db..6731ca6d7d 100644
--- a/src/os/inc/os.h
+++ b/src/os/inc/os.h
@@ -20,45 +20,9 @@
extern "C" {
#endif
-#ifdef _TD_DARWIN_64
-#include "osDarwin.h"
-#endif
-
-#ifdef _TD_ARM_64
-#include "osArm64.h"
-#endif
-
-#ifdef _TD_ARM_32
-#include "osArm32.h"
-#endif
-
-#ifdef _TD_MIPS_64
-#include "osMips64.h"
-#endif
-
-#ifdef _TD_LINUX_64
-#include "osLinux64.h"
-#endif
-
-#ifdef _TD_LINUX_32
-#include "osLinux32.h"
-#endif
-
-#ifdef _ALPINE
-#include "osAlpine.h"
-#endif
-
-#ifdef _TD_NINGSI_60
-#include "osNingsi.h"
-#endif
-
-#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
-#include "osWindows.h"
-#endif
-
+#include "osInc.h"
#include "osDef.h"
#include "osAtomic.h"
-#include "osCommon.h"
#include "osDir.h"
#include "osFile.h"
#include "osLz4.h"
@@ -67,6 +31,7 @@ extern "C" {
#include "osRand.h"
#include "osSemphone.h"
#include "osSignal.h"
+#include "osSleep.h"
#include "osSocket.h"
#include "osString.h"
#include "osSysinfo.h"
diff --git a/src/os/inc/osAlpine.h b/src/os/inc/osAlpine.h
deleted file mode 100644
index eba9459395..0000000000
--- a/src/os/inc/osAlpine.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef TDENGINE_OS_ALPINE_H
-#define TDENGINE_OS_ALPINE_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-typedef int(*__compar_fn_t)(const void *, const void *);
-void error (int, int, const char *);
-#ifndef PTHREAD_MUTEX_RECURSIVE_NP
- #define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/src/os/inc/osArm32.h b/src/os/inc/osArm32.h
deleted file mode 100644
index 54835a1ca8..0000000000
--- a/src/os/inc/osArm32.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef TDENGINE_OS_ARM32_H
-#define TDENGINE_OS_ARM32_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define TAOS_OS_FUNC_LZ4
- #define BUILDIN_CLZL(val) __builtin_clzll(val)
- #define BUILDIN_CTZL(val) __builtin_ctzll(val)
- #define BUILDIN_CLZ(val) __builtin_clz(val)
- #define BUILDIN_CTZ(val) __builtin_ctz(val)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/src/os/inc/osArm64.h b/src/os/inc/osArm64.h
deleted file mode 100644
index 76098f6846..0000000000
--- a/src/os/inc/osArm64.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef TDENGINE_OS_ARM64_H
-#define TDENGINE_OS_ARM64_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/src/os/inc/osAtomic.h b/src/os/inc/osAtomic.h
index 803c351400..7affa444ee 100644
--- a/src/os/inc/osAtomic.h
+++ b/src/os/inc/osAtomic.h
@@ -20,7 +20,252 @@
extern "C" {
#endif
-#ifndef TAOS_OS_FUNC_ATOMIC
+#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
+ #define atomic_load_8(ptr) (*(char volatile*)(ptr))
+ #define atomic_load_16(ptr) (*(short volatile*)(ptr))
+ #define atomic_load_32(ptr) (*(long volatile*)(ptr))
+ #define atomic_load_64(ptr) (*(__int64 volatile*)(ptr))
+ #define atomic_load_ptr(ptr) (*(void* volatile*)(ptr))
+
+ #define atomic_store_8(ptr, val) ((*(char volatile*)(ptr)) = (char)(val))
+ #define atomic_store_16(ptr, val) ((*(short volatile*)(ptr)) = (short)(val))
+ #define atomic_store_32(ptr, val) ((*(long volatile*)(ptr)) = (long)(val))
+ #define atomic_store_64(ptr, val) ((*(__int64 volatile*)(ptr)) = (__int64)(val))
+ #define atomic_store_ptr(ptr, val) ((*(void* volatile*)(ptr)) = (void*)(val))
+
+ #define atomic_exchange_8(ptr, val) _InterlockedExchange8((char volatile*)(ptr), (char)(val))
+ #define atomic_exchange_16(ptr, val) _InterlockedExchange16((short volatile*)(ptr), (short)(val))
+ #define atomic_exchange_32(ptr, val) _InterlockedExchange((long volatile*)(ptr), (long)(val))
+ #define atomic_exchange_64(ptr, val) _InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val))
+ #ifdef _WIN64
+ #define atomic_exchange_ptr(ptr, val) _InterlockedExchangePointer((void* volatile*)(ptr), (void*)(val))
+ #else
+ #define atomic_exchange_ptr(ptr, val) _InlineInterlockedExchangePointer((void* volatile*)(ptr), (void*)(val))
+ #endif
+
+ #ifdef _TD_GO_DLL_
+ #define atomic_val_compare_exchange_8 __sync_val_compare_and_swap
+ #else
+ #define atomic_val_compare_exchange_8(ptr, oldval, newval) _InterlockedCompareExchange8((char volatile*)(ptr), (char)(newval), (char)(oldval))
+ #endif
+ #define atomic_val_compare_exchange_16(ptr, oldval, newval) _InterlockedCompareExchange16((short volatile*)(ptr), (short)(newval), (short)(oldval))
+ #define atomic_val_compare_exchange_32(ptr, oldval, newval) _InterlockedCompareExchange((long volatile*)(ptr), (long)(newval), (long)(oldval))
+ #define atomic_val_compare_exchange_64(ptr, oldval, newval) _InterlockedCompareExchange64((__int64 volatile*)(ptr), (__int64)(newval), (__int64)(oldval))
+ #define atomic_val_compare_exchange_ptr(ptr, oldval, newval) _InterlockedCompareExchangePointer((void* volatile*)(ptr), (void*)(newval), (void*)(oldval))
+
+ char interlocked_add_fetch_8(char volatile *ptr, char val);
+ short interlocked_add_fetch_16(short volatile *ptr, short val);
+ long interlocked_add_fetch_32(long volatile *ptr, long val);
+ __int64 interlocked_add_fetch_64(__int64 volatile *ptr, __int64 val);
+
+ char interlocked_and_fetch_8(char volatile* ptr, char val);
+ short interlocked_and_fetch_16(short volatile* ptr, short val);
+ long interlocked_and_fetch_32(long volatile* ptr, long val);
+ __int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val);
+
+ __int64 interlocked_fetch_and_64(__int64 volatile* ptr, __int64 val);
+
+ char interlocked_or_fetch_8(char volatile* ptr, char val);
+ short interlocked_or_fetch_16(short volatile* ptr, short val);
+ long interlocked_or_fetch_32(long volatile* ptr, long val);
+ __int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val);
+
+ char interlocked_xor_fetch_8(char volatile* ptr, char val);
+ short interlocked_xor_fetch_16(short volatile* ptr, short val);
+ long interlocked_xor_fetch_32(long volatile* ptr, long val);
+ __int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val);
+
+ __int64 interlocked_fetch_xor_64(__int64 volatile* ptr, __int64 val);
+
+ #define atomic_add_fetch_8(ptr, val) interlocked_add_fetch_8((char volatile*)(ptr), (char)(val))
+ #define atomic_add_fetch_16(ptr, val) interlocked_add_fetch_16((short volatile*)(ptr), (short)(val))
+ #define atomic_add_fetch_32(ptr, val) interlocked_add_fetch_32((long volatile*)(ptr), (long)(val))
+ #define atomic_add_fetch_64(ptr, val) interlocked_add_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
+ #ifdef _TD_GO_DLL_
+ #define atomic_fetch_add_8 __sync_fetch_and_ad
+ #define atomic_fetch_add_16 __sync_fetch_and_add
+ #else
+ #define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val))
+ #endif
+ #define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val))
+ #define atomic_fetch_add_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), (long)(val))
+ #define atomic_fetch_add_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_sub_fetch_8(ptr, val) interlocked_add_fetch_8((char volatile*)(ptr), -(char)(val))
+ #define atomic_sub_fetch_16(ptr, val) interlocked_add_fetch_16((short volatile*)(ptr), -(short)(val))
+ #define atomic_sub_fetch_32(ptr, val) interlocked_add_fetch_32((long volatile*)(ptr), -(long)(val))
+ #define atomic_sub_fetch_64(ptr, val) interlocked_add_fetch_64((__int64 volatile*)(ptr), -(__int64)(val))
+
+ #define atomic_fetch_sub_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), -(char)(val))
+ #define atomic_fetch_sub_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), -(short)(val))
+ #define atomic_fetch_sub_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), -(long)(val))
+ #define atomic_fetch_sub_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), -(__int64)(val))
+
+ #define atomic_and_fetch_8(ptr, val) interlocked_and_fetch_8((char volatile*)(ptr), (char)(val))
+ #define atomic_and_fetch_16(ptr, val) interlocked_and_fetch_16((short volatile*)(ptr), (short)(val))
+ #define atomic_and_fetch_32(ptr, val) interlocked_and_fetch_32((long volatile*)(ptr), (long)(val))
+ #define atomic_and_fetch_64(ptr, val) interlocked_and_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_fetch_and_8(ptr, val) _InterlockedAnd8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_and_16(ptr, val) _InterlockedAnd16((short volatile*)(ptr), (short)(val))
+ #define atomic_fetch_and_32(ptr, val) _InterlockedAnd((long volatile*)(ptr), (long)(val))
+ #define atomic_fetch_and_64(ptr, val) interlocked_fetch_and_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_or_fetch_8(ptr, val) interlocked_or_fetch_8((char volatile*)(ptr), (char)(val))
+ #define atomic_or_fetch_16(ptr, val) interlocked_or_fetch_16((short volatile*)(ptr), (short)(val))
+ #define atomic_or_fetch_32(ptr, val) interlocked_or_fetch_32((long volatile*)(ptr), (long)(val))
+ #define atomic_or_fetch_64(ptr, val) interlocked_or_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_fetch_or_8(ptr, val) _InterlockedOr8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_or_16(ptr, val) _InterlockedOr16((short volatile*)(ptr), (short)(val))
+ #define atomic_fetch_or_32(ptr, val) _InterlockedOr((long volatile*)(ptr), (long)(val))
+ #define atomic_fetch_or_64(ptr, val) interlocked_fetch_or_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_xor_fetch_8(ptr, val) interlocked_xor_fetch_8((char volatile*)(ptr), (char)(val))
+ #define atomic_xor_fetch_16(ptr, val) interlocked_xor_fetch_16((short volatile*)(ptr), (short)(val))
+ #define atomic_xor_fetch_32(ptr, val) interlocked_xor_fetch_32((long volatile*)(ptr), (long)(val))
+ #define atomic_xor_fetch_64(ptr, val) interlocked_xor_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_fetch_xor_8(ptr, val) _InterlockedXor8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_xor_16(ptr, val) _InterlockedXor16((short volatile*)(ptr), (short)(val))
+ #define atomic_fetch_xor_32(ptr, val) _InterlockedXor((long volatile*)(ptr), (long)(val))
+ #define atomic_fetch_xor_64(ptr, val) interlocked_fetch_xor_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #ifdef _WIN64
+ #define atomic_add_fetch_ptr atomic_add_fetch_64
+ #define atomic_fetch_add_ptr atomic_fetch_add_64
+ #define atomic_sub_fetch_ptr atomic_sub_fetch_64
+ #define atomic_fetch_sub_ptr atomic_fetch_sub_64
+ #define atomic_and_fetch_ptr atomic_and_fetch_64
+ #define atomic_fetch_and_ptr atomic_fetch_and_64
+ #define atomic_or_fetch_ptr atomic_or_fetch_64
+ #define atomic_fetch_or_ptr atomic_fetch_or_64
+ #define atomic_xor_fetch_ptr atomic_xor_fetch_64
+ #define atomic_fetch_xor_ptr atomic_fetch_xor_64
+ #else
+ #define atomic_add_fetch_ptr atomic_add_fetch_32
+ #define atomic_fetch_add_ptr atomic_fetch_add_32
+ #define atomic_sub_fetch_ptr atomic_sub_fetch_32
+ #define atomic_fetch_sub_ptr atomic_fetch_sub_32
+ #define atomic_and_fetch_ptr atomic_and_fetch_32
+ #define atomic_fetch_and_ptr atomic_fetch_and_32
+ #define atomic_or_fetch_ptr atomic_or_fetch_32
+ #define atomic_fetch_or_ptr atomic_fetch_or_32
+ #define atomic_xor_fetch_ptr atomic_xor_fetch_32
+ #define atomic_fetch_xor_ptr atomic_fetch_xor_32
+ #endif
+#elif defined(_TD_NINGSI_60)
+ /*
+ * type __sync_fetch_and_add (type *ptr, type value);
+ * type __sync_fetch_and_sub (type *ptr, type value);
+ * type __sync_fetch_and_or (type *ptr, type value);
+ * type __sync_fetch_and_and (type *ptr, type value);
+ * type __sync_fetch_and_xor (type *ptr, type value);
+ * type __sync_fetch_and_nand (type *ptr, type value);
+ * type __sync_add_and_fetch (type *ptr, type value);
+ * type __sync_sub_and_fetch (type *ptr, type value);
+ * type __sync_or_and_fetch (type *ptr, type value);
+ * type __sync_and_and_fetch (type *ptr, type value);
+ * type __sync_xor_and_fetch (type *ptr, type value);
+ * type __sync_nand_and_fetch (type *ptr, type value);
+ *
+ * bool __sync_bool_compare_and_swap (type*ptr, type oldval, type newval, ...)
+ * type __sync_val_compare_and_swap (type *ptr, type oldval, ?type newval, ...)
+ * */
+
+ #define atomic_load_8(ptr) __sync_fetch_and_add((ptr), 0)
+ #define atomic_load_16(ptr) __sync_fetch_and_add((ptr), 0)
+ #define atomic_load_32(ptr) __sync_fetch_and_add((ptr), 0)
+ #define atomic_load_64(ptr) __sync_fetch_and_add((ptr), 0)
+ #define atomic_load_ptr(ptr) __sync_fetch_and_add((ptr), 0)
+
+ #define atomic_store_8(ptr, val) (*(ptr)=(val))
+ #define atomic_store_16(ptr, val) (*(ptr)=(val))
+ #define atomic_store_32(ptr, val) (*(ptr)=(val))
+ #define atomic_store_64(ptr, val) (*(ptr)=(val))
+ #define atomic_store_ptr(ptr, val) (*(ptr)=(val))
+
+ int8_t atomic_exchange_8_impl(int8_t* ptr, int8_t val );
+ int16_t atomic_exchange_16_impl(int16_t* ptr, int16_t val );
+ int32_t atomic_exchange_32_impl(int32_t* ptr, int32_t val );
+ int64_t atomic_exchange_64_impl(int64_t* ptr, int64_t val );
+ void* atomic_exchange_ptr_impl( void **ptr, void *val );
+
+ #define atomic_exchange_8(ptr, val) atomic_exchange_8_impl((int8_t*)ptr, (int8_t)val)
+ #define atomic_exchange_16(ptr, val) atomic_exchange_16_impl((int16_t*)ptr, (int16_t)val)
+ #define atomic_exchange_32(ptr, val) atomic_exchange_32_impl((int32_t*)ptr, (int32_t)val)
+ #define atomic_exchange_64(ptr, val) atomic_exchange_64_impl((int64_t*)ptr, (int64_t)val)
+ #define atomic_exchange_ptr(ptr, val) atomic_exchange_ptr_impl((void **)ptr, (void*)val)
+
+ #define atomic_val_compare_exchange_8 __sync_val_compare_and_swap
+ #define atomic_val_compare_exchange_16 __sync_val_compare_and_swap
+ #define atomic_val_compare_exchange_32 __sync_val_compare_and_swap
+ #define atomic_val_compare_exchange_64 __sync_val_compare_and_swap
+ #define atomic_val_compare_exchange_ptr __sync_val_compare_and_swap
+
+ #define atomic_add_fetch_8(ptr, val) __sync_add_and_fetch((ptr), (val))
+ #define atomic_add_fetch_16(ptr, val) __sync_add_and_fetch((ptr), (val))
+ #define atomic_add_fetch_32(ptr, val) __sync_add_and_fetch((ptr), (val))
+ #define atomic_add_fetch_64(ptr, val) __sync_add_and_fetch((ptr), (val))
+ #define atomic_add_fetch_ptr(ptr, val) __sync_add_and_fetch((ptr), (val))
+
+ #define atomic_fetch_add_8(ptr, val) __sync_fetch_and_add((ptr), (val))
+ #define atomic_fetch_add_16(ptr, val) __sync_fetch_and_add((ptr), (val))
+ #define atomic_fetch_add_32(ptr, val) __sync_fetch_and_add((ptr), (val))
+ #define atomic_fetch_add_64(ptr, val) __sync_fetch_and_add((ptr), (val))
+ #define atomic_fetch_add_ptr(ptr, val) __sync_fetch_and_add((ptr), (val))
+
+ #define atomic_sub_fetch_8(ptr, val) __sync_sub_and_fetch((ptr), (val))
+ #define atomic_sub_fetch_16(ptr, val) __sync_sub_and_fetch((ptr), (val))
+ #define atomic_sub_fetch_32(ptr, val) __sync_sub_and_fetch((ptr), (val))
+ #define atomic_sub_fetch_64(ptr, val) __sync_sub_and_fetch((ptr), (val))
+ #define atomic_sub_fetch_ptr(ptr, val) __sync_sub_and_fetch((ptr), (val))
+
+ #define atomic_fetch_sub_8(ptr, val) __sync_fetch_and_sub((ptr), (val))
+ #define atomic_fetch_sub_16(ptr, val) __sync_fetch_and_sub((ptr), (val))
+ #define atomic_fetch_sub_32(ptr, val) __sync_fetch_and_sub((ptr), (val))
+ #define atomic_fetch_sub_64(ptr, val) __sync_fetch_and_sub((ptr), (val))
+ #define atomic_fetch_sub_ptr(ptr, val) __sync_fetch_and_sub((ptr), (val))
+
+ #define atomic_and_fetch_8(ptr, val) __sync_and_and_fetch((ptr), (val))
+ #define atomic_and_fetch_16(ptr, val) __sync_and_and_fetch((ptr), (val))
+ #define atomic_and_fetch_32(ptr, val) __sync_and_and_fetch((ptr), (val))
+ #define atomic_and_fetch_64(ptr, val) __sync_and_and_fetch((ptr), (val))
+ #define atomic_and_fetch_ptr(ptr, val) __sync_and_and_fetch((ptr), (val))
+
+ #define atomic_fetch_and_8(ptr, val) __sync_fetch_and_and((ptr), (val))
+ #define atomic_fetch_and_16(ptr, val) __sync_fetch_and_and((ptr), (val))
+ #define atomic_fetch_and_32(ptr, val) __sync_fetch_and_and((ptr), (val))
+ #define atomic_fetch_and_64(ptr, val) __sync_fetch_and_and((ptr), (val))
+ #define atomic_fetch_and_ptr(ptr, val) __sync_fetch_and_and((ptr), (val))
+
+ #define atomic_or_fetch_8(ptr, val) __sync_or_and_fetch((ptr), (val))
+ #define atomic_or_fetch_16(ptr, val) __sync_or_and_fetch((ptr), (val))
+ #define atomic_or_fetch_32(ptr, val) __sync_or_and_fetch((ptr), (val))
+ #define atomic_or_fetch_64(ptr, val) __sync_or_and_fetch((ptr), (val))
+ #define atomic_or_fetch_ptr(ptr, val) __sync_or_and_fetch((ptr), (val))
+
+ #define atomic_fetch_or_8(ptr, val) __sync_fetch_and_or((ptr), (val))
+ #define atomic_fetch_or_16(ptr, val) __sync_fetch_and_or((ptr), (val))
+ #define atomic_fetch_or_32(ptr, val) __sync_fetch_and_or((ptr), (val))
+ #define atomic_fetch_or_64(ptr, val) __sync_fetch_and_or((ptr), (val))
+ #define atomic_fetch_or_ptr(ptr, val) __sync_fetch_and_or((ptr), (val))
+
+ #define atomic_xor_fetch_8(ptr, val) __sync_xor_and_fetch((ptr), (val))
+ #define atomic_xor_fetch_16(ptr, val) __sync_xor_and_fetch((ptr), (val))
+ #define atomic_xor_fetch_32(ptr, val) __sync_xor_and_fetch((ptr), (val))
+ #define atomic_xor_fetch_64(ptr, val) __sync_xor_and_fetch((ptr), (val))
+ #define atomic_xor_fetch_ptr(ptr, val) __sync_xor_and_fetch((ptr), (val))
+
+ #define atomic_fetch_xor_8(ptr, val) __sync_fetch_and_xor((ptr), (val))
+ #define atomic_fetch_xor_16(ptr, val) __sync_fetch_and_xor((ptr), (val))
+ #define atomic_fetch_xor_32(ptr, val) __sync_fetch_and_xor((ptr), (val))
+ #define atomic_fetch_xor_64(ptr, val) __sync_fetch_and_xor((ptr), (val))
+ #define atomic_fetch_xor_ptr(ptr, val) __sync_fetch_and_xor((ptr), (val))
+
+#else
#define atomic_load_8(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_16(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_32(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
diff --git a/src/os/inc/osDarwin.h b/src/os/inc/osDarwin.h
deleted file mode 100644
index 7c206afe7a..0000000000
--- a/src/os/inc/osDarwin.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef TDENGINE_OS_DARWIN_H
-#define TDENGINE_OS_DARWIN_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include