diff --git a/.appveyor.yml b/.appveyor.yml
index fe4816688b..ee1dc91767 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -1,30 +1,49 @@
version: 1.0.{build}
-os: Visual Studio 2015
+image:
+ - Visual Studio 2015
+ - macos
environment:
matrix:
- ARCH: amd64
- ARCH: x86
+matrix:
+ exclude:
+ - image: macos
+ ARCH: x86
+for:
+ -
+ matrix:
+ only:
+ - image: Visual Studio 2015
+ clone_folder: c:\dev\TDengine
+ clone_depth: 1
-clone_folder: c:\dev\TDengine
-clone_depth: 1
+ init:
+ - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%
-init:
- - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%
+ before_build:
+ - cd c:\dev\TDengine
+ - md build
-before_build:
- - cd c:\dev\TDengine
- - md build
-
-build_script:
- - cd build
- - cmake -G "NMake Makefiles" ..
- - nmake install
+ build_script:
+ - cd build
+ - cmake -G "NMake Makefiles" ..
+ - nmake install
+ -
+ matrix:
+ only:
+ - image: macos
+ clone_depth: 1
+ build_script:
+ - mkdir debug
+ - cd debug
+ - cmake .. > /dev/null
+ - make > /dev/null
notifications:
- provider: Email
to:
- sangshuduo@gmail.com
-
on_build_success: true
on_build_failure: true
on_build_status_changed: true
diff --git a/.drone.yml b/.drone.yml
new file mode 100644
index 0000000000..e7ae6ebbda
--- /dev/null
+++ b/.drone.yml
@@ -0,0 +1,180 @@
+---
+kind: pipeline
+name: test_amd64
+
+platform:
+ os: linux
+ arch: amd64
+
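+# each step below is restricted to the develop and master branches by its when: clause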
+steps:
+- name: smoke_test
+ image: python:3.8
+ commands:
+ - apt-get update
+ - apt-get install -y cmake build-essential gcc
+ - pip3 install psutil
+ - pip3 install guppy3
+ - pip3 install src/connector/python/linux/python3/
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ - cd ../tests
+ - ./test-all.sh smoke
+ when:
+ branch:
+ - develop
+ - master
+
+
+- name: crash_gen
+ image: python:3.8
+ commands:
+ - pip3 install requests
+ - pip3 install src/connector/python/linux/python3/
+ - pip3 install psutil
+ - pip3 install guppy3
+ - cd tests/pytest
+ - ./crash_gen.sh -a -p -t 4 -s 2000
+ when:
+ branch:
+ - develop
+ - master
+
+
+---
+kind: pipeline
+name: test_arm64
+
+platform:
+ os: linux
+ arch: arm64
+
+steps:
+- name: build
+ image: gcc
+ commands:
+ - apt-get update
+ - apt-get install -y cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake .. -DCPUTYPE=aarch64 > /dev/null
+ - make
+ when:
+ branch:
+ - develop
+ - master
+---
+kind: pipeline
+name: test_arm
+
+platform:
+ os: linux
+ arch: arm
+
+steps:
+- name: build
+ image: arm32v7/ubuntu:bionic
+ commands:
+ - apt-get update
+ - apt-get install -y cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake .. -DCPUTYPE=aarch32 > /dev/null
+ - make
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: build_trusty
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ubuntu:trusty
+ commands:
+ - apt-get update
+ - apt-get install -y gcc cmake3 build-essential git binutils-2.26
+
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: build_xenial
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ubuntu:xenial
+ commands:
+ - apt-get update
+ - apt-get install -y gcc cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: build_bionic
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ubuntu:bionic
+ commands:
+ - apt-get update
+ - apt-get install -y gcc cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: goodbye
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: 64-bit
+ image: alpine
+ commands:
+ - echo 64-bit is good.
+ when:
+ branch:
+ - develop
+ - master
+
+
+depends_on:
+- test_arm64
+- test_amd64
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 0617d75976..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,296 +0,0 @@
-#
-# Configuration
-#
-#
-# Build Matrix
-#
-branches:
- only:
- - master
- - develop
- - coverity_scan
- - /^.*ci-.*$/
-
-matrix:
- - os: linux
- dist: focal
- language: c
-
- git:
- - depth: 1
-
- compiler: gcc
- env: DESC="linux/gcc build and test"
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - net-tools
- - python3-pip
- - python3-setuptools
- - valgrind
- - psmisc
- - unixodbc
- - unixodbc-dev
- - mono-complete
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - cmake .. > /dev/null
- - make > /dev/null
-
- after_success:
- - travis_wait 20
- - |-
- case $TRAVIS_OS_NAME in
- linux)
- cd ${TRAVIS_BUILD_DIR}/debug
- make install > /dev/null || travis_terminate $?
-
- py3ver=`python3 --version|awk '{print $2}'|cut -d "." -f 1,2` && apt install python$py3ver-dev
- pip3 install psutil
- pip3 install guppy3
- pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
-
- cd ${TRAVIS_BUILD_DIR}/tests/examples/C#/taosdemo
- mcs -out:taosdemo *.cs || travis_terminate $?
- pkill -TERM -x taosd
- fuser -k -n tcp 6030
- sleep 1
- ${TRAVIS_BUILD_DIR}/debug/build/bin/taosd -c ${TRAVIS_BUILD_DIR}/debug/test/cfg > /dev/null &
- sleep 5
- mono taosdemo -Q DEFAULT -y || travis_terminate $?
- pkill -KILL -x taosd
- fuser -k -n tcp 6030
- sleep 1
-
- cd ${TRAVIS_BUILD_DIR}/tests
- ./test-all.sh smoke || travis_terminate $?
- sleep 1
-
- cd ${TRAVIS_BUILD_DIR}/tests/pytest
- pkill -TERM -x taosd
- fuser -k -n tcp 6030
- sleep 1
- ./crash_gen.sh -a -p -t 4 -s 2000|| travis_terminate $?
- sleep 1
-
- cd ${TRAVIS_BUILD_DIR}/tests/pytest
- ./valgrind-test.sh 2>&1 > mem-error-out.log
- sleep 1
-
-
- # Color setting
- RED='\033[0;31m'
- GREEN='\033[1;32m'
- GREEN_DARK='\033[0;32m'
- GREEN_UNDERLINE='\033[4;32m'
- NC='\033[0m'
-
- grep 'start to execute\|ERROR SUMMARY' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-mem-error-out.log
-
- for memError in `grep 'ERROR SUMMARY' uniq-mem-error-out.log | awk '{print $4}'`
- do
- if [ -n "$memError" ]; then
- if [ "$memError" -gt 12 ]; then
- echo -e "${RED} ## Memory errors number valgrind reports is $memError.\
- More than our threshold! ## ${NC}"
- travis_terminate $memError
- fi
- fi
- done
-
- grep 'start to execute\|definitely lost:' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-definitely-lost-out.log
- for defiMemError in `grep 'definitely lost:' uniq-definitely-lost-out.log | awk '{print $7}'`
- do
- if [ -n "$defiMemError" ]; then
- if [ "$defiMemError" -gt 13 ]; then
- echo -e "${RED} ## Memory errors number valgrind reports \
- Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
- travis_terminate $defiMemError
- fi
- fi
- done
-
- ;;
- esac
-
- - os: linux
- dist: bionic
- language: c
- compiler: gcc
- env: COVERITY_SCAN=true
- git:
- - depth: 1
-
- script:
- - echo "this job is for coverity scan"
-
- addons:
- coverity_scan:
- # GitHub project metadata
- # ** specific to your project **
- project:
- name: TDengine
- version: 2.x
- description: TDengine
-
- # Where email notification of build analysis results will be sent
- notification_email: sdsang@taosdata.com, slguan@taosdata.com
-
- # Commands to prepare for build_command
- # ** likely specific to your build **
- build_command_prepend: cmake . > /dev/null
-
- # The command that will be added as an argument to "cov-build" to compile your project for analysis,
- # ** likely specific to your build **
- build_command: make
-
- # Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'.
- # Take care in resource usage, and consider the build frequency allowances per
- # https://scan.coverity.com/faq#frequency
- branch_pattern: coverity_scan
-
- - os: linux
- dist: trusty
- language: c
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - binutils-2.26
- - unixodbc
- - unixodbc-dev
- env:
- - DESC="trusty/gcc-4.8/bintuils-2.26 build"
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - cmake .. > /dev/null
- - export PATH=/usr/lib/binutils-2.26/bin:$PATH && make
-
- - os: linux
- dist: bionic
- language: c
- compiler: clang
- env: DESC="linux/clang build"
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - unixodbc
- - unixodbc-dev
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - cmake .. > /dev/null
- - make > /dev/null
-
- - os: linux
- arch: arm64
- dist: bionic
- language: c
- compiler: clang
- env: DESC="arm64 linux/clang build"
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
- cmake .. -DCPUTYPE=aarch64 > /dev/null;
- else
- cmake .. > /dev/null;
- fi
- - make > /dev/null
-
- - os: linux
- arch: arm64
- dist: xenial
- language: c
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - unixodbc
- - unixodbc-dev
- env:
- - DESC="arm64 xenial build"
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
- cmake .. -DCPUTYPE=aarch64 > /dev/null;
- else
- cmake .. > /dev/null;
- fi
- - make > /dev/null
-
- - os: osx
- osx_image: xcode11.4
- language: c
- compiler: clang
- env: DESC="mac/clang build"
- git:
- - depth: 1
- addons:
- homebrew:
- - cmake
- - unixodbc
-
- script:
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
- - cmake .. > /dev/null
- - make > /dev/null
diff --git a/Jenkinsfile b/Jenkinsfile
index dfe9ed4389..33ce784bce 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -94,6 +94,7 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
+ pip3 install ${WKC}/src/connector/python/linux/python3/
'''
return 1
}
diff --git a/README.md b/README.md
index 45a955f458..78f902babe 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[](https://travis-ci.org/taosdata/TDengine)
+[](https://cloud.drone.io/taosdata/TDengine)
[](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[](https://bestpractices.coreinfrastructure.org/projects/4201)
diff --git a/cmake/define.inc b/cmake/define.inc
index ffef456087..2fde4d2da6 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -57,7 +57,7 @@ IF (TD_LINUX_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
ENDIF ()
@@ -65,7 +65,7 @@ IF (TD_LINUX_32)
ADD_DEFINITIONS(-D_TD_LINUX_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "linux32 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_64)
@@ -73,7 +73,7 @@ IF (TD_ARM_64)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_32)
@@ -81,7 +81,7 @@ IF (TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm32 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
ENDIF ()
IF (TD_MIPS_64)
@@ -89,7 +89,7 @@ IF (TD_MIPS_64)
ADD_DEFINITIONS(-D_TD_MIPS_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_MIPS_32)
@@ -97,7 +97,7 @@ IF (TD_MIPS_32)
ADD_DEFINITIONS(-D_TD_MIPS_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips32 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_APLHINE)
@@ -139,7 +139,7 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
SET(RELEASE_FLAGS "-Og")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
diff --git a/cmake/env.inc b/cmake/env.inc
index efcc996176..3989993953 100755
--- a/cmake/env.inc
+++ b/cmake/env.inc
@@ -32,6 +32,7 @@ ENDIF ()
#
# Set compiler options
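+# -std=gnu99 is a C-only flag: COMMON_C_FLAGS carries it separately from COMMON_FLAGS (presumably so non-C compiles are unaffected)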
+SET(COMMON_C_FLAGS "${COMMON_FLAGS} -std=gnu99")
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_FLAGS} ${RELEASE_FLAGS}")
diff --git a/cmake/install.inc b/cmake/install.inc
index 9e325531d5..f8b3b7c3c6 100755
--- a/cmake/install.inc
+++ b/cmake/install.inc
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.28-dist.jar DESTINATION connector/jdbc)
+ INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.29.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
diff --git a/cmake/version.inc b/cmake/version.inc
index 8035b31cc7..0ee23f319a 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "2.0.20.0")
+ SET(TD_VER_NUMBER "2.1.0.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/deps/rmonotonic/src/monotonic.c b/deps/rmonotonic/src/monotonic.c
index 1470f91b56..c6d2df9097 100644
--- a/deps/rmonotonic/src/monotonic.c
+++ b/deps/rmonotonic/src/monotonic.c
@@ -36,6 +36,15 @@ static char monotonic_info_string[32];
static long mono_ticksPerMicrosecond = 0;
+#ifdef _TD_NINGSI_60
+// implement __rdtsc on Ningsi 60, where the compiler intrinsic is unavailable
+uint64_t __rdtsc(){
+ unsigned int lo,hi;
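+  // rdtsc loads the 64-bit time-stamp counter into EDX:EAX (hi:lo), combined below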
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ return ((uint64_t)hi << 32) | lo;
+}
+#endif
+
static monotime getMonotonicUs_x86() {
return __rdtsc() / mono_ticksPerMicrosecond;
}
diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md
index aba10a14e3..50b31a55d3 100644
--- a/documentation20/cn/00.index/docs.md
+++ b/documentation20/cn/00.index/docs.md
@@ -117,9 +117,9 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data, designed
 ## Common Tools
 * [TDengine sample data import tool](https://www.taosdata.com/blog/2020/01/18/1166.html)
-* [TDengine performance comparison test tool](https://www.taosdata.com/blog/2020/01/18/1166.html)
+* [TDengine write performance test tool](https://www.taosdata.com/blog/2020/01/18/1166.html)
 * [Using TDengine visually from the IDEA database management tool](https://www.taosdata.com/blog/2020/08/27/1767.html)
-* [A cross-platform TDengine GUI management tool built on eletron](https://github.com/skye0207/TDengineGUI)
+* [A cross-platform TDengine GUI management tool built on Electron](https://github.com/skye0207/TDengineGUI)
 * [DataX, an offline data collection/synchronization tool supporting TDengine](https://github.com/wgzhao/DataX) (docs: [reader plugin](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/reader/tdenginereader.md), [writer plugin](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/writer/tdenginewriter.md))
 ## Comparison Tests Between TDengine and Other Databases
diff --git a/documentation20/cn/06.queries/docs.md b/documentation20/cn/06.queries/docs.md
index a161778a72..5557134aac 100644
--- a/documentation20/cn/06.queries/docs.md
+++ b/documentation20/cn/06.queries/docs.md
@@ -12,7 +12,7 @@ TDengine uses SQL as its query language. Applications can, through C/C++, Java, G
 - Join queries aligned on timestamps (implicit joins)
 - A rich set of aggregation/computation functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff, etc.
-For example: in the TAOS Shell, query the records with vlotage > 215 from table d1001, sorted by timestamp in descending order, and output only 2 rows.
+For example: in the TAOS Shell, query the records with voltage > 215 from table d1001, sorted by timestamp in descending order, and output only 2 rows.
```mysql
taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
ts | current | voltage | phase |
diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md
index 3442a2248c..5eec33e2f1 100644
--- a/documentation20/cn/08.connector/01.java/docs.md
+++ b/documentation20/cn/08.connector/01.java/docs.md
@@ -16,7 +16,6 @@ The TDengine JDBC driver implementation stays as consistent as possible with relational database drivers
 * TDengine does not currently support deletion of individual data records.
 * Transactions are not currently supported.
-* Union operations across tables are not currently supported.
 * Nested queries are not currently supported.
 * Each Connection instance may have at most one open ResultSet; if a new query is executed while a ResultSet is still open, taos-jdbcdriver automatically closes the previous ResultSet.
@@ -447,7 +446,7 @@ Query OK, 1 row(s) in set (0.000141s)
-## TAOS-JDBCDriver Versions and the Supported TDengine and JDK Versions 
+## TAOS-JDBCDriver Versions and the Supported TDengine and JDK Versions
 | taos-jdbcdriver version | TDengine version | JDK version |
| -------------------- | ----------------- | -------- |
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index ad3179c310..59f80b0a55 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -32,7 +32,7 @@ TDengine provides a rich set of application development interfaces, including C/C++, Java,
 **Linux**
-**1. Download from the TAOS Data official website (https://www.taosdata.com/cn/all-downloads/)**
+**1. Download from the [TAOS Data official website](https://www.taosdata.com/cn/all-downloads/)**
 * X64 hardware: TDengine-client-2.x.x.x-Linux-x64.tar.gz
@@ -68,7 +68,7 @@ TDengine provides a rich set of application development interfaces, including C/C++, Java,
 **Windows x64/x86**
-**1. Download from the TAOS Data official website (https://www.taosdata.com/cn/all-downloads/):**
+**1. Download from the [TAOS Data official website](https://www.taosdata.com/cn/all-downloads/):**
 * X64 hardware: TDengine-client-2.X.X.X-Windows-x64.exe
@@ -213,7 +213,7 @@ The C/C++ API is similar to MySQL's C API. To use it, applications need to include the TDengine
 - `int taos_result_precision(TAOS_RES *res)`
-  Returns the precision of the timestamp field in the result set: `0` means millisecond, `1` means microsecond, `2` means nanosecond.
+  Returns the precision of the timestamp field in the result set: `0` means millisecond, `1` means microsecond.
- `TAOS_ROW taos_fetch_row(TAOS_RES *res)`
@@ -349,7 +349,7 @@ TDengine provides a time-driven real-time stream computing API, which can, at each specified ti
 * param: a parameter supplied by the application for the callback; it is handed back to the application when the callback is invoked
 * callback: the second callback function, invoked when the continuous query stops automatically.
-  A return value of NULL indicates creation succeeded; a non-NULL return value indicates success.
+  A return value of NULL indicates creation failed; a non-NULL return value indicates success.
- `void taos_close_stream (TAOS_STREAM *tstr)`
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index 72fcd05d52..bfa0456c7d 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -144,7 +144,7 @@ When a new dnode joins a TDengine cluster, several cluster-related parameters
 - numOfMnodes: the number of management nodes in the system. Default: 3.
 - balance: whether load balancing is enabled. 0: no, 1: yes. Default: 1.
 - mnodeEqualVnodeNum: the number of vnodes an mnode is counted as consuming. Default: 4.
-- offlineThreshold: the dnode offline threshold; a dnode offline for longer than this is removed from the cluster. Unit: seconds. Default: 86400*100 (i.e. 100 days).
+- offlineThreshold: the dnode offline threshold; a dnode offline for longer than this is removed from the cluster. Unit: seconds. Default: 86400*10 (i.e. 10 days).
 - statusInterval: the interval at which a dnode reports its status to the mnode. Unit: seconds. Default: 1.
 - maxTablesPerVnode: the maximum number of tables that can be created in each vnode. Default: 1000000.
 - maxVgroupsPerDb: the maximum number of vgroups usable in each database.
@@ -462,31 +462,31 @@ All TDengine executables are stored under the _/usr/local/taos/bin_ directory by default
 | Keyword List | | | | |
| ---------- | ----------- | ------------ | ---------- | --------- |
-| ABLOCKS | CONNECTIONS | HAVING | MODULES | SLIMIT |
-| ABORT | COPY | ID | NCHAR | SMALLINT |
-| ACCOUNT | COUNT | IF | NE | SPREAD |
-| ACCOUNTS | CREATE | IGNORE | NONE | STABLE |
-| ADD | CTIME | IMMEDIATE | NOT | STABLES |
-| AFTER | DATABASE | IMPORT | NOTNULL | STAR |
-| ALL | DATABASES | IN | NOW | STATEMENT |
-| ALTER | DAYS | INITIALLY | OF | STDDEV |
-| AND | DEFERRED | INSERT | OFFSET | STREAM |
-| AS | DELIMITERS | INSTEAD | OR | STREAMS |
-| ASC | DESC | INTEGER | ORDER | STRING |
-| ATTACH | DESCRIBE | INTERVAL | PASS | SUM |
-| AVG | DETACH | INTO | PERCENTILE | TABLE |
-| BEFORE | DIFF | IP | PLUS | TABLES |
-| BEGIN | DISTINCT | IS | PRAGMA | TAG |
-| BETWEEN | DIVIDE | ISNULL | PREV | TAGS |
-| BIGINT | DNODE | JOIN | PRIVILEGE | TBLOCKS |
-| BINARY | DNODES | KEEP | QUERIES | TBNAME |
-| BITAND | DOT | KEY | QUERY | TIMES |
-| BITNOT | DOUBLE | KILL | RAISE | TIMESTAMP |
-| BITOR | DROP | LAST | REM | TINYINT |
-| BOOL | EACH | LE | REPLACE | TOP |
-| BOTTOM | END | LEASTSQUARES | REPLICA | TOPIC |
-| BY | EQ | LIKE | RESET | TRIGGER |
-| CACHE | EXISTS | LIMIT | RESTRICT | UMINUS |
+| ABLOCKS | CONNECTIONS | HAVING | MODULES | SMALLINT |
+| ABORT | COPY | ID | NCHAR | SPREAD |
+| ACCOUNT | COUNT | IF | NE | STABLE |
+| ACCOUNTS | CREATE | IGNORE | NONE | STABLES |
+| ADD | CTIME | IMMEDIATE | NOT | STAR |
+| AFTER | DATABASE | IMPORT | NOTNULL | STATEMENT |
+| ALL | DATABASES | IN | NOW | STDDEV |
+| ALTER | DAYS | INITIALLY | OF | STREAM |
+| AND | DEFERRED | INSERT | OFFSET | STREAMS |
+| AS | DELIMITERS | INSTEAD | OR | STRING |
+| ASC | DESC | INTEGER | ORDER | SUM |
+| ATTACH | DESCRIBE | INTERVAL | PASS | TABLE |
+| AVG | DETACH | INTO | PERCENTILE | TABLES |
+| BEFORE | DIFF | IP | PLUS | TAG |
+| BEGIN | DISTINCT | IS | PRAGMA | TAGS |
+| BETWEEN | DIVIDE | ISNULL | PREV | TBLOCKS |
+| BIGINT | DNODE | JOIN | PRIVILEGE | TBNAME |
+| BINARY | DNODES | KEEP | QUERIES | TIMES |
+| BITAND | DOT | KEY | QUERY | TIMESTAMP |
+| BITNOT | DOUBLE | KILL | RAISE | TINYINT |
+| BITOR | DROP | LAST | REM | TOP |
+| BOOL | EACH | LE | REPLACE | TOPIC |
+| BOTTOM | END | LEASTSQUARES | REPLICA | TRIGGER |
+| BY | EQ | LIKE | RESET | UMINUS |
+| CACHE | EXISTS | LIMIT | RESTRICT | UNION |
| CASCADE | EXPLAIN | LINEAR | ROW | UPLUS |
| CHANGE | FAIL | LOCAL | ROWS | USE |
| CLOG | FILL | LP | RP | USER |
@@ -498,5 +498,5 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
| CONCAT | GLOB | METRICS | SHOW | VIEW |
| CONFIGS | GRANTS | MIN | SLASH | WAVG |
| CONFLICT | GROUP | MINUS | SLIDING | WHERE |
-| CONNECTION | GT | MNODES | | |
+| CONNECTION | GT | MNODES | SLIMIT | |
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index c754eae088..112ad99391 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -48,7 +48,7 @@ TDengine's default timestamp precision is milliseconds, but modifying the configuration parameter enableM
 | 3 | BIGINT | 8 | Long integer, range [-2^63+1, 2^63-1]; -2^63 is reserved for NULL |
 | 4 | FLOAT | 4 | Floating point, 6-7 significant digits, range [-3.4E38, 3.4E38] |
 | 5 | DOUBLE | 8 | Double-precision floating point, 15-16 significant digits, range [-1.7E308, 1.7E308] |
-| 6 | BINARY | Custom | Records binary byte strings; recommended only for ASCII-visible characters, while multi-byte characters such as Chinese must use nchar. In theory up to 16374 bytes, but since each row holds at most 16K bytes the practical limit is usually lower. binary accepts only string input, and strings must be enclosed in single quotes at both ends, otherwise all English letters are automatically converted to lowercase. A size must be specified when used: e.g. binary(20) defines a string of at most 20 byte characters, each taking 1 byte of storage; an error is raised if the user's string exceeds 20 bytes. A single quote inside a string can be written with the escape character backslash plus single quote, i.e. `\’`. |
+| 6 | BINARY | Custom | Records single-byte strings; recommended only for ASCII-visible characters, while multi-byte characters such as Chinese must use nchar. In theory up to 16374 bytes, but since each row holds at most 16K bytes the practical limit is usually lower. binary accepts only string input, and strings must be enclosed in single quotes at both ends. A size must be specified when used: e.g. binary(20) defines a string of at most 20 single-byte characters, each taking 1 byte of storage; an error is raised if the user's string exceeds 20 bytes. A single quote inside a string can be written with the escape character backslash plus single quote, i.e. `\’`. |
 | 7 | SMALLINT | 2 | Short integer, range [-32767, 32767]; -32768 is reserved for NULL |
 | 8 | TINYINT | 1 | Single-byte integer, range [-127, 127]; -128 is reserved for NULL |
 | 9 | BOOL | 1 | Boolean, {true, false} |
@@ -56,7 +56,7 @@ TDengine's default timestamp precision is milliseconds, but modifying the configuration parameter enableM
 **Tips**:
 1. TDengine treats English characters in SQL statements as case-insensitive and automatically converts them to lowercase for execution. Case-sensitive strings and passwords therefore need to be enclosed in single quotes.
-2. **Note:** although the Binary type supports byte-level binary characters in the underlying storage, different programming languages do not guarantee consistent handling of binary data, so it is recommended to store only ASCII-visible characters in Binary and to avoid invisible characters. Multi-byte data such as Chinese characters should be stored using the nchar type. If Chinese characters are forced into a Binary type, reads and writes may sometimes appear to work, but the data carries no character-set information and can easily end up garbled or even corrupted.
+2. **Note:** although the Binary type supports byte-level binary characters in the underlying storage, different programming languages do not guarantee consistent handling of binary data, so it is recommended to store only ASCII-visible characters in Binary and to avoid invisible characters. Multi-byte data such as Chinese characters should be stored using the nchar type. If Chinese characters are forced into a Binary type, reads and writes may sometimes appear to work, but the data carries no character-set information and can easily end up garbled, corrupted, or otherwise damaged.
 ## Database Management
@@ -407,18 +407,14 @@ SELECT select_expr [, select_expr ...]
[INTERVAL (interval_val [, interval_offset])]
[SLIDING sliding_val]
[FILL fill_val]
- [GROUP BY col_list [HAVING having_condition]]
+ [GROUP BY col_list]
[ORDER BY col_list { DESC | ASC }]
[SLIMIT limit_val [SOFFSET offset_val]]
[LIMIT limit_val [OFFSET offset_val]]
[>> export_file];
```
-#### SELECT Clause
-
-A select clause can be a union query (UNION) or a subquery (SUBQUERY) of another query.
-
-##### Wildcards
+#### Wildcards
 The wildcard * can be used to stand for all columns. For regular tables, the result contains only regular columns.
```mysql
@@ -470,7 +466,7 @@ Query OK, 1 row(s) in set (0.020443s)
```
 When using SQL functions in a query, some SQL functions support wildcards. The difference is:
-the ```count(\*)``` function returns only a single column, whereas the ```first```, ```last```, and ```last_row``` functions return all columns.
+the ```count(*)``` function returns only a single column, whereas the ```first```, ```last```, and ```last_row``` functions return all columns.
```mysql
taos> SELECT COUNT(*) FROM d1001;
@@ -488,7 +484,7 @@ taos> SELECT FIRST(*) FROM d1001;
Query OK, 1 row(s) in set (0.000849s)
```
-##### Tag Columns
+#### Tag Columns
 Starting with version 2.0.14, _tag columns_ can be specified in queries on regular tables, and the tag column values are returned together with the data of the regular columns.
```mysql
@@ -622,13 +618,15 @@ taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
Query OK, 1 row(s) in set (0.001091s)
```
-- You can use * to return all columns, or specify column names. Arithmetic operations can be applied to numeric columns, and output columns can be given aliases
-- The WHERE clause can use various logical predicates to filter numeric values, or use wildcards to filter strings
+- You can use * to return all columns, or specify column names. Arithmetic operations can be applied to numeric columns, and output columns can be given aliases.
+  * Arithmetic expressions over column names are not yet supported in filter operators (e.g., `where a*2>6;` is not supported, but `where a>6/2;` works).
+  * Arithmetic expressions over column names are not yet supported as arguments of SQL functions (e.g., `select min(2*a) from t;` is not supported, but `select 2*min(a) from t;` works).
+- The WHERE clause can use various logical predicates to filter numeric values, or use wildcards to filter strings.
 - Output is sorted by the timestamp of the first column in ascending order by default, but descending order can be specified (_c0 refers to the first timestamp column). Sorting other fields with ORDER BY is an illegal operation.
 - The LIMIT parameter controls the number of rows output, and OFFSET specifies from which row output starts. LIMIT/OFFSET is applied to the result set after ORDER BY.
   * When a GROUP BY clause is present, LIMIT controls at most how many rows may be output in each group.
 - The SLIMIT parameter controls at most how many of the groups partitioned by GROUP BY may be output.
-- Output can be exported to a specified file via ”>>"
+- Output can be exported to a specified file via ">>", as shown in the sketch below.
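+
+A minimal sketch (assuming the demo table d1001 and a path writable by the client):
+
+```mysql
+SELECT * FROM d1001 >> /tmp/d1001.csv;
+```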
 ### Supported Filter Operations
@@ -648,7 +646,8 @@ Query OK, 1 row(s) in set (0.001091s)
 2. For filtering on a single field: if it is a time filter condition, only one may be set per statement; for other (regular) columns or tag columns, however, the `OR` keyword can be used to combine filter conditions, e.g. ((value > 20 AND value < 30) OR (value < 12)).
 3. Starting with version 2.0.17, filtering supports the BETWEEN AND syntax; for example, `WHERE col2 BETWEEN 1.5 AND 3.25` expresses the condition "1.5 ≤ col2 ≤ 3.25".
-### HAVING filtering after GROUP BY
+
+
+### UNION ALL Operator
+
+```mysql
+SELECT ...
+UNION ALL SELECT ...
+[UNION ALL SELECT ...]
+```
+
+TDengine supports the UNION ALL operator. That is, if several SELECT clauses return result sets with exactly the same structure (column names, column types, column count, and order), these result sets can be merged with UNION ALL. Currently only the UNION ALL mode is supported, meaning no deduplication is performed while merging.
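+
+A minimal sketch (assuming two sub tables d1001 and d1002 with identical schemas):
+
+```mysql
+SELECT ts, current FROM d1001
+UNION ALL
+SELECT ts, current FROM d1002;
+```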
 ### SQL Examples
@@ -705,11 +715,11 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: all fields.
-  Applies to: tables and super tables.
+  Applies to: **tables and super tables**.
   Notes:
-  1) The asterisk * can be used in place of a specific field; using the asterisk (*) returns the total number of records.
+  1) The asterisk \* can be used in place of a specific field; using the asterisk (\*) returns the total number of records.
   2) For the same table, the query results over its (non-NULL) fields are all identical.
@@ -740,7 +750,7 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: cannot be applied to timestamp, binary, nchar, or bool fields.
-  Applies to: tables and super tables.
+  Applies to: **tables and super tables**.
   Example:
```mysql
@@ -767,7 +777,7 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: cannot be applied to fields of type timestamp, binary, nchar, or bool.
-  Applies to: tables.
+  Applies to: **tables**.
- **SUM**
```mysql
@@ -779,7 +789,7 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: cannot be applied to fields of type timestamp, binary, nchar, or bool.
-  Applies to: tables and super tables.
+  Applies to: **tables and super tables**.
   Example:
```mysql
@@ -806,7 +816,7 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: cannot be applied to fields of type timestamp, binary, nchar, or bool.
-  Applies to: tables. (Starting with version 2.0.15.1, this function also supports super tables)
+  Applies to: **tables**. (Starting with version 2.0.15.1, this function also supports **super tables**)
   Example:
```mysql
@@ -829,7 +839,7 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Note: the independent variable is the timestamp, and the dependent variable is the value of the column.
-  Applies to: tables.
+  Applies to: **tables**.
   Example:
```mysql
@@ -852,6 +862,8 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: cannot be applied to fields of type timestamp, binary, nchar, or bool.
+  Applies to: **tables and super tables**.
+
   Example:
```mysql
taos> SELECT MIN(current), MIN(voltage) FROM meters;
@@ -877,6 +889,8 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: cannot be applied to fields of type timestamp, binary, nchar, or bool.
+  Applies to: **tables and super tables**.
+
   Example:
```mysql
taos> SELECT MAX(current), MAX(voltage) FROM meters;
@@ -902,6 +916,8 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: all fields.
+  Applies to: **tables and super tables**.
+
   Notes:
   1) To return the first non-NULL value (the one with the smallest timestamp) of each column, use FIRST(\*);
@@ -935,6 +951,8 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: all fields.
+  Applies to: **tables and super tables**.
+
   Notes:
   1) To return the last non-NULL value (the one with the largest timestamp) of each column, use LAST(\*);
@@ -966,6 +984,8 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: cannot be applied to fields of type timestamp, binary, nchar, or bool.
+  Applies to: **tables and super tables**.
+
   Notes:
   1) The *k* value ranges over 1≤*k*≤100;
@@ -1000,6 +1020,8 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: cannot be applied to fields of type timestamp, binary, nchar, or bool.
+  Applies to: **tables and super tables**.
+
   Notes:
   1) The *k* value ranges over 1≤*k*≤100;
@@ -1033,6 +1055,8 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: cannot be applied to fields of type timestamp, binary, nchar, or bool.
+  Applies to: **tables**.
+
   Note: the *P* value ranges over 0≤*P*≤100; a P of 0 is equivalent to MIN, and a P of 100 to MAX.
   Example:
@@ -1048,12 +1072,14 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
```mysql
SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause];
```
-  Function: computes the percentile of the values of a column in a table; similar to the PERCENTILE function, but returns an approximate result.
+  Function: computes the percentile of the values of a column in a table/super table; similar to the PERCENTILE function, but returns an approximate result.
   Return data type: double-precision floating point (Double).
   Applicable fields: cannot be applied to fields of type timestamp, binary, nchar, or bool.
+  Applies to: **tables and super tables**.
+
   Note: the *P* value ranges over 0≤*P*≤100; a P of 0 is equivalent to MIN, and a P of 100 to MAX. The ```APERCENTILE``` function is recommended, as it performs far better than the ```PERCENTILE``` function
```mysql
@@ -1068,12 +1094,14 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
```mysql
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
```
-  Function: returns the last record of a table (super table).
+  Function: returns the last record of a table/super table.
   Return data type: same as the applied field.
   Applicable fields: all fields.
+  Applies to: **tables and super tables**.
+
   Note: unlike the last function, last_row does not support time-range restrictions and always returns the last record.
   Example:
@@ -1102,6 +1130,8 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: cannot be applied to fields of type timestamp, binary, nchar, or bool.
+  Applies to: **tables**.
+
   Note: the number of output rows is the total number of rows in the range minus one; the first row has no output.
   Example:
@@ -1124,6 +1154,8 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: cannot be applied to fields of type binary, nchar, or bool.
+  Applies to: **tables and super tables**.
+
   Note: can also be applied to a TIMESTAMP field, in which case it represents the time span covered by the records.
   Example:
@@ -1152,6 +1184,8 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
   Applicable fields: cannot be applied to fields of type timestamp, binary, nchar, or bool.
+  Applies to: **tables and super tables**.
+
   Notes:
   1) Computation between two or more columns is supported, and parentheses can be used to control the order of operations;
diff --git a/importSampleData/README.md b/importSampleData/README.md
index ee3a6e073c..56c5be0da4 100644
--- a/importSampleData/README.md
+++ b/importSampleData/README.md
@@ -97,7 +97,7 @@ go build -o bin/taosimport app/main.go
   Whether to save statistics into the statistic table in TDengine: 1 yes, 0 no. Default 0.
-* -savetb int
+* -savetb string
   The table name used to save statistics when save is 1. Default statistic.
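+
+For reference, a typical invocation combining these flags might look like the following (paths and case names are illustrative):
+
+```bash
+bin/taosimport -cfg config/cfg.toml -cases sensor_info -hnum 100 -vnum 1000 \
+  -save 1 -savetb statistic -host 127.0.0.1 -port 6030 -user root -password taosdata
+```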
diff --git a/importSampleData/app/main.go b/importSampleData/app/main.go
index 61de6e740c..5fee49734d 100644
--- a/importSampleData/app/main.go
+++ b/importSampleData/app/main.go
@@ -7,7 +7,6 @@ import (
"encoding/json"
"flag"
"fmt"
- "hash/crc32"
"io"
"log"
"os"
@@ -17,47 +16,55 @@ import (
"sync"
"time"
- dataimport "github.com/taosdata/TDengine/importSampleData/import"
+ dataImport "github.com/taosdata/TDengine/importSampleData/import"
_ "github.com/taosdata/driver-go/taosSql"
)
const (
- TIMESTAMP = "timestamp"
- DATETIME = "datetime"
- MILLISECOND = "millisecond"
- DEFAULT_STARTTIME int64 = -1
- DEFAULT_INTERVAL int64 = 1 * 1000
- DEFAULT_DELAY int64 = -1
- DEFAULT_STATISTIC_TABLE = "statistic"
+	// the primary key type must be timestamp
+ TIMESTAMP = "timestamp"
- JSON_FORMAT = "json"
- CSV_FORMAT = "csv"
- SUPERTABLE_PREFIX = "s_"
- SUBTABLE_PREFIX = "t_"
+	// whether the primary-key time field in the sample data uses millisecond or dateTime format
+ DATETIME = "datetime"
+ MILLISECOND = "millisecond"
- DRIVER_NAME = "taosSql"
- STARTTIME_LAYOUT = "2006-01-02 15:04:05.000"
- INSERT_PREFIX = "insert into "
+ DefaultStartTime int64 = -1
+	DefaultInterval int64 = 1 * 1000 // interval between imported records; only takes effect when auto=1, otherwise the interval is computed automatically from the sample data. Unit: milliseconds, default 1000.
+ DefaultDelay int64 = -1 //
+
+	// table name used to save statistics when save is 1; default statistic.
+ DefaultStatisticTable = "statistic"
+
+	// sample data file format, either json or csv
+ JsonFormat = "json"
+ CsvFormat = "csv"
+
+	SuperTablePrefix = "s_" // prefix for super table names
+	SubTablePrefix   = "t_" // prefix for sub table names
+
+ DriverName = "taosSql"
+ StartTimeLayout = "2006-01-02 15:04:05.000"
+ InsertPrefix = "insert into "
)
var (
- cfg string
- cases string
- hnum int
- vnum int
- thread int
- batch int
- auto int
- starttimestr string
- interval int64
- host string
- port int
- user string
- password string
- dropdb int
- db string
- dbparam string
+	cfg          string // path of the import config file, describing the sample data files and the matching TDengine settings; defaults to config/cfg.toml
+	cases        string // names of the use cases to import, as listed under [usecase] in the file given by -cfg; multiple cases may be imported at once, separated by commas, e.g. sensor_info,camera_detection; default sensor_info
+	hnum         int    // horizontal expansion factor for the sample data; assuming the sample contains one sub table t_0, setting hnum to 2 creates the two sub tables t and t_1 from the original name. Default 100.
+	vnum         int    // vertical expansion count for the sample data; 0 means keep importing at the configured interval once the historical data reaches the current time. Default 1000, i.e. the sample data is replicated 1000 times along the time axis
+	thread       int    // number of threads used for the import; default 10
+	batch        int    // batch size of the import, i.e. how many records a single write operation carries; default 100
+	auto         int    // whether to auto-generate the primary-key timestamps of the sample data: 1 yes, 0 no; default 0
+	startTimeStr string // start time of the imported records, in the format "yyyy-MM-dd HH:mm:ss.SSS"; if unset, the smallest time in the sample data is used; if set, the primary-key times in the sample are ignored and the import starts from the given start. Must be set when auto is 1; default empty
+	interval     int64  // interval between imported records; only takes effect when auto=1, otherwise the interval is computed automatically from the sample data. Unit: milliseconds, default 1000
+	host         string // IP of the target TDengine server; default 127.0.0.1
+	port         int    // port of the target TDengine server; default 6030
+	user         string // user name for the target TDengine; default root
+	password     string // user password for the target TDengine; default taosdata
+	dropdb       int    // whether to drop the database before importing: 1 yes, 0 no; default 0
+	db           string // name of the target TDengine database; default test_yyyyMMdd
+	dbparam      string // optional parameters used when the database does not exist and has to be created, e.g. days 10 cache 16000 ablocks 4; default empty
dataSourceName string
startTime int64
@@ -72,10 +79,10 @@ var (
lastStaticTime time.Time
lastTotalRows int64
timeTicker *time.Ticker
- delay int64 // default 10 milliseconds
- tick int64
- save int
- saveTable string
+	delay     int64 // interval to keep importing at when vnum is set to 0; defaults to half of the smallest record interval across all cases, in ms.
+	tick      int64 // interval at which statistics are printed; default 2000 ms.
+	save      int   // whether to save statistics into the statistic table in TDengine: 1 yes, 0 no; default 0.
+	saveTable string // table name used to save statistics when save is 1; default statistic.
)
type superTableConfig struct {
@@ -83,7 +90,7 @@ type superTableConfig struct {
endTime int64
cycleTime int64
avgInterval int64
- config dataimport.CaseConfig
+ config dataImport.CaseConfig
}
type scaleTableInfo struct {
@@ -92,14 +99,14 @@ type scaleTableInfo struct {
insertRows int64
}
-type tableRows struct {
- tableName string // tableName
- value string // values(...)
-}
+//type tableRows struct {
+// tableName string // tableName
+// value string // values(...)
+//}
type dataRows struct {
rows []map[string]interface{}
- config dataimport.CaseConfig
+ config dataImport.CaseConfig
}
func (rows dataRows) Len() int {
@@ -107,9 +114,9 @@ func (rows dataRows) Len() int {
}
func (rows dataRows) Less(i, j int) bool {
- itime := getPrimaryKey(rows.rows[i][rows.config.Timestamp])
- jtime := getPrimaryKey(rows.rows[j][rows.config.Timestamp])
- return itime < jtime
+ iTime := getPrimaryKey(rows.rows[i][rows.config.Timestamp])
+ jTime := getPrimaryKey(rows.rows[j][rows.config.Timestamp])
+ return iTime < jTime
}
func (rows dataRows) Swap(i, j int) {
@@ -123,26 +130,26 @@ func getPrimaryKey(value interface{}) int64 {
}
func init() {
- parseArg() //parse argument
+ parseArg() // parse argument
if db == "" {
- //db = "go"
+		// target TDengine database name, defaulting to test_yyyyMMdd
db = fmt.Sprintf("test_%s", time.Now().Format("20060102"))
}
- if auto == 1 && len(starttimestr) == 0 {
+ if auto == 1 && len(startTimeStr) == 0 {
log.Fatalf("startTime must be set when auto is 1, the format is \"yyyy-MM-dd HH:mm:ss.SSS\" ")
}
- if len(starttimestr) != 0 {
- t, err := time.ParseInLocation(STARTTIME_LAYOUT, strings.TrimSpace(starttimestr), time.Local)
+ if len(startTimeStr) != 0 {
+ t, err := time.ParseInLocation(StartTimeLayout, strings.TrimSpace(startTimeStr), time.Local)
if err != nil {
- log.Fatalf("param startTime %s error, %s\n", starttimestr, err)
+ log.Fatalf("param startTime %s error, %s\n", startTimeStr, err)
}
startTime = t.UnixNano() / 1e6 // as millisecond
} else {
- startTime = DEFAULT_STARTTIME
+ startTime = DefaultStartTime
}
dataSourceName = fmt.Sprintf("%s:%s@/tcp(%s:%d)/", user, password, host, port)
@@ -154,9 +161,9 @@ func init() {
func main() {
- importConfig := dataimport.LoadConfig(cfg)
+ importConfig := dataImport.LoadConfig(cfg)
- var caseMinumInterval int64 = -1
+ var caseMinInterval int64 = -1
for _, userCase := range strings.Split(cases, ",") {
caseConfig, ok := importConfig.UserCases[userCase]
@@ -168,7 +175,7 @@ func main() {
checkUserCaseConfig(userCase, &caseConfig)
- //read file as map array
+ // read file as map array
fileRows := readFile(caseConfig)
log.Printf("case [%s] sample data file contains %d rows.\n", userCase, len(fileRows.rows))
@@ -177,31 +184,31 @@ func main() {
continue
}
- _, exists := superTableConfigMap[caseConfig.Stname]
+ _, exists := superTableConfigMap[caseConfig.StName]
if !exists {
- superTableConfigMap[caseConfig.Stname] = &superTableConfig{config: caseConfig}
+ superTableConfigMap[caseConfig.StName] = &superTableConfig{config: caseConfig}
} else {
- log.Fatalf("the stname of case %s already exist.\n", caseConfig.Stname)
+ log.Fatalf("the stname of case %s already exist.\n", caseConfig.StName)
}
var start, cycleTime, avgInterval int64 = getSuperTableTimeConfig(fileRows)
// set super table's startTime, cycleTime and avgInterval
- superTableConfigMap[caseConfig.Stname].startTime = start
- superTableConfigMap[caseConfig.Stname].avgInterval = avgInterval
- superTableConfigMap[caseConfig.Stname].cycleTime = cycleTime
+ superTableConfigMap[caseConfig.StName].startTime = start
+ superTableConfigMap[caseConfig.StName].cycleTime = cycleTime
+ superTableConfigMap[caseConfig.StName].avgInterval = avgInterval
- if caseMinumInterval == -1 || caseMinumInterval > avgInterval {
- caseMinumInterval = avgInterval
+ if caseMinInterval == -1 || caseMinInterval > avgInterval {
+ caseMinInterval = avgInterval
}
- startStr := time.Unix(0, start*int64(time.Millisecond)).Format(STARTTIME_LAYOUT)
+ startStr := time.Unix(0, start*int64(time.Millisecond)).Format(StartTimeLayout)
log.Printf("case [%s] startTime %s(%d), average dataInterval %d ms, cycleTime %d ms.\n", userCase, startStr, start, avgInterval, cycleTime)
}
- if DEFAULT_DELAY == delay {
+ if DefaultDelay == delay {
// default delay
- delay = caseMinumInterval / 2
+ delay = caseMinInterval / 2
if delay < 1 {
delay = 1
}
@@ -218,7 +225,7 @@ func main() {
createSuperTable(superTableConfigMap)
log.Printf("create %d superTable ,used %d ms.\n", superTableNum, time.Since(start)/1e6)
- //create sub table
+ // create sub table
start = time.Now()
createSubTable(subTableMap)
log.Printf("create %d times of %d subtable ,all %d tables, used %d ms.\n", hnum, len(subTableMap), len(scaleTableMap), time.Since(start)/1e6)
@@ -278,7 +285,7 @@ func staticSpeed() {
defer connection.Close()
if save == 1 {
- connection.Exec("use " + db)
+ _, _ = connection.Exec("use " + db)
_, err := connection.Exec("create table if not exists " + saveTable + "(ts timestamp, speed int)")
if err != nil {
log.Fatalf("create %s Table error: %s\n", saveTable, err)
@@ -294,12 +301,12 @@ func staticSpeed() {
total := getTotalRows(successRows)
currentSuccessRows := total - lastTotalRows
- speed := currentSuccessRows * 1e9 / int64(usedTime)
+ speed := currentSuccessRows * 1e9 / usedTime
log.Printf("insert %d rows, used %d ms, speed %d rows/s", currentSuccessRows, usedTime/1e6, speed)
if save == 1 {
insertSql := fmt.Sprintf("insert into %s values(%d, %d)", saveTable, currentTime.UnixNano()/1e6, speed)
- connection.Exec(insertSql)
+ _, _ = connection.Exec(insertSql)
}
lastStaticTime = currentTime
@@ -327,12 +334,13 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i
} else {
// use the sample data primary timestamp
- sort.Sort(fileRows) // sort the file data by the primarykey
+ sort.Sort(fileRows) // sort the file data by the primaryKey
minTime := getPrimaryKey(fileRows.rows[0][fileRows.config.Timestamp])
maxTime := getPrimaryKey(fileRows.rows[len(fileRows.rows)-1][fileRows.config.Timestamp])
start = minTime // default startTime use the minTime
- if DEFAULT_STARTTIME != startTime {
+		// if a start time was specified, use it instead
+ if DefaultStartTime != startTime {
start = startTime
}
@@ -350,31 +358,21 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i
return
}
-func createStatisticTable() {
- connection := getConnection()
- defer connection.Close()
-
- _, err := connection.Exec("create table if not exist " + db + "." + saveTable + "(ts timestamp, speed int)")
- if err != nil {
- log.Fatalf("createStatisticTable error: %s\n", err)
- }
-}
-
func createSubTable(subTableMaps map[string]*dataRows) {
connection := getConnection()
defer connection.Close()
- connection.Exec("use " + db)
+ _, _ = connection.Exec("use " + db)
createTablePrefix := "create table if not exists "
+ var buffer bytes.Buffer
for subTableName := range subTableMaps {
- superTableName := getSuperTableName(subTableMaps[subTableName].config.Stname)
- tagValues := subTableMaps[subTableName].rows[0] // the first rows values as tags
+ superTableName := getSuperTableName(subTableMaps[subTableName].config.StName)
+ firstRowValues := subTableMaps[subTableName].rows[0] // the first rows values as tags
- buffers := bytes.Buffer{}
- // create table t using supertTable tags(...);
+ // create table t using superTable tags(...);
for i := 0; i < hnum; i++ {
tableName := getScaleSubTableName(subTableName, i)
@@ -384,21 +382,21 @@ func createSubTable(subTableMaps map[string]*dataRows) {
}
scaleTableNames = append(scaleTableNames, tableName)
- buffers.WriteString(createTablePrefix)
- buffers.WriteString(tableName)
- buffers.WriteString(" using ")
- buffers.WriteString(superTableName)
- buffers.WriteString(" tags(")
+ buffer.WriteString(createTablePrefix)
+ buffer.WriteString(tableName)
+ buffer.WriteString(" using ")
+ buffer.WriteString(superTableName)
+ buffer.WriteString(" tags(")
for _, tag := range subTableMaps[subTableName].config.Tags {
- tagValue := fmt.Sprintf("%v", tagValues[strings.ToLower(tag.Name)])
- buffers.WriteString("'" + tagValue + "'")
- buffers.WriteString(",")
+ tagValue := fmt.Sprintf("%v", firstRowValues[strings.ToLower(tag.Name)])
+ buffer.WriteString("'" + tagValue + "'")
+ buffer.WriteString(",")
}
- buffers.Truncate(buffers.Len() - 1)
- buffers.WriteString(")")
+ buffer.Truncate(buffer.Len() - 1)
+ buffer.WriteString(")")
- createTableSql := buffers.String()
- buffers.Reset()
+ createTableSql := buffer.String()
+ buffer.Reset()
//log.Printf("create table: %s\n", createTableSql)
_, err := connection.Exec(createTableSql)
@@ -420,7 +418,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
if err != nil {
log.Fatalf("drop database error: %s\n", err)
}
- log.Printf("dropDb: %s\n", dropDbSql)
+ log.Printf("dropdb: %s\n", dropDbSql)
}
createDbSql := "create database if not exists " + db + " " + dbparam
@@ -431,7 +429,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
}
log.Printf("createDb: %s\n", createDbSql)
- connection.Exec("use " + db)
+ _, _ = connection.Exec("use " + db)
prefix := "create table if not exists "
var buffer bytes.Buffer
@@ -464,7 +462,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
createSql := buffer.String()
buffer.Reset()
- //log.Printf("supertable: %s\n", createSql)
+ //log.Printf("superTable: %s\n", createSql)
_, err = connection.Exec(createSql)
if err != nil {
log.Fatalf("create supertable error: %s\n", err)
@@ -473,15 +471,15 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
}
-func getScaleSubTableName(subTableName string, hnum int) string {
- if hnum == 0 {
+func getScaleSubTableName(subTableName string, hNum int) string {
+ if hNum == 0 {
return subTableName
}
- return fmt.Sprintf("%s_%d", subTableName, hnum)
+ return fmt.Sprintf("%s_%d", subTableName, hNum)
}
-func getSuperTableName(stname string) string {
- return SUPERTABLE_PREFIX + stname
+func getSuperTableName(stName string) string {
+ return SuperTablePrefix + stName
}
/**
@@ -499,7 +497,7 @@ func normalizationData(fileRows dataRows, minTime int64) int64 {
row[fileRows.config.Timestamp] = getPrimaryKey(row[fileRows.config.Timestamp]) - minTime
- subTableName := getSubTableName(tableValue, fileRows.config.Stname)
+ subTableName := getSubTableName(tableValue, fileRows.config.StName)
value, ok := subTableMap[subTableName]
if !ok {
@@ -527,7 +525,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
continue
}
- subTableName := getSubTableName(tableValue, fileRows.config.Stname)
+ subTableName := getSubTableName(tableValue, fileRows.config.StName)
value, ok := currSubTableMap[subTableName]
if !ok {
@@ -543,7 +541,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
}
- var maxRows, tableRows int = 0, 0
+ var maxRows, tableRows = 0, 0
for tableName := range currSubTableMap {
tableRows = len(currSubTableMap[tableName].rows)
subTableMap[tableName] = currSubTableMap[tableName] // add to global subTableMap
@@ -556,7 +554,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
}
func getSubTableName(subTableValue string, superTableName string) string {
- return SUBTABLE_PREFIX + subTableValue + "_" + superTableName
+ return SubTablePrefix + subTableValue + "_" + superTableName
}
func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []int64) {
@@ -564,25 +562,25 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
defer connection.Close()
defer wg.Done()
- connection.Exec("use " + db) // use db
+ _, _ = connection.Exec("use " + db) // use db
log.Printf("thread-%d start insert into [%d, %d) subtables.\n", threadIndex, start, end)
num := 0
subTables := scaleTableNames[start:end]
+ var buffer bytes.Buffer
for {
var currSuccessRows int64
var appendRows int
var lastTableName string
- buffers := bytes.Buffer{}
- buffers.WriteString(INSERT_PREFIX)
+ buffer.WriteString(InsertPrefix)
for _, tableName := range subTables {
subTableInfo := subTableMap[scaleTableMap[tableName].subTableName]
subTableRows := int64(len(subTableInfo.rows))
- superTableConf := superTableConfigMap[subTableInfo.config.Stname]
+ superTableConf := superTableConfigMap[subTableInfo.config.StName]
tableStartTime := superTableConf.startTime
var tableEndTime int64
@@ -605,40 +603,35 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
// append
if lastTableName != tableName {
- buffers.WriteString(tableName)
- buffers.WriteString(" values")
+ buffer.WriteString(tableName)
+ buffer.WriteString(" values")
}
lastTableName = tableName
- buffers.WriteString("(")
- buffers.WriteString(fmt.Sprintf("%v", currentTime))
- buffers.WriteString(",")
+ buffer.WriteString("(")
+ buffer.WriteString(fmt.Sprintf("%v", currentTime))
+ buffer.WriteString(",")
- // fieldNum := len(subTableInfo.config.Fields)
for _, field := range subTableInfo.config.Fields {
- buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
- buffers.WriteString(",")
- // if( i != fieldNum -1){
-
- // }
+ buffer.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
+ buffer.WriteString(",")
}
- buffers.Truncate(buffers.Len() - 1)
- buffers.WriteString(") ")
+ buffer.Truncate(buffer.Len() - 1)
+ buffer.WriteString(") ")
appendRows++
insertRows++
if appendRows == batch {
- // executebatch
- insertSql := buffers.String()
- connection.Exec("use " + db)
+ // executeBatch
+ insertSql := buffer.String()
affectedRows := executeBatchInsert(insertSql, connection)
successRows[threadIndex] += affectedRows
currSuccessRows += affectedRows
- buffers.Reset()
- buffers.WriteString(INSERT_PREFIX)
+ buffer.Reset()
+ buffer.WriteString(InsertPrefix)
lastTableName = ""
appendRows = 0
}
@@ -654,15 +647,14 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
// left := len(rows)
if appendRows > 0 {
- // executebatch
- insertSql := buffers.String()
- connection.Exec("use " + db)
+ // executeBatch
+ insertSql := buffer.String()
affectedRows := executeBatchInsert(insertSql, connection)
successRows[threadIndex] += affectedRows
currSuccessRows += affectedRows
- buffers.Reset()
+ buffer.Reset()
}
// log.Printf("thread-%d finished insert %d rows, used %d ms.", threadIndex, currSuccessRows, time.Since(threadStartTime)/1e6)
@@ -688,65 +680,10 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
}
-func buildSql(rows []tableRows) string {
-
- var lastTableName string
-
- buffers := bytes.Buffer{}
-
- for i, row := range rows {
- if i == 0 {
- lastTableName = row.tableName
- buffers.WriteString(INSERT_PREFIX)
- buffers.WriteString(row.tableName)
- buffers.WriteString(" values")
- buffers.WriteString(row.value)
- continue
- }
-
- if lastTableName == row.tableName {
- buffers.WriteString(row.value)
- } else {
- buffers.WriteString(" ")
- buffers.WriteString(row.tableName)
- buffers.WriteString(" values")
- buffers.WriteString(row.value)
- lastTableName = row.tableName
- }
- }
-
- inserSql := buffers.String()
- return inserSql
-}
-
-func buildRow(tableName string, currentTime int64, subTableInfo *dataRows, currentRow map[string]interface{}) tableRows {
-
- tableRows := tableRows{tableName: tableName}
-
- buffers := bytes.Buffer{}
-
- buffers.WriteString("(")
- buffers.WriteString(fmt.Sprintf("%v", currentTime))
- buffers.WriteString(",")
-
- for _, field := range subTableInfo.config.Fields {
- buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
- buffers.WriteString(",")
- }
-
- buffers.Truncate(buffers.Len() - 1)
- buffers.WriteString(")")
-
- insertSql := buffers.String()
- tableRows.value = insertSql
-
- return tableRows
-}
-
func executeBatchInsert(insertSql string, connection *sql.DB) int64 {
- result, error := connection.Exec(insertSql)
- if error != nil {
- log.Printf("execute insertSql %s error, %s\n", insertSql, error)
+ result, err := connection.Exec(insertSql)
+ if err != nil {
+ log.Printf("execute insertSql %s error, %s\n", insertSql, err)
return 0
}
affected, _ := result.RowsAffected()
@@ -754,7 +691,6 @@ func executeBatchInsert(insertSql string, connection *sql.DB) int64 {
affected = 0
}
return affected
- // return 0
}
func getFieldValue(fieldValue interface{}) string {
@@ -762,7 +698,7 @@ func getFieldValue(fieldValue interface{}) string {
}
func getConnection() *sql.DB {
- db, err := sql.Open(DRIVER_NAME, dataSourceName)
+ db, err := sql.Open(DriverName, dataSourceName)
if err != nil {
panic(err)
}
@@ -773,19 +709,11 @@ func getSubTableNameValue(suffix interface{}) string {
return fmt.Sprintf("%v", suffix)
}
-func hash(s string) int {
- v := int(crc32.ChecksumIEEE([]byte(s)))
- if v < 0 {
- return -v
- }
- return v
-}
-
-func readFile(config dataimport.CaseConfig) dataRows {
+func readFile(config dataImport.CaseConfig) dataRows {
fileFormat := strings.ToLower(config.Format)
- if fileFormat == JSON_FORMAT {
+ if fileFormat == JsonFormat {
return readJSONFile(config)
- } else if fileFormat == CSV_FORMAT {
+ } else if fileFormat == CsvFormat {
return readCSVFile(config)
}
@@ -793,7 +721,7 @@ func readFile(config dataimport.CaseConfig) dataRows {
return dataRows{}
}
-func readCSVFile(config dataimport.CaseConfig) dataRows {
+func readCSVFile(config dataImport.CaseConfig) dataRows {
var rows dataRows
f, err := os.Open(config.FilePath)
if err != nil {
@@ -813,7 +741,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows {
line := strings.ToLower(string(lineBytes))
titles := strings.Split(line, config.Separator)
if len(titles) < 3 {
- // need suffix、 primarykey and at least one other field
+			// need suffix, primaryKey and at least one other field
log.Printf("the first line of file %s should be title row, and at least 3 field.\n", config.FilePath)
return rows
}
@@ -848,7 +776,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows {
}
// if the primary key valid
- primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap)
+ primaryKeyValue := getPrimaryKeyMilliSec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap)
if primaryKeyValue == -1 {
log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum)
continue
@@ -861,7 +789,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows {
return rows
}
-func readJSONFile(config dataimport.CaseConfig) dataRows {
+func readJSONFile(config dataImport.CaseConfig) dataRows {
var rows dataRows
f, err := os.Open(config.FilePath)
@@ -899,7 +827,7 @@ func readJSONFile(config dataimport.CaseConfig) dataRows {
continue
}
- primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line)
+ primaryKeyValue := getPrimaryKeyMilliSec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line)
if primaryKeyValue == -1 {
log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum)
continue
@@ -916,7 +844,7 @@ func readJSONFile(config dataimport.CaseConfig) dataRows {
/**
* get primary key as millisecond , otherwise return -1
*/
-func getPrimaryKeyMillisec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 {
+func getPrimaryKeyMilliSec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 {
if !existMapKeyAndNotEmpty(key, line) {
return -1
}
@@ -971,13 +899,13 @@ func existMapKeyAndNotEmpty(key string, maps map[string]interface{}) bool {
return true
}
-func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) {
+func checkUserCaseConfig(caseName string, caseConfig *dataImport.CaseConfig) {
- if len(caseConfig.Stname) == 0 {
+ if len(caseConfig.StName) == 0 {
log.Fatalf("the stname of case %s can't be empty\n", caseName)
}
- caseConfig.Stname = strings.ToLower(caseConfig.Stname)
+ caseConfig.StName = strings.ToLower(caseConfig.StName)
if len(caseConfig.Tags) == 0 {
log.Fatalf("the tags of case %s can't be empty\n", caseName)
@@ -1029,24 +957,24 @@ func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) {
}
func parseArg() {
- flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes usecase and data format.")
- flag.StringVar(&cases, "cases", "sensor_info", "usecase for dataset to be imported. Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.")
+ flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes useCase and data format.")
+ flag.StringVar(&cases, "cases", "sensor_info", "useCase for dataset to be imported. Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.")
flag.IntVar(&hnum, "hnum", 100, "magnification factor of the sample tables. For example, if hnum is 100 and in the sample data there are 10 tables, then 10x100=1000 tables will be created in the database.")
flag.IntVar(&vnum, "vnum", 1000, "copies of the sample records in each table. If set to 0,this program will never stop simulating and importing data even if the timestamp has passed current time.")
- flag.Int64Var(&delay, "delay", DEFAULT_DELAY, "the delay time interval(millisecond) to continue generating data when vnum set 0.")
+ flag.Int64Var(&delay, "delay", DefaultDelay, "the delay time interval(millisecond) to continue generating data when vnum set 0.")
flag.Int64Var(&tick, "tick", 2000, "the tick time interval(millisecond) to print statistic info.")
flag.IntVar(&save, "save", 0, "whether to save the statistical info into 'statistic' table. 0 is disabled and 1 is enabled.")
- flag.StringVar(&saveTable, "savetb", DEFAULT_STATISTIC_TABLE, "the table to save 'statistic' info when save set 1.")
+ flag.StringVar(&saveTable, "savetb", DefaultStatisticTable, "the table to save 'statistic' info when save set 1.")
flag.IntVar(&thread, "thread", 10, "number of threads to import data.")
flag.IntVar(&batch, "batch", 100, "rows of records in one import batch.")
- flag.IntVar(&auto, "auto", 0, "whether to use the starttime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.")
- flag.StringVar(&starttimestr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the ealiest timestamp in the sample data will be set as the starttime.")
- flag.Int64Var(&interval, "interval", DEFAULT_INTERVAL, "time inteval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.")
+ flag.IntVar(&auto, "auto", 0, "whether to use the startTime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.")
+ flag.StringVar(&startTimeStr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the earliest timestamp in the sample data will be set as the startTime.")
+ flag.Int64Var(&interval, "interval", DefaultInterval, "time interval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.")
	flag.StringVar(&host, "host", "127.0.0.1", "TDengine server IP address.")
	flag.IntVar(&port, "port", 6030, "TDengine server port.")
	flag.StringVar(&user, "user", "root", "user name used to log in to the database.")
	flag.StringVar(&password, "password", "taosdata", "password of the TDengine user used for the import.")
- flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing datbase. 1 is yes and 0 otherwise.")
+ flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing database. 1 is yes and 0 otherwise.")
flag.StringVar(&db, "db", "", "name of the database to store data.")
flag.StringVar(&dbparam, "dbparam", "", "database configurations when it is created.")
@@ -1066,7 +994,7 @@ func printArg() {
fmt.Println("-thread:", thread)
fmt.Println("-batch:", batch)
fmt.Println("-auto:", auto)
- fmt.Println("-start:", starttimestr)
+ fmt.Println("-start:", startTimeStr)
fmt.Println("-interval:", interval)
fmt.Println("-host:", host)
fmt.Println("-port", port)
diff --git a/importSampleData/data/sensor_info.csv b/importSampleData/data/sensor_info.csv
index d049c8b004..c5ff898118 100644
--- a/importSampleData/data/sensor_info.csv
+++ b/importSampleData/data/sensor_info.csv
@@ -899,103 +899,103 @@ devid,location,color,devgroup,ts,temperature,humidity
8, haerbing, yellow, 2, 1575129697000, 31, 16.321497
8, haerbing, yellow, 2, 1575129698000, 25, 15.864515
8, haerbing, yellow, 2, 1575129699000, 25, 16.492443
-9, sijiazhuang, blue, 0, 1575129600000, 23, 16.002889
-9, sijiazhuang, blue, 0, 1575129601000, 26, 17.034610
-9, sijiazhuang, blue, 0, 1575129602000, 29, 12.892319
-9, sijiazhuang, blue, 0, 1575129603000, 34, 15.321807
-9, sijiazhuang, blue, 0, 1575129604000, 29, 12.562642
-9, sijiazhuang, blue, 0, 1575129605000, 32, 17.190246
-9, sijiazhuang, blue, 0, 1575129606000, 19, 15.361774
-9, sijiazhuang, blue, 0, 1575129607000, 26, 15.022364
-9, sijiazhuang, blue, 0, 1575129608000, 31, 14.837084
-9, sijiazhuang, blue, 0, 1575129609000, 25, 11.554289
-9, sijiazhuang, blue, 0, 1575129610000, 21, 15.313973
-9, sijiazhuang, blue, 0, 1575129611000, 27, 18.621783
-9, sijiazhuang, blue, 0, 1575129612000, 31, 18.018101
-9, sijiazhuang, blue, 0, 1575129613000, 23, 14.421450
-9, sijiazhuang, blue, 0, 1575129614000, 28, 10.833142
-9, sijiazhuang, blue, 0, 1575129615000, 33, 18.169837
-9, sijiazhuang, blue, 0, 1575129616000, 21, 18.772730
-9, sijiazhuang, blue, 0, 1575129617000, 24, 18.893146
-9, sijiazhuang, blue, 0, 1575129618000, 24, 10.290187
-9, sijiazhuang, blue, 0, 1575129619000, 23, 17.393345
-9, sijiazhuang, blue, 0, 1575129620000, 30, 12.949215
-9, sijiazhuang, blue, 0, 1575129621000, 19, 19.267621
-9, sijiazhuang, blue, 0, 1575129622000, 33, 14.831735
-9, sijiazhuang, blue, 0, 1575129623000, 21, 14.711125
-9, sijiazhuang, blue, 0, 1575129624000, 16, 17.168485
-9, sijiazhuang, blue, 0, 1575129625000, 17, 16.426433
-9, sijiazhuang, blue, 0, 1575129626000, 19, 13.879050
-9, sijiazhuang, blue, 0, 1575129627000, 21, 18.308168
-9, sijiazhuang, blue, 0, 1575129628000, 17, 10.845681
-9, sijiazhuang, blue, 0, 1575129629000, 20, 10.238272
-9, sijiazhuang, blue, 0, 1575129630000, 19, 19.424976
-9, sijiazhuang, blue, 0, 1575129631000, 31, 13.885909
-9, sijiazhuang, blue, 0, 1575129632000, 15, 19.264740
-9, sijiazhuang, blue, 0, 1575129633000, 30, 12.460645
-9, sijiazhuang, blue, 0, 1575129634000, 27, 17.608036
-9, sijiazhuang, blue, 0, 1575129635000, 25, 13.493812
-9, sijiazhuang, blue, 0, 1575129636000, 19, 10.955939
-9, sijiazhuang, blue, 0, 1575129637000, 24, 11.956587
-9, sijiazhuang, blue, 0, 1575129638000, 15, 19.141381
-9, sijiazhuang, blue, 0, 1575129639000, 24, 14.801530
-9, sijiazhuang, blue, 0, 1575129640000, 17, 14.347318
-9, sijiazhuang, blue, 0, 1575129641000, 29, 14.803237
-9, sijiazhuang, blue, 0, 1575129642000, 28, 10.342297
-9, sijiazhuang, blue, 0, 1575129643000, 29, 19.368282
-9, sijiazhuang, blue, 0, 1575129644000, 31, 17.491654
-9, sijiazhuang, blue, 0, 1575129645000, 18, 13.161736
-9, sijiazhuang, blue, 0, 1575129646000, 17, 16.067354
-9, sijiazhuang, blue, 0, 1575129647000, 18, 13.736465
-9, sijiazhuang, blue, 0, 1575129648000, 23, 19.103276
-9, sijiazhuang, blue, 0, 1575129649000, 29, 16.075892
-9, sijiazhuang, blue, 0, 1575129650000, 21, 10.728566
-9, sijiazhuang, blue, 0, 1575129651000, 15, 18.921849
-9, sijiazhuang, blue, 0, 1575129652000, 24, 16.914709
-9, sijiazhuang, blue, 0, 1575129653000, 19, 13.501651
-9, sijiazhuang, blue, 0, 1575129654000, 19, 13.538347
-9, sijiazhuang, blue, 0, 1575129655000, 16, 13.261095
-9, sijiazhuang, blue, 0, 1575129656000, 32, 16.315746
-9, sijiazhuang, blue, 0, 1575129657000, 27, 16.400939
-9, sijiazhuang, blue, 0, 1575129658000, 24, 13.321819
-9, sijiazhuang, blue, 0, 1575129659000, 27, 19.070181
-9, sijiazhuang, blue, 0, 1575129660000, 27, 13.040922
-9, sijiazhuang, blue, 0, 1575129661000, 32, 10.872530
-9, sijiazhuang, blue, 0, 1575129662000, 28, 16.428657
-9, sijiazhuang, blue, 0, 1575129663000, 32, 13.883854
-9, sijiazhuang, blue, 0, 1575129664000, 33, 14.299554
-9, sijiazhuang, blue, 0, 1575129665000, 30, 16.445130
-9, sijiazhuang, blue, 0, 1575129666000, 15, 18.059404
-9, sijiazhuang, blue, 0, 1575129667000, 21, 12.348847
-9, sijiazhuang, blue, 0, 1575129668000, 32, 13.315378
-9, sijiazhuang, blue, 0, 1575129669000, 17, 15.689507
-9, sijiazhuang, blue, 0, 1575129670000, 22, 15.591808
-9, sijiazhuang, blue, 0, 1575129671000, 27, 16.386065
-9, sijiazhuang, blue, 0, 1575129672000, 25, 10.564803
-9, sijiazhuang, blue, 0, 1575129673000, 20, 12.276544
-9, sijiazhuang, blue, 0, 1575129674000, 26, 15.828786
-9, sijiazhuang, blue, 0, 1575129675000, 18, 12.236420
-9, sijiazhuang, blue, 0, 1575129676000, 15, 19.439522
-9, sijiazhuang, blue, 0, 1575129677000, 19, 19.831531
-9, sijiazhuang, blue, 0, 1575129678000, 22, 17.115744
-9, sijiazhuang, blue, 0, 1575129679000, 29, 19.879456
-9, sijiazhuang, blue, 0, 1575129680000, 34, 10.207136
-9, sijiazhuang, blue, 0, 1575129681000, 16, 17.633523
-9, sijiazhuang, blue, 0, 1575129682000, 15, 14.227873
-9, sijiazhuang, blue, 0, 1575129683000, 34, 12.027768
-9, sijiazhuang, blue, 0, 1575129684000, 22, 11.376610
-9, sijiazhuang, blue, 0, 1575129685000, 21, 11.711299
-9, sijiazhuang, blue, 0, 1575129686000, 33, 14.281126
-9, sijiazhuang, blue, 0, 1575129687000, 31, 10.895302
-9, sijiazhuang, blue, 0, 1575129688000, 31, 13.971350
-9, sijiazhuang, blue, 0, 1575129689000, 15, 15.262790
-9, sijiazhuang, blue, 0, 1575129690000, 23, 12.440568
-9, sijiazhuang, blue, 0, 1575129691000, 32, 19.731267
-9, sijiazhuang, blue, 0, 1575129692000, 22, 10.518092
-9, sijiazhuang, blue, 0, 1575129693000, 34, 17.863021
-9, sijiazhuang, blue, 0, 1575129694000, 28, 11.478909
-9, sijiazhuang, blue, 0, 1575129695000, 16, 15.075524
-9, sijiazhuang, blue, 0, 1575129696000, 16, 10.292127
-9, sijiazhuang, blue, 0, 1575129697000, 22, 13.716012
-9, sijiazhuang, blue, 0, 1575129698000, 32, 10.906551
-9, sijiazhuang, blue, 0, 1575129699000, 19, 18.386868
\ No newline at end of file
+9, shijiazhuang, blue, 0, 1575129600000, 23, 16.002889
+9, shijiazhuang, blue, 0, 1575129601000, 26, 17.034610
+9, shijiazhuang, blue, 0, 1575129602000, 29, 12.892319
+9, shijiazhuang, blue, 0, 1575129603000, 34, 15.321807
+9, shijiazhuang, blue, 0, 1575129604000, 29, 12.562642
+9, shijiazhuang, blue, 0, 1575129605000, 32, 17.190246
+9, shijiazhuang, blue, 0, 1575129606000, 19, 15.361774
+9, shijiazhuang, blue, 0, 1575129607000, 26, 15.022364
+9, shijiazhuang, blue, 0, 1575129608000, 31, 14.837084
+9, shijiazhuang, blue, 0, 1575129609000, 25, 11.554289
+9, shijiazhuang, blue, 0, 1575129610000, 21, 15.313973
+9, shijiazhuang, blue, 0, 1575129611000, 27, 18.621783
+9, shijiazhuang, blue, 0, 1575129612000, 31, 18.018101
+9, shijiazhuang, blue, 0, 1575129613000, 23, 14.421450
+9, shijiazhuang, blue, 0, 1575129614000, 28, 10.833142
+9, shijiazhuang, blue, 0, 1575129615000, 33, 18.169837
+9, shijiazhuang, blue, 0, 1575129616000, 21, 18.772730
+9, shijiazhuang, blue, 0, 1575129617000, 24, 18.893146
+9, shijiazhuang, blue, 0, 1575129618000, 24, 10.290187
+9, shijiazhuang, blue, 0, 1575129619000, 23, 17.393345
+9, shijiazhuang, blue, 0, 1575129620000, 30, 12.949215
+9, shijiazhuang, blue, 0, 1575129621000, 19, 19.267621
+9, shijiazhuang, blue, 0, 1575129622000, 33, 14.831735
+9, shijiazhuang, blue, 0, 1575129623000, 21, 14.711125
+9, shijiazhuang, blue, 0, 1575129624000, 16, 17.168485
+9, shijiazhuang, blue, 0, 1575129625000, 17, 16.426433
+9, shijiazhuang, blue, 0, 1575129626000, 19, 13.879050
+9, shijiazhuang, blue, 0, 1575129627000, 21, 18.308168
+9, shijiazhuang, blue, 0, 1575129628000, 17, 10.845681
+9, shijiazhuang, blue, 0, 1575129629000, 20, 10.238272
+9, shijiazhuang, blue, 0, 1575129630000, 19, 19.424976
+9, shijiazhuang, blue, 0, 1575129631000, 31, 13.885909
+9, shijiazhuang, blue, 0, 1575129632000, 15, 19.264740
+9, shijiazhuang, blue, 0, 1575129633000, 30, 12.460645
+9, shijiazhuang, blue, 0, 1575129634000, 27, 17.608036
+9, shijiazhuang, blue, 0, 1575129635000, 25, 13.493812
+9, shijiazhuang, blue, 0, 1575129636000, 19, 10.955939
+9, shijiazhuang, blue, 0, 1575129637000, 24, 11.956587
+9, shijiazhuang, blue, 0, 1575129638000, 15, 19.141381
+9, shijiazhuang, blue, 0, 1575129639000, 24, 14.801530
+9, shijiazhuang, blue, 0, 1575129640000, 17, 14.347318
+9, shijiazhuang, blue, 0, 1575129641000, 29, 14.803237
+9, shijiazhuang, blue, 0, 1575129642000, 28, 10.342297
+9, shijiazhuang, blue, 0, 1575129643000, 29, 19.368282
+9, shijiazhuang, blue, 0, 1575129644000, 31, 17.491654
+9, shijiazhuang, blue, 0, 1575129645000, 18, 13.161736
+9, shijiazhuang, blue, 0, 1575129646000, 17, 16.067354
+9, shijiazhuang, blue, 0, 1575129647000, 18, 13.736465
+9, shijiazhuang, blue, 0, 1575129648000, 23, 19.103276
+9, shijiazhuang, blue, 0, 1575129649000, 29, 16.075892
+9, shijiazhuang, blue, 0, 1575129650000, 21, 10.728566
+9, shijiazhuang, blue, 0, 1575129651000, 15, 18.921849
+9, shijiazhuang, blue, 0, 1575129652000, 24, 16.914709
+9, shijiazhuang, blue, 0, 1575129653000, 19, 13.501651
+9, shijiazhuang, blue, 0, 1575129654000, 19, 13.538347
+9, shijiazhuang, blue, 0, 1575129655000, 16, 13.261095
+9, shijiazhuang, blue, 0, 1575129656000, 32, 16.315746
+9, shijiazhuang, blue, 0, 1575129657000, 27, 16.400939
+9, shijiazhuang, blue, 0, 1575129658000, 24, 13.321819
+9, shijiazhuang, blue, 0, 1575129659000, 27, 19.070181
+9, shijiazhuang, blue, 0, 1575129660000, 27, 13.040922
+9, shijiazhuang, blue, 0, 1575129661000, 32, 10.872530
+9, shijiazhuang, blue, 0, 1575129662000, 28, 16.428657
+9, shijiazhuang, blue, 0, 1575129663000, 32, 13.883854
+9, shijiazhuang, blue, 0, 1575129664000, 33, 14.299554
+9, shijiazhuang, blue, 0, 1575129665000, 30, 16.445130
+9, shijiazhuang, blue, 0, 1575129666000, 15, 18.059404
+9, shijiazhuang, blue, 0, 1575129667000, 21, 12.348847
+9, shijiazhuang, blue, 0, 1575129668000, 32, 13.315378
+9, shijiazhuang, blue, 0, 1575129669000, 17, 15.689507
+9, shijiazhuang, blue, 0, 1575129670000, 22, 15.591808
+9, shijiazhuang, blue, 0, 1575129671000, 27, 16.386065
+9, shijiazhuang, blue, 0, 1575129672000, 25, 10.564803
+9, shijiazhuang, blue, 0, 1575129673000, 20, 12.276544
+9, shijiazhuang, blue, 0, 1575129674000, 26, 15.828786
+9, shijiazhuang, blue, 0, 1575129675000, 18, 12.236420
+9, shijiazhuang, blue, 0, 1575129676000, 15, 19.439522
+9, shijiazhuang, blue, 0, 1575129677000, 19, 19.831531
+9, shijiazhuang, blue, 0, 1575129678000, 22, 17.115744
+9, shijiazhuang, blue, 0, 1575129679000, 29, 19.879456
+9, shijiazhuang, blue, 0, 1575129680000, 34, 10.207136
+9, shijiazhuang, blue, 0, 1575129681000, 16, 17.633523
+9, shijiazhuang, blue, 0, 1575129682000, 15, 14.227873
+9, shijiazhuang, blue, 0, 1575129683000, 34, 12.027768
+9, shijiazhuang, blue, 0, 1575129684000, 22, 11.376610
+9, shijiazhuang, blue, 0, 1575129685000, 21, 11.711299
+9, shijiazhuang, blue, 0, 1575129686000, 33, 14.281126
+9, shijiazhuang, blue, 0, 1575129687000, 31, 10.895302
+9, shijiazhuang, blue, 0, 1575129688000, 31, 13.971350
+9, shijiazhuang, blue, 0, 1575129689000, 15, 15.262790
+9, shijiazhuang, blue, 0, 1575129690000, 23, 12.440568
+9, shijiazhuang, blue, 0, 1575129691000, 32, 19.731267
+9, shijiazhuang, blue, 0, 1575129692000, 22, 10.518092
+9, shijiazhuang, blue, 0, 1575129693000, 34, 17.863021
+9, shijiazhuang, blue, 0, 1575129694000, 28, 11.478909
+9, shijiazhuang, blue, 0, 1575129695000, 16, 15.075524
+9, shijiazhuang, blue, 0, 1575129696000, 16, 10.292127
+9, shijiazhuang, blue, 0, 1575129697000, 22, 13.716012
+9, shijiazhuang, blue, 0, 1575129698000, 32, 10.906551
+9, shijiazhuang, blue, 0, 1575129699000, 19, 18.386868
\ No newline at end of file
diff --git a/importSampleData/go.mod b/importSampleData/go.mod
new file mode 100644
index 0000000000..fa1d978e59
--- /dev/null
+++ b/importSampleData/go.mod
@@ -0,0 +1,8 @@
+module github.com/taosdata/TDengine/importSampleData
+
+go 1.13
+
+require (
+ github.com/pelletier/go-toml v1.9.0 // indirect
+ github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28 // indirect
+)
diff --git a/importSampleData/import/import_config.go b/importSampleData/import/import_config.go
index e7942cc505..fdaeeab7da 100644
--- a/importSampleData/import/import_config.go
+++ b/importSampleData/import/import_config.go
@@ -14,23 +14,23 @@ var (
once sync.Once
)
-// Config inclue all scene import config
+// Config includes all scene import configs
type Config struct {
UserCases map[string]CaseConfig
}
// CaseConfig includes the sample data config and the TDengine config
type CaseConfig struct {
- Format string
- FilePath string
- Separator string
- Stname string
- SubTableName string
- Timestamp string
- TimestampType string
- TimestampTypeFormat string
- Tags []FieldInfo
- Fields []FieldInfo
+ Format string
+ FilePath string
+ Separator string
+ StName string
+ SubTableName string
+ Timestamp string
+ TimestampType string
+ TimestampTypeFormat string
+ Tags []FieldInfo
+ Fields []FieldInfo
}
// FieldInfo is field or tag info
diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index 83b70ed9f8..d3bd7510a3 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -64,7 +64,7 @@
# monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster only
-# offlineThreshold 8640000
+# offlineThreshold 864000
# RPC re-try timer, millisecond
# rpcTimer 300
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 31343ed293..43006928a6 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,7 @@
name: tdengine
base: core18
-version: '2.0.20.0'
+
+version: '2.1.0.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@@ -72,7 +73,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- - usr/lib/libtaos.so.2.0.20.0
+ - usr/lib/libtaos.so.2.1.0.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h
index 15ef54b7b1..f0349c2b3d 100644
--- a/src/client/inc/tscSubquery.h
+++ b/src/client/inc/tscSubquery.h
@@ -48,6 +48,8 @@ void tscLockByThread(int64_t *lockedBy);
void tscUnlockByThread(int64_t *lockedBy);
+int tsInsertInitialCheck(SSqlObj *pSql);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index 231d52bae4..851adf94a4 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -175,7 +175,8 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo);
static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
-int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
+int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize);
+int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
@@ -308,7 +309,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
-int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
+int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
diff --git a/src/client/inc/tschemautil.h b/src/client/inc/tschemautil.h
index a9dcd230a6..0026a27e19 100644
--- a/src/client/inc/tschemautil.h
+++ b/src/client/inc/tschemautil.h
@@ -21,8 +21,8 @@ extern "C" {
#endif
#include "taosmsg.h"
-#include "tstoken.h"
#include "tsclient.h"
+#include "ttoken.h"
/**
* get the number of tags of this table
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index 4d21337497..bc301b2b42 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -84,6 +84,7 @@ typedef struct STableMeta {
typedef struct STableMetaInfo {
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
+ uint32_t tableMetaSize;
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray
@@ -154,13 +155,12 @@ typedef struct STagCond {
typedef struct SParamInfo {
int32_t idx;
- char type;
+ uint8_t type;
uint8_t timePrec;
int16_t bytes;
uint32_t offset;
} SParamInfo;
-
typedef struct SBoundColumn {
bool hasVal; // denote if current column has bound or not
int32_t offset; // all column offset value
@@ -376,7 +376,8 @@ typedef struct SSqlObj {
tsem_t rspSem;
SSqlCmd cmd;
SSqlRes res;
-
+ bool isBind;
+
SSubqueryState subState;
struct SSqlObj **pSubs;
diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
index b3060e2c82..04bccc1a4a 100644
--- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
+++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
@@ -100,7 +100,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: isUpdateQueryImp
- * Signature: (J)J
+ * Signature: (JJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp
(JNIEnv *env, jobject jobj, jlong con, jlong tres);
@@ -185,6 +185,44 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp
(JNIEnv *, jobject, jlong, jbyteArray);
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: prepareStmtImp
+ * Signature: ([BJ)J
+ */
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp
+ (JNIEnv *, jobject, jbyteArray, jlong);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: setBindTableNameImp
+ * Signature: (JLjava/lang/String;J)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp
+ (JNIEnv *, jobject, jlong, jstring, jlong);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: bindColDataImp
+ * Signature: (J[B[B[BIIIIJ)J
+ */
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp
+(JNIEnv *, jobject, jlong, jbyteArray, jbyteArray, jbyteArray, jint, jint, jint, jint, jlong);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: executeBatchImp
+ * Signature: (JJ)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method:    closeStmt
+ * Signature: (JJ)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c
index 7447e36ac9..da7da17aa3 100644
--- a/src/client/src/TSDBJNIConnector.c
+++ b/src/client/src/TSDBJNIConnector.c
@@ -687,4 +687,194 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrec
}
return taos_result_precision(result);
-}
\ No newline at end of file
+}
+
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(JNIEnv *env, jobject jobj, jbyteArray jsql, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ if (jsql == NULL) {
+ jniError("jobj:%p, conn:%p, empty sql string", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ jsize len = (*env)->GetArrayLength(env, jsql);
+
+ char *str = (char *) calloc(1, sizeof(char) * (len + 1));
+ if (str == NULL) {
+ jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon);
+ return JNI_OUT_OF_MEMORY;
+ }
+
+ (*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str);
+ if ((*env)->ExceptionCheck(env)) {
+ // todo handle error
+ }
+
+ TAOS_STMT* pStmt = taos_stmt_init(tscon);
+  int32_t code = taos_stmt_prepare(pStmt, str, len);
+  free(str);  // free the local copy on both paths to avoid leaking it on error
+  if (code != TSDB_CODE_SUCCESS) {
+    jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+    return JNI_TDENGINE_ERROR;
+  }
+
+ return (jlong) pStmt;
+}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp(JNIEnv *env, jobject jobj, jlong stmt, jstring jname, jlong conn) {
+ TAOS *tsconn = (TAOS *)conn;
+ if (tsconn == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT* pStmt = (TAOS_STMT*) stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn);
+ return JNI_SQL_NULL;
+ }
+
+ const char *name = (*env)->GetStringUTFChars(env, jname, NULL);
+
+ int32_t code = taos_stmt_set_tbname((void*)stmt, name);
+ if (code != TSDB_CODE_SUCCESS) {
+ (*env)->ReleaseStringUTFChars(env, jname, name);
+
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ jniDebug("jobj:%p, conn:%p, set stmt bind table name:%s", jobj, tsconn, name);
+
+ (*env)->ReleaseStringUTFChars(env, jname, name);
+ return JNI_SUCCESS;
+}
+
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(JNIEnv *env, jobject jobj, jlong stmt,
+ jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList, jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT* pStmt = (TAOS_STMT*) stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ // todo refactor
+ jsize len = (*env)->GetArrayLength(env, colDataList);
+ char *colBuf = (char *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, colDataList, 0, len, (jbyte *)colBuf);
+ if ((*env)->ExceptionCheck(env)) {
+ // todo handle error
+ }
+
+ len = (*env)->GetArrayLength(env, lengthList);
+ char *lengthArray = (char*) calloc(1, len);
+ (*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte*) lengthArray);
+  if ((*env)->ExceptionCheck(env)) {
+    // todo handle error
+  }
+
+ len = (*env)->GetArrayLength(env, nullList);
+ char *nullArray = (char*) calloc(1, len);
+ (*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte*) nullArray);
+  if ((*env)->ExceptionCheck(env)) {
+    // todo handle error
+  }
+
+ // bind multi-rows with only one invoke.
+ TAOS_MULTI_BIND* b = calloc(1, sizeof(TAOS_MULTI_BIND));
+
+ b->num = numOfRows;
+ b->buffer_type = dataType; // todo check data type
+ b->buffer_length = IS_VAR_DATA_TYPE(dataType)? dataBytes:tDataTypes[dataType].bytes;
+ b->is_null = nullArray;
+ b->buffer = colBuf;
+ b->length = (int32_t*)lengthArray;
+
+ // set the length and is_null array
+ switch(dataType) {
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ case TSDB_DATA_TYPE_BIGINT: {
+ int32_t bytes = tDataTypes[dataType].bytes;
+ for(int32_t i = 0; i < numOfRows; ++i) {
+ b->length[i] = bytes;
+ }
+ break;
+ }
+
+ case TSDB_DATA_TYPE_NCHAR:
+ case TSDB_DATA_TYPE_BINARY: {
+ // do nothing
+ }
+ }
+
+ int32_t code = taos_stmt_bind_single_param_batch(pStmt, b, colIndex);
+ tfree(b->length);
+ tfree(b->buffer);
+ tfree(b->is_null);
+ tfree(b);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ return JNI_SUCCESS;
+}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT *pStmt = (TAOS_STMT*) stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ taos_stmt_add_batch(pStmt);
+ int32_t code = taos_stmt_execute(pStmt);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ jniDebug("jobj:%p, conn:%p, batch execute", jobj, tscon);
+ return JNI_SUCCESS;
+}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT *pStmt = (TAOS_STMT*) stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ int32_t code = taos_stmt_close(pStmt);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon);
+ return JNI_SUCCESS;
+}
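
Note: these JNI entry points are thin wrappers over the native prepared-statement API extended in this change. Below is a minimal sketch of the underlying call sequence for the new multi-table insert path; the connection, the table name "d1001", and the values are hypothetical, and error handling is elided.

#include <stdint.h>
#include <string.h>
#include "taos.h"   // TDengine client header (assumed include path)

static void insert_sketch(TAOS *conn) {
  TAOS_STMT *stmt = taos_stmt_init(conn);
  const char *sql = "insert into ? values(?, ?)";
  taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));

  taos_stmt_set_tbname(stmt, "d1001");               // bind the target table

  int64_t ts[2]    = {1575129600000LL, 1575129601000LL};
  int32_t lens[2]  = {sizeof(int64_t), sizeof(int64_t)};
  char    nulls[2] = {0, 0};

  TAOS_MULTI_BIND ts_col = {0};
  ts_col.buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  ts_col.buffer        = ts;
  ts_col.buffer_length = sizeof(int64_t);            // per-row stride
  ts_col.length        = lens;
  ts_col.is_null       = nulls;
  ts_col.num           = 2;
  taos_stmt_bind_single_param_batch(stmt, &ts_col, 0);  // column index 0

  /* ... bind the remaining column(s) the same way ... */

  taos_stmt_add_batch(stmt);                         // close out this table's rows
  taos_stmt_execute(stmt);
  taos_stmt_close(stmt);
}
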
diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c
index 71c960b454..09b31e4b19 100644
--- a/src/client/src/tscAsync.c
+++ b/src/client/src/tscAsync.c
@@ -49,7 +49,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
pSql->sqlstr = calloc(1, sqlLen + 1);
if (pSql->sqlstr == NULL) {
- tscError("%p failed to malloc sql string buffer", pSql);
+ tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscAsyncResultOnError(pSql);
return;
@@ -81,7 +81,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
TAOS_RES * taos_query_ra(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) {
- tscError("bug!!! pObj:%p", pObj);
+ tscError("pObj:%p is NULL or freed", pObj);
terrno = TSDB_CODE_TSC_DISCONNECTED;
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
return NULL;
@@ -288,7 +288,7 @@ static void tscAsyncResultCallback(SSchedMsg *pMsg) {
}
assert(pSql->res.code != TSDB_CODE_SUCCESS);
- tscError("%p invoke user specified function due to error occurred, code:%s", pSql, tstrerror(pSql->res.code));
+ tscError("0x%"PRIx64" async result callback, code:%s", pSql->self, tstrerror(pSql->res.code));
SSqlRes *pRes = &pSql->res;
if (pSql->fp == NULL || pSql->fetchFp == NULL){
@@ -368,7 +368,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlObj *sub = (SSqlObj*) res;
const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
if (code != TSDB_CODE_SUCCESS) {
- tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code));
+ tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code));
goto _error;
}
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index b4d03ec461..6b55780af9 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -326,6 +326,7 @@ TAOS_ROW tscFetchRow(void *param) {
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
+ pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
@@ -679,6 +680,9 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
assert(pTableMetaInfo->pTableMeta != NULL);
const char* tableName = tNameGetTableName(&pTableMetaInfo->name);
+ if (pSql->cmd.command == TSDB_SQL_SHOW_CREATE_STABLE && !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
int32_t code = TSDB_CODE_SUCCESS;
@@ -907,7 +911,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
*/
pRes->qId = 0x1;
pRes->numOfRows = 0;
- } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE) {
+ } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE || pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE) {
pRes->code = tscProcessShowCreateTable(pSql);
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) {
pRes->code = tscProcessShowCreateDatabase(pSql);
@@ -926,7 +930,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
pRes->code = tscProcessServStatus(pSql);
} else {
pRes->code = TSDB_CODE_TSC_INVALID_SQL;
- tscError("%p not support command:%d", pSql, pCmd->command);
+ tscError("0x%"PRIx64" not support command:%d", pSql->self, pCmd->command);
}
// keep the code in local variable in order to avoid invalid read in case of async query
diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c
index 3450a3173f..f5b691324a 100644
--- a/src/client/src/tscLocalMerge.c
+++ b/src/client/src/tscLocalMerge.c
@@ -113,14 +113,14 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
if (pMemBuffer == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
- tscError("%p pMemBuffer is NULL", pMemBuffer);
+ tscError("pMemBuffer:%p is NULL", pMemBuffer);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return;
}
if (pDesc->pColumnModel == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
- tscError("%p no local buffer or intermediate result format model", pSql);
+ tscError("0x%"PRIx64" no local buffer or intermediate result format model", pSql->self);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return;
}
@@ -144,7 +144,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
}
if (pDesc->pColumnModel->capacity >= pMemBuffer[0]->pageSize) {
- tscError("%p Invalid value of buffer capacity %d and page size %d ", pSql, pDesc->pColumnModel->capacity,
+ tscError("0x%"PRIx64" Invalid value of buffer capacity %d and page size %d ", pSql->self, pDesc->pColumnModel->capacity,
pMemBuffer[0]->pageSize);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
@@ -156,7 +156,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
SLocalMerger *pMerger = (SLocalMerger *) calloc(1, size);
if (pMerger == NULL) {
- tscError("%p failed to create local merge structure, out of memory", pSql);
+ tscError("0x%"PRIx64" failed to create local merge structure, out of memory", pSql->self);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -180,7 +180,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
for (int32_t j = 0; j < numOfFlushoutInFile; ++j) {
SLocalDataSource *ds = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize);
if (ds == NULL) {
- tscError("%p failed to create merge structure", pSql);
+ tscError("0x%"PRIx64" failed to create merge structure", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tfree(pMerger);
return;
@@ -538,7 +538,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
(*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * pSql->subState.numOfSub);
if (*pMemBuffer == NULL) {
- tscError("%p failed to allocate memory", pSql);
+ tscError("0x%"PRIx64" failed to allocate memory", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return pRes->code;
}
@@ -547,7 +547,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
pSchema = (SSchema *)calloc(1, sizeof(SSchema) * size);
if (pSchema == NULL) {
- tscError("%p failed to allocate memory", pSql);
+ tscError("0x%"PRIx64" failed to allocate memory", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return pRes->code;
}
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index 68c974359c..0fad4f97f5 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -29,8 +29,7 @@
#include "taosdef.h"
#include "tscLog.h"
-#include "tscSubquery.h"
-#include "tstoken.h"
+#include "ttoken.h"
#include "tdataformat.h"
@@ -68,7 +67,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
} else if (strncmp(pToken->z, "0", 1) == 0 && pToken->n == 1) {
// do nothing
} else if (pToken->type == TK_INTEGER) {
- useconds = tsosStr2int64(pToken->z);
+ useconds = taosStr2int64(pToken->z);
} else {
// strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm);
if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
@@ -386,7 +385,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
* The server time/client time should not be mixed up in one sql string
* Do not employ sort operation is not involved if server time is used.
*/
-static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
+int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
// once the data block is disordered, we do NOT keep previous timestamp any more
if (!pDataBlocks->ordered) {
return TSDB_CODE_SUCCESS;
@@ -411,6 +410,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
if (k <= pDataBlocks->prevTS && (pDataBlocks->tsSource == TSDB_USE_CLI_TS)) {
pDataBlocks->ordered = false;
+    tscWarn("input timestamp is out of order");
}
pDataBlocks->prevTS = k;
@@ -463,23 +463,24 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
// Remove quotation marks
if (TK_STRING == sToken.type) {
// delete escape character: \\, \', \"
- char delim = sToken.z[0];
+ char delim = sToken.z[0];
+
int32_t cnt = 0;
int32_t j = 0;
for (uint32_t k = 1; k < sToken.n - 1; ++k) {
- if (sToken.z[k] == delim || sToken.z[k] == '\\') {
- if (sToken.z[k + 1] == delim) {
- cnt++;
+ if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) {
tmpTokenBuf[j] = sToken.z[k + 1];
- j++;
- k++;
- continue;
- }
+
+ cnt++;
+ j++;
+ k++;
+ continue;
}
tmpTokenBuf[j] = sToken.z[k];
j++;
}
+
tmpTokenBuf[j] = 0;
sToken.z = tmpTokenBuf;
sToken.n -= 2 + cnt;
@@ -693,6 +694,8 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
pBlocks->numOfRows = i + 1;
dataBuf->size = sizeof(SSubmitBlk) + dataBuf->rowSize * pBlocks->numOfRows;
}
+
+ dataBuf->prevTS = INT64_MIN;
}
static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
@@ -705,19 +708,11 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
}
code = TSDB_CODE_TSC_INVALID_SQL;
- char *tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \"
- if (NULL == tmpTokenBuf) {
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
+  char tmpTokenBuf[16*1024] = {0};  // used for removing escape characters: \\, \', \"
int32_t numOfRows = 0;
code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);
- free(tmpTokenBuf);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
SParamInfo *param = dataBuf->params + i;
if (param->idx == -1) {
@@ -934,6 +929,42 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
}
+  /* parse columns after the super table's tag values.
+ * insert into table_name using super_table(tag_name1, tag_name2) tags(tag_val1, tag_val2)
+ * (normal_col1, normal_col2) values(normal_col1_val, normal_col2_val);
+ * */
+ index = 0;
+ sToken = tStrGetToken(sql, &index, false);
+ sql += index;
+ int numOfColsAfterTags = 0;
+ if (sToken.type == TK_LP) {
+ if (*boundColumn != NULL) {
+ return tscSQLSyntaxErrMsg(pCmd->payload, "bind columns again", sToken.z);
+ } else {
+ *boundColumn = &sToken.z[0];
+ }
+
+ while (1) {
+ index = 0;
+ sToken = tStrGetToken(sql, &index, false);
+
+ if (sToken.type == TK_RP) {
+ break;
+ }
+
+ sql += index;
+ ++numOfColsAfterTags;
+ }
+
+ if (numOfColsAfterTags == 0 && (*boundColumn) != NULL) {
+ return TSDB_CODE_TSC_INVALID_SQL;
+ }
+
+ sToken = tStrGetToken(sql, &index, false);
+ }
+
+ sql = sToken.z;
+
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
}
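
Note: the comment in the hunk above describes the new statement shape the parser accepts, where a bound column list may follow the tags clause of an auto-created table. A hypothetical example, assuming an open TAOS* connection conn and illustrative table, super table, and column names:

/* Auto-create d1001 from super table meters, binding only two columns. */
const char *sql =
    "insert into d1001 using meters (location, devgroup) "
    "tags ('beijing', 2) (ts, temperature) values (1575129600000, 23)";
TAOS_RES *res = taos_query(conn, sql);
taos_free_result(res);
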
@@ -975,7 +1006,7 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken) {
psTblToken->n = len;
psTblToken->type = TK_ID;
- tSQLGetToken(psTblToken->z, &psTblToken->type);
+ tGetToken(psTblToken->z, &psTblToken->type);
return tscValidateName(psTblToken);
}
@@ -1147,7 +1178,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
return code;
}
- tscError("%p async insert parse error, code:%s", pSql, tstrerror(code));
+ tscError("0x%"PRIx64" async insert parse error, code:%s", pSql->self, tstrerror(code));
pCmd->curSql = NULL;
goto _clean;
}
@@ -1262,7 +1293,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean;
}
- if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
+ if ((pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) && taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
goto _clean;
}
@@ -1415,7 +1446,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
assert(pSql->res.numOfRows == 0);
int32_t ret = fseek(fp, 0, SEEK_SET);
if (ret < 0) {
- tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
+ tscError("0x%"PRIx64" failed to seek SEEK_SET since:%s", pSql->self, tstrerror(errno));
code = TAOS_SYSTEM_ERROR(errno);
goto _error;
}
@@ -1536,7 +1567,7 @@ void tscImportDataFromFile(SSqlObj *pSql) {
FILE *fp = fopen(pCmd->payload, "rb");
if (fp == NULL) {
pSql->res.code = TAOS_SYSTEM_ERROR(errno);
- tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
+ tscError("0x%"PRIx64" failed to open file %s to load data from file, code:%s", pSql->self, pCmd->payload, tstrerror(pSql->res.code));
tfree(pSupporter);
taos_free_result(pNew);
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index c3c8986e2f..dc1bc35fc0 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -24,6 +24,7 @@
#include "tscSubquery.h"
int tsParseInsertSql(SSqlObj *pSql);
+int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start);
////////////////////////////////////////////////////////////////////////////////
// functions for normal statement preparation
@@ -43,10 +44,32 @@ typedef struct SNormalStmt {
tVariant* params;
} SNormalStmt;
+typedef struct SMultiTbStmt {
+ bool nameSet;
+ uint64_t currentUid;
+ uint32_t tbNum;
+ SStrToken tbname;
+ SHashObj *pTableHash;
+ SHashObj *pTableBlockHashList; // data block for each table
+} SMultiTbStmt;
+
+typedef enum {
+ STMT_INIT = 1,
+ STMT_PREPARE,
+ STMT_SETTBNAME,
+ STMT_BIND,
+ STMT_BIND_COL,
+ STMT_ADD_BATCH,
+ STMT_EXECUTE
+} STMT_ST;
+
typedef struct STscStmt {
bool isInsert;
+ bool multiTbInsert;
+ int16_t last;
STscObj* taos;
SSqlObj* pSql;
+ SMultiTbStmt mtb;
SNormalStmt normal;
} STscStmt;
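
Note: the new last field drives a simple state machine; the status checks added further below reject out-of-order calls. A rough sketch of the intended transitions for the multi-table insert path, inferred from those checks:

/*
 * STMT_INIT      --taos_stmt_prepare-->      STMT_PREPARE
 * STMT_PREPARE   --taos_stmt_set_tbname-->   STMT_SETTBNAME
 * STMT_SETTBNAME --bind / bind_batch-->      STMT_BIND / STMT_BIND_COL
 * STMT_BIND(_COL)--taos_stmt_add_batch-->    STMT_ADD_BATCH
 * STMT_ADD_BATCH --taos_stmt_set_tbname-->   STMT_SETTBNAME  (next table)
 * STMT_ADD_BATCH --taos_stmt_execute-->      STMT_EXECUTE
 */
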
@@ -135,7 +158,7 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
break;
default:
- tscDebug("param %d: type mismatch or invalid", i);
+      tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", stmt->pSql->self, i);
return TSDB_CODE_TSC_INVALID_VALUE;
}
}
@@ -151,7 +174,7 @@ static int normalStmtPrepare(STscStmt* stmt) {
while (sql[i] != 0) {
SStrToken token = {0};
- token.n = tSQLGetToken(sql + i, &token.type);
+ token.n = tGetToken(sql + i, &token.type);
if (token.type == TK_QUESTION) {
sql[i] = 0;
@@ -255,12 +278,13 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
////////////////////////////////////////////////////////////////////////////////
// functions for insertion statement preparation
-static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
+static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) {
if (bind->is_null != NULL && *(bind->is_null)) {
setNull(data + param->offset, param->type, param->bytes);
return TSDB_CODE_SUCCESS;
}
+#if 0
if (0) {
// allow user bind param data with different type
union {
@@ -641,6 +665,7 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
}
}
}
+#endif
if (bind->buffer_type != param->type) {
return TSDB_CODE_TSC_INVALID_VALUE;
@@ -690,29 +715,106 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
}
memcpy(data + param->offset, bind->buffer, size);
+ if (param->offset == 0) {
+ if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
+ tscError("invalid timestamp");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) {
+ if (bind->buffer_type != param->type || !isValidDataType(param->type)) {
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ if (IS_VAR_DATA_TYPE(param->type) && bind->length == NULL) {
+    tscError("BINARY/NCHAR data requires a length array");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ for (int i = 0; i < bind->num; ++i) {
+ char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (rowNum + i);
+
+ if (bind->is_null != NULL && bind->is_null[i]) {
+ setNull(data + param->offset, param->type, param->bytes);
+ continue;
+ }
+
+ if (!IS_VAR_DATA_TYPE(param->type)) {
+ memcpy(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, tDataTypes[param->type].bytes);
+
+ if (param->offset == 0) {
+ if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
+ tscError("invalid timestamp");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ }
+ } else if (param->type == TSDB_DATA_TYPE_BINARY) {
+ if (bind->length[i] > (uintptr_t)param->bytes) {
+        tscError("binary length too long, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ int16_t bsize = (short)bind->length[i];
+ STR_WITH_SIZE_TO_VARSTR(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, bsize);
+ } else if (param->type == TSDB_DATA_TYPE_NCHAR) {
+ if (bind->length[i] > (uintptr_t)param->bytes) {
+        tscError("nchar string length too long, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ int32_t output = 0;
+ if (!taosMbsToUcs4((char *)bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
+ tscError("convert nchar string to UCS4_LE failed:%s", (char*)((char *)bind->buffer + bind->buffer_length * i));
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ varDataSetLen(data + param->offset, output);
+ }
+ }
+
return TSDB_CODE_SUCCESS;
}
static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
SSqlCmd* pCmd = &stmt->pSql->cmd;
-
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
-
- STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- if (pCmd->pTableBlockHashList == NULL) {
- pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
- }
-
+ STscStmt* pStmt = (STscStmt*)stmt;
+
STableDataBlocks* pBlock = NULL;
+
+ if (pStmt->multiTbInsert) {
+ if (pCmd->pTableBlockHashList == NULL) {
+ tscError("0x%"PRIx64" Table block hash list is empty", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
+ if (t1 == NULL) {
+      tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRIu64, pStmt->pSql->self, pStmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
- int32_t ret =
- tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
- pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
- if (ret != 0) {
- // todo handle error
+ pBlock = *t1;
+ } else {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
+
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ if (pCmd->pTableBlockHashList == NULL) {
+ pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ }
+
+ int32_t ret =
+ tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ if (ret != 0) {
+ return ret;
+ }
}
- uint32_t totalDataSize = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
+ uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + 1) * pBlock->rowSize;
if (totalDataSize > pBlock->nAllocSize) {
const double factor = 1.5;
@@ -729,9 +831,9 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
SParamInfo* param = &pBlock->params[j];
- int code = doBindParam(data, param, &bind[param->idx]);
+ int code = doBindParam(pBlock, data, param, &bind[param->idx], 1);
if (code != TSDB_CODE_SUCCESS) {
- tscDebug("param %d: type mismatch or invalid", param->idx);
+ tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
return code;
}
}
@@ -739,9 +841,135 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
return TSDB_CODE_SUCCESS;
}
+
+static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
+ SSqlCmd* pCmd = &stmt->pSql->cmd;
+ STscStmt* pStmt = (STscStmt*)stmt;
+ int rowNum = bind->num;
+
+ STableDataBlocks* pBlock = NULL;
+
+ if (pStmt->multiTbInsert) {
+ if (pCmd->pTableBlockHashList == NULL) {
+ tscError("0x%"PRIx64" Table block hash list is empty", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
+ if (t1 == NULL) {
+      tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRIu64, pStmt->pSql->self, pStmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pBlock = *t1;
+ } else {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
+
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ if (pCmd->pTableBlockHashList == NULL) {
+ pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ }
+
+ int32_t ret =
+ tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ assert(colIdx == -1 || (colIdx >= 0 && colIdx < pBlock->numOfParams));
+
+ uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + rowNum) * pBlock->rowSize;
+ if (totalDataSize > pBlock->nAllocSize) {
+ const double factor = 1.5;
+
+ void* tmp = realloc(pBlock->pData, (uint32_t)(totalDataSize * factor));
+ if (tmp == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ pBlock->pData = (char*)tmp;
+ pBlock->nAllocSize = (uint32_t)(totalDataSize * factor);
+ }
+
+ if (colIdx == -1) {
+ for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
+ SParamInfo* param = &pBlock->params[j];
+ if (bind[param->idx].num != rowNum) {
+        tscError("0x%"PRIx64" param %d: num[%d:%d] mismatch", pStmt->pSql->self, param->idx, rowNum, bind[param->idx].num);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
+ return code;
+ }
+ }
+
+ pCmd->batchSize += rowNum - 1;
+ } else {
+ SParamInfo* param = &pBlock->params[colIdx];
+
+ int code = doBindBatchParam(pBlock, param, bind, pCmd->batchSize);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
+ return code;
+ }
+
+ if (colIdx == (pBlock->numOfParams - 1)) {
+ pCmd->batchSize += rowNum - 1;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+static int insertStmtUpdateBatch(STscStmt* stmt) {
+ SSqlObj* pSql = stmt->pSql;
+ SSqlCmd* pCmd = &pSql->cmd;
+ STableDataBlocks* pBlock = NULL;
+
+ if (pCmd->batchSize > INT16_MAX) {
+    tscError("too many records:%d", pCmd->batchSize);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ assert(pCmd->numOfClause == 1);
+ if (taosHashGetSize(pCmd->pTableBlockHashList) == 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&stmt->mtb.currentUid, sizeof(stmt->mtb.currentUid));
+ if (t1 == NULL) {
+    tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRIu64, pSql->self, stmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pBlock = *t1;
+
+ STableMeta* pTableMeta = pBlock->pTableMeta;
+
+ pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
+ SSubmitBlk* pBlk = (SSubmitBlk*) pBlock->pData;
+ pBlk->numOfRows = pCmd->batchSize;
+ pBlk->dataLen = 0;
+ pBlk->uid = pTableMeta->id.uid;
+ pBlk->tid = pTableMeta->id.tid;
+
+ return TSDB_CODE_SUCCESS;
+}
+
static int insertStmtAddBatch(STscStmt* stmt) {
SSqlCmd* pCmd = &stmt->pSql->cmd;
++pCmd->batchSize;
+
+ if (stmt->multiTbInsert) {
+ return insertStmtUpdateBatch(stmt);
+ }
+
return TSDB_CODE_SUCCESS;
}
@@ -835,6 +1063,83 @@ static int insertStmtExecute(STscStmt* stmt) {
return pSql->res.code;
}
+static void insertBatchClean(STscStmt* pStmt) {
+ SSqlCmd *pCmd = &pStmt->pSql->cmd;
+ SSqlObj *pSql = pStmt->pSql;
+ int32_t size = taosHashGetSize(pCmd->pTableBlockHashList);
+
+ // data block reset
+ pCmd->batchSize = 0;
+
+ for(int32_t i = 0; i < size; ++i) {
+ if (pCmd->pTableNameList && pCmd->pTableNameList[i]) {
+ tfree(pCmd->pTableNameList[i]);
+ }
+ }
+
+ tfree(pCmd->pTableNameList);
+
+/*
+ STableDataBlocks** p = taosHashIterate(pCmd->pTableBlockHashList, NULL);
+
+ STableDataBlocks* pOneTableBlock = *p;
+
+ while (1) {
+ SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
+
+ pOneTableBlock->size = sizeof(SSubmitBlk);
+
+ pBlocks->numOfRows = 0;
+
+ p = taosHashIterate(pCmd->pTableBlockHashList, p);
+ if (p == NULL) {
+ break;
+ }
+
+ pOneTableBlock = *p;
+ }
+*/
+
+ pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
+ pCmd->numOfTables = 0;
+
+ taosHashEmpty(pCmd->pTableBlockHashList);
+ tscFreeSqlResult(pSql);
+ tscFreeSubobj(pSql);
+ tfree(pSql->pSubs);
+ pSql->subState.numOfSub = 0;
+}
+
+static int insertBatchStmtExecute(STscStmt* pStmt) {
+ int32_t code = 0;
+
+ if(pStmt->mtb.nameSet == false) {
+ tscError("0x%"PRIx64" no table name set", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->pSql->retry = pStmt->pSql->maxRetry + 1; //no retry
+
+ if (taosHashGetSize(pStmt->pSql->cmd.pTableBlockHashList) > 0) { // merge according to vgId
+ if ((code = tscMergeTableDataBlocks(pStmt->pSql, false)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ }
+
+ code = tscHandleMultivnodeInsert(pStmt->pSql);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ // wait for the callback function to post the semaphore
+ tsem_wait(&pStmt->pSql->rspSem);
+
+ insertBatchClean(pStmt);
+
+ return pStmt->pSql->res.code;
+}
+
////////////////////////////////////////////////////////////////////////////////
// interface functions
@@ -866,7 +1171,9 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
pSql->signature = pSql;
pSql->pTscObj = pObj;
pSql->maxRetry = TSDB_MAX_REPLICA;
+ pSql->isBind = true;
pStmt->pSql = pSql;
+ pStmt->last = STMT_INIT;
return pStmt;
}
@@ -879,6 +1186,13 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
return TSDB_CODE_TSC_DISCONNECTED;
}
+ if (pStmt->last != STMT_INIT) {
+ tscError("prepare status error, last:%d", pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->last = STMT_PREPARE;
+
SSqlObj* pSql = pStmt->pSql;
size_t sqlLen = strlen(sql);
@@ -917,6 +1231,36 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
registerSqlObj(pSql);
+ int32_t ret = TSDB_CODE_SUCCESS;
+
+ if ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS) {
+ return ret;
+ }
+
+ int32_t index = 0;
+ SStrToken sToken = tStrGetToken(pCmd->curSql, &index, false);
+
+ if (sToken.n == 0) {
+ return TSDB_CODE_TSC_INVALID_SQL;
+ }
+
+ if (sToken.n == 1 && sToken.type == TK_QUESTION) {
+ pStmt->multiTbInsert = true;
+ pStmt->mtb.tbname = sToken;
+ pStmt->mtb.nameSet = false;
+ if (pStmt->mtb.pTableHash == NULL) {
+ pStmt->mtb.pTableHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+ }
+ if (pStmt->mtb.pTableBlockHashList == NULL) {
+ pStmt->mtb.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ }
+
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pStmt->multiTbInsert = false;
+ memset(&pStmt->mtb, 0, sizeof(pStmt->mtb));
+
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
// wait for the callback function to post the semaphore
@@ -931,6 +1275,105 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
return normalStmtPrepare(pStmt);
}
+
+int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+ SSqlObj* pSql = pStmt->pSql;
+ SSqlCmd* pCmd = &pSql->cmd;
+
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
+ if (name == NULL) {
+ terrno = TSDB_CODE_TSC_APP_ERROR;
+ tscError("0x%"PRIx64" name is NULL", pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pStmt->multiTbInsert == false || !tscIsInsertData(pSql->sqlstr)) {
+ terrno = TSDB_CODE_TSC_APP_ERROR;
+    tscError("0x%"PRIx64" not a multi-table insert statement", pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pStmt->last == STMT_INIT || pStmt->last == STMT_BIND || pStmt->last == STMT_BIND_COL) {
+ tscError("0x%"PRIx64" settbname status error, last:%d", pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->last = STMT_SETTBNAME;
+
+ uint64_t* uid = (uint64_t*)taosHashGet(pStmt->mtb.pTableHash, name, strlen(name));
+ if (uid != NULL) {
+ pStmt->mtb.currentUid = *uid;
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pStmt->mtb.pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
+ if (t1 == NULL) {
+      tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRIu64, pSql->self, pStmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData;
+ pCmd->batchSize = pBlk->numOfRows;
+
+ taosHashPut(pCmd->pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)t1, POINTER_BYTES);
+
+ tscDebug("0x%"PRIx64" table:%s is already prepared, uid:%" PRIu64, pSql->self, name, pStmt->mtb.currentUid);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pStmt->mtb.tbname = tscReplaceStrToken(&pSql->sqlstr, &pStmt->mtb.tbname, name);
+ pStmt->mtb.nameSet = true;
+
+ tscDebug("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
+
+ pSql->cmd.parseFinished = 0;
+ pSql->cmd.numOfParams = 0;
+ pSql->cmd.batchSize = 0;
+
+ if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) {
+ SHashObj* hashList = pCmd->pTableBlockHashList;
+ pCmd->pTableBlockHashList = NULL;
+ tscResetSqlCmd(pCmd, true);
+ pCmd->pTableBlockHashList = hashList;
+ }
+
+ int32_t code = tsParseSql(pStmt->pSql, true);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ // wait for the callback function to post the semaphore
+ tsem_wait(&pStmt->pSql->rspSem);
+
+ code = pStmt->pSql->res.code;
+ }
+
+ if (code == TSDB_CODE_SUCCESS) {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ STableDataBlocks* pBlock = NULL;
+ code = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ SSubmitBlk* blk = (SSubmitBlk*)pBlock->pData;
+ blk->numOfRows = 0;
+
+ pStmt->mtb.currentUid = pTableMeta->id.uid;
+ pStmt->mtb.tbNum++;
+
+ taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
+
+ taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid));
+
+    tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIu64, pSql->self, name, pStmt->mtb.currentUid);
+ }
+
+ return code;
+}
+
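A minimal usage sketch of the new multi-table insert flow, matching the STMT_SETTBNAME -> STMT_BIND -> STMT_ADD_BATCH -> STMT_EXECUTE checks above; the "insert into ? values(?,?)" placeholder form, the taos connection and the tbnames/params inputs are assumptions, and error handling is omitted:

    /* taos: an open connection; tbnames/params: caller-prepared inputs (assumed) */
    TAOS_STMT *stmt = taos_stmt_init(taos);
    taos_stmt_prepare(stmt, "insert into ? values(?,?)", 0);
    for (int i = 0; i < numOfTables; ++i) {
      taos_stmt_set_tbname(stmt, tbnames[i]);  /* must precede bind for each table */
      taos_stmt_bind_param(stmt, params[i]);   /* one row of TAOS_BIND values */
      taos_stmt_add_batch(stmt);               /* required before execute */
    }
    taos_stmt_execute(stmt);  /* multiTbInsert dispatches to insertBatchStmtExecute */
    taos_stmt_close(stmt);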
int taos_stmt_close(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
if (!pStmt->isInsert) {
@@ -943,6 +1386,13 @@ int taos_stmt_close(TAOS_STMT* stmt) {
}
free(normal->parts);
free(normal->sql);
+ } else {
+ if (pStmt->multiTbInsert) {
+ taosHashCleanup(pStmt->mtb.pTableHash);
+ pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, true);
+ taosHashCleanup(pStmt->pSql->cmd.pTableBlockHashList);
+ pStmt->pSql->cmd.pTableBlockHashList = NULL;
+ }
}
taos_free_result(pStmt->pSql);
@@ -952,18 +1402,122 @@ int taos_stmt_close(TAOS_STMT* stmt) {
int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
STscStmt* pStmt = (STscStmt*)stmt;
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
if (pStmt->isInsert) {
+ if (pStmt->multiTbInsert) {
+ if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ } else {
+ if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ }
+
+ pStmt->last = STMT_BIND;
+
return insertStmtBindParam(pStmt, bind);
} else {
return normalStmtBindParam(pStmt, bind);
}
}
+
+int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
+ if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
+ tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (!pStmt->isInsert) {
+    tscError("0x%"PRIx64" not an insert statement", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pStmt->multiTbInsert) {
+ if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ } else {
+ if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ }
+
+ pStmt->last = STMT_BIND;
+
+ return insertStmtBindParamBatch(pStmt, bind, -1);
+}
+
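A column-wise bind sketch for the new batch API; only bind->num is validated by the code above, so the remaining TAOS_MULTI_BIND field names (buffer_type, buffer) are assumptions here:

    int64_t ts[2]   = {1600000000000LL, 1600000001000LL};
    int32_t vals[2] = {10, 20};
    TAOS_MULTI_BIND cols[2] = {0};
    cols[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;  /* assumed field name */
    cols[0].buffer      = ts;
    cols[0].num         = 2;   /* rows in this batch; must be in (0, INT16_MAX] */
    cols[1].buffer_type = TSDB_DATA_TYPE_INT;
    cols[1].buffer      = vals;
    cols[1].num         = 2;
    taos_stmt_bind_param_batch(stmt, cols);  /* binds all columns at once */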
+int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
+ if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
+ tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (!pStmt->isInsert) {
+    tscError("0x%"PRIx64" not an insert statement", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pStmt->multiTbInsert) {
+ if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ } else {
+ if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL && pStmt->last != STMT_EXECUTE) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ }
+
+ pStmt->last = STMT_BIND_COL;
+
+ return insertStmtBindParamBatch(pStmt, bind, colIdx);
+}
+
+
int taos_stmt_add_batch(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
if (pStmt->isInsert) {
+ if (pStmt->last != STMT_BIND && pStmt->last != STMT_BIND_COL) {
+ tscError("0x%"PRIx64" add batch status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->last = STMT_ADD_BATCH;
+
return insertStmtAddBatch(pStmt);
}
+
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
}
@@ -978,8 +1532,24 @@ int taos_stmt_reset(TAOS_STMT* stmt) {
int taos_stmt_execute(TAOS_STMT* stmt) {
int ret = 0;
STscStmt* pStmt = (STscStmt*)stmt;
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
if (pStmt->isInsert) {
- ret = insertStmtExecute(pStmt);
+ if (pStmt->last != STMT_ADD_BATCH) {
+ tscError("0x%"PRIx64" exec status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->last = STMT_EXECUTE;
+
+ if (pStmt->multiTbInsert) {
+ ret = insertBatchStmtExecute(pStmt);
+ } else {
+ ret = insertStmtExecute(pStmt);
+ }
} else { // normal stmt query
char* sql = normalStmtBuildSql(pStmt);
if (sql == NULL) {
@@ -1074,7 +1644,7 @@ int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
}
if (idx<0 || idx>=pBlock->numOfParams) {
- tscError("param %d: out of range", idx);
+ tscError("0x%"PRIx64" param %d: out of range", pStmt->pSql->self, idx);
abort();
}
diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c
index 7ffc9bc310..777a136a6e 100644
--- a/src/client/src/tscProfile.c
+++ b/src/client/src/tscProfile.c
@@ -104,7 +104,7 @@ void tscSaveSlowQuery(SSqlObj *pSql) {
char *sql = malloc(sqlSize);
if (sql == NULL) {
- tscError("%p failed to allocate memory to sent slow query to dnode", pSql);
+    tscError("0x%"PRIx64" failed to allocate memory to send slow query to dnode", pSql->self);
return;
}
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 691f21bab4..555b45bbca 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -21,20 +21,20 @@
#endif // __APPLE__
#include "os.h"
-#include "ttype.h"
-#include "texpr.h"
#include "taos.h"
#include "taosmsg.h"
#include "tcompare.h"
+#include "texpr.h"
#include "tname.h"
#include "tscLog.h"
#include "tscUtil.h"
#include "tschemautil.h"
#include "tsclient.h"
-#include "tstoken.h"
#include "tstrbuild.h"
+#include "ttoken.h"
#include "ttokendef.h"
#include "qScript.h"
+#include "ttype.h"
#include "qUtil.h"
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
@@ -65,7 +65,7 @@ static char* getAccountId(SSqlObj* pSql);
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
static char* cloneCurrentDBName(SSqlObj* pSql);
-static bool hasSpecifyDB(SStrToken* pTableName);
+static int32_t getDelimiterIndex(SStrToken* pTableName);
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
@@ -570,17 +570,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_DESCRIBE_TABLE: {
const char* msg1 = "invalid table name";
- const char* msg2 = "table name too long";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
-
- if (!tscValidateTableNameLength(pToken->n)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
- }
-
// additional msg has been attached already
code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
@@ -589,19 +583,15 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return tscGetTableMeta(pSql, pTableMetaInfo);
}
+ case TSDB_SQL_SHOW_CREATE_STABLE:
case TSDB_SQL_SHOW_CREATE_TABLE: {
const char* msg1 = "invalid table name";
- const char* msg2 = "table name is too long";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if (!tscValidateTableNameLength(pToken->n)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
- }
-
code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -788,18 +778,26 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// set the command/global limit parameters from the first subclause to the sqlcmd object
SQueryInfo* pQueryInfo1 = tscGetQueryInfo(pCmd, 0);
pCmd->command = pQueryInfo1->command;
+ int32_t diffSize = 0;
// if there is only one element, the limit of clause is the limit of global result.
// validate the select node for "UNION ALL" subclause
for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
SQueryInfo* pQueryInfo2 = tscGetQueryInfo(pCmd, i);
- int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo);
+ int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo, &diffSize);
if (ret != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
+ if (diffSize) {
+ for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
+ SQueryInfo* pQueryInfo2 = tscGetQueryInfo(pCmd, i);
+ tscFieldInfoSetSize(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo);
+ }
+ }
+
pCmd->parseFinished = 1;
return TSDB_CODE_SUCCESS; // do not build query message here
}
@@ -1126,11 +1124,13 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam
const char* msg1 = "name too long";
const char* msg2 = "acctId too long";
const char* msg3 = "no acctId";
+ const char* msg4 = "db name too long";
+ const char* msg5 = "table name too long";
SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS;
-
- if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path
+ int32_t idx = getDelimiterIndex(pTableName);
+ if (idx != -1) { // db has been specified in sql string so we ignore current db path
char* acctId = getAccountId(pSql);
if (acctId == NULL || strlen(acctId) <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
@@ -1140,7 +1140,14 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam
if (code != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
-
+ if (idx >= TSDB_DB_NAME_LEN) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+
+ if (pTableName->n - 1 - idx >= TSDB_TABLE_NAME_LEN) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ }
+
char name[TSDB_TABLE_FNAME_LEN] = {0};
strncpy(name, pTableName->z, pTableName->n);
@@ -1484,14 +1491,13 @@ static char* cloneCurrentDBName(SSqlObj* pSql) {
}
/* length limitation, strstr cannot be applied */
-static bool hasSpecifyDB(SStrToken* pTableName) {
+static int32_t getDelimiterIndex(SStrToken* pTableName) {
for (uint32_t i = 0; i < pTableName->n; ++i) {
if (pTableName->z[i] == TS_PATH_DELIMITER[0]) {
- return true;
+ return i;
}
}
-
- return false;
+ return -1;
}
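For example, given the token "testdb.meters" (n = 13), getDelimiterIndex returns 6: the database part spans the first idx = 6 characters (checked against TSDB_DB_NAME_LEN) and the table part spans n - 1 - idx = 6 characters (checked against TSDB_TABLE_NAME_LEN), so the two parts are now validated separately instead of only as a combined length.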
int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* xlen) {
@@ -1750,6 +1756,21 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
return false;
}
+static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
+
+ if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) {
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
void genUdfList(SArray* pUdfInfo, tSqlExpr *pNode) {
if (pNode == NULL) {
return;
@@ -1816,6 +1837,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
assert(pSelNodeList != NULL && pCmd != NULL);
const char* msg1 = "too many items in selection clause";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "not support query expression";
const char* msg4 = "only support distinct one tag";
@@ -1885,7 +1907,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
// there is only one user-defined column in the final result field, add the timestamp column.
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
- if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
+ if ((numOfSrcCols <= 0 || !hasNoneUserDefineExpr(pQueryInfo)) && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
addPrimaryTsColIntoResult(pQueryInfo);
}
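With the added hasNoneUserDefineExpr guard, a projection made up entirely of user-defined constant columns (for example, select 'abc' from tb) now also triggers the implicit timestamp column, not just the case where no source column is referenced at all.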
@@ -4914,7 +4936,7 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
}
} else {
SStrToken token = {.z = pRight->value.pz, .n = pRight->value.nLen, .type = TK_ID};
- int32_t len = tSQLGetToken(pRight->value.pz, &token.type);
+ int32_t len = tGetToken(pRight->value.pz, &token.type);
if ((token.type != TK_INTEGER && token.type != TK_FLOAT) || len != pRight->value.nLen) {
return TSDB_CODE_TSC_INVALID_SQL;
@@ -5514,7 +5536,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + schemaLen + TSDB_EXTRA_PAYLOAD_SIZE;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
- tscError("%p failed to malloc for alter table msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for alter table msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -5812,13 +5834,13 @@ int32_t validateLocalConfig(SMiscInfo* pOptions) {
}
int32_t validateColumnName(char* name) {
- bool ret = isKeyWord(name, (int32_t)strlen(name));
+ bool ret = taosIsKeyWordToken(name, (int32_t)strlen(name));
if (ret) {
return TSDB_CODE_TSC_INVALID_SQL;
}
SStrToken token = {.z = name};
- token.n = tSQLGetToken(name, &token.type);
+ token.n = tGetToken(name, &token.type);
if (token.type != TK_STRING && token.type != TK_ID) {
return TSDB_CODE_TSC_INVALID_SQL;
@@ -5829,7 +5851,7 @@ int32_t validateColumnName(char* name) {
strntolower(token.z, token.z, token.n);
token.n = (uint32_t)strtrim(token.z);
- int32_t k = tSQLGetToken(token.z, &token.type);
+ int32_t k = tGetToken(token.z, &token.type);
if (k != token.n) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -7843,4 +7865,3 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
return false;
}
-
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index efbf87aa53..80ed37b56b 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -222,7 +222,7 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
assert(online <= total);
if (online < total) {
- tscError("HB:%p, total dnode:%d, online dnode:%d", pSql, total, online);
+ tscError("0x%"PRIx64", HB, total dnode:%d, online dnode:%d", pSql->self, total, online);
pSql->res.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
}
@@ -274,7 +274,7 @@ void tscProcessActivityTimer(void *handle, void *tmrId) {
taosReleaseRef(tscObjRef, pObj->hbrid);
if (code != TSDB_CODE_SUCCESS) {
- tscError("%p failed to sent HB to server, reason:%s", pHB, tstrerror(code));
+    tscError("0x%"PRIx64" failed to send HB to server, reason:%s", pHB->self, tstrerror(code));
}
taosReleaseRef(tscRefId, rid);
@@ -286,7 +286,7 @@ int tscSendMsgToServer(SSqlObj *pSql) {
char *pMsg = rpcMallocCont(pCmd->payloadLen);
if (NULL == pMsg) {
- tscError("%p msg:%s malloc failed", pSql, taosMsg[pSql->cmd.msgType]);
+ tscError("0x%"PRIx64" msg:%s malloc failed", pSql->self, taosMsg[pSql->cmd.msgType]);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -370,11 +370,11 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
pSql->retry++;
- tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), pSql->retry);
+ tscWarn("0x%"PRIx64" it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry);
pSql->res.code = rpcMsg->code; // keep the previous error code
if (pSql->retry > pSql->maxRetry) {
- tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
+ tscError("0x%"PRIx64" max retry %d reached, give up", pSql->self, pSql->maxRetry);
} else {
      // wait for a moment and then retry
      // todo do not sleep in rpc callback thread, add this process into queue to process
@@ -667,8 +667,8 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
assert(index < pTableMetaInfo->vgroupList->numOfVgroups);
pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
} else {
- tscError("%p No vgroup info found", pSql);
-
+ tscError("0x%"PRIx64" No vgroup info found", pSql->self);
+
*succeed = 0;
return pMsg;
}
@@ -762,21 +762,20 @@ static int32_t serializeColFilterInfo(SColumnFilterInfo* pColFilters, int16_t nu
return TSDB_CODE_SUCCESS;
}
-static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, char** pMsg, void* addr) {
+static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, char** pMsg, int64_t id, bool validateColumn) {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
  // the queried table has been removed and a new table with the same name has already been created;
// return error msg
if (pExpr->uid != pTableMeta->id.uid) {
- tscError("%p table has already been destroyed", addr);
+ tscError("0x%"PRIx64" table has already been destroyed", id);
return TSDB_CODE_TSC_INVALID_TABLE_NAME;
}
- //TODO disable it temporarily
-// if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) {
-// tscError("%p table schema is not matched with parsed sql", addr);
-// return TSDB_CODE_TSC_INVALID_SQL;
-// }
+ if (validateColumn && !tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) {
+ tscError("0x%"PRIx64" table schema is not matched with parsed sql", id);
+ return TSDB_CODE_TSC_INVALID_SQL;
+ }
assert(pExpr->resColId < 0);
SSqlExpr* pSqlExpr = (SSqlExpr *)(*pMsg);
@@ -909,14 +908,14 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
for (int32_t i = 0; i < query.numOfOutput; ++i) {
- code = serializeSqlExpr(&query.pExpr1[i].base, pTableMetaInfo, &pMsg, pSql);
+ code = serializeSqlExpr(&query.pExpr1[i].base, pTableMetaInfo, &pMsg, pSql->self, true);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
}
for (int32_t i = 0; i < query.numOfExpr2; ++i) {
- code = serializeSqlExpr(&query.pExpr2[i].base, pTableMetaInfo, &pMsg, pSql);
+ code = serializeSqlExpr(&query.pExpr2[i].base, pTableMetaInfo, &pMsg, pSql->self, false);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
@@ -1104,7 +1103,7 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCreateDnodeMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1122,7 +1121,7 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCreateAcctMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1168,7 +1167,7 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pCmd->payloadLen = sizeof(SCreateUserMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1207,7 +1206,7 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pCmd->payloadLen = sizeof(SDropDbMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1240,7 +1239,7 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pCmd->payloadLen = sizeof(SCMDropTableMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1261,7 +1260,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pCmd->payloadLen = sizeof(SDropDnodeMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1282,7 +1281,7 @@ int32_t tscBuildDropUserAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pCmd->msgType = (pInfo->type == TSDB_SQL_DROP_USER)? TSDB_MSG_TYPE_CM_DROP_USER:TSDB_MSG_TYPE_CM_DROP_ACCT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1297,7 +1296,7 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pCmd->payloadLen = sizeof(SUseDbMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1314,7 +1313,7 @@ int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
pCmd->payloadLen = sizeof(SSyncDbMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1333,7 +1332,7 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pCmd->payloadLen = sizeof(SShowMsg) + 100;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1427,7 +1426,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
// Reallocate the payload size
size = tscEstimateCreateTableMsgLength(pSql, pInfo);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
- tscError("%p failed to malloc for create table msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for create table msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1526,7 +1525,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SAlterTableInfo *pAlterInfo = pInfo->pAlterInfo;
int size = tscEstimateAlterTableMsgLength(pCmd);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
- tscError("%p failed to malloc for alter table msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for alter table msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1600,7 +1599,7 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pCmd->payloadLen = sizeof(SRetrieveTableMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1722,7 +1721,7 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pCmd->payloadLen = sizeof(SConnectMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1885,7 +1884,7 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int size = numOfQueries * sizeof(SQueryDesc) + numOfStreams * sizeof(SStreamDesc) + sizeof(SHeartBeatMsg) + 100;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
pthread_mutex_unlock(&pObj->mutex);
- tscError("%p failed to create heartbeat msg", pSql);
+ tscError("0x%"PRIx64" failed to create heartbeat msg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1961,10 +1960,12 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
- tscError("%p invalid table meta from mnode, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
+ tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name));
return TSDB_CODE_TSC_INVALID_VALUE;
}
+ assert(pTableMeta->tableType == TSDB_SUPER_TABLE || pTableMeta->tableType == TSDB_CHILD_TABLE || pTableMeta->tableType == TSDB_NORMAL_TABLE || pTableMeta->tableType == TSDB_STREAM_TABLE);
+
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
    // check if the super table is cached in the hashmap
int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
@@ -2205,8 +2206,7 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
if (pInfo->vgroupList->numOfVgroups <= 0) {
- //tfree(pInfo->vgroupList);
- tscError("%p empty vgroup info", pSql);
+ tscDebug("0x%"PRIx64" empty vgroup info, no corresponding tables for stable", pSql->self);
} else {
for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
// just init, no need to lock
@@ -2522,7 +2522,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code);
static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
SSqlObj *pNew = calloc(1, sizeof(SSqlObj));
if (NULL == pNew) {
- tscError("%p malloc failed for new sqlobj to get table meta", pSql);
+ tscError("0x%"PRIx64" malloc failed for new sqlobj to get table meta", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -2536,7 +2536,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
pNew->cmd.autoCreated = pSql->cmd.autoCreated; // create table if not exists
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) {
- tscError("%p malloc failed for payload to get table meta", pSql);
+ tscError("0x%"PRIx64" malloc failed for payload to get table meta", pSql->self);
tscFreeSqlObj(pNew);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -2549,7 +2549,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
if (pSql->cmd.autoCreated) {
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
if (code != TSDB_CODE_SUCCESS) {
- tscError("%p malloc failed for new tag data to get table meta", pSql);
+ tscError("0x%"PRIx64" malloc failed for new tag data to get table meta", pSql->self);
tscFreeSqlObj(pNew);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -2577,10 +2577,23 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
assert(tIsValidName(&pTableMetaInfo->name));
- tfree(pTableMetaInfo->pTableMeta);
-
uint32_t size = tscGetTableMetaMaxSize();
- pTableMetaInfo->pTableMeta = calloc(1, size);
+ if (pTableMetaInfo->pTableMeta == NULL) {
+ pTableMetaInfo->pTableMeta = calloc(1, size);
+ pTableMetaInfo->tableMetaSize = size;
+ } else if (pTableMetaInfo->tableMetaSize < size) {
+ char *tmp = realloc(pTableMetaInfo->pTableMeta, size);
+ if (tmp == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ pTableMetaInfo->pTableMeta = (STableMeta *)tmp;
+ memset(pTableMetaInfo->pTableMeta, 0, size);
+ pTableMetaInfo->tableMetaSize = size;
+ } else {
+ //uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta);
+ memset(pTableMetaInfo->pTableMeta, 0, size);
+ pTableMetaInfo->tableMetaSize = size;
+ }
pTableMetaInfo->pTableMeta->tableType = -1;
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
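The reworked allocation keeps the existing pTableMeta buffer whenever tableMetaSize already covers the required size, realloc'ing only when the buffer must grow; compared with the old free-then-calloc on every call, a cached meta lookup no longer costs an allocation per refresh.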
@@ -2592,10 +2605,13 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
// TODO resize the tableMeta
+ char buf[80*1024] = {0};
+ assert(size < 80*1024);
+
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
if (pMeta->tableType == TSDB_CHILD_TABLE) {
- int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name);
+ int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name, buf);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@@ -2681,7 +2697,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, name);
if (code != TSDB_CODE_SUCCESS) {
- tscError("%p failed to generate the table full name", pSql);
+ tscError("0x%"PRIx64" failed to generate the table full name", pSql->self);
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -2835,6 +2851,7 @@ void tscInitMsgsFp() {
tscProcessMsgRsp[TSDB_SQL_ALTER_DB] = tscProcessAlterDbMsgRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_TABLE] = tscProcessShowCreateRsp;
+ tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_STABLE] = tscProcessShowCreateRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_DATABASE] = tscProcessShowCreateRsp;
tscKeepConn[TSDB_SQL_SHOW] = 1;
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index eb16adbad7..562731b59a 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -457,6 +457,7 @@ static bool needToFetchNewBlock(SSqlObj* pSql) {
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
+ pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
@@ -588,7 +589,7 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
void taos_free_result(TAOS_RES *res) {
SSqlObj* pSql = (SSqlObj*) res;
if (pSql == NULL || pSql->signature != pSql) {
- tscError("%p already released sqlObj", res);
+ tscError("0x%"PRIx64" already released sqlObj", pSql ? pSql->self : -1);
return;
}
@@ -881,15 +882,14 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
int32_t sqlLen = (int32_t)strlen(sql);
if (sqlLen > tsMaxSQLStringLen) {
- tscError("%p sql too long", pSql);
+ tscError("0x%"PRIx64" sql too long", pSql->self);
tfree(pSql);
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
}
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
if (pSql->sqlstr == NULL) {
- tscError("%p failed to malloc sql string buffer", pSql);
- tscDebug("0x%"PRIx64" Valid SQL result:%d, %s pObj:%p", pSql->self, pRes->code, taos_errstr(pSql), pObj);
+ tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
tfree(pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -914,7 +914,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
}
if (code != TSDB_CODE_SUCCESS) {
- tscDebug("0x%"PRIx64" Valid SQL result:%d, %s pObj:%p", pSql->self, code, taos_errstr(pSql), pObj);
+ tscError("0x%"PRIx64" invalid SQL result:%d, %s pObj:%p", pSql->self, code, taos_errstr(pSql), pObj);
}
taos_free_result(pSql);
@@ -963,7 +963,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
len = (int32_t)strtrim(tblName);
SStrToken sToken = {.n = len, .type = TK_ID, .z = tblName};
- tSQLGetToken(tblName, &sToken.type);
+ tGetToken(tblName, &sToken.type);
// Check if the table name available or not
if (tscValidateName(&sToken) != TSDB_CODE_SUCCESS) {
@@ -1031,14 +1031,14 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
int32_t tblListLen = (int32_t)strlen(tableNameList);
if (tblListLen > MAX_TABLE_NAME_LENGTH) {
- tscError("%p tableNameList too long, length:%d, maximum allowed:%d", pSql, tblListLen, MAX_TABLE_NAME_LENGTH);
+ tscError("0x%"PRIx64" tableNameList too long, length:%d, maximum allowed:%d", pSql->self, tblListLen, MAX_TABLE_NAME_LENGTH);
tscFreeSqlObj(pSql);
return TSDB_CODE_TSC_INVALID_SQL;
}
char *str = calloc(1, tblListLen + 1);
if (str == NULL) {
- tscError("%p failed to malloc sql string buffer", pSql);
+ tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
tscFreeSqlObj(pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c
index 2551662d93..17bf575b60 100644
--- a/src/client/src/tscStream.c
+++ b/src/client/src/tscStream.c
@@ -102,7 +102,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
}
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) && (pTableMetaInfo->pVgroupTables == NULL) && (pTableMetaInfo->vgroupList == NULL || pTableMetaInfo->vgroupList->numOfVgroups <= 0)) {
- tscDebug("%p empty vgroup list", pSql);
+ tscDebug("0x%"PRIx64" empty vgroup list", pSql->self);
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
code = TSDB_CODE_TSC_APP_ERROR;
}
@@ -110,10 +110,9 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
// failed to get table Meta or vgroup list, retry in 10sec.
if (code == TSDB_CODE_SUCCESS) {
tscTansformFuncForSTableQuery(pQueryInfo);
- tscDebug("0x%"PRIx64" stream:%p, start stream query on:%s", pSql->self, pStream, tNameGetTableName(&pTableMetaInfo->name));
+ tscDebug("0x%"PRIx64" stream:%p started to query table:%s", pSql->self, pStream, tNameGetTableName(&pTableMetaInfo->name));
pQueryInfo->command = TSDB_SQL_SELECT;
- pSql->cmd.active = pQueryInfo;
pSql->fp = tscProcessStreamQueryCallback;
pSql->fetchFp = tscProcessStreamQueryCallback;
@@ -140,7 +139,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
pStream->numOfRes = 0; // reset the numOfRes.
SSqlObj *pSql = pStream->pSql;
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
- tscDebug("0x%"PRIx64" add into timer", pSql->self);
+ tscDebug("0x%"PRIx64" timer launch query", pSql->self);
if (pStream->isProject) {
/*
@@ -195,8 +194,8 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
- tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
- retryDelay);
+ tscError("0x%"PRIx64" stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql->self,
+ pStream, numOfRows, retryDelay);
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
@@ -204,6 +203,14 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+
+ tfree(pTableMetaInfo->pTableMeta);
+
+ tscFreeSqlResult(pStream->pSql);
+ tscFreeSubobj(pStream->pSql);
+ tfree(pStream->pSql->pSubs);
+ pStream->pSql->subState.numOfSub = 0;
+
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
@@ -260,13 +267,14 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
if (pSql == NULL || numOfRows < 0) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
- tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
+      tscError("0x%"PRIx64" stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 " ms", pStream->pSql->self, pStream, numOfRows, retryDelayTime);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
return;
}
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
+ STableMetaInfo *pTableMetaInfo = pQueryInfo->pTableMetaInfo[0];
if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful.
for(int32_t i = 0; i < numOfRows; ++i) {
@@ -293,7 +301,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
    /* no results in the query range, retry */
// todo set retry dynamic time
int32_t retry = tsProjectExecInterval;
- tscError("%p stream:%p, retrieve no data, code:0x%08x, retry in %" PRId32 "ms", pSql, pStream, numOfRows, retry);
+ tscError("0x%"PRIx64" stream:%p, retrieve no data, code:0x%08x, retry in %" PRId32 "ms", pSql->self, pStream, numOfRows, retry);
tscSetRetryTimer(pStream, pStream->pSql, retry);
return;
@@ -306,6 +314,10 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
pStream->numOfRes);
tfree(pTableMetaInfo->pTableMeta);
+ if (pQueryInfo->pQInfo != NULL) {
+ qDestroyQueryInfo(pQueryInfo->pQInfo);
+ pQueryInfo->pQInfo = NULL;
+ }
tscFreeSqlResult(pSql);
tscFreeSubobj(pSql);
@@ -338,10 +350,10 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
return;
}
- tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
+ tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 "(ts window ekey), in %" PRId64 " ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
now + timer, timer, delay, pStream->stime, etime);
} else {
- tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
+ tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 "(ts window ekey), in %" PRId64 " ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
pStream->stime, timer, delay, pStream->stime - pStream->interval.interval, pStream->stime - 1);
}
@@ -399,7 +411,6 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
}
} else {
int64_t stime = taosTimeTruncate(pStream->stime - 1, &pStream->interval, pStream->precision);
- //int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
if (stime >= pStream->etime) {
tscDebug("0x%"PRIx64" stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql->self, pStream,
pStream->stime, pStream->etime);
@@ -441,7 +452,7 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
}
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) {
- tscWarn("%p stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
+ tscWarn("0x%"PRIx64" stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql->self, pStream,
(int64_t)pQueryInfo->interval.interval, minIntervalTime);
pQueryInfo->interval.interval = minIntervalTime;
}
@@ -458,14 +469,14 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
- tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
+ tscWarn("0x%"PRIx64" stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql->self, pStream,
pQueryInfo->interval.sliding, minSlidingTime);
pQueryInfo->interval.sliding = minSlidingTime;
}
if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
- tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql, pStream,
+ tscWarn("0x%"PRIx64" stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql->self, pStream,
pQueryInfo->interval.sliding, pQueryInfo->interval.interval);
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
@@ -508,7 +519,7 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
} else {
int64_t newStime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
if (newStime != stime) {
- tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
+ tscWarn("0x%"PRIx64" stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql->self, pStream, stime, newStime);
stime = newStime;
}
}
@@ -539,7 +550,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
if (code != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
- tscError("%p open stream failed, sql:%s, reason:%s, code:%s", pSql, pSql->sqlstr, pCmd->payload, tstrerror(code));
+ tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:%s", pSql->self, pSql->sqlstr, pCmd->payload, tstrerror(code));
pStream->fp(pStream->param, NULL, NULL);
return;
@@ -558,7 +569,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
if (tscSetSlidingWindowInfo(pSql, pStream) != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
- tscError("%p stream %p open failed, since the interval value is incorrect", pSql, pStream);
+ tscError("0x%"PRIx64" stream %p open failed, since the interval value is incorrect", pSql->self, pStream);
pStream->fp(pStream->param, NULL, NULL);
return;
}
@@ -598,7 +609,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
SSqlStream *pStream = (SSqlStream *)calloc(1, sizeof(SSqlStream));
if (pStream == NULL) {
- tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, sqlstr, pCmd->payload, pRes->code);
+ tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:0x%08x", pSql->self, sqlstr, pCmd->payload, pRes->code);
tscFreeSqlObj(pSql);
return NULL;
}
@@ -614,26 +625,26 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
if (pSql->sqlstr == NULL) {
- tscError("%p failed to malloc sql string buffer", pSql);
+ tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
tscFreeSqlObj(pSql);
return NULL;
}
strtolower(pSql->sqlstr, sqlstr);
- tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
+ registerSqlObj(pSql);
+
+ tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
tsem_init(&pSql->rspSem, 0, 0);
pSql->fp = tscCreateStream;
pSql->fetchFp = tscCreateStream;
- registerSqlObj(pSql);
-
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_SUCCESS) {
tscCreateStream(pStream, pSql, code);
} else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- tscError("%p open stream failed, sql:%s, code:%s", pSql, sqlstr, tstrerror(code));
+ tscError("0x%"PRIx64" open stream failed, sql:%s, code:%s", pSql->self, sqlstr, tstrerror(code));
taosReleaseRef(tscObjRef, pSql->self);
free(pStream);
return NULL;
diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c
index a32cadb907..6928058f23 100644
--- a/src/client/src/tscSub.c
+++ b/src/client/src/tscSub.c
@@ -224,11 +224,11 @@ static SArray* getTableList( SSqlObj* pSql ) {
SSqlObj* pNew = taos_query(pSql->pTscObj, sql);
if (pNew == NULL) {
- tscError("failed to retrieve table id: cannot create new sql object.");
+    tscError("0x%"PRIx64" failed to retrieve table id: cannot create new sql object", pSql->self);
return NULL;
} else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) {
- tscError("failed to retrieve table id: %s", tstrerror(taos_errno(pNew)));
+    tscError("0x%"PRIx64" failed to retrieve table id, error:%s", pSql->self, tstrerror(taos_errno(pNew)));
return NULL;
}
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 4db9cf7bc2..16cbc0693b 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -71,7 +71,7 @@ static void subquerySetState(SSqlObj *pSql, SSubqueryState *subState, int idx, i
pthread_mutex_lock(&subState->mutex);
- tscDebug("subquery:%p,%d state set to %d", pSql, idx, state);
+ tscDebug("subquery:0x%"PRIx64",%d state set to %d", pSql->self, idx, state);
subState->states[idx] = state;
@@ -85,12 +85,18 @@ static bool allSubqueryDone(SSqlObj *pParentSql) {
//lock in caller
tscDebug("0x%"PRIx64" total subqueries: %d", pParentSql->self, subState->numOfSub);
for (int i = 0; i < subState->numOfSub; i++) {
+ SSqlObj* pSub = pParentSql->pSubs[i];
if (0 == subState->states[i]) {
- tscDebug("0x%"PRIx64" subquery:%p, index: %d NOT finished, abort query completion check", pParentSql->self, pParentSql->pSubs[i], i);
+      tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d NOT finished, abort query completion check", pParentSql->self,
+          pSub ? pSub->self : -1, i);
done = false;
break;
} else {
- tscDebug("0x%"PRIx64" subquery:%p, index: %d finished", pParentSql->self, pParentSql->pSubs[i], i);
+ if (pSub != NULL) {
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d finished", pParentSql->self, pSub->self, i);
+ } else {
+ tscDebug("0x%"PRIx64" subquery:%p, index: %d finished", pParentSql->self, pSub, i);
+ }
}
}
@@ -107,14 +113,15 @@ static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
bool done = allSubqueryDone(pParentSql);
if (done) {
- tscDebug("0x%"PRIx64" subquery:%p,%d all subs already done", pParentSql->self, pSql, idx);
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64",%d all subs already done", pParentSql->self,
+ pSql->self, idx);
pthread_mutex_unlock(&subState->mutex);
return false;
}
- tscDebug("0x%"PRIx64" subquery:%p,%d state set to 1", pParentSql->self, pSql, idx);
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64",%d state set to 1", pParentSql->self, pSql->self, idx);
subState->states[idx] = 1;
@@ -171,7 +178,8 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
return 0;
}
- tscDebug("0x%"PRIx64" sub:%p table idx:%d, input group number:%d", pSql->self, pSql->pSubs[i], i, pSupporter->pTSBuf->numOfGroups);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" table idx:%d, input group number:%d", pSql->self,
+ pSql->pSubs[i]->self, i, pSupporter->pTSBuf->numOfGroups);
ctxlist[i].p = pSupporter;
ctxlist[i].res = output;
@@ -377,9 +385,9 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
TSKEY et = taosGetTimestampUs();
for (int32_t i = 0; i < joinNum; ++i) {
- tscDebug("0x%"PRIx64" sub:%p tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
"intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us",
- pSql->self, pSql->pSubs[i], i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey,
+ pSql->self, pSql->pSubs[i]->self, i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey,
tsBufGetNumOfGroup(ctxlist[i].res), et - st);
}
@@ -630,7 +638,13 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
// set the tag column id for executor to extract correct tag value
+#ifndef _TD_NINGSI_60
pExpr->base.param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)};
+#else
+ pExpr->base.param[0].i64 = colId;
+ pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT;
+ pExpr->base.param[0].nLen = sizeof(int64_t);
+#endif
pExpr->base.numOfParams = 1;
}
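The _TD_NINGSI_60 branch avoids assigning a C99 compound literal to an existing struct, which that toolchain apparently rejects; the per-field stores are semantically identical.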
@@ -656,7 +670,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
//prepare the subqueries object failed, abort
if (!success) {
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- tscError("%p failed to prepare subqueries objs for secondary phase query, numOfSub:%d, code:%d", pSql,
+ tscError("0x%"PRIx64" failed to prepare subqueries objs for secondary phase query, numOfSub:%d, code:%d", pSql->self,
pSql->subState.numOfSub, pSql->res.code);
freeJoinSubqueryObj(pSql);
@@ -701,7 +715,7 @@ void freeJoinSubqueryObj(SSqlObj* pSql) {
static int32_t quitAllSubquery(SSqlObj* pSqlSub, SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
if (subAndCheckDone(pSqlSub, pSqlObj, pSupporter->subqueryIndex)) {
- tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
+ tscError("0x%"PRIx64" all subquery return and query failed, global code:%s", pSqlObj->self, tstrerror(pSqlObj->res.code));
freeJoinSubqueryObj(pSqlObj);
return 0;
}
@@ -785,7 +799,7 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr
STableIdInfo item = {.uid = tt->uid, .tid = tt->tid, .key = INT64_MIN};
taosArrayPush(vgTables, &item);
- tscTrace("%p tid:%d, uid:%"PRIu64",vgId:%d added", pSql, tt->tid, tt->uid, tt->vgId);
+ tscTrace("0x%"PRIx64" tid:%d, uid:%"PRIu64",vgId:%d added", pSql->self, tt->tid, tt->uid, tt->vgId);
prev = tt;
}
@@ -851,9 +865,9 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
tscDebug(
- "%p subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
+ "0x%"PRIx64" subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
"numOfExpr:%" PRIzu ", colList:%" PRIzu ", numOfOutputFields:%d, name:%s",
- pParent, pSql, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
+ pParent->self, pSql->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
tscBuildAndSendRequest(pSql, NULL);
@@ -866,7 +880,7 @@ static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSq
assert(prev->vgId >= 1 && p->vgId >= 1);
if (doCompare(prev->tag, p->tag, pColSchema->type, pColSchema->bytes) == 0) {
- tscError("%p join tags have same value for different table, free all sub SqlObj and quit", pPSqlObj);
+ tscError("0x%"PRIx64" join tags have same value for different table, free all sub SqlObj and quit", pPSqlObj->self);
pPSqlObj->res.code = TSDB_CODE_QRY_DUP_JOIN_KEY;
return false;
}
@@ -1102,7 +1116,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY));
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
- tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
+ tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
@@ -1117,7 +1131,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// todo retry if other subqueries are not failed
assert(numOfRows < 0 && numOfRows == taos_errno(pSql));
- tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
+ tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex);
pParentSql->res.code = numOfRows;
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
@@ -1136,7 +1150,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// todo handle memory error
char* tmp = realloc(pSupporter->pIdTagList, length);
if (tmp == NULL) {
- tscError("%p failed to malloc memory", pSql);
+ tscError("0x%"PRIx64" failed to malloc memory", pSql->self);
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
@@ -1256,7 +1270,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
assert(!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE));
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
- tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
+ tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
return;
}
@@ -1270,7 +1284,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
// todo retry if other subqueries are not failed yet
assert(numOfRows < 0 && numOfRows == taos_errno(pSql));
- tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
+ tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex);
pParentSql->res.code = numOfRows;
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
@@ -1286,7 +1300,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pSupporter->f = fopen(pSupporter->path, "wb");
if (pSupporter->f == NULL) {
- tscError("%p failed to create tmp file:%s, reason:%s", pSql, pSupporter->path, strerror(errno));
+ tscError("0x%"PRIx64" failed to create tmp file:%s, reason:%s", pSql->self, pSupporter->path, strerror(errno));
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
@@ -1306,7 +1320,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
STSBuf* pBuf = tsBufCreateFromFile(pSupporter->path, true);
if (pBuf == NULL) { // in error process, close the fd
- tscError("%p invalid ts comp file from vnode, abort subquery, file size:%d", pSql, numOfRows);
+ tscError("0x%"PRIx64" invalid ts comp file from vnode, abort subquery, file size:%d", pSql->self, numOfRows);
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
@@ -1403,7 +1417,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex);
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
- tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
+ tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
@@ -1418,7 +1432,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
assert(numOfRows == taos_errno(pSql));
pParentSql->res.code = numOfRows;
- tscError("%p retrieve failed, index:%d, code:%s", pSql, pSupporter->subqueryIndex, tstrerror(numOfRows));
+ tscError("0x%"PRIx64" retrieve failed, index:%d, code:%s", pSql->self, pSupporter->subqueryIndex, tstrerror(numOfRows));
tscAsyncResultOnError(pParentSql);
return;
@@ -1454,7 +1468,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
}
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
- tscDebug("0x%"PRIx64" sub:%p,%d completed, total:%d", pParentSql->self, tres, pSupporter->subqueryIndex, pState->numOfSub);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d completed, total:%d", pParentSql->self, pSql->self, pSupporter->subqueryIndex, pState->numOfSub);
return;
}
@@ -1475,16 +1489,16 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
SSqlRes* pRes1 = &pParentSql->pSubs[i]->res;
if (pRes1->row > 0 && pRes1->numOfRows > 0) {
- tscDebug("0x%"PRIx64" sub:%p index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self, pParentSql->pSubs[i], i,
- pRes1->numOfRows, pRes1->numOfTotal);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self,
+ pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
assert(pRes1->row < pRes1->numOfRows);
} else {
if (!pQueryInfo->globalMerge) {
pRes1->numOfClauseTotal += pRes1->numOfRows;
}
- tscDebug("0x%"PRIx64" sub:%p index:%d numOfRows:%d total:%"PRId64, pParentSql->self, pParentSql->pSubs[i], i,
- pRes1->numOfRows, pRes1->numOfTotal);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64, pParentSql->self,
+ pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
}
}
@@ -1665,7 +1679,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pRes1->row >= pRes1->numOfRows) {
- tscDebug("0x%"PRIx64" subquery:%p retrieve data from vnode, subquery:%d, vgroupIndex:%d", pSql->self, pSql1,
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64" retrieve data from vnode, subquery:%d, vgroupIndex:%d", pSql->self, pSql1->self,
pSupporter->subqueryIndex, pTableMetaInfo->vgroupIndex);
tscResetForNextRetrieve(pRes1);
@@ -1745,7 +1759,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
// retrieve actual query results from vnode during the second stage join subquery
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
- tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, code, pParentSql->res.code);
+ tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, code, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
@@ -1759,7 +1773,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
assert(taos_errno(pSql) == code);
- tscError("%p abort query, code:%s, global code:%s", pSql, tstrerror(code), tstrerror(pParentSql->res.code));
+ tscError("0x%"PRIx64" abort query, code:%s, global code:%s", pSql->self, tstrerror(code), tstrerror(pParentSql->res.code));
pParentSql->res.code = code;
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
@@ -1985,12 +1999,12 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub);
tscDebug("0x%"PRIx64" reset all sub states to 0", pSql->self);
-
+
tscDebug("0x%"PRIx64" start subquery, total:%d", pSql->self, pQueryInfo->numOfTables);
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
SJoinSupporter *pSupporter = tscCreateJoinSupporter(pSql, i);
if (pSupporter == NULL) { // failed to create support struct, abort current query
- tscError("%p tableIndex:%d, failed to allocate join support object, abort further query", pSql, i);
+ tscError("0x%"PRIx64" tableIndex:%d, failed to allocate join support object, abort further query", pSql->self, i);
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
@@ -2388,9 +2402,9 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
tscTansformFuncForSTableQuery(pNewQueryInfo);
tscDebug(
- "%p first round subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, "
+ "0x%"PRIx64" first round subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, "
"numOfExpr:%" PRIzu ", colList:%d, numOfOutputFields:%d, name:%s",
- pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type,
+ pSql->self, pNew->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type,
tscSqlExprNumOfExprs(pNewQueryInfo), index+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
tscHandleMasterSTableQuery(pNew);
@@ -2476,7 +2490,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
for (; i < pState->numOfSub; ++i) {
SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport));
if (trs == NULL) {
- tscError("%p failed to malloc buffer for SRetrieveSupport, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
+ tscError("0x%"PRIx64" failed to malloc buffer for SRetrieveSupport, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno));
break;
}
@@ -2485,7 +2499,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
if (trs->localBuffer == NULL) {
- tscError("%p failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
+ tscError("0x%"PRIx64" failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno));
tfree(trs);
break;
}
@@ -2497,7 +2511,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL);
if (pNew == NULL) {
- tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
+ tscError("0x%"PRIx64" failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno));
tfree(trs->localBuffer);
tfree(trs);
break;
@@ -2510,11 +2524,12 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
assert(pNewQueryInfo->tsBuf != NULL);
}
- tscDebug("0x%"PRIx64" sub:%p create subquery success. orderOfSub:%d", pSql->self, pNew, trs->subqueryIndex);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" create subquery success. orderOfSub:%d", pSql->self, pNew->self,
+ trs->subqueryIndex);
}
if (i < pState->numOfSub) {
- tscError("%p failed to prepare subquery structure and launch subqueries", pSql);
+ tscError("0x%"PRIx64" failed to prepare subquery structure and launch subqueries", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel, pState->numOfSub);
@@ -2558,7 +2573,7 @@ static void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, i
static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES *tres, int32_t code) {
// set no disk space error info
- tscError("sub:%p failed to flush data to disk, reason:%s", tres, tstrerror(code));
+ tscError("sub:0x%"PRIx64" failed to flush data to disk, reason:%s", ((SSqlObj *)tres)->self, tstrerror(code));
SSqlObj* pParentSql = trsupport->pParentSql;
pParentSql->res.code = code;
@@ -2583,7 +2598,7 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32
const uint32_t nBufferSize = (1u << 16u); // 64KB
trsupport->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
if (trsupport->localBuffer == NULL) {
- tscError("%p failed to malloc buffer for local buffer, reason:%s", pSql, strerror(errno));
+ tscError("0x%"PRIx64" failed to malloc buffer for local buffer, reason:%s", pSql->self, strerror(errno));
tfree(trsupport);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -2598,13 +2613,13 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32
// clear local saved number of results
trsupport->localBuffer->num = 0;
- tscError("%p sub:%p retrieve/query failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql, pSql,
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" retrieve/query failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql->self, pSql->self,
tstrerror(code), subqueryIndex, trsupport->numOfRetry);
SSqlObj *pNew = tscCreateSTableSubquery(trsupport->pParentSql, trsupport, pSql);
if (pNew == NULL) {
- tscError("%p sub:%p failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d",
- oriTrs->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, oriTrs->subqueryIndex);
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d",
+ oriTrs->pParentSql->self, pSql->self, tstrerror(terrno), pVgroup->vgId, oriTrs->subqueryIndex);
pParentSql->res.code = terrno;
oriTrs->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
@@ -2658,7 +2673,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query.
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" retrieve numOfRows:%d,orderOfSub:%d", pParentSql->self, pSql->self, numOfRows, subqueryIndex);
- tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%s", pParentSql, pSql,
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" abort further retrieval due to other queries failure,orderOfSub:%d,code:%s", pParentSql->self, pSql->self,
subqueryIndex, tstrerror(pParentSql->res.code));
} else {
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pParentSql->res.code == TSDB_CODE_SUCCESS) {
@@ -2670,20 +2685,21 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
}
} else { // reach the maximum retry count, abort
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, numOfRows);
- tscError("%p sub:%p retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%s", pParentSql, pSql,
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%s", pParentSql->self, pSql->self,
tstrerror(numOfRows), subqueryIndex, tstrerror(pParentSql->res.code));
}
}
if (!subAndCheckDone(pSql, pParentSql, subqueryIndex)) {
- tscDebug("0x%"PRIx64" sub:%p,%d freed, not finished, total:%d", pParentSql->self, pSql, trsupport->subqueryIndex, pState->numOfSub);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d freed, not finished, total:%d", pParentSql->self,
+ pSql->self, trsupport->subqueryIndex, pState->numOfSub);
tscFreeRetrieveSup(pSql);
return;
}
// all subqueries are failed
- tscError("%p retrieve from %d vnode(s) completed,code:%s.FAILED.", pParentSql, pState->numOfSub,
+ tscError("0x%"PRIx64" retrieve from %d vnode(s) completed,code:%s.FAILED.", pParentSql->self, pState->numOfSub,
tstrerror(pParentSql->res.code));
// release allocated resource
@@ -2717,8 +2733,8 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
// data in from current vnode is stored in cache and disk
uint32_t numOfRowsFromSubquery = (uint32_t)(trsupport->pExtMemBuffer[idx]->numOfTotalElems + trsupport->localBuffer->num);
SVgroupsInfo* vgroupsInfo = pTableMetaInfo->vgroupList;
- tscDebug("0x%"PRIx64" sub:%p all data retrieved from ep:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql->self, pSql,
- vgroupsInfo->vgroups[0].epAddr[0].fqdn, vgroupsInfo->vgroups[0].vgId, numOfRowsFromSubquery, idx);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" all data retrieved from ep:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql->self,
+ pSql->self, vgroupsInfo->vgroups[0].epAddr[0].fqdn, vgroupsInfo->vgroups[0].vgId, numOfRowsFromSubquery, idx);
tColModelCompact(pDesc->pColumnModel, trsupport->localBuffer, pDesc->pColumnModel->capacity);
@@ -2731,7 +2747,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
#endif
if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) {
- tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql, pSql,
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql->self, pSql->self,
tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace);
tscAbortFurtherRetryRetrieval(trsupport, pSql, TSDB_CODE_TSC_NO_DISKSPACE);
return;
@@ -2746,7 +2762,8 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
}
if (!subAndCheckDone(pSql, pParentSql, idx)) {
- tscDebug("0x%"PRIx64" sub:%p orderOfSub:%d freed, not finished", pParentSql->self, pSql, trsupport->subqueryIndex);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d freed, not finished", pParentSql->self, pSql->self,
+ trsupport->subqueryIndex);
tscFreeRetrieveSup(pSql);
return;
@@ -2837,7 +2854,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
}
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
- tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry);
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(numOfRows), trsupport->numOfRetry);
int32_t sent = 0;
@@ -2865,8 +2882,8 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
pParentSql->self, pSql, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
- tscError("%p sub:%p num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
- pParentSql, pSql, tsMaxNumOfOrderedResults, num);
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
+ pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
return;
}
@@ -2881,7 +2898,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
// no disk space for tmp directory
if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) {
- tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql, pSql,
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql->self, pSql->self,
tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace);
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_NO_DISKSPACE);
return;
@@ -2951,8 +2968,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
// stable query killed or other subquery failed, all query stopped
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
- tscError("%p query cancelled or failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s",
- pParentSql, pSql, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(code), tstrerror(pParentSql->res.code));
+ tscError("0x%"PRIx64" query cancelled or failed, sub:0x%"PRIx64", vgId:%d, orderOfSub:%d, code:%s, global code:%s",
+ pParentSql->self, pSql->self, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(code), tstrerror(pParentSql->res.code));
tscHandleSubqueryError(param, tres, code);
return;
@@ -2969,7 +2986,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
assert(code == taos_errno(pSql));
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
- tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry);
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(code), trsupport->numOfRetry);
int32_t sent = 0;
@@ -2978,7 +2995,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
return;
}
} else {
- tscError("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(code));
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" reach the max retry times, set global code:%s", pParentSql->self, pSql->self, tstrerror(code));
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort
}
@@ -2998,7 +3015,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
static bool needRetryInsert(SSqlObj* pParentObj, int32_t numOfSub) {
if (pParentObj->retry > pParentObj->maxRetry) {
- tscError("%p max retry reached, abort the retry effort", pParentObj);
+ tscError("0x%"PRIx64" max retry reached, abort the retry effort", pParentObj->self);
return false;
}
@@ -3090,16 +3107,17 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
}
}
- tscError("%p Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj,
+ tscError("0x%"PRIx64" Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj->self,
pParentObj->res.numOfRows, numOfFailed, numOfSub);
- tscDebug("0x%"PRIx64" cleanup %d tableMeta in hashTable", pParentObj->self, pParentObj->cmd.numOfTables);
+ tscDebug("0x%"PRIx64" cleanup %d tableMeta in hashTable before reparse sql", pParentObj->self, pParentObj->cmd.numOfTables);
for(int32_t i = 0; i < pParentObj->cmd.numOfTables; ++i) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(pParentObj->cmd.pTableNameList[i], name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
+ pParentObj->res.code = TSDB_CODE_SUCCESS;
pParentObj->cmd.parseFinished = false;
tscResetSqlCmd(&pParentObj->cmd, false);
@@ -3159,7 +3177,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
pSup->pSql = pSql;
pSub->param = pSup;
- tscDebug("0x%"PRIx64" sub:%p launch sub insert, orderOfSub:%d", pSql->self, pSub, i);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" launch sub insert, orderOfSub:%d", pSql->self, pSub->self, i);
if (pSub->res.code != TSDB_CODE_SUCCESS) {
tscHandleInsertRetry(pSql, pSub);
}
@@ -3207,7 +3225,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
SSqlObj *pNew = createSimpleSubObj(pSql, multiVnodeInsertFinalize, pSupporter, TSDB_SQL_INSERT);
if (pNew == NULL) {
- tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, numOfSub, strerror(errno));
+ tscError("0x%"PRIx64" failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql->self, numOfSub, strerror(errno));
goto _error;
}
@@ -3231,7 +3249,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
}
if (numOfSub < pSql->subState.numOfSub) {
- tscError("%p failed to prepare subObj structure and launch sub-insertion", pSql);
+ tscError("0x%"PRIx64" failed to prepare subObj structure and launch sub-insertion", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 06a8b91177..13aa21bbdc 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -865,7 +865,7 @@ void tscFreeSqlResult(SSqlObj* pSql) {
SSqlRes* pRes = &pSql->res;
tscDestroyResPointerInfo(pRes);
-
+
memset(&pSql->res, 0, sizeof(SSqlRes));
}
@@ -1112,7 +1112,8 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
tfree(pTableMetaInfo->pTableMeta);
}
- pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
+ pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
+ pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pDataBlock->pTableMeta);
}
/*
@@ -1329,67 +1330,73 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
STableDataBlocks* pOneTableBlock = *p;
while(pOneTableBlock) {
- // the maximum expanded size in byte when a row-wise data is converted to SDataRow format
- int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
- STableDataBlocks* dataBuf = NULL;
-
- int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
- INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
- if (ret != TSDB_CODE_SUCCESS) {
- tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret);
- taosHashCleanup(pVnodeDataBlockHashList);
- tscDestroyBlockArrayList(pVnodeDataBlockList);
- return ret;
- }
-
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
- int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
-
- if (dataBuf->nAllocSize < destSize) {
- while (dataBuf->nAllocSize < destSize) {
- dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5);
- }
-
- char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize);
- if (tmp != NULL) {
- dataBuf->pData = tmp;
- memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
- } else { // failed to allocate memory, free already allocated memory and return error code
- tscError("%p failed to allocate memory for merging submit block, size:%d", pSql, dataBuf->nAllocSize);
-
+ if (pBlocks->numOfRows > 0) {
+ // the maximum expanded size in byte when a row-wise data is converted to SDataRow format
+ int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
+ STableDataBlocks* dataBuf = NULL;
+
+ int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
+ INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
+ if (ret != TSDB_CODE_SUCCESS) {
+ tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret);
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
- tfree(dataBuf->pData);
-
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ return ret;
}
+
+ int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
+
+ if (dataBuf->nAllocSize < destSize) {
+ while (dataBuf->nAllocSize < destSize) {
+ dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5);
+ }
+
+ char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize);
+ if (tmp != NULL) {
+ dataBuf->pData = tmp;
+ memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
+ } else { // failed to allocate memory, free already allocated memory and return error code
+ tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize);
+
+ taosHashCleanup(pVnodeDataBlockHashList);
+ tscDestroyBlockArrayList(pVnodeDataBlockList);
+ tfree(dataBuf->pData);
+
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ }
+
+ tscSortRemoveDataBlockDupRows(pOneTableBlock);
+ char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
+
+ tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName),
+ pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
+
+ int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
+
+ pBlocks->tid = htonl(pBlocks->tid);
+ pBlocks->uid = htobe64(pBlocks->uid);
+ pBlocks->sversion = htonl(pBlocks->sversion);
+ pBlocks->numOfRows = htons(pBlocks->numOfRows);
+ pBlocks->schemaLen = 0;
+
+ // erase the empty space reserved for binary data
+ int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->submitSchema);
+ assert(finalLen <= len);
+
+ dataBuf->size += (finalLen + sizeof(SSubmitBlk));
+ assert(dataBuf->size <= dataBuf->nAllocSize);
+
+ // the length does not include the SSubmitBlk structure
+ pBlocks->dataLen = htonl(finalLen);
+ dataBuf->numOfTables += 1;
+
+ pBlocks->numOfRows = 0;
+ } else {
+ tscDebug("0x%"PRIx64" table %s data block is empty", pSql->self, pOneTableBlock->tableName.tname);
}
-
- tscSortRemoveDataBlockDupRows(pOneTableBlock);
- char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
-
- tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName),
- pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
-
- int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
-
- pBlocks->tid = htonl(pBlocks->tid);
- pBlocks->uid = htobe64(pBlocks->uid);
- pBlocks->sversion = htonl(pBlocks->sversion);
- pBlocks->numOfRows = htons(pBlocks->numOfRows);
- pBlocks->schemaLen = 0;
-
- // erase the empty space reserved for binary data
- int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->submitSchema);
- assert(finalLen <= len);
-
- dataBuf->size += (finalLen + sizeof(SSubmitBlk));
- assert(dataBuf->size <= dataBuf->nAllocSize);
-
- // the length does not include the SSubmitBlk structure
- pBlocks->dataLen = htonl(finalLen);
- dataBuf->numOfTables += 1;
-
+
p = taosHashIterate(pCmd->pTableBlockHashList, p);
if (p == NULL) {
break;
@@ -1511,7 +1518,7 @@ int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) {
return pInfo->pExpr->base.offset;
}
-int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) {
+int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize) {
assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
if (pFieldInfo1->numOfOutput != pFieldInfo2->numOfOutput) {
@@ -1523,15 +1530,36 @@ int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFi
TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
if (pField1->type != pField2->type ||
- pField1->bytes != pField2->bytes ||
strcasecmp(pField1->name, pField2->name) != 0) {
return 1;
}
+
+ if (pField1->bytes != pField2->bytes) {
+ *diffSize = 1;
+
+ if (pField2->bytes > pField1->bytes) {
+ pField1->bytes = pField2->bytes;
+ }
+ }
}
return 0;
}
+int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) {
+ assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
+
+ for (int32_t i = 0; i < pFieldInfo1->numOfOutput; ++i) {
+ TAOS_FIELD* pField1 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo1, i);
+ TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
+
+ pField2->bytes = pField1->bytes;
+ }
+
+ return 0;
+}
+
+
int32_t tscGetResRowLength(SArray* pExprList) {
size_t num = taosArrayGetSize(pExprList);
if (num == 0) {
@@ -1941,7 +1969,7 @@ void tscColumnListDestroy(SArray* pColumnList) {
static int32_t validateQuoteToken(SStrToken* pToken) {
tscDequoteAndTrimToken(pToken);
- int32_t k = tSQLGetToken(pToken->z, &pToken->type);
+ int32_t k = tGetToken(pToken->z, &pToken->type);
if (pToken->type == TK_STRING) {
return tscValidateName(pToken);
@@ -2009,7 +2037,7 @@ int32_t tscValidateName(SStrToken* pToken) {
tscStrToLower(pToken->z, pToken->n);
//pToken->n = (uint32_t)strtrim(pToken->z);
- int len = tSQLGetToken(pToken->z, &pToken->type);
+ int len = tGetToken(pToken->z, &pToken->type);
// single token, validate it
if (len == pToken->n) {
@@ -2035,7 +2063,7 @@ int32_t tscValidateName(SStrToken* pToken) {
pToken->n = (uint32_t)strtrim(pToken->z);
}
- pToken->n = tSQLGetToken(pToken->z, &pToken->type);
+ pToken->n = tGetToken(pToken->z, &pToken->type);
if (pToken->z[pToken->n] != TS_PATH_DELIMITER[0]) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -2052,7 +2080,7 @@ int32_t tscValidateName(SStrToken* pToken) {
pToken->z = sep + 1;
pToken->n = (uint32_t)(oldLen - (sep - pStr) - 1);
- int32_t len = tSQLGetToken(pToken->z, &pToken->type);
+ int32_t len = tGetToken(pToken->z, &pToken->type);
if (len != pToken->n || (pToken->type != TK_STRING && pToken->type != TK_ID)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -2507,6 +2535,11 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
}
pTableMetaInfo->pTableMeta = pTableMeta;
+ if (pTableMetaInfo->pTableMeta == NULL) {
+ pTableMetaInfo->tableMetaSize = 0;
+ } else {
+ pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta);
+ }
if (vgroupList != NULL) {
pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList);
@@ -2565,7 +2598,7 @@ void registerSqlObj(SSqlObj* pSql) {
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd) {
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
if (pNew == NULL) {
- tscError("%p new subquery failed, tableIndex:%d", pSql, 0);
+ tscError("0x%"PRIx64" new subquery failed, tableIndex:%d", pSql->self, 0);
return NULL;
}
@@ -2579,7 +2612,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
if (code != TSDB_CODE_SUCCESS) {
- tscError("%p new subquery failed, unable to malloc tag data, tableIndex:%d", pSql, 0);
+ tscError("0x%"PRIx64" new subquery failed, unable to malloc tag data, tableIndex:%d", pSql->self, 0);
free(pNew);
return NULL;
}
@@ -2655,7 +2688,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
if (pNew == NULL) {
- tscError("%p new subquery failed, tableIndex:%d", pSql, tableIndex);
+ tscError("0x%"PRIx64" new subquery failed, tableIndex:%d", pSql->self, tableIndex);
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
return NULL;
}
@@ -2755,7 +2788,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
}
if (tscAllocPayload(pnCmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) {
- tscError("%p new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql, tableIndex, pTableMetaInfo->vgroupIndex);
+ tscError("0x%"PRIx64" new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql->self, tableIndex, pTableMetaInfo->vgroupIndex);
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
@@ -2791,6 +2824,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
+
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) {
@@ -2806,7 +2840,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
// this case cannot be happened
if (pFinalInfo->pTableMeta == NULL) {
- tscError("%p new subquery failed since no tableMeta, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
+ tscError("0x%"PRIx64" new subquery failed since no tableMeta, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name));
if (pPrevSql != NULL) { // pass the previous error to client
assert(pPrevSql->res.code != TSDB_CODE_SUCCESS);
@@ -2824,13 +2858,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
assert(pFinalInfo->vgroupList != NULL);
}
+ registerSqlObj(pNew);
+
if (cmd == TSDB_SQL_SELECT) {
size_t size = taosArrayGetSize(pNewQueryInfo->colList);
- tscDebug(
- "%p new subquery:%p, tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ","
+ tscDebug("0x%"PRIx64" new subquery:0x%"PRIx64", tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ","
"fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64,
- pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
+ pSql->self, pNew->self, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
size, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pFinalInfo->name), pNewQueryInfo->window.skey,
pNewQueryInfo->window.ekey, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit);
@@ -2839,7 +2874,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
tscDebug("0x%"PRIx64" new sub insertion: %p, vnodeIdx:%d", pSql->self, pNew, pTableMetaInfo->vgroupIndex);
}
- registerSqlObj(pNew);
return pNew;
_error:
@@ -3185,7 +3219,13 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) {
//backup the total number of result first
int64_t num = pRes->numOfTotal + pRes->numOfClauseTotal;
+
+
+ // Don't free 'final' here since it may be recorded and used later by the application
+ TAOS_FIELD* finalBk = pRes->final;
+ pRes->final = NULL;
tscFreeSqlResult(pSql);
+ pRes->final = finalBk;
pRes->numOfTotal = num;
@@ -3418,11 +3458,11 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
return cMeta;
}
-int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
- assert(pChild != NULL);
+int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf) {
+ assert(pChild != NULL && buf != NULL);
- uint32_t size = tscGetTableMetaMaxSize();
- STableMeta* p = calloc(1, size);
+// uint32_t size = tscGetTableMetaMaxSize();
+ STableMeta* p = buf;//calloc(1, size);
taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1);
if (p->id.uid > 0) { // tableMeta exists, build child table meta and return
@@ -3434,12 +3474,12 @@ int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
memcpy(pChild->schema, p->schema, sizeof(SSchema) *total);
- tfree(p);
+// tfree(p);
return TSDB_CODE_SUCCESS;
} else { // super table has been removed, current tableMeta is also expired. remove it here
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
- tfree(p);
+// tfree(p);
return -1;
}
}
diff --git a/src/client/tests/timeParseTest.cpp b/src/client/tests/timeParseTest.cpp
index d7325430cd..ba06a6b9aa 100644
--- a/src/client/tests/timeParseTest.cpp
+++ b/src/client/tests/timeParseTest.cpp
@@ -4,7 +4,7 @@
#include
#include "taos.h"
-#include "tstoken.h"
+#include "ttoken.h"
#include "tutil.h"
int main(int argc, char** argv) {
diff --git a/src/common/inc/tcmdtype.h b/src/common/inc/tcmdtype.h
index 1e362f5546..75de6ac125 100644
--- a/src/common/inc/tcmdtype.h
+++ b/src/common/inc/tcmdtype.h
@@ -83,6 +83,7 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_TABLE_JOIN_RETRIEVE, "join-retrieve" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_TABLE, "show-create-table")
+ TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_STABLE, "show-create-stable")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database")
/*
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index 4c18b8c8b8..02bc1c6735 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -15,10 +15,7 @@
#ifndef _TD_DATA_FORMAT_H_
#define _TD_DATA_FORMAT_H_
-#include
-#include
-#include
-
+#include "os.h"
#include "talgo.h"
#include "ttype.h"
#include "tutil.h"
diff --git a/src/common/inc/texpr.h b/src/common/inc/texpr.h
index a0854ce81b..d67de9ff69 100644
--- a/src/common/inc/texpr.h
+++ b/src/common/inc/texpr.h
@@ -89,9 +89,6 @@ tExprNode* exprdup(tExprNode* pTree);
bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
-typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight,
- int32_t rightType, void *output, int32_t order);
-
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
char *(*cb)(void *, const char*, int32_t));
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 26475834d5..2f4aa4c2b2 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -44,6 +44,7 @@ extern int32_t tsDnodeId;
// common
extern int tsRpcTimer;
extern int tsRpcMaxTime;
+extern int tsRpcForceTcp; // if enabled, all commands use the TCP protocol
extern int32_t tsMaxConnections;
extern int32_t tsMaxShellConns;
extern int32_t tsShellActivityTimer;
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index f37a4d9a36..48bec7fe4d 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -18,7 +18,7 @@
#include "os.h"
#include "taosmsg.h"
-#include "tstoken.h"
+#include "ttoken.h"
#include "tvariant.h"
typedef struct SDataStatis {
diff --git a/src/common/inc/tvariant.h b/src/common/inc/tvariant.h
index f8f715c6ca..21b7fd8223 100644
--- a/src/common/inc/tvariant.h
+++ b/src/common/inc/tvariant.h
@@ -16,8 +16,8 @@
#ifndef TDENGINE_TVARIANT_H
#define TDENGINE_TVARIANT_H
-#include "tstoken.h"
#include "tarray.h"
+#include "ttoken.h"
#ifdef __cplusplus
extern "C" {
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index 69b01e6c08..db97c3a5af 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -48,6 +48,7 @@ int32_t tsDnodeId = 0;
// common
int32_t tsRpcTimer = 1000;
int32_t tsRpcMaxTime = 600; // seconds;
+int32_t tsRpcForceTcp = 0; // disabled by default: query and show commands use the UDP protocol
int32_t tsMaxShellConns = 50000;
int32_t tsMaxConnections = 5000;
int32_t tsShellActivityTimer = 3; // second
@@ -139,7 +140,7 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int8_t tsEnableBalance = 1;
int8_t tsAlternativeRole = 0;
int32_t tsBalanceInterval = 300; // seconds
-int32_t tsOfflineThreshold = 86400 * 100; // seconds 100 days
+int32_t tsOfflineThreshold = 86400 * 10; // seconds, i.e. 10 days
int32_t tsMnodeEqualVnodeNum = 4;
int8_t tsEnableFlowCtrl = 1;
int8_t tsEnableSlaveQuery = 1;
@@ -625,6 +626,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_MS;
taosInitConfigOption(cfg);
+ cfg.option = "rpcForceTcp";
+ cfg.ptr = &tsRpcForceTcp;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
+ cfg.minValue = 0;
+ cfg.maxValue = 1;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
cfg.option = "rpcMaxTime";
cfg.ptr = &tsRpcMaxTime;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -921,7 +932,7 @@ static void doInitGlobalConfig(void) {
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = -1;
- cfg.maxValue = 10000000;
+ cfg.maxValue = 100000000.0f;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
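rpcForceTcp is registered above as a client- and server-configurable switch with range 0–1. Assuming the usual option-value line format of taos.cfg (an assumption; the config file itself is not part of this patch), forcing TCP would look like:

    # taos.cfg — 0 (default) keeps query/show commands on UDP
    rpcForceTcp 1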
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index f1ddc60637..dc868d8057 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -2,7 +2,7 @@
#include "tutil.h"
#include "tname.h"
-#include "tstoken.h"
+#include "ttoken.h"
#include "tvariant.h"
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c
index c872d8731b..9988450c30 100644
--- a/src/common/src/tvariant.c
+++ b/src/common/src/tvariant.c
@@ -14,14 +14,14 @@
*/
#include "os.h"
-#include "tvariant.h"
#include "hash.h"
#include "taos.h"
#include "taosdef.h"
-#include "tstoken.h"
+#include "ttoken.h"
#include "ttokendef.h"
-#include "tutil.h"
#include "ttype.h"
+#include "tutil.h"
+#include "tvariant.h"
void tVariantCreate(tVariant *pVar, SStrToken *token) {
int32_t ret = 0;
@@ -49,7 +49,7 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {
ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, true);
if (ret != 0) {
SStrToken t = {0};
- tSQLGetToken(token->z, &t.type);
+ tGetToken(token->z, &t.type);
if (t.type == TK_MINUS) { // it is a signed number which is greater than INT64_MAX or less than INT64_MIN
pVar->nType = -1; // -1 means error type
return;
@@ -460,7 +460,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
*result = (int64_t) pVariant->dKey;
} else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
SStrToken token = {.z = pVariant->pz, .n = pVariant->nLen};
- /*int32_t n = */tSQLGetToken(pVariant->pz, &token.type);
+ /*int32_t n = */tGetToken(pVariant->pz, &token.type);
if (token.type == TK_NULL) {
if (releaseVariantPtr) {
@@ -495,10 +495,10 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
wchar_t *endPtr = NULL;
SStrToken token = {0};
- token.n = tSQLGetToken(pVariant->pz, &token.type);
+ token.n = tGetToken(pVariant->pz, &token.type);
if (token.type == TK_MINUS || token.type == TK_PLUS) {
- token.n = tSQLGetToken(pVariant->pz + token.n, &token.type);
+ token.n = tGetToken(pVariant->pz + token.n, &token.type);
}
if (token.type == TK_FLOAT) {
diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt
index de4b8f6bfb..61e976cb18 100644
--- a/src/connector/jdbc/CMakeLists.txt
+++ b/src/connector/jdbc/CMakeLists.txt
@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
- COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.28-dist.jar ${LIBRARY_OUTPUT_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.29.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml
index a31796ffde..968a9bf470 100755
--- a/src/connector/jdbc/deploy-pom.xml
+++ b/src/connector/jdbc/deploy-pom.xml
@@ -5,7 +5,7 @@
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.28
+ 2.0.29
jar
JDBCDriver
diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml
index 3400a82e73..d94d28d9fa 100755
--- a/src/connector/jdbc/pom.xml
+++ b/src/connector/jdbc/pom.xml
@@ -3,7 +3,7 @@
4.0.0
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.28
+ 2.0.29
jar
JDBCDriver
https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java
index 4b5b88d93b..f8ea9af423 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java
@@ -84,10 +84,12 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
}
@Override
+ @Deprecated
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
-
+ }
+
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@@ -171,6 +173,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
}
@Override
+ @Deprecated
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
return getUnicodeStream(findColumn(columnLabel));
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
index c8ab9fb15a..02fee74eb5 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
@@ -49,7 +49,7 @@ public class TSDBConnection extends AbstractConnection {
this.databaseMetaData.setConnection(this);
}
- public TSDBJNIConnector getConnection() {
+ public TSDBJNIConnector getConnector() {
return this.connector;
}
@@ -58,7 +58,7 @@ public class TSDBConnection extends AbstractConnection {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
- return new TSDBStatement(this, this.connector);
+ return new TSDBStatement(this);
}
public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException {
@@ -74,14 +74,18 @@ public class TSDBConnection extends AbstractConnection {
}
public PreparedStatement prepareStatement(String sql) throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
- return new TSDBPreparedStatement(this, this.connector, sql);
+ }
+
+ return new TSDBPreparedStatement(this, sql);
}
public void close() throws SQLException {
- if (isClosed)
+ if (isClosed) {
return;
+ }
+
this.connector.closeConnection();
this.isClosed = true;
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
index bbd8519a03..55533bd28c 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
@@ -104,7 +104,7 @@ public class TSDBDriver extends AbstractDriver {
static {
try {
- java.sql.DriverManager.registerDriver(new TSDBDriver());
+ DriverManager.registerDriver(new TSDBDriver());
} catch (SQLException e) {
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_CANNOT_REGISTER_JNI_DRIVER, e);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
index 5e3ffffa4f..d6934b8e46 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
@@ -18,6 +18,7 @@ package com.taosdata.jdbc;
import com.taosdata.jdbc.utils.TaosInfo;
+import java.nio.ByteBuffer;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
@@ -29,10 +30,13 @@ public class TSDBJNIConnector {
private static volatile Boolean isInitialized = false;
private TaosInfo taosInfo = TaosInfo.getInstance();
+
// Connection pointer used in C
private long taos = TSDBConstants.JNI_NULL_POINTER;
+
// result set status in current connection
- private boolean isResultsetClosed = true;
+ private boolean isResultsetClosed;
+
private int affectedRows = -1;
static {
@@ -75,7 +79,6 @@ public class TSDBJNIConnector {
public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException {
if (this.taos != TSDBConstants.JNI_NULL_POINTER) {
-// this.closeConnectionImp(this.taos);
closeConnection();
this.taos = TSDBConstants.JNI_NULL_POINTER;
}
@@ -97,12 +100,6 @@ public class TSDBJNIConnector {
* @throws SQLException
*/
public long executeQuery(String sql) throws SQLException {
- // close previous result set if the user forgets to invoke the
- // free method to close previous result set.
-// if (!this.isResultsetClosed) {
-// freeResultSet(taosResultSetPointer);
-// }
-
Long pSql = 0l;
try {
pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos);
@@ -135,6 +132,7 @@ public class TSDBJNIConnector {
// Try retrieving result set for the executed SQL using the current connection pointer.
pSql = this.getResultSetImp(this.taos, pSql);
+ // if pSql == 0L that means resultset is closed
isResultsetClosed = (pSql == TSDBConstants.JNI_NULL_POINTER);
return pSql;
@@ -169,37 +167,14 @@ public class TSDBJNIConnector {
private native long isUpdateQueryImp(long connection, long pSql);
/**
- * Free resultset operation from C to release resultset pointer by JNI
+ * Free result set operation from C to release result set pointer by JNI
*/
public int freeResultSet(long pSql) {
- int res = TSDBConstants.JNI_SUCCESS;
-// if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
-// throw new RuntimeException("Invalid result set pointer");
-// }
-
-// if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
- res = this.freeResultSetImp(this.taos, pSql);
-// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER;
-// }
-
+ int res = this.freeResultSetImp(this.taos, pSql);
isResultsetClosed = true;
return res;
}
- /**
- * Close the open result set which is associated to the current connection. If the result set is already
- * closed, return 0 for success.
- */
-// public int freeResultSet() {
-// int resCode = TSDBConstants.JNI_SUCCESS;
-// if (!isResultsetClosed) {
-// resCode = this.freeResultSetImp(this.taos, this.taosResultSetPointer);
-// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER;
-// isResultsetClosed = true;
-// }
-// return resCode;
-// }
-
private native int freeResultSetImp(long connection, long result);
/**
@@ -246,6 +221,7 @@ public class TSDBJNIConnector {
*/
public void closeConnection() throws SQLException {
int code = this.closeConnectionImp(this.taos);
+
if (code < 0) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
} else if (code == 0) {
@@ -253,6 +229,7 @@ public class TSDBJNIConnector {
} else {
throw new SQLException("Undefined error code returned by TDengine when closing a connection");
}
+
// invoke closeConnectionImpl only here
taosInfo.connect_close_increment();
}
@@ -289,7 +266,7 @@ public class TSDBJNIConnector {
private native void unsubscribeImp(long subscription, boolean isKeep);
/**
- * Validate if a create table sql statement is correct without actually creating that table
+ * Validate if a create table SQL statement is correct without actually creating that table
*/
public boolean validateCreateTableSql(String sql) {
int res = validateCreateTableSqlImp(taos, sql.getBytes());
@@ -297,4 +274,66 @@ public class TSDBJNIConnector {
}
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
+
+ public long prepareStmt(String sql) throws SQLException {
+ Long stmt = 0L;
+ try {
+ stmt = prepareStmtImp(sql.getBytes(), this.taos);
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
+ }
+
+ if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
+ }
+
+ if (stmt == TSDBConstants.JNI_SQL_NULL) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
+ }
+
+ if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
+ }
+
+ return stmt;
+ }
+
+ private native long prepareStmtImp(byte[] sql, long con);
+
+ public void setBindTableName(long stmt, String tableName) throws SQLException {
+ int code = setBindTableNameImp(stmt, tableName, this.taos);
+ if (code != TSDBConstants.JNI_SUCCESS) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to set table name");
+ }
+ }
+
+ private native int setBindTableNameImp(long stmt, String name, long conn);
+
+ public void bindColumnDataArray(long stmt, ByteBuffer colDataList, ByteBuffer lengthList, ByteBuffer isNullList, int type, int bytes, int numOfRows,int columnIndex) throws SQLException {
+ int code = bindColDataImp(stmt, colDataList.array(), lengthList.array(), isNullList.array(), type, bytes, numOfRows, columnIndex, this.taos);
+ if (code != TSDBConstants.JNI_SUCCESS) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to bind column data");
+ }
+ }
+
+ private native int bindColDataImp(long stmt, byte[] colDataList, byte[] lengthList, byte[] isNullList, int type, int bytes, int numOfRows, int columnIndex, long conn);
+
+ public void executeBatch(long stmt) throws SQLException {
+ int code = executeBatchImp(stmt, this.taos);
+ if (code != TSDBConstants.JNI_SUCCESS) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to execute batch bind");
+ }
+ }
+
+ private native int executeBatchImp(long stmt, long con);
+
+ public void closeBatch(long stmt) throws SQLException {
+ int code = closeStmt(stmt, this.taos);
+ if (code != TSDBConstants.JNI_SUCCESS) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to close batch bind");
+ }
+ }
+
+ private native int closeStmt(long stmt, long con);
}
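Together, these additions give the driver a native parameter-binding path: prepare a statement handle, bind the target table, push one (data, length, null-flag) buffer triple per column, then execute and close. A minimal sketch of that lifecycle, assumed to sit in the com.taosdata.jdbc package; the SQL text, table name d1001, and single INT column are illustrative only, not taken from this patch:

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;
    import java.sql.SQLException;

    class BatchBindSketch {
        static void insertIntColumn(TSDBJNIConnector connector, int[] values) throws SQLException {
            long stmt = connector.prepareStmt("insert into ? values(?)"); // prepare on the native side
            connector.setBindTableName(stmt, "d1001");                    // resolve the '?' table name

            int rows = values.length;
            ByteBuffer data    = ByteBuffer.allocate(rows * Integer.BYTES).order(ByteOrder.LITTLE_ENDIAN);
            ByteBuffer lengths = ByteBuffer.allocate(rows * Integer.BYTES).order(ByteOrder.LITTLE_ENDIAN);
            ByteBuffer isNull  = ByteBuffer.allocate(rows * Byte.BYTES).order(ByteOrder.LITTLE_ENDIAN);
            for (int v : values) {
                data.putInt(v);
                lengths.putInt(Integer.BYTES); // fixed-width type: length is constant
                isNull.put((byte) 0);          // 0 = value present, 1 = null
            }

            // one (data, length, null-flag) buffer triple per column, by column index
            connector.bindColumnDataArray(stmt, data, lengths, isNull,
                    TSDBConstants.TSDB_DATA_TYPE_INT, Integer.BYTES, rows, 0);

            connector.executeBatch(stmt); // submit every bound row in one batch
            connector.closeBatch(stmt);   // release the native statement handle
        }
    }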
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
index e545bbc8f2..71e07252a3 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
@@ -14,36 +14,44 @@
*****************************************************************************/
package com.taosdata.jdbc;
+import com.taosdata.jdbc.utils.Utils;
+
import java.io.InputStream;
import java.io.Reader;
+import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
import java.net.URL;
-import java.nio.charset.Charset;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
import java.sql.*;
import java.util.ArrayList;
import java.util.Calendar;
+import java.util.Collections;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/*
- * TDengine only supports a subset of the standard SQL, thus this implemetation of the
+ * TDengine only supports a subset of the standard SQL, thus this implementation of the
* standard JDBC API contains more or less some adjustments customized for certain
* compatibility needs.
*/
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
-
private String rawSql;
private Object[] parameters;
private boolean isPrepared;
-
+
+ private ArrayList colData;
+ private String tableName;
+ private long nativeStmtHandle = 0;
+
private volatile TSDBParameterMetaData parameterMetaData;
- TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) {
- super(connection, connecter);
+ TSDBPreparedStatement(TSDBConnection connection, String sql) {
+ super(connection);
init(sql);
+ int parameterCnt = 0;
if (sql.contains("?")) {
- int parameterCnt = 0;
for (int i = 0; i < sql.length(); i++) {
if ('?' == sql.charAt(i)) {
parameterCnt++;
@@ -52,6 +60,12 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
parameters = new Object[parameterCnt];
this.isPrepared = true;
}
+
+ if (parameterCnt > 1) {
+ // the table name is also a parameter, so ignore it.
+ this.colData = new ArrayList(parameterCnt - 1);
+ this.colData.addAll(Collections.nCopies(parameterCnt - 1, null));
+ }
}
private void init(String sql) {
@@ -126,28 +140,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
* @return a string of the native sql statement for TSDB
*/
private String getNativeSql(String rawSql) throws SQLException {
- String sql = rawSql;
- for (int i = 0; i < parameters.length; ++i) {
- Object para = parameters[i];
- if (para != null) {
- String paraStr;
- if (para instanceof byte[]) {
- paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
- } else {
- paraStr = para.toString();
- }
- // if para is timestamp or String or byte[] need to translate ' character
- if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
- paraStr = paraStr.replaceAll("'", "\\\\\\\\'");
- paraStr = "'" + paraStr + "'";
- }
- sql = sql.replaceFirst("[?]", paraStr);
- } else {
- sql = sql.replaceFirst("[?]", "NULL");
- }
- }
- clearParameters();
- return sql;
+ return Utils.getNativeSql(rawSql, this.parameters);
}
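getNativeSql now delegates to the shared Utils.getNativeSql helper; judging from the removed inline code above, the contract is to splice each bound parameter into the raw SQL in place of its '?'. A simplified re-sketch of that removed logic, for reference (the quote-escaping here is simplified, and the helper is assumed to keep the same contract):

    import java.nio.charset.StandardCharsets;
    import java.sql.Timestamp;
    import java.util.regex.Matcher;

    class NativeSqlSketch {
        static String toNativeSql(String rawSql, Object[] parameters) {
            String sql = rawSql;
            for (Object para : parameters) {
                String str;
                if (para == null) {
                    str = "NULL"; // unbound or null parameter
                } else {
                    str = (para instanceof byte[])
                            ? new String((byte[]) para, StandardCharsets.UTF_8)
                            : para.toString();
                    // timestamps, strings and byte[] are quoted, with quotes escaped
                    if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
                        str = "'" + str.replace("'", "\\'") + "'";
                    }
                }
                sql = sql.replaceFirst("[?]", Matcher.quoteReplacement(str));
            }
            return sql;
        }
    }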
@Override
@@ -275,15 +268,19 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- setObject(parameterIndex,x);
+ setObject(parameterIndex, x);
}
@Override
public void setObject(int parameterIndex, Object x) throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- if (parameterIndex < 1 && parameterIndex >= parameters.length)
+ }
+
+ if (parameterIndex < 1 || parameterIndex >= parameters.length) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
+ }
+
parameters[parameterIndex - 1] = x;
}
@@ -320,9 +317,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override
public void setRef(int parameterIndex, Ref x) throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-
+ }
+
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@@ -535,4 +533,276 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
+
+ ///////////////////////////////////////////////////////////////////////
+ // NOTE: the following APIs are not JDBC compatible
+ // set the bind table name
+ private static class ColumnInfo {
+ @SuppressWarnings("rawtypes")
+ private ArrayList data;
+ private int type;
+ private int bytes;
+ private boolean typeIsSet;
+
+ public ColumnInfo() {
+ this.typeIsSet = false;
+ }
+
+ public void setType(int type) throws SQLException {
+ if (this.isTypeSet()) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type has been set");
+ }
+
+ this.typeIsSet = true;
+ this.type = type;
+ }
+
+ public boolean isTypeSet() {
+ return this.typeIsSet;
+ }
+ };
+
+ public void setTableName(String name) {
+ this.tableName = name;
+ }
+
+ public void setValueImpl(int columnIndex, ArrayList list, int type, int bytes) throws SQLException {
+ ColumnInfo col = (ColumnInfo) this.colData.get(columnIndex);
+ if (col == null) {
+ ColumnInfo p = new ColumnInfo();
+ p.setType(type);
+ p.bytes = bytes;
+ p.data = (ArrayList<?>) list.clone();
+ this.colData.set(columnIndex, p);
+ } else {
+ if (col.type != type) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type mismatch");
+ }
+ col.data.addAll(list);
+ }
+ }
+
+ public void setInt(int columnIndex, ArrayList list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_INT, Integer.BYTES);
+ }
+
+ public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_FLOAT, Float.BYTES);
+ }
+
+ public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP, Long.BYTES);
+ }
+
+ public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BIGINT, Long.BYTES);
+ }
+
+ public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_DOUBLE, Double.BYTES);
+ }
+
+ public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BOOL, Byte.BYTES);
+ }
+
+ public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TINYINT, Byte.BYTES);
+ }
+
+ public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_SMALLINT, Short.BYTES);
+ }
+
+ public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BINARY, size);
+ }
+
+ // note: expand the required space for each NChar character
+ public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_NCHAR, size * Integer.BYTES);
+ }
+
+ public void columnDataAddBatch() throws SQLException {
+ // pass the data block to native code
+ if (rawSql == null) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "sql statement not set yet");
+ }
+
+ // table name is not set yet, abort
+ if (this.tableName == null) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "table name not set yet");
+ }
+
+ int numOfCols = this.colData.size();
+ if (numOfCols == 0) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
+ }
+
+ TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
+ this.nativeStmtHandle = connector.prepareStmt(rawSql);
+ connector.setBindTableName(this.nativeStmtHandle, this.tableName);
+
+ ColumnInfo colInfo = (ColumnInfo) this.colData.get(0);
+ if (colInfo == null) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
+ }
+
+ int rows = colInfo.data.size();
+ for (int i = 0; i < numOfCols; ++i) {
+ ColumnInfo col1 = this.colData.get(i);
+ if (col1 == null || !col1.isTypeSet()) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
+ }
+
+ if (rows != col1.data.size()) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "the row counts of the bound columns are not identical");
+ }
+
+ ByteBuffer colDataList = ByteBuffer.allocate(rows * col1.bytes);
+ colDataList.order(ByteOrder.LITTLE_ENDIAN);
+
+ ByteBuffer lengthList = ByteBuffer.allocate(rows * Integer.BYTES);
+ lengthList.order(ByteOrder.LITTLE_ENDIAN);
+
+ ByteBuffer isNullList = ByteBuffer.allocate(rows * Byte.BYTES);
+ isNullList.order(ByteOrder.LITTLE_ENDIAN);
+
+ switch (col1.type) {
+ case TSDBConstants.TSDB_DATA_TYPE_INT: {
+ for (int j = 0; j < rows; ++j) {
+ Integer val = (Integer) col1.data.get(j);
+ colDataList.putInt(val == null? Integer.MIN_VALUE:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_TINYINT: {
+ for (int j = 0; j < rows; ++j) {
+ Byte val = (Byte) col1.data.get(j);
+ colDataList.put(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_BOOL: {
+ for (int j = 0; j < rows; ++j) {
+ Boolean val = (Boolean) col1.data.get(j);
+ if (val == null) {
+ colDataList.put((byte) 0);
+ } else {
+ colDataList.put((byte) (val? 1:0));
+ }
+
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: {
+ for (int j = 0; j < rows; ++j) {
+ Short val = (Short) col1.data.get(j);
+ colDataList.putShort(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
+ case TSDBConstants.TSDB_DATA_TYPE_BIGINT: {
+ for (int j = 0; j < rows; ++j) {
+ Long val = (Long) col1.data.get(j);
+ colDataList.putLong(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_FLOAT: {
+ for (int j = 0; j < rows; ++j) {
+ Float val = (Float) col1.data.get(j);
+ colDataList.putFloat(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
+ for (int j = 0; j < rows; ++j) {
+ Double val = (Double) col1.data.get(j);
+ colDataList.putDouble(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
+ case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
+ String charset = TaosGlobalConfig.getCharset();
+ for (int j = 0; j < rows; ++j) {
+ String val = (String) col1.data.get(j);
+
+ colDataList.position(j * col1.bytes); // seek to the correct position
+ if (val != null) {
+ byte[] b = null;
+ try {
+ if (col1.type == TSDBConstants.TSDB_DATA_TYPE_BINARY) {
+ b = val.getBytes();
+ } else {
+ b = val.getBytes(charset);
+ }
+ } catch (UnsupportedEncodingException e) {
+ e.printStackTrace();
+ }
+
+ if (val.length() > col1.bytes) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "string data too long");
+ }
+
+ colDataList.put(b);
+ lengthList.putInt(b.length);
+ isNullList.put((byte) 0);
+ } else {
+ lengthList.putInt(0);
+ isNullList.put((byte) 1);
+ }
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_UTINYINT:
+ case TSDBConstants.TSDB_DATA_TYPE_USMALLINT:
+ case TSDBConstants.TSDB_DATA_TYPE_UINT:
+ case TSDBConstants.TSDB_DATA_TYPE_UBIGINT: {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "unsupported data types");
+ }
+ };
+
+ connector.bindColumnDataArray(this.nativeStmtHandle, colDataList, lengthList, isNullList, col1.type, col1.bytes, rows, i);
+ }
+ }
+
+ public void columnDataExecuteBatch() throws SQLException {
+ TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
+ connector.executeBatch(this.nativeStmtHandle);
+ this.columnDataClearBatch();
+ }
+
+ public void columnDataClearBatch() {
+ int size = this.colData.size();
+ this.colData.clear();
+
+ this.colData.addAll(Collections.nCopies(size, null));
+ this.tableName = null; // clear the table name
+ }
+
+ public void columnDataCloseBatch() throws SQLException {
+ TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
+ connector.closeBatch(this.nativeStmtHandle);
+
+ this.nativeStmtHandle = 0L;
+ this.tableName = null;
+ }
}
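A minimal usage sketch of the batch-bind path this file introduces (assumptions, not part of the patch: a reachable server at 127.0.0.1, an existing table weather(ts timestamp, f1 int), and that the constructor, outside this hunk, pre-sizes colData from the placeholder count):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.ArrayList;
    import com.taosdata.jdbc.TSDBPreparedStatement;

    public class BatchBindSketch {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection(
                    "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata");
            TSDBPreparedStatement ps = (TSDBPreparedStatement)
                    conn.prepareStatement("insert into ? values(?, ?)");

            ps.setTableName("weather");                 // target table for this block

            ArrayList<Long> ts = new ArrayList<>();     // column 0: timestamps (indexes are
            ts.add(System.currentTimeMillis());         // 0-based here, unlike JDBC's
            ts.add(System.currentTimeMillis() + 1);     // 1-based parameter indexes)
            ps.setTimestamp(0, ts);

            ArrayList<Integer> f1 = new ArrayList<>();  // column 1: int values
            f1.add(100);
            f1.add(101);
            ps.setInt(1, f1);

            ps.columnDataAddBatch();                    // hands the whole block to native code
            ps.columnDataExecuteBatch();                // executes, then clears the bound data
            ps.columnDataCloseBatch();                  // releases the native stmt handle
            conn.close();
        }
    }

Note that every column must carry the same number of rows; columnDataAddBatch rejects mismatched row counts and unset column types.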
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java
index 2576a25f0d..aba29d602b 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java
@@ -109,6 +109,8 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
public void close() throws SQLException {
if (isClosed)
return;
+ if (this.statement == null)
+ return;
if (this.jniConnector != null) {
int code = this.jniConnector.freeResultSet(this.resultSetPointer);
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
@@ -461,12 +463,13 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
}
public boolean isClosed() throws SQLException {
- if (isClosed)
- return true;
- if (jniConnector != null) {
- isClosed = jniConnector.isResultsetClosed();
- }
return isClosed;
+// if (isClosed)
+// return true;
+// if (jniConnector != null) {
+// isClosed = jniConnector.isResultsetClosed();
+// }
+// return isClosed;
}
public String getNString(int columnIndex) throws SQLException {
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
index ce5290de66..7b3be5d263 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
@@ -29,6 +29,8 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import com.taosdata.jdbc.utils.NullType;
+
public class TSDBResultSetBlockData {
private int numOfRows = 0;
private int rowIndex = 0;
@@ -164,59 +166,7 @@ public class TSDBResultSetBlockData {
}
}
- private static class NullType {
- private static final byte NULL_BOOL_VAL = 0x2;
- private static final String NULL_STR = "null";
-
- public String toString() {
- return NullType.NULL_STR;
- }
-
- public static boolean isBooleanNull(byte val) {
- return val == NullType.NULL_BOOL_VAL;
- }
-
- private static boolean isTinyIntNull(byte val) {
- return val == Byte.MIN_VALUE;
- }
-
- private static boolean isSmallIntNull(short val) {
- return val == Short.MIN_VALUE;
- }
-
- private static boolean isIntNull(int val) {
- return val == Integer.MIN_VALUE;
- }
-
- private static boolean isBigIntNull(long val) {
- return val == Long.MIN_VALUE;
- }
-
- private static boolean isFloatNull(float val) {
- return Float.isNaN(val);
- }
-
- private static boolean isDoubleNull(double val) {
- return Double.isNaN(val);
- }
-
- private static boolean isBinaryNull(byte[] val, int length) {
- if (length != Byte.BYTES) {
- return false;
- }
-
- return val[0] == 0xFF;
- }
-
- private static boolean isNcharNull(byte[] val, int length) {
- if (length != Integer.BYTES) {
- return false;
- }
-
- return (val[0] & val[1] & val[2] & val[3]) == 0xFF;
- }
-
- }
+
/**
* The original type may not be a string type, but will be converted to by
@@ -488,8 +438,8 @@ public class TSDBResultSetBlockData {
}
try {
- String ss = TaosGlobalConfig.getCharset();
- return new String(dest, ss);
+ String charset = TaosGlobalConfig.getCharset();
+ return new String(dest, charset);
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
index 34470fbc4e..618e896a6d 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
@@ -84,7 +84,8 @@ public class TSDBResultSetRowData {
data.set(col, value);
}
- public int getInt(int col, int srcType) throws SQLException {
+ @SuppressWarnings("deprecation")
+ public int getInt(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch (srcType) {
@@ -128,7 +129,7 @@ public class TSDBResultSetRowData {
long value = (long) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
- return new Long(value).intValue();
+ return Long.valueOf(value).intValue();
}
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
index fb20a621b0..d8ba67576d 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
@@ -19,8 +19,6 @@ import java.sql.ResultSet;
import java.sql.SQLException;
public class TSDBStatement extends AbstractStatement {
-
- private TSDBJNIConnector connector;
/**
* Status of current statement
*/
@@ -29,29 +27,26 @@ public class TSDBStatement extends AbstractStatement {
private TSDBConnection connection;
private TSDBResultSet resultSet;
- public void setConnection(TSDBConnection connection) {
+ TSDBStatement(TSDBConnection connection) {
this.connection = connection;
}
- TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) {
- this.connection = connection;
- this.connector = connector;
- }
-
public ResultSet executeQuery(String sql) throws SQLException {
// check if closed
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
+ }
+
//TODO: if an insert statement is executed through executeQuery, the SQL runs first and pSql is only checked afterwards to see whether it was an insert; by that point the insert has already succeeded
// execute query
- long pSql = this.connector.executeQuery(sql);
+ long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
- if (this.connector.isUpdateQuery(pSql)) {
- this.connector.freeResultSet(pSql);
+ if (this.connection.getConnector().isUpdateQuery(pSql)) {
+ this.connection.getConnector().freeResultSet(pSql);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY);
}
- TSDBResultSet res = new TSDBResultSet(this, this.connector, pSql);
+ TSDBResultSet res = new TSDBResultSet(this, this.connection.getConnector(), pSql);
res.setBatchFetch(this.connection.getBatchFetch());
return res;
}
@@ -60,14 +55,14 @@ public class TSDBStatement extends AbstractStatement {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- long pSql = this.connector.executeQuery(sql);
+ long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
- if (!this.connector.isUpdateQuery(pSql)) {
- this.connector.freeResultSet(pSql);
+ if (!this.connection.getConnector().isUpdateQuery(pSql)) {
+ this.connection.getConnector().freeResultSet(pSql);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE);
}
- int affectedRows = this.connector.getAffectedRows(pSql);
- this.connector.freeResultSet(pSql);
+ int affectedRows = this.connection.getConnector().getAffectedRows(pSql);
+ this.connection.getConnector().freeResultSet(pSql);
return affectedRows;
}
@@ -81,30 +76,29 @@ public class TSDBStatement extends AbstractStatement {
public boolean execute(String sql) throws SQLException {
// check if closed
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
+ }
+
// execute query
- long pSql = this.connector.executeQuery(sql);
+ long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
- if (this.connector.isUpdateQuery(pSql)) {
- this.affectedRows = this.connector.getAffectedRows(pSql);
- this.connector.freeResultSet(pSql);
+ if (this.connection.getConnector().isUpdateQuery(pSql)) {
+ this.affectedRows = this.connection.getConnector().getAffectedRows(pSql);
+ this.connection.getConnector().freeResultSet(pSql);
return false;
}
- this.resultSet = new TSDBResultSet(this, this.connector, pSql);
+ this.resultSet = new TSDBResultSet(this, this.connection.getConnector(), pSql);
this.resultSet.setBatchFetch(this.connection.getBatchFetch());
return true;
}
public ResultSet getResultSet() throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-// long resultSetPointer = connector.getResultSet();
-// TSDBResultSet resSet = null;
-// if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
-// resSet = new TSDBResultSet(connector, resultSetPointer);
-// }
+ }
+
return this.resultSet;
}
@@ -115,12 +109,20 @@ public class TSDBStatement extends AbstractStatement {
}
public Connection getConnection() throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- if (this.connector == null)
+ }
+
+ if (this.connection.getConnector() == null) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
+ }
+
return this.connection;
}
+
+ public void setConnection(TSDBConnection connection) {
+ this.connection = connection;
+ }
public boolean isClosed() throws SQLException {
return isClosed;
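TSDBStatement now funnels every native call through connection.getConnector() instead of a statement-held connector. The query/update contract it enforces is easiest to see in a short sketch (database, table, and credentials are illustrative assumptions):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class StatementContractSketch {
        public static void main(String[] args) throws SQLException {
            Connection conn = DriverManager.getConnection(
                    "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata");
            try (Statement stmt = conn.createStatement()) {
                // DML goes through executeUpdate, which frees the native result set itself.
                int rows = stmt.executeUpdate("insert into weather values (now, 23)");
                System.out.println(rows + " row(s) affected");
                // Queries go through executeQuery; handing it DML raises
                // ERROR_INVALID_WITH_EXECUTEQUERY, and the converse raises
                // ERROR_INVALID_WITH_EXECUTEUPDATE.
                try (ResultSet rs = stmt.executeQuery("select * from weather")) {
                    while (rs.next())
                        System.out.println(rs.getTimestamp(1) + " " + rs.getInt(2));
                }
            }
            conn.close();
        }
    }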
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
index 6efe13561d..a94cfa6e07 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
@@ -17,7 +17,7 @@ public class RestfulDriver extends AbstractDriver {
static {
try {
- java.sql.DriverManager.registerDriver(new RestfulDriver());
+ DriverManager.registerDriver(new RestfulDriver());
} catch (SQLException e) {
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_URL_NOT_SET, e);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java
index f82955ca9d..f58e3f8cd2 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java
@@ -2,12 +2,12 @@ package com.taosdata.jdbc.rs;
import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;
+import com.taosdata.jdbc.utils.Utils;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
-import java.nio.charset.Charset;
import java.sql.*;
import java.util.Calendar;
@@ -21,6 +21,7 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
public RestfulPreparedStatement(RestfulConnection conn, String database, String sql) {
super(conn, database);
this.rawSql = sql;
+
if (sql.contains("?")) {
int parameterCnt = 0;
for (int i = 0; i < sql.length(); i++) {
@@ -58,29 +59,14 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
return executeUpdate(sql);
}
- private String getNativeSql(String rawSql) throws SQLException {
- String sql = rawSql;
- for (int i = 0; i < parameters.length; ++i) {
- Object para = parameters[i];
- if (para != null) {
- String paraStr;
- if (para instanceof byte[]) {
- paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
- } else {
- paraStr = para.toString();
- }
- // if para is timestamp or String or byte[] need to translate ' character
- if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
- paraStr = paraStr.replaceAll("'", "\\\\\\\\'");
- paraStr = "'" + paraStr + "'";
- }
- sql = sql.replaceFirst("[?]", paraStr);
- } else {
- sql = sql.replaceFirst("[?]", "NULL");
- }
- }
- clearParameters();
- return sql;
+ /****
+ * Convert rawSql into an executable SQL statement by substituting the values held in
+ * the parameters field, e.g. for insert into ?.? (?,?,?) using ?.? (?,?,?) tags(?, ?, ?) values(?, ?, ?)
+ * @param rawSql an insert, select, or other statement that uses ? as its placeholder
+ * @return the native SQL with every placeholder replaced
+ */
+ private String getNativeSql(String rawSql) {
+ return Utils.getNativeSql(rawSql, this.parameters);
}
@Override
@@ -220,8 +206,8 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-
- setObject(parameterIndex,x);
+
+ setObject(parameterIndex, x);
}
@Override
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
index e9cc3a009f..fbc3a50a27 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
@@ -136,21 +136,21 @@ public class RestfulStatement extends AbstractStatement {
throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc"));
}
this.resultSet = null;
- this.affectedRows = checkJsonResultSet(jsonObject);
+ this.affectedRows = getAffectedRows(jsonObject);
return this.affectedRows;
}
- private int checkJsonResultSet(JSONObject jsonObject) {
+ private int getAffectedRows(JSONObject jsonObject) throws SQLException {
// create ... SQLs should return 0, and the RESTful result looks like this:
// {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1}
JSONArray head = jsonObject.getJSONArray("head");
+ if (head.size() != 1 || !"affected_rows".equals(head.getString(0)))
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
JSONArray data = jsonObject.getJSONArray("data");
- int rows = Integer.parseInt(jsonObject.getString("rows"));
- if (head.size() == 1 && "affected_rows".equals(head.getString(0))
- && data.size() == 1 && data.getJSONArray(0).getInteger(0) == 0 && rows == 1) {
- return 0;
- }
- return rows;
+ if (data != null)
+ return data.getJSONArray(0).getInteger(0);
+
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
@Override
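A small fastjson sketch of the response contract the rewritten getAffectedRows enforces (the sample payload is illustrative):

    import com.alibaba.fastjson.JSON;
    import com.alibaba.fastjson.JSONObject;

    public class AffectedRowsSketch {
        public static void main(String[] args) {
            // A DML response always carries a single "affected_rows" head; anything
            // else now triggers ERROR_INVALID_VARIABLE instead of being guessed at.
            JSONObject resp = JSON.parseObject(
                    "{\"status\":\"succ\",\"head\":[\"affected_rows\"],\"data\":[[3]],\"rows\":1}");
            // The count is read from data[0][0] rather than from the "rows" field.
            int affected = resp.getJSONArray("data").getJSONArray(0).getInteger(0);
            System.out.println(affected); // 3
        }
    }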
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java
new file mode 100755
index 0000000000..0e05aeeee7
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java
@@ -0,0 +1,91 @@
+package com.taosdata.jdbc.utils;
+
+public class NullType {
+ private static final byte NULL_BOOL_VAL = 0x2;
+ private static final String NULL_STR = "null";
+
+ public String toString() {
+ return NullType.NULL_STR;
+ }
+
+ public static boolean isBooleanNull(byte val) {
+ return val == NullType.NULL_BOOL_VAL;
+ }
+
+ public static boolean isTinyIntNull(byte val) {
+ return val == Byte.MIN_VALUE;
+ }
+
+ public static boolean isSmallIntNull(short val) {
+ return val == Short.MIN_VALUE;
+ }
+
+ public static boolean isIntNull(int val) {
+ return val == Integer.MIN_VALUE;
+ }
+
+ public static boolean isBigIntNull(long val) {
+ return val == Long.MIN_VALUE;
+ }
+
+ public static boolean isFloatNull(float val) {
+ return Float.isNaN(val);
+ }
+
+ public static boolean isDoubleNull(double val) {
+ return Double.isNaN(val);
+ }
+
+ public static boolean isBinaryNull(byte[] val, int length) {
+ if (length != Byte.BYTES) {
+ return false;
+ }
+
+ return val[0] == (byte) 0xFF;
+ }
+
+ public static boolean isNcharNull(byte[] val, int length) {
+ if (length != Integer.BYTES) {
+ return false;
+ }
+
+ return (val[0] & val[1] & val[2] & val[3]) == 0xFF;
+ }
+
+ public static byte getBooleanNull() {
+ return NullType.NULL_BOOL_VAL;
+ }
+
+ public static byte getTinyintNull() {
+ return Byte.MIN_VALUE;
+ }
+
+ public static int getIntNull() {
+ return Integer.MIN_VALUE;
+ }
+
+ public static short getSmallIntNull() {
+ return Short.MIN_VALUE;
+ }
+
+ public static long getBigIntNull() {
+ return Long.MIN_VALUE;
+ }
+
+ public static int getFloatNull() {
+ return 0x7FF00000;
+ }
+
+ public static long getDoubleNull() {
+ return 0x7FFFFF0000000000L;
+ }
+
+ public static byte getBinaryNull() {
+ return (byte) 0xFF;
+ }
+
+ public static byte[] getNcharNull() {
+ return new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF};
+ }
+
+}
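A short sketch of the sentinel checks above; it shows why the binary comparison has to stay on the byte side of Java's int-widening rules:

    import com.taosdata.jdbc.utils.NullType;

    public class NullMarkerSketch {
        public static void main(String[] args) {
            byte marker = NullType.getBinaryNull();            // (byte) 0xFF
            // A byte widens to int with sign extension before ==, so (byte) 0xFF
            // becomes -1 while the literal 0xFF is 255: they never compare equal.
            System.out.println(marker == 0xFF);                // false
            System.out.println(marker == (byte) 0xFF);         // true
            System.out.println(NullType.isBinaryNull(new byte[]{marker}, Byte.BYTES)); // true
        }
    }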
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java
new file mode 100644
index 0000000000..052f34858d
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java
@@ -0,0 +1,136 @@
+package com.taosdata.jdbc.utils;
+
+import com.google.common.collect.Range;
+import com.google.common.collect.RangeSet;
+import com.google.common.collect.TreeRangeSet;
+
+import java.nio.charset.Charset;
+import java.sql.Timestamp;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+public class Utils {
+
+ private static Pattern ptn = Pattern.compile(".*?'");
+
+ public static String escapeSingleQuota(String origin) {
+ Matcher m = ptn.matcher(origin);
+ StringBuffer sb = new StringBuffer();
+ int end = 0;
+ while (m.find()) {
+ end = m.end();
+ String seg = origin.substring(m.start(), end);
+ int len = seg.length();
+ if (len == 1) {
+ if ('\'' == seg.charAt(0)) {
+ sb.append("\\'");
+ } else {
+ sb.append(seg);
+ }
+ } else { // len > 1
+ sb.append(seg.substring(0, seg.length() - 2));
+ char lastcSec = seg.charAt(seg.length() - 2);
+ if (lastcSec == '\\') {
+ sb.append("\\'");
+ } else {
+ sb.append(lastcSec);
+ sb.append("\\'");
+ }
+ }
+ }
+
+ if (end < origin.length()) {
+ sb.append(origin.substring(end));
+ }
+ return sb.toString();
+ }
+
+ public static String getNativeSql(String rawSql, Object[] parameters) {
+ // toLowerCase
+ String preparedSql = rawSql.trim().toLowerCase();
+
+ String[] clause = new String[0];
+ if (SqlSyntaxValidator.isInsertSql(preparedSql)) {
+ // insert or import
+ clause = new String[]{"values\\s*\\(.*?\\)", "tags\\s*\\(.*?\\)"};
+ }
+ if (SqlSyntaxValidator.isSelectSql(preparedSql)) {
+ // select
+ clause = new String[]{"where\\s*.*"};
+ }
+ Map<Integer, Integer> placeholderPositions = new HashMap<>();
+ RangeSet<Integer> clauseRangeSet = TreeRangeSet.create();
+ findPlaceholderPosition(preparedSql, placeholderPositions);
+ findClauseRangeSet(preparedSql, clause, clauseRangeSet);
+
+ return transformSql(rawSql, parameters, placeholderPositions, clauseRangeSet);
+ }
+
+ private static void findClauseRangeSet(String preparedSql, String[] regexArr, RangeSet<Integer> clauseRangeSet) {
+ clauseRangeSet.clear();
+ for (String regex : regexArr) {
+ Matcher matcher = Pattern.compile(regex).matcher(preparedSql);
+ while (matcher.find()) {
+ int start = matcher.start();
+ int end = matcher.end();
+ clauseRangeSet.add(Range.closed(start, end));
+ }
+ }
+ }
+
+ private static void findPlaceholderPosition(String preparedSql, Map<Integer, Integer> placeholderPosition) {
+ placeholderPosition.clear();
+ Matcher matcher = Pattern.compile("\\?").matcher(preparedSql);
+ int index = 0;
+ while (matcher.find()) {
+ int pos = matcher.start();
+ placeholderPosition.put(index, pos);
+ index++;
+ }
+ }
+
+ /***
+ * Substitute paramArr into rawSql, quoting only the placeholders that fall inside a recognized clause.
+ * @param rawSql the SQL text containing ? placeholders
+ * @param paramArr the parameter values, in placeholder order
+ * @param placeholderPosition placeholder index -> character offset in the prepared SQL
+ * @param clauseRangeSet character ranges of the clauses whose string/timestamp values need quoting
+ * @return the SQL with every placeholder replaced
+ */
+ private static String transformSql(String rawSql, Object[] paramArr, Map<Integer, Integer> placeholderPosition, RangeSet<Integer> clauseRangeSet) {
+ String[] sqlArr = rawSql.split("\\?");
+
+ return IntStream.range(0, sqlArr.length).mapToObj(index -> {
+ if (index == paramArr.length)
+ return sqlArr[index];
+
+ Object para = paramArr[index];
+ String paraStr;
+ if (para != null) {
+ if (para instanceof byte[]) {
+ paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
+ } else {
+ paraStr = para.toString();
+ }
+ // Timestamp, String, and byte[] values need their ' characters escaped
+ if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
+ paraStr = Utils.escapeSingleQuota(paraStr);
+
+ Integer pos = placeholderPosition.get(index);
+ boolean contains = clauseRangeSet.contains(pos);
+ if (contains) {
+ paraStr = "'" + paraStr + "'";
+ }
+ }
+ } else {
+ paraStr = "NULL";
+ }
+ return sqlArr[index] + paraStr;
+ }).collect(Collectors.joining());
+ }
+
+}
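A worked example of the clause-aware substitution in getNativeSql (statement and values are illustrative, assuming SqlSyntaxValidator classifies the statement as an insert):

    import java.sql.Timestamp;
    import com.taosdata.jdbc.utils.Utils;

    public class NativeSqlSketch {
        public static void main(String[] args) {
            Object[] params = new Object[]{
                    "bei'jing",                                  // tag value: quoted, ' escaped
                    100,                                         // plain int: left bare
                    Timestamp.valueOf("2021-01-01 00:00:00.0")}; // quoted inside values(...)
            String raw = "insert into t1 using weather tags(?) values(?, ?)";
            System.out.println(Utils.getNativeSql(raw, params));
            // expected: insert into t1 using weather tags('bei\'jing')
            //           values(100, '2021-01-01 00:00:00.0')
        }
    }

Placeholders outside the values/tags/where ranges, such as a t? table name, are substituted without quoting, which is what lets the tests below bind table and database names.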
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
index 11c3de3052..24c73fdd5c 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
@@ -1,6 +1,7 @@
package com.taosdata.jdbc;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -12,69 +13,76 @@ import java.util.Properties;
import java.util.concurrent.TimeUnit;
public class SubscribeTest {
+
Connection connection;
Statement statement;
String dbName = "test";
String tName = "t0";
String host = "127.0.0.1";
String topic = "test";
-
- @Before
- public void createDatabase() {
- try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
- Properties properties = new Properties();
- properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
- properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
- properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
-
- statement = connection.createStatement();
- statement.execute("drop database if exists " + dbName);
- statement.execute("create database if not exists " + dbName);
- statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
- long ts = System.currentTimeMillis();
- for (int i = 0; i < 2; i++) {
- ts += i;
- String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")";
- statement.executeUpdate(sql);
- }
-
- } catch (ClassNotFoundException | SQLException e) {
- return;
- }
- }
+ private long ts;
@Test
public void subscribe() {
try {
String rawSql = "select * from " + dbName + "." + tName + ";";
- System.out.println(rawSql);
-// TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);
+ TSDBConnection conn = connection.unwrap(TSDBConnection.class);
+ TSDBSubscribe subscribe = conn.subscribe(topic, rawSql, false);
-// int a = 0;
-// while (true) {
-// TimeUnit.MILLISECONDS.sleep(1000);
-// TSDBResultSet resSet = subscribe.consume();
-// while (resSet.next()) {
-// for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
-// System.out.printf(i + ": " + resSet.getString(i) + "\t");
-// }
-// System.out.println("\n======" + a + "==========");
-// }
-// a++;
-// if (a >= 2) {
-// break;
-// }
-// resSet.close();
-// }
-//
-// subscribe.close(true);
- } catch (Exception e) {
- e.printStackTrace();
+ for (int j = 0; j < 10; j++) {
+ TimeUnit.SECONDS.sleep(1);
+ TSDBResultSet resSet = subscribe.consume();
+
+ int rowCnt = 0;
+ while (resSet.next()) {
+ if (rowCnt == 0) {
+ long cur_ts = resSet.getTimestamp(1).getTime();
+ int k = resSet.getInt(2);
+ int v = resSet.getInt(3);
+ Assert.assertEquals(ts, cur_ts);
+ Assert.assertEquals(100, k);
+ Assert.assertEquals(1, v);
+ }
+ if (rowCnt == 1) {
+ long cur_ts = resSet.getTimestamp(1).getTime();
+ int k = resSet.getInt(2);
+ int v = resSet.getInt(3);
+ Assert.assertEquals(ts + 1, cur_ts);
+ Assert.assertEquals(101, k);
+ Assert.assertEquals(2, v);
+
+ }
+ rowCnt++;
+ }
+ if (j == 0)
+ Assert.assertEquals(2, rowCnt);
+ resSet.close();
+ }
+ subscribe.close(true);
+
+
+ } catch (SQLException | InterruptedException throwables) {
+ throwables.printStackTrace();
}
}
+ @Before
+ public void createDatabase() throws SQLException {
+ Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
+
+ statement = connection.createStatement();
+ statement.execute("drop database if exists " + dbName);
+ statement.execute("create database if not exists " + dbName);
+ statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
+ ts = System.currentTimeMillis();
+ statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 100, 1)");
+ statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + (ts + 1) + ", 101, 2)");
+ }
+
@After
public void close() {
try {
@@ -86,6 +94,5 @@ public class SubscribeTest {
} catch (SQLException e) {
e.printStackTrace();
}
-
}
}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java
index c5c6f7bca5..f304fd6874 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java
@@ -3,7 +3,6 @@ package com.taosdata.jdbc;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import com.google.common.primitives.Shorts;
-import com.taosdata.jdbc.rs.RestfulResultSet;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
@@ -177,7 +176,8 @@ public class TSDBResultSetTest {
rs.getAsciiStream("f1");
}
- @Test(expected = SQLFeatureNotSupportedException.class)
+ @SuppressWarnings("deprecation")
+ @Test(expected = SQLFeatureNotSupportedException.class)
public void getUnicodeStream() throws SQLException {
rs.getUnicodeStream("f1");
}
@@ -326,7 +326,7 @@ public class TSDBResultSetTest {
@Test(expected = SQLFeatureNotSupportedException.class)
public void getRow() throws SQLException {
- int row = rs.getRow();
+ rs.getRow();
}
@Test(expected = SQLFeatureNotSupportedException.class)
@@ -405,12 +405,12 @@ public class TSDBResultSetTest {
@Test(expected = SQLFeatureNotSupportedException.class)
public void updateByte() throws SQLException {
- rs.updateByte(1, new Byte("0"));
+ rs.updateByte(1, (byte) 0);
}
@Test(expected = SQLFeatureNotSupportedException.class)
public void updateShort() throws SQLException {
- rs.updateShort(1, new Short("0"));
+ rs.updateShort(1, (short) 0);
}
@Test(expected = SQLFeatureNotSupportedException.class)
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java
new file mode 100644
index 0000000000..4b4e83719f
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java
@@ -0,0 +1,401 @@
+package com.taosdata.jdbc.cases;
+
+import org.junit.*;
+
+import java.sql.*;
+
+public class InsertSpecialCharacterJniTest {
+
+ private static final String host = "127.0.0.1";
+ private static Connection conn;
+ private static String dbName = "spec_char_test";
+ private static String tbname1 = "test";
+ private static String tbname2 = "weather";
+ private static String special_character_str_1 = "$asd$$fsfsf$";
+ private static String special_character_str_2 = "\\\\asdfsfsf\\\\";
+ private static String special_character_str_3 = "\\\\asdfsfsf\\";
+ private static String special_character_str_4 = "?asd??fsf?sf?";
+ private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
+
+ @Test
+ public void testCase01() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_1.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from ?";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, tbname1);
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_1, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+
+ @Test
+ public void testCase02() throws SQLException {
+ //TODO:
+ // Expected :\asdfsfsf\\
+ // Actual :\asdfsfsf\
+
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_2.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ //TODO: bug to be fixed
+// Assert.assertEquals(special_character_str_2, f1);
+ Assert.assertEquals(special_character_str_2.substring(1, special_character_str_1.length() - 1), f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase03() throws SQLException {
+ //TODO:
+ // TDengine ERROR (216): Syntax error in SQL
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_3.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_3, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase04() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_4.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase05() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_5.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase06() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, special_character_str_4);
+ pstmt.setTimestamp(3, new Timestamp(now));
+ pstmt.setBytes(4, special_character_str_4.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query t1
+ final String query = "select * from t1";
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase07() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, ?, ?) ; ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_4.getBytes());
+ pstmt.setString(3, special_character_str_4);
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertEquals(special_character_str_4, f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase08() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?) ? ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, special_character_str_5);
+ pstmt.setTimestamp(3, new Timestamp(now));
+ pstmt.setBytes(4, special_character_str_5.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ }
+
+ @Test
+ public void testCase09() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into ?.t? using " + tbname2 + " tags(?) values(?, ?, ?) t? using weather tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+ // query t1
+ String query = "select * from t?";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setInt(1, 1);
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ // query t2
+ query = "select * from t2";
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ byte[] f1 = rs.getBytes(2);
+ Assert.assertNull(f1);
+ String f2 = new String(rs.getBytes(3));
+ Assert.assertEquals(special_character_str_5, f2);
+ }
+ }
+
+ @Test
+ public void testCase10() throws SQLException {
+ final long now = System.currentTimeMillis();
+
+ // insert
+ final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, tbname2);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+ //query t1
+ String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(4, new Timestamp(0));
+ pstmt.setString(5, "f1");
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ byte[] f2 = rs.getBytes(3);
+ Assert.assertNull(f2);
+ }
+ // query t2
+ query = "select * from t? where ts < ? and ts >= ? and ? is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setInt(1, 2);
+ pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(3, new Timestamp(0));
+ pstmt.setString(4, "f2");
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ byte[] f1 = rs.getBytes(2);
+ Assert.assertNull(f1);
+ String f2 = new String(rs.getBytes(3));
+ Assert.assertEquals(special_character_str_5, f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase11() throws SQLException {
+ final String specialCharacterStr = "?#sd@$f(((s[P)){]}f?s[]{}%vs^a&d*jhg)(j))(f@~!?$";
+ final long now = System.currentTimeMillis();
+
+ final String sql = "insert into t? using " + tbname2 + " values(?, ?, 'abc?abc') ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setTimestamp(2, new Timestamp(now));
+ pstmt.setBytes(3, specialCharacterStr.getBytes());
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ }
+
+
+ @Test
+ public void testCase12() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setString(2, special_character_str_4);
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals("HelloTDengine", f1);
+ String f2 = rs.getString(3);
+ Assert.assertEquals(special_character_str_4, f2);
+ }
+ }
+
+ @Before
+ public void before() throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists " + tbname1 + "");
+ stmt.execute("create table " + tbname1 + "(ts timestamp,f1 binary(64),f2 nchar(64))");
+ stmt.execute("drop table if exists " + tbname2);
+ stmt.execute("create table " + tbname2 + "(ts timestamp, f1 binary(64), f2 nchar(64)) tags(loc nchar(64))");
+ }
+ }
+
+ @BeforeClass
+ public static void beforeClass() throws SQLException {
+ String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+ conn = DriverManager.getConnection(url);
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop database if exists " + dbName);
+ stmt.execute("create database if not exists " + dbName);
+ stmt.execute("use " + dbName);
+ }
+ }
+
+ @AfterClass
+ public static void afterClass() throws SQLException {
+ if (conn != null)
+ conn.close();
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java
new file mode 100644
index 0000000000..ea0d1aec41
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java
@@ -0,0 +1,401 @@
+package com.taosdata.jdbc.cases;
+
+import org.junit.*;
+
+import java.sql.*;
+
+public class InsertSpecialCharacterRestfulTest {
+
+ private static final String host = "127.0.0.1";
+ // private static final String host = "master";
+ private static Connection conn;
+ private static String dbName = "spec_char_test";
+ private static String tbname1 = "test";
+ private static String tbname2 = "weather";
+ private static String special_character_str_1 = "$asd$$fsfsf$";
+ private static String special_character_str_2 = "\\\\asdfsfsf\\\\";
+ private static String special_character_str_3 = "\\\\asdfsfsf\\";
+ private static String special_character_str_4 = "?asd??fsf?sf?";
+ private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
+
+ @Test
+ public void testCase01() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_1.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from ?";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, tbname1);
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_1, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+
+ @Test
+ public void testCase02() throws SQLException {
+ //TODO:
+ // Expected :\asdfsfsf\\
+ // Actual :\asdfsfsf\
+
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_2.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ //TODO: bug to be fixed
+// Assert.assertEquals(special_character_str_2, f1);
+ Assert.assertEquals(special_character_str_2.substring(1, special_character_str_1.length() - 1), f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase03() throws SQLException {
+ //TODO:
+ // TDengine ERROR (216): Syntax error in SQL
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_3.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_3, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase04() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_4.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase05() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_5.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase06() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, special_character_str_4);
+ pstmt.setTimestamp(3, new Timestamp(now));
+ pstmt.setBytes(4, special_character_str_4.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query t1
+ final String query = "select * from t1";
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase07() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, ?, ?) ; ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_4.getBytes());
+ pstmt.setString(3, special_character_str_4);
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertEquals(special_character_str_4, f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase08() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?) ? ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, special_character_str_5);
+ pstmt.setTimestamp(3, new Timestamp(now));
+ pstmt.setBytes(4, special_character_str_5.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ }
+
+ @Test
+ public void testCase09() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into ?.t? using " + tbname2 + " tags(?) values(?, ?, ?) t? using weather tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+ // query t1
+ String query = "select * from t?";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setInt(1, 1);
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ // query t2
+ query = "select * from t2";
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ byte[] f1 = rs.getBytes(2);
+ Assert.assertNull(f1);
+ String f2 = new String(rs.getBytes(3));
+ Assert.assertEquals(special_character_str_5, f2);
+ }
+ }
+
+ @Test
+ public void testCase10() throws SQLException {
+ final long now = System.currentTimeMillis();
+
+ // insert
+ final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, tbname2);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+ // query t1
+ String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(4, new Timestamp(0));
+ pstmt.setString(5, "f1");
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ byte[] f2 = rs.getBytes(3);
+ Assert.assertNull(f2);
+ }
+ // query t2
+ query = "select * from t? where ts < ? and ts >= ? and ? is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setInt(1, 2);
+ pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(3, new Timestamp(0));
+ pstmt.setString(4, "f2");
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ byte[] f1 = rs.getBytes(2);
+ Assert.assertNull(f1);
+ String f2 = new String(rs.getBytes(3));
+ Assert.assertEquals(special_character_str_5, f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase11() throws SQLException {
+ final String specialCharacterStr = "?#sd@$f(((s[P)){]}f?s[]{}%vs^a&d*jhg)(j))(f@~!?$";
+ final long now = System.currentTimeMillis();
+
+ final String sql = "insert into t? using " + tbname2 + " values(?, ?, 'abc?abc') ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setTimestamp(2, new Timestamp(now));
+ pstmt.setBytes(3, specialCharacterStr.getBytes());
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ }
+
+ @Test
+ public void testCase12() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setString(2, special_character_str_4);
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals("HelloTDengine", f1);
+ String f2 = rs.getString(3);
+ Assert.assertEquals(special_character_str_4, f2);
+ }
+ }
+
+ @Before
+ public void before() throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists " + tbname1 + "");
+ stmt.execute("create table " + tbname1 + "(ts timestamp,f1 binary(64),f2 nchar(64))");
+ stmt.execute("drop table if exists " + tbname2);
+ stmt.execute("create table " + tbname2 + "(ts timestamp, f1 binary(64), f2 nchar(64)) tags(loc nchar(64))");
+ }
+ }
+
+ @BeforeClass
+ public static void beforeClass() throws SQLException {
+ String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
+ conn = DriverManager.getConnection(url);
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop database if exists " + dbName);
+ stmt.execute("create database if not exists " + dbName);
+ stmt.execute("use " + dbName);
+ }
+ }
+
+ @AfterClass
+ public static void afterClass() throws SQLException {
+ if (conn != null)
+ conn.close();
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java
index 40956a601f..e4dd6384f9 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java
@@ -6,11 +6,11 @@ import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
-import java.io.Serializable;
import java.sql.*;
public class RestfulPreparedStatementTest {
private static final String host = "127.0.0.1";
+ // private static final String host = "master";
private static Connection conn;
private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
private static PreparedStatement pstmt_insert;
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java
new file mode 100644
index 0000000000..c861ef2966
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java
@@ -0,0 +1,26 @@
+package com.taosdata.jdbc.utils;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class UtilsTest {
+
+ @Test
+ public void escapeSingleQuota() {
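+ // escapeSingleQuota turns each unescaped ' into \' and leaves an already
+ // escaped \' unchanged, so all three literals below normalize identically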
+ String s = "'''''a\\'";
+ String news = Utils.escapeSingleQuota(s);
+ Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
+
+ s = "\'''''a\\'";
+ news = Utils.escapeSingleQuota(s);
+ Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
+
+ s = "\'\'\'\''a\\'";
+ news = Utils.escapeSingleQuota(s);
+ Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
+ }
+}
\ No newline at end of file
diff --git a/src/connector/python/.gitignore b/src/connector/python/.gitignore
new file mode 100644
index 0000000000..228a0b4530
--- /dev/null
+++ b/src/connector/python/.gitignore
@@ -0,0 +1,154 @@
+
+# Created by https://www.toptal.com/developers/gitignore/api/python
+# Edit at https://www.toptal.com/developers/gitignore?templates=python
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+pytestdebug.log
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+doc/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+#poetry.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+# .env
+.env/
+.venv/
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+pythonenv*
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# operating system-related files
+# file properties cache/storage on macOS
+*.DS_Store
+# thumbnail cache on Windows
+Thumbs.db
+
+# profiling data
+.prof
+
+
+# End of https://www.toptal.com/developers/gitignore/api/python
diff --git a/src/connector/python/linux/python2/LICENSE b/src/connector/python/LICENSE
similarity index 100%
rename from src/connector/python/linux/python2/LICENSE
rename to src/connector/python/LICENSE
diff --git a/src/connector/python/README.md b/src/connector/python/README.md
new file mode 100644
index 0000000000..9151e9b8f0
--- /dev/null
+++ b/src/connector/python/README.md
@@ -0,0 +1,37 @@
+# TDengine Connector for Python
+
+The [TDengine](https://github.com/taosdata/TDengine) connector for Python enables Python programs to access TDengine through an API compliant with the Python DB API 2.0 (PEP-249). It uses the TDengine C client library for client-server communication.
+
+## Install
+
+```sh
+pip install git+https://github.com/taosdata/TDengine-connector-python
+```
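+
+## Usage
+
+A minimal example (assuming a local TDengine server with the default `root`/`taosdata` account and the built-in `log` database):
+
+```python
+import taos
+
+# connect() accepts host/user/password/database keyword arguments
+conn = taos.connect(host='127.0.0.1',
+ user='root',
+ password='taosdata',
+ database='log')
+cursor = conn.cursor()
+cursor.execute("select * from log.log limit 10")
+for row in cursor: # cursors are iterable, yielding one row tuple at a time
+ print(row)
+cursor.close()
+conn.close()
+```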
+
+## Source Code
+
+The source code of this connector is hosted on [GitHub](https://github.com/taosdata/TDengine-connector-python).
+
+## License - AGPL
+
+This connector is released under the AGPL, the same license as [TDengine](https://github.com/taosdata/TDengine).
diff --git a/src/connector/python/examples/demo.py b/src/connector/python/examples/demo.py
new file mode 100644
index 0000000000..6c7c03f3e2
--- /dev/null
+++ b/src/connector/python/examples/demo.py
@@ -0,0 +1,13 @@
+import taos
+
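+# open a connection; connect() accepts host/user/password/database keyword arguments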
+conn = taos.connect(host='127.0.0.1',
+ user='root',
+ password='taosdata',
+ database='log')
+cursor = conn.cursor()
+
+sql = "select * from log.log limit 10"
+cursor.execute(sql)
+for row in cursor:
+ print(row)
diff --git a/src/connector/python/linux/python2 b/src/connector/python/linux/python2
new file mode 120000
index 0000000000..b870225aa0
--- /dev/null
+++ b/src/connector/python/linux/python2
@@ -0,0 +1 @@
+../
\ No newline at end of file
diff --git a/src/connector/python/linux/python2/README.md b/src/connector/python/linux/python2/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/linux/python2/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/linux/python2/setup.py b/src/connector/python/linux/python2/setup.py
deleted file mode 100644
index ff2d90fcb3..0000000000
--- a/src/connector/python/linux/python2/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.8",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 2",
- "Operating System :: Linux",
- ],
-)
diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py
deleted file mode 100644
index 4367947341..0000000000
--- a/src/connector/python/linux/python2/taos/cinterface.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
-
-
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
-
-
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
-
-
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
-
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
-
-
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
-
-
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
-
-
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
-
-
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
-
-
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
-
-
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
-
-
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
-
-
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
-
-
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
-
-
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
-
-
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
-
- return res
-
-
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
-
-# C interface class
-
-
-class CTaosInterface(object):
-
- libtaos = ctypes.CDLL('libtaos.so')
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
-
- def __init__(self, config=None):
- '''
- Function to initialize the class
- @host : str, hostname to connect
- @user : str, username to connect to server
- @password : str, password to connect to server
- @db : str, default db to use when log in
- @config : str, config directory
-
- @rtype : None
- '''
- if config is None:
- self._config = ctypes.c_char_p(None)
- else:
- try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
- except AttributeError:
- raise AttributeError("config is expected as a str")
-
- if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
-
- CTaosInterface.libtaos.taos_init()
-
- @property
- def config(self):
- """ Get current config
- """
- return self._config
-
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
- Function to connect to server
-
- @rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
- @rtype: 0 on success and -1 on failure
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
- @restart boolean,
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
- """
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
-
-
-if __name__ == '__main__':
- cinter = CTaosInterface()
- conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
-
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
-
- fields = CTaosInterface.useResult(result)
-
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
-
- print(data)
-
- cinter.freeResult(result)
- cinter.close(conn)
diff --git a/src/connector/python/linux/python2/taos/cursor.py b/src/connector/python/linux/python2/taos/cursor.py
deleted file mode 100644
index 4c0456b503..0000000000
--- a/src/connector/python/linux/python2/taos/cursor.py
+++ /dev/null
@@ -1,278 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-from .constants import FieldType
-
-
-class TDengineCursor(object):
- """Database cursor which is used to manage the context of a fetch operation.
-
- Attributes:
- .description: Read-only attribute consists of 7-item sequences:
-
- > name (mondatory)
- > type_code (mondatory)
- > display_size
- > internal_size
- > precision
- > scale
- > null_ok
-
- This attribute will be None for operations that do not return rows or
- if the cursor has not had an operation invoked via the .execute*() method yet.
-
- .rowcount:This read-only attribute specifies the number of rows that the last
- .execute*() produced (for DQL statements like SELECT) or affected
- """
-
- def __init__(self, connection=None):
- self._description = []
- self._rowcount = -1
- self._connection = None
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
- self._logfile = ""
-
- if connection is not None:
- self._connection = connection
-
- def __iter__(self):
- return self
-
- def next(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetch iterator")
-
- if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
- if self._block_rows == 0:
- raise StopIteration
- self._block = list(map(tuple, zip(*block)))
- self._block_iter = 0
-
- data = self._block[self._block_iter]
- self._block_iter += 1
-
- return data
-
- @property
- def description(self):
- """Return the description of the object.
- """
- return self._description
-
- @property
- def rowcount(self):
- """Return the rowcount of the object
- """
- return self._rowcount
-
- @property
- def affected_rows(self):
- """Return the affected_rows of the object
- """
- return self._affected_rows
-
- def callproc(self, procname, *args):
- """Call a stored database procedure with the given name.
-
- Void functionality since no stored procedures.
- """
- pass
-
- def log(self, logfile):
- self._logfile = logfile
-
- def close(self):
- """Close the cursor.
- """
- if self._connection is None:
- return False
-
- self._reset_result()
- self._connection = None
-
- return True
-
- def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
- if not operation:
- return None
-
- if not self._connection:
- # TODO : change the exception raised here
- raise ProgrammingError("Cursor is not connected")
-
- self._reset_result()
-
- stmt = operation
- if params is not None:
- pass
-
- # global querySeqNum
- # querySeqNum += 1
- # localSeqNum = querySeqNum # avoid raice condition
- # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
- self._result = CTaosInterface.query(self._connection._conn, stmt)
- # print(" << Query ({}) Exec Done".format(localSeqNum))
- if (self._logfile):
- with open(self._logfile, "a") as logfile:
- logfile.write("%s;\n" % operation)
-
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(
- self._result)
- return self._handle_result()
- else:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
-
- def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
- pass
-
- def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
- pass
-
- def fetchmany(self):
- pass
-
- def istype(self, col, dataType):
- if (dataType.upper() == "BOOL"):
- if (self._description[col][1] == FieldType.C_BOOL):
- return True
- if (dataType.upper() == "TINYINT"):
- if (self._description[col][1] == FieldType.C_TINYINT):
- return True
- if (dataType.upper() == "TINYINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
- return True
- if (dataType.upper() == "SMALLINT"):
- if (self._description[col][1] == FieldType.C_SMALLINT):
- return True
- if (dataType.upper() == "SMALLINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
- return True
- if (dataType.upper() == "INT"):
- if (self._description[col][1] == FieldType.C_INT):
- return True
- if (dataType.upper() == "INT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
- return True
- if (dataType.upper() == "BIGINT"):
- if (self._description[col][1] == FieldType.C_BIGINT):
- return True
- if (dataType.upper() == "BIGINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
- return True
- if (dataType.upper() == "FLOAT"):
- if (self._description[col][1] == FieldType.C_FLOAT):
- return True
- if (dataType.upper() == "DOUBLE"):
- if (self._description[col][1] == FieldType.C_DOUBLE):
- return True
- if (dataType.upper() == "BINARY"):
- if (self._description[col][1] == FieldType.C_BINARY):
- return True
- if (dataType.upper() == "TIMESTAMP"):
- if (self._description[col][1] == FieldType.C_TIMESTAMP):
- return True
- if (dataType.upper() == "NCHAR"):
- if (self._description[col][1] == FieldType.C_NCHAR):
- return True
-
- return False
-
- def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def fetchall(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def nextset(self):
- """
- """
- pass
-
- def setinputsize(self, sizes):
- pass
-
- def setutputsize(self, size, column=None):
- pass
-
- def _reset_result(self):
- """Reset the result to unused version.
- """
- self._description = []
- self._rowcount = -1
- if self._result is not None:
- CTaosInterface.freeResult(self._result)
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
-
- def _handle_result(self):
- """Handle the return result from query.
- """
- self._description = []
- for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
-
- return self._result
diff --git a/src/connector/python/linux/python3 b/src/connector/python/linux/python3
new file mode 120000
index 0000000000..b870225aa0
--- /dev/null
+++ b/src/connector/python/linux/python3
@@ -0,0 +1 @@
+../
\ No newline at end of file
diff --git a/src/connector/python/linux/python3/LICENSE b/src/connector/python/linux/python3/LICENSE
deleted file mode 100644
index 79a9d73086..0000000000
--- a/src/connector/python/linux/python3/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
- Copyright (c) 2019 TAOS Data, Inc.
-
-This program is free software: you can use, redistribute, and/or modify
-it under the terms of the GNU Affero General Public License, version 3
-or later ("AGPL"), as published by the Free Software Foundation.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
diff --git a/src/connector/python/linux/python3/README.md b/src/connector/python/linux/python3/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/linux/python3/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/linux/python3/setup.py b/src/connector/python/linux/python3/setup.py
deleted file mode 100644
index 296e79b973..0000000000
--- a/src/connector/python/linux/python3/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.7",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 3",
- "Operating System :: Linux",
- ],
-)
diff --git a/src/connector/python/linux/python3/taos/__init__.py b/src/connector/python/linux/python3/taos/__init__.py
deleted file mode 100644
index 9732635738..0000000000
--- a/src/connector/python/linux/python3/taos/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
-
-# Globals
-threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
-
-
-def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
-
- Current supporting keyword parameters:
- @dsn: Data source name as string
- @user: Username as string(optional)
- @password: Password as string(optional)
- @host: Hostname(optional)
- @database: Database name(optional)
-
- @rtype: TDengineConnector
- """
- return TDengineConnection(*args, **kwargs)
diff --git a/src/connector/python/linux/python3/taos/connection.py b/src/connector/python/linux/python3/taos/connection.py
deleted file mode 100644
index f6c395342c..0000000000
--- a/src/connector/python/linux/python3/taos/connection.py
+++ /dev/null
@@ -1,95 +0,0 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
-
-
-class TDengineConnection(object):
- """ TDengine connection object
- """
-
- def __init__(self, *args, **kwargs):
- self._conn = None
- self._host = None
- self._user = "root"
- self._password = "taosdata"
- self._database = None
- self._port = 0
- self._config = None
- self._chandle = None
-
- self.config(**kwargs)
-
- def config(self, **kwargs):
- # host
- if 'host' in kwargs:
- self._host = kwargs['host']
-
- # user
- if 'user' in kwargs:
- self._user = kwargs['user']
-
- # password
- if 'password' in kwargs:
- self._password = kwargs['password']
-
- # database
- if 'database' in kwargs:
- self._database = kwargs['database']
-
- # port
- if 'port' in kwargs:
- self._port = kwargs['port']
-
- # config
- if 'config' in kwargs:
- self._config = kwargs['config']
-
- self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
-
- def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
-
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
- if self._conn is None:
- return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
-
- def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
-
- def commit(self):
- """Commit any pending transaction to the database.
-
- Since TDengine do not support transactions, the implement is void functionality.
- """
- pass
-
- def rollback(self):
- """Void functionality
- """
- pass
-
- def clear_result_set(self):
- """Clear unused result set on this connection.
- """
- pass
-
-
-if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
- conn.close()
- print("Hello world")
diff --git a/src/connector/python/linux/python3/taos/constants.py b/src/connector/python/linux/python3/taos/constants.py
deleted file mode 100644
index 93466f5184..0000000000
--- a/src/connector/python/linux/python3/taos/constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Constants in TDengine python
-"""
-
-from .dbapi import *
-
-
-class FieldType(object):
- """TDengine Field Types
- """
- # type_code
- C_NULL = 0
- C_BOOL = 1
- C_TINYINT = 2
- C_SMALLINT = 3
- C_INT = 4
- C_BIGINT = 5
- C_FLOAT = 6
- C_DOUBLE = 7
- C_BINARY = 8
- C_TIMESTAMP = 9
- C_NCHAR = 10
- C_TINYINT_UNSIGNED = 11
- C_SMALLINT_UNSIGNED = 12
- C_INT_UNSIGNED = 13
- C_BIGINT_UNSIGNED = 14
- # NULL value definition
- # NOTE: These values should change according to C definition in tsdb.h
- C_BOOL_NULL = 0x02
- C_TINYINT_NULL = -128
- C_TINYINT_UNSIGNED_NULL = 255
- C_SMALLINT_NULL = -32768
- C_SMALLINT_UNSIGNED_NULL = 65535
- C_INT_NULL = -2147483648
- C_INT_UNSIGNED_NULL = 4294967295
- C_BIGINT_NULL = -9223372036854775808
- C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
- # Timestamp precision definition
- C_TIMESTAMP_MILLI = 0
- C_TIMESTAMP_MICRO = 1
diff --git a/src/connector/python/linux/python3/taos/dbapi.py b/src/connector/python/linux/python3/taos/dbapi.py
deleted file mode 100644
index 594681ada9..0000000000
--- a/src/connector/python/linux/python3/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
- def __com__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DataFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
diff --git a/src/connector/python/linux/python3/taos/error.py b/src/connector/python/linux/python3/taos/error.py
deleted file mode 100644
index c584badce8..0000000000
--- a/src/connector/python/linux/python3/taos/error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Python exceptions
-"""
-
-
-class Error(Exception):
- def __init__(self, msg=None, errno=None):
- self.msg = msg
- self._full_msg = self.msg
- self.errno = errno
-
- def __str__(self):
- return self._full_msg
-
-
-class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
- pass
-
-
-class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
- pass
-
-
-class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
- pass
-
-
-class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
- pass
-
-
-class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
- pass
-
-
-class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
- pass
-
-
-class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
- pass
-
-
-class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
- pass
-
-
-class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
- pass
diff --git a/src/connector/python/linux/python3/taos/subscription.py b/src/connector/python/linux/python3/taos/subscription.py
deleted file mode 100644
index 270d9de092..0000000000
--- a/src/connector/python/linux/python3/taos/subscription.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-
-
-class TDengineSubscription(object):
- """TDengine subscription object
- """
-
- def __init__(self, sub):
- self._sub = sub
-
- def consume(self):
- """Consume rows of a subscription
- """
- if self._sub is None:
- raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
-
- def close(self, keepProgress=True):
- """Close the Subscription.
- """
- if self._sub is None:
- return False
-
- CTaosInterface.unsubscribe(self._sub, keepProgress)
- return True
-
-
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(True, "test", "select * from meters;", 1000)
-
- for i in range(0, 10):
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
diff --git a/src/connector/python/osx/python3 b/src/connector/python/osx/python3
new file mode 120000
index 0000000000..b870225aa0
--- /dev/null
+++ b/src/connector/python/osx/python3
@@ -0,0 +1 @@
+../
\ No newline at end of file
diff --git a/src/connector/python/osx/python3/LICENSE b/src/connector/python/osx/python3/LICENSE
deleted file mode 100644
index 79a9d73086..0000000000
--- a/src/connector/python/osx/python3/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
- Copyright (c) 2019 TAOS Data, Inc.
-
-This program is free software: you can use, redistribute, and/or modify
-it under the terms of the GNU Affero General Public License, version 3
-or later ("AGPL"), as published by the Free Software Foundation.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
diff --git a/src/connector/python/osx/python3/README.md b/src/connector/python/osx/python3/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/osx/python3/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/osx/python3/setup.py b/src/connector/python/osx/python3/setup.py
deleted file mode 100644
index 9bce1a976f..0000000000
--- a/src/connector/python/osx/python3/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.7",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 3",
- "Operating System :: MacOS X",
- ],
-)
diff --git a/src/connector/python/osx/python3/taos/__init__.py b/src/connector/python/osx/python3/taos/__init__.py
deleted file mode 100644
index 9732635738..0000000000
--- a/src/connector/python/osx/python3/taos/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
-
-# Globals
-threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
-
-
-def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
-
- Current supporting keyword parameters:
- @dsn: Data source name as string
- @user: Username as string(optional)
- @password: Password as string(optional)
- @host: Hostname(optional)
- @database: Database name(optional)
-
- @rtype: TDengineConnector
- """
- return TDengineConnection(*args, **kwargs)
diff --git a/src/connector/python/osx/python3/taos/cinterface.py b/src/connector/python/osx/python3/taos/cinterface.py
deleted file mode 100644
index dca9bd42e8..0000000000
--- a/src/connector/python/osx/python3/taos/cinterface.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
-
-
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
-
-
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
-
-
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
-
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
-
-
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
-
-
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
-
-
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
-
-
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
-
-
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
-
-
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
-
-
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
-
-
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
-
-
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
-
-
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
-
-
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
-
- return res
-
-
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
-
-# C interface class
-
-
-class CTaosInterface(object):
-
- libtaos = ctypes.CDLL('libtaos.dylib')
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
-
- def __init__(self, config=None):
- '''
- Function to initialize the class
- @host : str, hostname to connect
- @user : str, username to connect to server
- @password : str, password to connect to server
- @db : str, default db to use when log in
- @config : str, config directory
-
- @rtype : None
- '''
- if config is None:
- self._config = ctypes.c_char_p(None)
- else:
- try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
- except AttributeError:
- raise AttributeError("config is expected as a str")
-
- if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
-
- CTaosInterface.libtaos.taos_init()
-
- @property
- def config(self):
- """ Get current config
- """
- return self._config
-
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
- Function to connect to server
-
- @rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
- @rtype: 0 on success and -1 on failure
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
- @restart boolean,
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
- """
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
-
-
-if __name__ == '__main__':
- cinter = CTaosInterface()
- conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
-
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
-
- fields = CTaosInterface.useResult(result)
-
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
-
- print(data)
-
- cinter.freeResult(result)
- cinter.close(conn)
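
Editor's note: the 642 lines removed above are essentially the Linux connector with one difference, the ctypes.CDLL('libtaos.dylib') load, which is exactly what this change replaces with runtime platform detection. The _crow_*_to_python converters that dominate the file all follow one pattern: cast the raw column pointer to a typed array, then map the column type's NULL sentinel to None. A minimal, self-contained sketch of that pattern, using a synthetic buffer rather than a real TDengine result:

    import ctypes

    C_INT_NULL = -2147483648  # NULL sentinel for a signed 4-byte INT column

    def int_column_to_python(data, num_of_rows):
        # Cast the opaque column pointer to c_int[] and replace sentinels.
        arr = ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]
        return [None if ele == C_INT_NULL else ele for ele in arr]

    buf = (ctypes.c_int * 3)(1, C_INT_NULL, 3)
    print(int_column_to_python(ctypes.cast(buf, ctypes.c_void_p), 3))  # [1, None, 3]
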
diff --git a/src/connector/python/osx/python3/taos/connection.py b/src/connector/python/osx/python3/taos/connection.py
deleted file mode 100644
index f6c395342c..0000000000
--- a/src/connector/python/osx/python3/taos/connection.py
+++ /dev/null
@@ -1,95 +0,0 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
-
-
-class TDengineConnection(object):
- """ TDengine connection object
- """
-
- def __init__(self, *args, **kwargs):
- self._conn = None
- self._host = None
- self._user = "root"
- self._password = "taosdata"
- self._database = None
- self._port = 0
- self._config = None
- self._chandle = None
-
- self.config(**kwargs)
-
- def config(self, **kwargs):
- # host
- if 'host' in kwargs:
- self._host = kwargs['host']
-
- # user
- if 'user' in kwargs:
- self._user = kwargs['user']
-
- # password
- if 'password' in kwargs:
- self._password = kwargs['password']
-
- # database
- if 'database' in kwargs:
- self._database = kwargs['database']
-
- # port
- if 'port' in kwargs:
- self._port = kwargs['port']
-
- # config
- if 'config' in kwargs:
- self._config = kwargs['config']
-
- self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
-
- def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
-
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
- if self._conn is None:
- return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
-
- def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
-
- def commit(self):
- """Commit any pending transaction to the database.
-
- Since TDengine do not support transactions, the implement is void functionality.
- """
- pass
-
- def rollback(self):
- """Void functionality
- """
- pass
-
- def clear_result_set(self):
- """Clear unused result set on this connection.
- """
- pass
-
-
-if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
- conn.close()
- print("Hello world")
diff --git a/src/connector/python/osx/python3/taos/constants.py b/src/connector/python/osx/python3/taos/constants.py
deleted file mode 100644
index 93466f5184..0000000000
--- a/src/connector/python/osx/python3/taos/constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Constants in TDengine python
-"""
-
-from .dbapi import *
-
-
-class FieldType(object):
- """TDengine Field Types
- """
- # type_code
- C_NULL = 0
- C_BOOL = 1
- C_TINYINT = 2
- C_SMALLINT = 3
- C_INT = 4
- C_BIGINT = 5
- C_FLOAT = 6
- C_DOUBLE = 7
- C_BINARY = 8
- C_TIMESTAMP = 9
- C_NCHAR = 10
- C_TINYINT_UNSIGNED = 11
- C_SMALLINT_UNSIGNED = 12
- C_INT_UNSIGNED = 13
- C_BIGINT_UNSIGNED = 14
- # NULL value definition
- # NOTE: These values should change according to C definition in tsdb.h
- C_BOOL_NULL = 0x02
- C_TINYINT_NULL = -128
- C_TINYINT_UNSIGNED_NULL = 255
- C_SMALLINT_NULL = -32768
- C_SMALLINT_UNSIGNED_NULL = 65535
- C_INT_NULL = -2147483648
- C_INT_UNSIGNED_NULL = 4294967295
- C_BIGINT_NULL = -9223372036854775808
- C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
- # Timestamp precision definition
- C_TIMESTAMP_MILLI = 0
- C_TIMESTAMP_MICRO = 1
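
Editor's note: one subtlety in these constants is that the float and double NULL sentinels are NaN, and NaN never compares equal to anything, itself included. That is why the row converters test math.isnan(ele) rather than compare against C_FLOAT_NULL. A two-line illustration:

    import math

    C_FLOAT_NULL = float('nan')
    print(C_FLOAT_NULL == C_FLOAT_NULL)  # False: equality cannot detect a NaN sentinel
    print(math.isnan(C_FLOAT_NULL))      # True: the test the converters actually use
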
diff --git a/src/connector/python/osx/python3/taos/cursor.py b/src/connector/python/osx/python3/taos/cursor.py
deleted file mode 100644
index 32dc0ea3c3..0000000000
--- a/src/connector/python/osx/python3/taos/cursor.py
+++ /dev/null
@@ -1,280 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-from .constants import FieldType
-
-# querySeqNum = 0
-
-
-class TDengineCursor(object):
- """Database cursor which is used to manage the context of a fetch operation.
-
- Attributes:
- .description: Read-only attribute consists of 7-item sequences:
-
- > name (mondatory)
- > type_code (mondatory)
- > display_size
- > internal_size
- > precision
- > scale
- > null_ok
-
- This attribute will be None for operations that do not return rows or
- if the cursor has not had an operation invoked via the .execute*() method yet.
-
- .rowcount:This read-only attribute specifies the number of rows that the last
- .execute*() produced (for DQL statements like SELECT) or affected
- """
-
- def __init__(self, connection=None):
- self._description = []
- self._rowcount = -1
- self._connection = None
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
- self._logfile = ""
-
- if connection is not None:
- self._connection = connection
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetch iterator")
-
- if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
- if self._block_rows == 0:
- raise StopIteration
- self._block = list(map(tuple, zip(*block)))
- self._block_iter = 0
-
- data = self._block[self._block_iter]
- self._block_iter += 1
-
- return data
-
- @property
- def description(self):
- """Return the description of the object.
- """
- return self._description
-
- @property
- def rowcount(self):
- """Return the rowcount of the object
- """
- return self._rowcount
-
- @property
- def affected_rows(self):
- """Return the rowcount of insertion
- """
- return self._affected_rows
-
- def callproc(self, procname, *args):
- """Call a stored database procedure with the given name.
-
- Void functionality since no stored procedures.
- """
- pass
-
- def log(self, logfile):
- self._logfile = logfile
-
- def close(self):
- """Close the cursor.
- """
- if self._connection is None:
- return False
-
- self._reset_result()
- self._connection = None
-
- return True
-
- def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
- if not operation:
- return None
-
- if not self._connection:
- # TODO : change the exception raised here
- raise ProgrammingError("Cursor is not connected")
-
- self._reset_result()
-
- stmt = operation
- if params is not None:
- pass
-
- # global querySeqNum
- # querySeqNum += 1
- # localSeqNum = querySeqNum # avoid raice condition
- # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
- self._result = CTaosInterface.query(self._connection._conn, stmt)
- # print(" << Query ({}) Exec Done".format(localSeqNum))
- if (self._logfile):
- with open(self._logfile, "a") as logfile:
- logfile.write("%s;\n" % operation)
-
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(
- self._result)
- return self._handle_result()
- else:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
-
- def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
- pass
-
- def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
- pass
-
- def fetchmany(self):
- pass
-
- def istype(self, col, dataType):
- if (dataType.upper() == "BOOL"):
- if (self._description[col][1] == FieldType.C_BOOL):
- return True
- if (dataType.upper() == "TINYINT"):
- if (self._description[col][1] == FieldType.C_TINYINT):
- return True
- if (dataType.upper() == "TINYINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
- return True
- if (dataType.upper() == "SMALLINT"):
- if (self._description[col][1] == FieldType.C_SMALLINT):
- return True
- if (dataType.upper() == "SMALLINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
- return True
- if (dataType.upper() == "INT"):
- if (self._description[col][1] == FieldType.C_INT):
- return True
- if (dataType.upper() == "INT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
- return True
- if (dataType.upper() == "BIGINT"):
- if (self._description[col][1] == FieldType.C_BIGINT):
- return True
- if (dataType.upper() == "BIGINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
- return True
- if (dataType.upper() == "FLOAT"):
- if (self._description[col][1] == FieldType.C_FLOAT):
- return True
- if (dataType.upper() == "DOUBLE"):
- if (self._description[col][1] == FieldType.C_DOUBLE):
- return True
- if (dataType.upper() == "BINARY"):
- if (self._description[col][1] == FieldType.C_BINARY):
- return True
- if (dataType.upper() == "TIMESTAMP"):
- if (self._description[col][1] == FieldType.C_TIMESTAMP):
- return True
- if (dataType.upper() == "NCHAR"):
- if (self._description[col][1] == FieldType.C_NCHAR):
- return True
-
- return False
-
- def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def fetchall(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def nextset(self):
- """
- """
- pass
-
- def setinputsize(self, sizes):
- pass
-
- def setutputsize(self, size, column=None):
- pass
-
- def _reset_result(self):
- """Reset the result to unused version.
- """
- self._description = []
- self._rowcount = -1
- if self._result is not None:
- CTaosInterface.freeResult(self._result)
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
-
- def _handle_result(self):
- """Handle the return result from query.
- """
- self._description = []
- for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
-
- return self._result
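
Editor's note: besides fetchall(), the cursor is its own iterator (__iter__/__next__ above), so rows can be consumed on demand via fetchRow instead of materializing the whole result set; fetchall() drains whole blocks via fetchBlock and is the faster bulk path, while fetchall_row() makes one C call per row. A streaming sketch; the table name is a placeholder and a reachable server is assumed:

    import taos

    conn = taos.connect(host="127.0.0.1", database="test")
    cursor = conn.cursor()
    cursor.execute("select * from meters")  # `meters` is a placeholder table
    for row in cursor:                      # rows arrive lazily via __next__
        print(row)
    conn.close()
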
diff --git a/src/connector/python/osx/python3/taos/dbapi.py b/src/connector/python/osx/python3/taos/dbapi.py
deleted file mode 100644
index 594681ada9..0000000000
--- a/src/connector/python/osx/python3/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
- def __com__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DataFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
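
Editor's note: this file is the PEP 249 "Type Objects and Constructors" boilerplate, repeated verbatim in every platform copy; one long-standing wart is that DataFromTicks misspells the DateFromTicks name the spec prescribes. The constructors are thin wrappers over time.localtime; the timestamp one, for instance, is equivalent to:

    import time
    import datetime

    def TimestampFromTicks(ticks):
        # Seconds since the epoch -> naive local datetime, as above
        return datetime.datetime(*time.localtime(ticks)[:6])

    print(TimestampFromTicks(time.time()))
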
diff --git a/src/connector/python/osx/python3/taos/error.py b/src/connector/python/osx/python3/taos/error.py
deleted file mode 100644
index c584badce8..0000000000
--- a/src/connector/python/osx/python3/taos/error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Python exceptions
-"""
-
-
-class Error(Exception):
- def __init__(self, msg=None, errno=None):
- self.msg = msg
- self._full_msg = self.msg
- self.errno = errno
-
- def __str__(self):
- return self._full_msg
-
-
-class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
- pass
-
-
-class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
- pass
-
-
-class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
- pass
-
-
-class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
- pass
-
-
-class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
- pass
-
-
-class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
- pass
-
-
-class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
- pass
-
-
-class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
- pass
-
-
-class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
- pass
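
Editor's note: the module above mirrors the PEP 249 exception hierarchy. Every concrete error derives from Error, so one handler can trap the whole family while still reading the driver's message and error number. A reduced sketch of the same shape (the errno value is a placeholder):

    class Error(Exception):
        def __init__(self, msg=None, errno=None):
            self.msg = msg
            self.errno = errno

    class DatabaseError(Error):
        pass

    class ProgrammingError(DatabaseError):
        pass

    try:
        raise ProgrammingError("invalid SQL", 534)  # placeholder errno
    except Error as err:  # one handler covers the whole hierarchy
        print(err.msg, err.errno)
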
diff --git a/src/connector/python/osx/python3/taos/subscription.py b/src/connector/python/osx/python3/taos/subscription.py
deleted file mode 100644
index 270d9de092..0000000000
--- a/src/connector/python/osx/python3/taos/subscription.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-
-
-class TDengineSubscription(object):
- """TDengine subscription object
- """
-
- def __init__(self, sub):
- self._sub = sub
-
- def consume(self):
- """Consume rows of a subscription
- """
- if self._sub is None:
- raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
-
- def close(self, keepProgress=True):
- """Close the Subscription.
- """
- if self._sub is None:
- return False
-
- CTaosInterface.unsubscribe(self._sub, keepProgress)
- return True
-
-
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(True, "test", "select * from meters;", 1000)
-
- for i in range(0, 10):
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
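
Editor's note: consume() polls at the interval passed to subscribe(), and close(keepProgress=True) asks the server to retain the read position so a later subscribe with restart=False can resume where consumption stopped. A usage sketch with placeholder host, topic, and query:

    import taos

    conn = taos.connect(host="127.0.0.1", database="test")
    sub = conn.subscribe(True, "demo_topic", "select * from meters;", 1000)
    try:
        for _ in range(10):
            for row in sub.consume():    # polls at the 1000 ms interval
                print(row)
    finally:
        sub.close(keepProgress=True)     # server keeps the subscription progress
        conn.close()
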
diff --git a/src/connector/python/setup.py b/src/connector/python/setup.py
new file mode 100644
index 0000000000..901e8396c0
--- /dev/null
+++ b/src/connector/python/setup.py
@@ -0,0 +1,33 @@
+import setuptools
+
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+
+setuptools.setup(
+ name="taos",
+ version="2.0.10",
+ author="Taosdata Inc.",
+ author_email="support@taosdata.com",
+ description="TDengine python client package",
+ long_description=long_description,
+ long_description_content_type="text/markdown",
+ url="https://github.com/taosdata/TDengine/tree/develop/src/connector/python",
+ packages=setuptools.find_packages(),
+ classifiers=[
+ "Environment :: Console",
+ "Environment :: MacOS X",
+ "Environment :: Win32 (MS Windows)",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
+ "Operating System :: MacOS",
+ "Programming Language :: Python :: 2.7",
+ "Operating System :: Linux",
+ "Operating System :: POSIX :: Linux",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: Microsoft :: Windows :: Windows 10",
+ ],
+)
diff --git a/src/connector/python/linux/python2/taos/__init__.py b/src/connector/python/taos/__init__.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/__init__.py
rename to src/connector/python/taos/__init__.py
diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/taos/cinterface.py
similarity index 70%
rename from src/connector/python/linux/python3/taos/cinterface.py
rename to src/connector/python/taos/cinterface.py
index 4367947341..b8824327b0 100644
--- a/src/connector/python/linux/python3/taos/cinterface.py
+++ b/src/connector/python/taos/cinterface.py
@@ -3,6 +3,8 @@ from .constants import FieldType
from .error import *
import math
import datetime
+import platform
+import sys
def _convert_millisecond_to_datetime(milli):
@@ -20,40 +21,28 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
if micro:
_timestamp_converter = _convert_microsecond_to_datetime
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C bool row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_byte))[
+ :abs(num_of_rows)]]
def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C tinyint row to python row
"""
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
+ return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
def _crow_tinyint_unsigned_to_python(
@@ -63,92 +52,56 @@ def _crow_tinyint_unsigned_to_python(
micro=False):
"""Function to convert C tinyint row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_ubyte))[
+ :abs(num_of_rows)]]
def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C smallint row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_short))[
+ :abs(num_of_rows)]]
def _crow_smallint_unsigned_to_python(
data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C smallint row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_ushort))[
+ :abs(num_of_rows)]]
def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C int row to python row
"""
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
+ return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C int row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_uint))[
+ :abs(num_of_rows)]]
def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C bigint row to python row
"""
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
+ return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bigint_unsigned_to_python(
@@ -158,52 +111,33 @@ def _crow_bigint_unsigned_to_python(
micro=False):
"""Function to convert C bigint row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_uint64))[
+ :abs(num_of_rows)]]
def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C float row to python row
"""
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
+ return [None if math.isnan(ele) else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C double row to python row
"""
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
+ return [None if math.isnan(ele) else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C binary row to python row
"""
assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
+ return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
+ 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
@@ -230,30 +164,17 @@ def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
"""
assert(nbytes is not None)
res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
+ for i in range(abs(num_of_rows)):
+ try:
+ rbyte = ctypes.cast(
+ data + nbytes * i,
+ ctypes.POINTER(
+ ctypes.c_short))[
+ :1].pop()
+ tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
+ res.append(tmpstr.value.decode()[0:rbyte])
+ except ValueError:
+ res.append(None)
return res
@@ -262,20 +183,12 @@ def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
"""
assert(nbytes is not None)
res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
+ for i in range(abs(num_of_rows)):
+ try:
+ tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
+ res.append(tmpstr.value.decode())
+ except ValueError:
+ res.append(None)
return res
@@ -324,14 +237,38 @@ class TaosField(ctypes.Structure):
# C interface class
+def _load_taos_linux():
+ return ctypes.CDLL('libtaos.so')
+
+
+def _load_taos_darwin():
+ return ctypes.CDLL('libtaos.dylib')
+
+
+def _load_taos_windows():
+ return ctypes.windll.LoadLibrary('taos')
+
+
+def _load_taos():
+ load_func = {
+ 'Linux': _load_taos_linux,
+ 'Darwin': _load_taos_darwin,
+ 'Windows': _load_taos_windows,
+ }
+ try:
+ return load_func[platform.system()]()
+ except KeyError:
+ sys.exit('unsupported platform for TDengine connector')
+
+
class CTaosInterface(object):
- libtaos = ctypes.CDLL('libtaos.so')
+ libtaos = _load_taos()
libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
libtaos.taos_init.restype = None
libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
+ # libtaos.taos_use_result.restype = ctypes.c_void_p
libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
libtaos.taos_errstr.restype = ctypes.c_char_p
libtaos.taos_subscribe.restype = ctypes.c_void_p
@@ -432,7 +369,7 @@ class CTaosInterface(object):
'''Close the TDengine handle
'''
CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
+ # print('connection is closed')
@staticmethod
def query(connection, sql):
diff --git a/src/connector/python/linux/python2/taos/connection.py b/src/connector/python/taos/connection.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/connection.py
rename to src/connector/python/taos/connection.py
diff --git a/src/connector/python/linux/python2/taos/constants.py b/src/connector/python/taos/constants.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/constants.py
rename to src/connector/python/taos/constants.py
diff --git a/src/connector/python/linux/python3/taos/cursor.py b/src/connector/python/taos/cursor.py
similarity index 98%
rename from src/connector/python/linux/python3/taos/cursor.py
rename to src/connector/python/taos/cursor.py
index 32dc0ea3c3..d443ec95d0 100644
--- a/src/connector/python/linux/python3/taos/cursor.py
+++ b/src/connector/python/taos/cursor.py
@@ -45,6 +45,12 @@ class TDengineCursor(object):
return self
def __next__(self):
+ return self._taos_next()
+
+ def next(self):
+ return self._taos_next()
+
+ def _taos_next(self):
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetch iterator")
diff --git a/src/connector/python/linux/python2/taos/dbapi.py b/src/connector/python/taos/dbapi.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/dbapi.py
rename to src/connector/python/taos/dbapi.py
diff --git a/src/connector/python/linux/python2/taos/error.py b/src/connector/python/taos/error.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/error.py
rename to src/connector/python/taos/error.py
diff --git a/src/connector/python/linux/python2/taos/subscription.py b/src/connector/python/taos/subscription.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/subscription.py
rename to src/connector/python/taos/subscription.py
diff --git a/src/connector/python/windows/python2 b/src/connector/python/windows/python2
new file mode 120000
index 0000000000..b870225aa0
--- /dev/null
+++ b/src/connector/python/windows/python2
@@ -0,0 +1 @@
+../
\ No newline at end of file
diff --git a/src/connector/python/windows/python2/LICENSE b/src/connector/python/windows/python2/LICENSE
deleted file mode 100644
index 79a9d73086..0000000000
--- a/src/connector/python/windows/python2/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
- Copyright (c) 2019 TAOS Data, Inc.
-
-This program is free software: you can use, redistribute, and/or modify
-it under the terms of the GNU Affero General Public License, version 3
-or later ("AGPL"), as published by the Free Software Foundation.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
diff --git a/src/connector/python/windows/python2/README.md b/src/connector/python/windows/python2/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/windows/python2/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/windows/python2/setup.py b/src/connector/python/windows/python2/setup.py
deleted file mode 100644
index 47d374fe67..0000000000
--- a/src/connector/python/windows/python2/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.7",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 2",
- "Operating System :: Windows",
- ],
-)
diff --git a/src/connector/python/windows/python2/taos/__init__.py b/src/connector/python/windows/python2/taos/__init__.py
deleted file mode 100644
index 9732635738..0000000000
--- a/src/connector/python/windows/python2/taos/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
-
-# Globals
-threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
-
-
-def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
-
- Current supporting keyword parameters:
- @dsn: Data source name as string
- @user: Username as string(optional)
- @password: Password as string(optional)
- @host: Hostname(optional)
- @database: Database name(optional)
-
- @rtype: TDengineConnector
- """
- return TDengineConnection(*args, **kwargs)
diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py
deleted file mode 100644
index ec72474df9..0000000000
--- a/src/connector/python/windows/python2/taos/cinterface.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
-
-
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
-
-
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
-
-
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
-
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
-
-
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
-
-
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
-
-
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
-
-
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
-
-
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
-
-
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
-
-
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
-
-
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
-
-
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
-
-
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
-
-
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
-
- return res
-
-
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
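Both tables key a converter by the numeric type code that taos_fetch_fields() reports for each column; fetchRow() dispatches through _CONVERT_FUNC and fetchBlock() through _CONVERT_FUNC_BLOCK, which differ only in the BINARY and NCHAR entries because block buffers carry a 2-byte length prefix per cell. A sketch of the dispatch step with stub converters (the names here are illustrative, not part of this module):

C_INT, C_FLOAT = 4, 6  # type codes as in FieldType

def stub_converter(data, num_of_rows, nbytes=None, micro=False):
    return list(data)[:num_of_rows]  # placeholder for a real _crow_* function

CONVERT = {C_INT: stub_converter, C_FLOAT: stub_converter}

def convert_column(type_code, data, num_of_rows, nbytes, micro):
    if type_code not in CONVERT:
        raise ValueError("Invalid data type returned from database")
    return CONVERT[type_code](data, num_of_rows, nbytes, micro)

print(convert_column(C_INT, [1, 2, 3], 3, 4, False))  # [1, 2, 3]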
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
-
-# C interface class
-
-
-class CTaosInterface(object):
-
- libtaos = ctypes.windll.LoadLibrary('taos')
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
-
- def __init__(self, config=None):
- '''
- Function to initialize the class
- @config : str, config directory
-
- @rtype : None
- '''
- if config is None:
- self._config = ctypes.c_char_p(None)
- else:
- try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
- except AttributeError:
- raise AttributeError("config is expected as a str")
-
- if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
-
- CTaosInterface.libtaos.taos_init()
-
- @property
- def config(self):
- """ Get current config
- """
- return self._config
-
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
- Function to connect to server
-
- @rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
-        @rtype: c_void_p, TDengine result handle
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
-        @restart boolean, restart the subscription from the beginning instead of resuming saved progress
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
- """
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
-
-
-if __name__ == '__main__':
- cinter = CTaosInterface()
- conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
-
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
-
- fields = CTaosInterface.useResult(result)
-
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
-
- print(data)
-
- cinter.freeResult(result)
- cinter.close(conn)
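Note that ctypes.windll only exists on Windows, which is what ties this copy of cinterface.py to the windows/ tree; the Linux variants of the connector load the same symbols through ctypes.CDLL. A hedged sketch of a platform-neutral loader (the library file names are the conventional defaults, not taken from this diff):

import ctypes
import platform

def load_libtaos():
    system = platform.system()
    if system == 'Windows':
        # stdcall loader, as used above
        return ctypes.windll.LoadLibrary('taos')
    if system == 'Darwin':
        return ctypes.CDLL('libtaos.dylib')
    return ctypes.CDLL('libtaos.so')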
diff --git a/src/connector/python/windows/python2/taos/connection.py b/src/connector/python/windows/python2/taos/connection.py
deleted file mode 100644
index 5729d01c6d..0000000000
--- a/src/connector/python/windows/python2/taos/connection.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
-
-
-class TDengineConnection(object):
- """ TDengine connection object
- """
-
- def __init__(self, *args, **kwargs):
- self._conn = None
- self._host = None
- self._user = "root"
- self._password = "taosdata"
- self._database = None
- self._port = 0
- self._config = None
- self._chandle = None
-
- if len(kwargs) > 0:
- self.config(**kwargs)
-
- def config(self, **kwargs):
- # host
- if 'host' in kwargs:
- self._host = kwargs['host']
-
- # user
- if 'user' in kwargs:
- self._user = kwargs['user']
-
- # password
- if 'password' in kwargs:
- self._password = kwargs['password']
-
- # database
- if 'database' in kwargs:
- self._database = kwargs['database']
-
- # port
- if 'port' in kwargs:
- self._port = kwargs['port']
-
- # config
- if 'config' in kwargs:
- self._config = kwargs['config']
-
- self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
-
- def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
-
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
- if self._conn is None:
- return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
-
- def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
-
- def commit(self):
- """Commit any pending transaction to the database.
-
-        Since TDengine does not support transactions, this is a no-op.
- """
- pass
-
- def rollback(self):
- """Void functionality
- """
- pass
-
- def clear_result_set(self):
- """Clear unused result set on this connection.
- """
- pass
-
-
-if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
- conn.close()
- print("Hello world")
diff --git a/src/connector/python/windows/python2/taos/constants.py b/src/connector/python/windows/python2/taos/constants.py
deleted file mode 100644
index 8a8011c3e3..0000000000
--- a/src/connector/python/windows/python2/taos/constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Constants in TDengine python
-"""
-
-from .dbapi import *
-
-
-class FieldType(object):
- """TDengine Field Types
- """
- # type_code
- C_NULL = 0
- C_BOOL = 1
- C_TINYINT = 2
- C_SMALLINT = 3
- C_INT = 4
- C_BIGINT = 5
- C_FLOAT = 6
- C_DOUBLE = 7
- C_BINARY = 8
- C_TIMESTAMP = 9
- C_NCHAR = 10
- C_TINYINT_UNSIGNED = 11
- C_SMALLINT_UNSIGNED = 12
- C_INT_UNSIGNED = 13
- C_BIGINT_UNSIGNED = 14
- # NULL value definition
-    # NOTE: These values must be kept in sync with the C definitions in tsdb.h
- C_BOOL_NULL = 0x02
- C_TINYINT_NULL = -128
- C_TINYINT_UNSIGNED_NULL = 255
- C_SMALLINT_NULL = -32768
- C_SMALLINT_UNSIGNED_NULL = 65535
- C_INT_NULL = -2147483648
- C_INT_UNSIGNED_NULL = 4294967295
- C_BIGINT_NULL = -9223372036854775808
- C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
- # Time precision definition
- C_TIMESTAMP_MILLI = 0
- C_TIMESTAMP_MICRO = 1
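The float and double NULL markers deserve care: NaN never compares equal to anything, itself included, so an == test against C_FLOAT_NULL can never succeed and the converters use math.isnan() instead. Illustrated:

import math

C_FLOAT_NULL = float('nan')
print(C_FLOAT_NULL == C_FLOAT_NULL)  # False: NaN != NaN, even to itself
print(math.isnan(C_FLOAT_NULL))      # True: the reliable NULL test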
diff --git a/src/connector/python/windows/python2/taos/cursor.py b/src/connector/python/windows/python2/taos/cursor.py
deleted file mode 100644
index 5f4666b593..0000000000
--- a/src/connector/python/windows/python2/taos/cursor.py
+++ /dev/null
@@ -1,220 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-from .constants import FieldType
-
-# querySeqNum = 0
-
-
-class TDengineCursor(object):
- """Database cursor which is used to manage the context of a fetch operation.
-
- Attributes:
- .description: Read-only attribute consists of 7-item sequences:
-
-        > name (mandatory)
-        > type_code (mandatory)
- > display_size
- > internal_size
- > precision
- > scale
- > null_ok
-
- This attribute will be None for operations that do not return rows or
- if the cursor has not had an operation invoked via the .execute*() method yet.
-
-        .rowcount: This read-only attribute specifies the number of rows that the last
-            .execute*() produced (for DQL statements like SELECT) or affected (for DML statements like INSERT).
- """
-
- def __init__(self, connection=None):
- self._description = []
- self._rowcount = -1
- self._connection = None
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
- self._logfile = ""
-
- if connection is not None:
- self._connection = connection
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetch iterator")
-
- if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
- if self._block_rows == 0:
- raise StopIteration
- self._block = list(map(tuple, zip(*block)))
- self._block_iter = 0
-
- data = self._block[self._block_iter]
- self._block_iter += 1
-
- return data
-
- @property
- def description(self):
- """Return the description of the object.
- """
- return self._description
-
- @property
- def rowcount(self):
- """Return the rowcount of the object
- """
- return self._rowcount
-
- @property
- def affected_rows(self):
- """Return the affected_rows of the object
- """
- return self._affected_rows
-
- def callproc(self, procname, *args):
- """Call a stored database procedure with the given name.
-
- Void functionality since no stored procedures.
- """
- pass
-
- def close(self):
- """Close the cursor.
- """
- if self._connection is None:
- return False
-
- self._reset_result()
- self._connection = None
-
- return True
-
- def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
- if not operation:
- return None
-
- if not self._connection:
- # TODO : change the exception raised here
- raise ProgrammingError("Cursor is not connected")
-
- self._reset_result()
-
- stmt = operation
- if params is not None:
- pass
-
- self._result = CTaosInterface.query(self._connection._conn, stmt)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(self._result)
- return self._handle_result()
- else:
- raise ProgrammingError(CTaosInterface.errStr(self._result), errno)
-
- def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
- pass
-
- def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
- pass
-
- def fetchmany(self):
- pass
-
- def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def fetchall(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
-
- return list(map(tuple, zip(*buffer)))
-
- def nextset(self):
- """
- """
- pass
-
-    def setinputsizes(self, sizes):
-        pass
-
-    def setoutputsize(self, size, column=None):
-        pass
-
- def _reset_result(self):
- """Reset the result to unused version.
- """
- self._description = []
- self._rowcount = -1
- if self._result is not None:
- CTaosInterface.freeResult(self._result)
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
-
- def _handle_result(self):
- """Handle the return result from query.
- """
- self._description = []
- for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
-
- return self._result
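Because __iter__/__next__ page through CTaosInterface.fetchRow(), a cursor can be consumed row by row without materializing the whole result set the way fetchall() does. A usage sketch (the connection parameters are the shipped defaults):

import taos

conn = taos.connect(host='127.0.0.1', user='root', password='taosdata')
cursor = conn.cursor()
cursor.execute('show databases')
for row in cursor:  # drives __next__, one fetchRow block at a time
    print(row)
cursor.close()
conn.close()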
diff --git a/src/connector/python/windows/python2/taos/dbapi.py b/src/connector/python/windows/python2/taos/dbapi.py
deleted file mode 100644
index 594681ada9..0000000000
--- a/src/connector/python/windows/python2/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
-    def __cmp__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DateFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
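These are the PEP 249 type constructors: each converts a POSIX timestamp into the matching datetime object through time.localtime(). For example:

import time
import datetime

def TimestampFromTicks(ticks):
    return datetime.datetime(*time.localtime(ticks)[:6])

print(TimestampFromTicks(0))            # 1970-01-01 ... in the local zone
print(TimestampFromTicks(time.time()))  # roughly "now", second precision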
diff --git a/src/connector/python/windows/python2/taos/error.py b/src/connector/python/windows/python2/taos/error.py
deleted file mode 100644
index c584badce8..0000000000
--- a/src/connector/python/windows/python2/taos/error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Python exceptions
-"""
-
-
-class Error(Exception):
- def __init__(self, msg=None, errno=None):
- self.msg = msg
- self._full_msg = self.msg
- self.errno = errno
-
- def __str__(self):
- return self._full_msg
-
-
-class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
- pass
-
-
-class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
- pass
-
-
-class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
- pass
-
-
-class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
- pass
-
-
-class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
- pass
-
-
-class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
- pass
-
-
-class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
- pass
-
-
-class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
- pass
-
-
-class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
- pass
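Everything concrete here derives from Error via DatabaseError, so callers can catch narrowly or broadly; cursor.execute() raises ProgrammingError carrying the taos_errstr message and the taos_errno code. A handling sketch (the cursor is assumed to come from a connection as above):

from taos.error import ProgrammingError, DatabaseError

try:
    cursor.execute('select * from no_such_table')
except ProgrammingError as e:
    print('query failed: {} (errno {})'.format(e.msg, e.errno))
except DatabaseError as e:
    print('database error: {}'.format(e))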
diff --git a/src/connector/python/windows/python2/taos/subscription.py b/src/connector/python/windows/python2/taos/subscription.py
deleted file mode 100644
index 270d9de092..0000000000
--- a/src/connector/python/windows/python2/taos/subscription.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-
-
-class TDengineSubscription(object):
- """TDengine subscription object
- """
-
- def __init__(self, sub):
- self._sub = sub
-
- def consume(self):
- """Consume rows of a subscription
- """
- if self._sub is None:
- raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
-
- def close(self, keepProgress=True):
- """Close the Subscription.
- """
- if self._sub is None:
- return False
-
- CTaosInterface.unsubscribe(self._sub, keepProgress)
- return True
-
-
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(True, "test", "select * from meters;", 1000)
-
- for i in range(0, 10):
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
diff --git a/src/connector/python/windows/python3 b/src/connector/python/windows/python3
new file mode 120000
index 0000000000..b870225aa0
--- /dev/null
+++ b/src/connector/python/windows/python3
@@ -0,0 +1 @@
+../
\ No newline at end of file
diff --git a/src/connector/python/windows/python3/LICENSE b/src/connector/python/windows/python3/LICENSE
deleted file mode 100644
index 2d032e65d8..0000000000
--- a/src/connector/python/windows/python3/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
- Copyright (c) 2019 TAOS Data, Inc.
-
-This program is free software: you can use, redistribute, and/or modify
-it under the terms of the GNU Affero General Public License, version 3
-or later ("AGPL"), as published by the Free Software Foundation.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
diff --git a/src/connector/python/windows/python3/README.md b/src/connector/python/windows/python3/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/windows/python3/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/windows/python3/setup.py b/src/connector/python/windows/python3/setup.py
deleted file mode 100644
index cdcec62a21..0000000000
--- a/src/connector/python/windows/python3/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.7",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 3",
- "Operating System :: Windows",
- ],
-)
diff --git a/src/connector/python/windows/python3/taos/__init__.py b/src/connector/python/windows/python3/taos/__init__.py
deleted file mode 100644
index b57e25fd2c..0000000000
--- a/src/connector/python/windows/python3/taos/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
-
-# Globals
-threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
-
-
-def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
-
-    Currently supported keyword parameters:
-    @dsn: Data source name as string
-    @user: Username as string (optional)
-    @password: Password as string (optional)
-    @host: Hostname (optional)
-    @database: Database name (optional)
-
- @rtype: TDengineConnector
- """
- return TDengineConnection(*args, **kwargs)
diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py
deleted file mode 100644
index ec72474df9..0000000000
--- a/src/connector/python/windows/python3/taos/cinterface.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
-
-
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
-
-
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
-
-
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
-
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
-
-
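The converter turns raw int64 epoch values into datetimes, dividing by 1e3 or 1e6 according to the precision flag that taos_result_precision() reports. Worked numerically:

import datetime

milli = 1600000000000       # a millisecond-precision timestamp
micro = 1600000000000000    # the same instant in microseconds

print(datetime.datetime.fromtimestamp(milli / 1000.0))
print(datetime.datetime.fromtimestamp(micro / 1000000.0))
# Both print 2020-09-13 ... in the local timezone.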
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
-                    ctypes.c_byte))[
- :abs(num_of_rows)]]
-
-
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
-
-
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
-
-
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
-
-
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
-
-
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
-
-
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
-
-
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
-
-
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
-
-
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
-
-
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
-
- return res
-
-
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
-
-# C interface class
-
-
-class CTaosInterface(object):
-
- libtaos = ctypes.windll.LoadLibrary('taos')
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
-
- def __init__(self, config=None):
- '''
- Function to initialize the class
- @config : str, config directory
-
- @rtype : None
- '''
- if config is None:
- self._config = ctypes.c_char_p(None)
- else:
- try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
- except AttributeError:
- raise AttributeError("config is expected as a str")
-
- if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
-
- CTaosInterface.libtaos.taos_init()
-
- @property
- def config(self):
- """ Get current config
- """
- return self._config
-
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
- Function to connect to server
-
- @rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
-        @rtype: c_void_p, TDengine result handle
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
-        @restart boolean, restart the subscription from the beginning instead of resuming saved progress
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
- """
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
-
-
-if __name__ == '__main__':
- cinter = CTaosInterface()
- conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
-
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
-
- fields = CTaosInterface.useResult(result)
-
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
-
- print(data)
-
- cinter.freeResult(result)
- cinter.close(conn)
diff --git a/src/connector/python/windows/python3/taos/connection.py b/src/connector/python/windows/python3/taos/connection.py
deleted file mode 100644
index 5729d01c6d..0000000000
--- a/src/connector/python/windows/python3/taos/connection.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
-
-
-class TDengineConnection(object):
- """ TDengine connection object
- """
-
- def __init__(self, *args, **kwargs):
- self._conn = None
- self._host = None
- self._user = "root"
- self._password = "taosdata"
- self._database = None
- self._port = 0
- self._config = None
- self._chandle = None
-
- if len(kwargs) > 0:
- self.config(**kwargs)
-
- def config(self, **kwargs):
- # host
- if 'host' in kwargs:
- self._host = kwargs['host']
-
- # user
- if 'user' in kwargs:
- self._user = kwargs['user']
-
- # password
- if 'password' in kwargs:
- self._password = kwargs['password']
-
- # database
- if 'database' in kwargs:
- self._database = kwargs['database']
-
- # port
- if 'port' in kwargs:
- self._port = kwargs['port']
-
- # config
- if 'config' in kwargs:
- self._config = kwargs['config']
-
- self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
-
- def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
-
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
- if self._conn is None:
- return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
-
- def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
-
- def commit(self):
- """Commit any pending transaction to the database.
-
-        Since TDengine does not support transactions, this is a no-op.
- """
- pass
-
- def rollback(self):
- """Void functionality
- """
- pass
-
- def clear_result_set(self):
- """Clear unused result set on this connection.
- """
- pass
-
-
-if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
- conn.close()
- print("Hello world")
diff --git a/src/connector/python/windows/python3/taos/constants.py b/src/connector/python/windows/python3/taos/constants.py
deleted file mode 100644
index 49fc17b2fb..0000000000
--- a/src/connector/python/windows/python3/taos/constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Constants in TDengine python
-"""
-
-from .dbapi import *
-
-
-class FieldType(object):
- """TDengine Field Types
- """
- # type_code
- C_NULL = 0
- C_BOOL = 1
- C_TINYINT = 2
- C_SMALLINT = 3
- C_INT = 4
- C_BIGINT = 5
- C_FLOAT = 6
- C_DOUBLE = 7
- C_BINARY = 8
- C_TIMESTAMP = 9
- C_NCHAR = 10
- C_TINYINT_UNSIGNED = 11
- C_SMALLINT_UNSIGNED = 12
- C_INT_UNSIGNED = 13
- C_BIGINT_UNSIGNED = 14
- # NULL value definition
-    # NOTE: These values must be kept in sync with the C definitions in tsdb.h
- C_BOOL_NULL = 0x02
- C_TINYINT_NULL = -128
- C_TINYINT_UNSIGNED_NULL = 255
- C_SMALLINT_NULL = -32768
- C_SMALLINT_UNSIGNED_NULL = 65535
- C_INT_NULL = -2147483648
- C_INT_UNSIGNED_NULL = 4294967295
- C_BIGINT_NULL = -9223372036854775808
- C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
- # Timestamp precision definition
- C_TIMESTAMP_MILLI = 0
- C_TIMESTAMP_MICRO = 1
diff --git a/src/connector/python/windows/python3/taos/cursor.py b/src/connector/python/windows/python3/taos/cursor.py
deleted file mode 100644
index 136cd42fe4..0000000000
--- a/src/connector/python/windows/python3/taos/cursor.py
+++ /dev/null
@@ -1,220 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-from .constants import FieldType
-
-# querySeqNum = 0
-
-
-class TDengineCursor(object):
- """Database cursor which is used to manage the context of a fetch operation.
-
- Attributes:
- .description: Read-only attribute consists of 7-item sequences:
-
-        > name (mandatory)
-        > type_code (mandatory)
- > display_size
- > internal_size
- > precision
- > scale
- > null_ok
-
- This attribute will be None for operations that do not return rows or
- if the cursor has not had an operation invoked via the .execute*() method yet.
-
-        .rowcount: This read-only attribute specifies the number of rows that the last
-            .execute*() produced (for DQL statements like SELECT) or affected (for DML statements like INSERT).
- """
-
- def __init__(self, connection=None):
- self._description = []
- self._rowcount = -1
- self._connection = None
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
- self._logfile = ""
-
- if connection is not None:
- self._connection = connection
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetch iterator")
-
- if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
- if self._block_rows == 0:
- raise StopIteration
- self._block = list(map(tuple, zip(*block)))
- self._block_iter = 0
-
- data = self._block[self._block_iter]
- self._block_iter += 1
-
- return data
-
- @property
- def description(self):
- """Return the description of the object.
- """
- return self._description
-
- @property
- def rowcount(self):
- """Return the rowcount of the object
- """
- return self._rowcount
-
- @property
- def affected_rows(self):
- """Return the affected_rows of the object
- """
- return self._affected_rows
-
- def callproc(self, procname, *args):
- """Call a stored database procedure with the given name.
-
- Void functionality since no stored procedures.
- """
- pass
-
- def close(self):
- """Close the cursor.
- """
- if self._connection is None:
- return False
-
- self._reset_result()
- self._connection = None
-
- return True
-
- def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
- if not operation:
- return None
-
- if not self._connection:
- # TODO : change the exception raised here
- raise ProgrammingError("Cursor is not connected")
-
- self._reset_result()
-
- stmt = operation
- if params is not None:
- pass
-
- self._result = CTaosInterface.query(self._connection._conn, stmt)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(self._result)
- return self._handle_result()
- else:
- raise ProgrammingError(CTaosInterface.errStr(self._result), errno)
-
- def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
- pass
-
- def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
- pass
-
- def fetchmany(self):
- pass
-
- def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def fetchall(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
-
- return list(map(tuple, zip(*buffer)))
-
- def nextset(self):
- """
- """
- pass
-
- def setinputsize(self, sizes):
- pass
-
- def setutputsize(self, size, column=None):
- pass
-
- def _reset_result(self):
- """Reset the result to unused version.
- """
- self._description = []
- self._rowcount = -1
- if self._result is not None:
- CTaosInterface.freeResult(self._result)
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
-
- def _handle_result(self):
- """Handle the return result from query.
- """
- self._description = []
- for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
-
- return self._result
diff --git a/src/connector/python/windows/python3/taos/dbapi.py b/src/connector/python/windows/python3/taos/dbapi.py
deleted file mode 100644
index a29621f7a3..0000000000
--- a/src/connector/python/windows/python3/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
- def __com__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DataFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
diff --git a/src/connector/python/windows/python3/taos/error.py b/src/connector/python/windows/python3/taos/error.py
deleted file mode 100644
index 238b293a0b..0000000000
--- a/src/connector/python/windows/python3/taos/error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Python exceptions
-"""
-
-
-class Error(Exception):
- def __init__(self, msg=None, errno=None):
- self.msg = msg
- self._full_msg = self.msg
- self.errno = errno
-
- def __str__(self):
- return self._full_msg
-
-
-class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
- pass
-
-
-class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
- pass
-
-
-class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
- pass
-
-
-class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
- pass
-
-
-class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
- pass
-
-
-class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
- pass
-
-
-class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
- pass
-
-
-class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
- pass
-
-
-class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
- pass
diff --git a/src/connector/python/windows/python3/taos/subscription.py b/src/connector/python/windows/python3/taos/subscription.py
deleted file mode 100644
index 270d9de092..0000000000
--- a/src/connector/python/windows/python3/taos/subscription.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-
-
-class TDengineSubscription(object):
- """TDengine subscription object
- """
-
- def __init__(self, sub):
- self._sub = sub
-
- def consume(self):
- """Consume rows of a subscription
- """
- if self._sub is None:
- raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
-
- def close(self, keepProgress=True):
- """Close the Subscription.
- """
- if self._sub is None:
- return False
-
- CTaosInterface.unsubscribe(self._sub, keepProgress)
- return True
-
-
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(True, "test", "select * from meters;", 1000)
-
- for i in range(0, 10):
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
diff --git a/src/dnode/src/dnodeCfg.c b/src/dnode/src/dnodeCfg.c
index c573d709f5..586adacc98 100644
--- a/src/dnode/src/dnodeCfg.c
+++ b/src/dnode/src/dnodeCfg.c
@@ -158,7 +158,7 @@ static int32_t dnodeWriteCfg() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
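Note: the fsync -> taosFsync change above (repeated in dnodeEps.c, dnodeMInfos.c, and shellCheck.c below) routes the flush through TDengine's OS abstraction layer. The wrapper itself is not part of this patch; a minimal sketch of what such a shim typically looks like, assuming a POSIX fsync path and a Windows FlushFileBuffers path (names and structure here are assumptions, not the actual src/os implementation):

    #include <stdio.h>
    #ifdef _WIN32
    #include <io.h>       /* _get_osfhandle */
    #include <windows.h>  /* FlushFileBuffers */
    #else
    #include <unistd.h>   /* fsync */
    #endif

    /* Hypothetical portable fsync shim; the real taosFsync lives in src/os. */
    static int taosFsyncSketch(int fd) {
      if (fd < 0) return -1;
    #ifdef _WIN32
      HANDLE h = (HANDLE)_get_osfhandle(fd);
      if (h == INVALID_HANDLE_VALUE) return -1;
      return FlushFileBuffers(h) ? 0 : -1;  /* flush OS buffers to disk */
    #else
      return fsync(fd);                     /* POSIX: direct passthrough */
    #endif
    }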
diff --git a/src/dnode/src/dnodeEps.c b/src/dnode/src/dnodeEps.c
index 9554651776..9b15353647 100644
--- a/src/dnode/src/dnodeEps.c
+++ b/src/dnode/src/dnodeEps.c
@@ -277,7 +277,7 @@ static int32_t dnodeWriteEps() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
diff --git a/src/dnode/src/dnodeMInfos.c b/src/dnode/src/dnodeMInfos.c
index 0dca116d84..611c30b843 100644
--- a/src/dnode/src/dnodeMInfos.c
+++ b/src/dnode/src/dnodeMInfos.c
@@ -286,7 +286,7 @@ static int32_t dnodeWriteMInfos() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
diff --git a/src/inc/taos.h b/src/inc/taos.h
index cd8e116053..6dd695b320 100644
--- a/src/inc/taos.h
+++ b/src/inc/taos.h
@@ -82,6 +82,7 @@ typedef struct TAOS_BIND {
uintptr_t buffer_length; // unused
uintptr_t *length;
int * is_null;
+
int is_unsigned; // unused
int * error; // unused
union {
@@ -99,12 +100,25 @@ typedef struct TAOS_BIND {
unsigned int allocated;
} TAOS_BIND;
+typedef struct TAOS_MULTI_BIND {
+ int buffer_type;
+ void *buffer;
+ uintptr_t buffer_length;
+ int32_t *length;
+ char *is_null;
+ int num;
+} TAOS_MULTI_BIND;
+
+
TAOS_STMT *taos_stmt_init(TAOS *taos);
int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
+int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name);
int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes);
int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind);
+int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind);
+int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx);
int taos_stmt_add_batch(TAOS_STMT *stmt);
int taos_stmt_execute(TAOS_STMT *stmt);
TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt);
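The hunk above introduces TAOS_MULTI_BIND plus taos_stmt_set_tbname and taos_stmt_bind_param_batch. A minimal usage sketch, assuming the usual TSDB_DATA_TYPE_* constants from taos.h and an already connected TAOS handle; the table name, column layout, and values are invented for illustration and error checking is omitted:

    #include <stdint.h>
    #include <string.h>
    #include "taos.h"

    void demo_batch_insert(TAOS *taos) {
      TAOS_STMT *stmt = taos_stmt_init(taos);
      const char *sql = "INSERT INTO ? VALUES (?, ?)";
      taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));
      taos_stmt_set_tbname(stmt, "d1001");            /* new in this patch */

      int64_t ts[2]      = {1500000000000, 1500000000001};
      int32_t current[2] = {10, 11};
      char    is_null[2] = {0, 0};

      TAOS_MULTI_BIND cols[2];
      memset(cols, 0, sizeof(cols));
      cols[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
      cols[0].buffer        = ts;
      cols[0].buffer_length = sizeof(int64_t);
      cols[0].is_null       = is_null;
      cols[0].num           = 2;                      /* two rows per bind */
      cols[1].buffer_type   = TSDB_DATA_TYPE_INT;
      cols[1].buffer        = current;
      cols[1].buffer_length = sizeof(int32_t);
      cols[1].is_null       = is_null;
      cols[1].num           = 2;

      taos_stmt_bind_param_batch(stmt, cols);         /* new in this patch */
      taos_stmt_add_batch(stmt);
      taos_stmt_execute(stmt);
      taos_stmt_close(stmt);
    }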
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index e9282d0816..d812f77580 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -22,7 +22,6 @@ extern "C" {
#include
#include
-#include "osDef.h"
#include "taos.h"
#define TSDB__packed
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index c4167b8c37..b81816ca05 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -226,6 +226,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended")
#define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied")
#define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing")
+#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state")
// tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID")
diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h
index f7eac0d205..a8665cb48c 100644
--- a/src/inc/ttokendef.h
+++ b/src/inc/ttokendef.h
@@ -80,12 +80,12 @@
#define TK_DOT 61
#define TK_CREATE 62
#define TK_TABLE 63
-#define TK_DATABASE 64
-#define TK_TABLES 65
-#define TK_STABLES 66
-#define TK_VGROUPS 67
-#define TK_DROP 68
-#define TK_STABLE 69
+#define TK_STABLE 64
+#define TK_DATABASE 65
+#define TK_TABLES 66
+#define TK_STABLES 67
+#define TK_VGROUPS 68
+#define TK_DROP 69
#define TK_TOPIC 70
#define TK_FUNCTION 71
#define TK_DNODE 72
diff --git a/src/inc/ttype.h b/src/inc/ttype.h
index 01ec49a417..8eaa52f90a 100644
--- a/src/inc/ttype.h
+++ b/src/inc/ttype.h
@@ -5,6 +5,8 @@
extern "C" {
#endif
+#include
+#include
#include "taosdef.h"
// ----------------- For variable data types such as TSDB_DATA_TYPE_BINARY and TSDB_DATA_TYPE_NCHAR
diff --git a/src/kit/shell/src/shellCheck.c b/src/kit/shell/src/shellCheck.c
index b88244ea01..8f7f475536 100644
--- a/src/kit/shell/src/shellCheck.c
+++ b/src/kit/shell/src/shellCheck.c
@@ -142,7 +142,7 @@ static void *shellCheckThreadFp(void *arg) {
taos_free_result(pSql);
}
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
return NULL;
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 3631fbec1c..8866bf2607 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -68,20 +68,17 @@ enum TEST_MODE {
INVAID_TEST
};
-enum QUERY_MODE {
- SYNC_QUERY_MODE, // 0
- ASYNC_QUERY_MODE, // 1
- INVALID_MODE
-};
+#define MAX_RECORDS_PER_REQ 32766
#define MAX_SQL_SIZE 65536
#define BUFFER_SIZE (65536*2)
+#define COND_BUF_LEN (BUFFER_SIZE - 30)
#define MAX_USERNAME_SIZE 64
#define MAX_PASSWORD_SIZE 64
#define MAX_DB_NAME_SIZE 64
#define MAX_HOSTNAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
-#define MAX_DATA_SIZE 16000
+#define MAX_DATA_SIZE ((16*1024)+20) // max record len: 16*1024; timestamp string and ,('') need extra space
#define MAX_NUM_DATATYPE 10
#define OPT_ABORT 1 /* –abort */
#define STRING_LEN 60000
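COND_BUF_LEN and MAX_DATA_SIZE above are expanded inside larger expressions, so both are written fully parenthesized; an unparenthesized macro changes the arithmetic silently:

    #define BAD_LEN  16*1024+20      /* unparenthesized                */
    #define GOOD_LEN ((16*1024)+20)  /* as defined above               */

    int twice_bad  = 2 * BAD_LEN;    /* 2*16*1024+20     == 32788      */
    int twice_good = 2 * GOOD_LEN;   /* 2*((16*1024)+20) == 32808      */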
@@ -118,8 +115,8 @@ typedef enum TALBE_EXISTS_EN {
} TALBE_EXISTS_EN;
enum MODE {
- SYNC,
- ASYNC,
+ SYNC_MODE,
+ ASYNC_MODE,
MODE_BUT
};
@@ -188,7 +185,7 @@ typedef struct {
/* Used by main to communicate with parse_opt. */
typedef struct SArguments_S {
char * metaFile;
- int test_mode;
+ uint32_t test_mode;
char * host;
uint16_t port;
char * user;
@@ -205,82 +202,82 @@ typedef struct SArguments_S {
bool verbose_print;
bool performance_print;
char * output_file;
- int query_mode;
+ bool async_mode;
char * datatype[MAX_NUM_DATATYPE + 1];
- int len_of_binary;
- int num_of_CPR;
- int num_of_threads;
- int insert_interval;
- int query_times;
- int interlace_rows;
- int num_of_RPR;
- int max_sql_len;
- int num_of_tables;
- int num_of_DPT;
+ uint32_t len_of_binary;
+ uint32_t num_of_CPR;
+ uint32_t num_of_threads;
+ uint64_t insert_interval;
+ int64_t query_times;
+ uint64_t interlace_rows;
+ uint64_t num_of_RPR; // num_of_records_per_req
+ uint64_t max_sql_len;
+ uint64_t num_of_tables;
+ uint64_t num_of_DPT;
int abort;
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
- int method_of_delete;
+ uint32_t method_of_delete;
char ** arg_list;
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
} SArguments;
typedef struct SColumn_S {
- char field[TSDB_COL_NAME_LEN + 1];
- char dataType[MAX_TB_NAME_SIZE];
- int dataLen;
+ char field[TSDB_COL_NAME_LEN + 1];
+ char dataType[MAX_TB_NAME_SIZE];
+ uint32_t dataLen;
char note[128];
} StrColumn;
typedef struct SSuperTable_S {
char sTblName[MAX_TB_NAME_SIZE+1];
- int childTblCount;
+ int64_t childTblCount;
bool childTblExists; // 0: no, 1: yes
- int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
- int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
+ uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
+ uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
char childTblPrefix[MAX_TB_NAME_SIZE];
char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample
- char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful
- int childTblLimit;
- int childTblOffset;
+ char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest
+ int64_t childTblLimit;
+ uint64_t childTblOffset;
- int multiThreadWriteOneTbl; // 0: no, 1: yes
- int interlaceRows; //
+// int multiThreadWriteOneTbl; // 0: no, 1: yes
+ uint64_t interlaceRows; //
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
- int maxSqlLen; //
+ uint64_t maxSqlLen; //
- int insertInterval; // insert interval, will override global insert interval
- int64_t insertRows; // 0: no limit
- int timeStampStep;
+ uint64_t insertInterval; // insert interval, will override global insert interval
+ uint64_t insertRows;
+ int64_t timeStampStep;
char startTimestamp[MAX_TB_NAME_SIZE];
char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json
char sampleFile[MAX_FILE_NAME_LEN+1];
char tagsFile[MAX_FILE_NAME_LEN+1];
- int columnCount;
+ uint32_t columnCount;
StrColumn columns[MAX_COLUMN_COUNT];
- int tagCount;
+ uint32_t tagCount;
StrColumn tags[MAX_TAG_COUNT];
char* childTblName;
char* colsOfCreateChildTable;
- int lenOfOneRow;
- int lenOfTagOfOneRow;
+ uint64_t lenOfOneRow;
+ uint64_t lenOfTagOfOneRow;
char* sampleDataBuf;
//int sampleRowCount;
//int sampleUsePos;
- int tagSource; // 0: rand, 1: tag sample
+ uint32_t tagSource; // 0: rand, 1: tag sample
char* tagDataBuf;
- int tagSampleCount;
- int tagUsePos;
+ uint32_t tagSampleCount;
+ uint32_t tagUsePos;
// statistics
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
} SSuperTable;
typedef struct {
@@ -307,8 +304,8 @@ typedef struct {
typedef struct SDbCfg_S {
// int maxtablesPerVnode;
- int minRows;
- int maxRows;
+ uint32_t minRows; // 0 means default
+ uint32_t maxRows; // 0 means default
int comp;
int walLevel;
int cacheLast;
@@ -327,13 +324,15 @@ typedef struct SDataBase_S {
char dbName[MAX_DB_NAME_SIZE];
bool drop; // 0: use exists, 1: if exists, drop then new create
SDbCfg dbCfg;
- int superTblCount;
+ uint64_t superTblCount;
SSuperTable superTbls[MAX_SUPER_TABLE_COUNT];
} SDataBase;
typedef struct SDbs_S {
char cfgDir[MAX_FILE_NAME_LEN+1];
char host[MAX_HOSTNAME_SIZE];
+ struct sockaddr_in serv_addr;
+
uint16_t port;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
@@ -341,106 +340,107 @@ typedef struct SDbs_S {
bool use_metric;
bool insert_only;
bool do_aggreFunc;
- bool queryMode;
+ bool asyncMode;
- int threadCount;
- int threadCountByCreateTbl;
- int dbCount;
+ uint32_t threadCount;
+ uint32_t threadCountByCreateTbl;
+ uint32_t dbCount;
SDataBase db[MAX_DB_COUNT];
// statistics
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
} SDbs;
typedef struct SpecifiedQueryInfo_S {
- int queryInterval; // 0: unlimit > 0 loop/s
- int concurrent;
- int sqlCount;
- int mode; // 0: sync, 1: async
- int subscribeInterval; // ms
- int queryTimes;
+ uint64_t queryInterval; // 0: unlimit > 0 loop/s
+ uint64_t concurrent;
+ uint64_t sqlCount;
+ uint32_t asyncMode; // 0: sync, 1: async
+ uint64_t subscribeInterval; // ms
+ uint64_t queryTimes;
int subscribeRestart;
int subscribeKeepProgress;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
- int totalQueried;
+ uint64_t totalQueried;
} SpecifiedQueryInfo;
typedef struct SuperQueryInfo_S {
char sTblName[MAX_TB_NAME_SIZE+1];
- int queryInterval; // 0: unlimit > 0 loop/s
- int threadCnt;
- int mode; // 0: sync, 1: async
- int subscribeInterval; // ms
+ uint64_t queryInterval; // 0: unlimit > 0 loop/s
+ uint32_t threadCnt;
+ uint32_t asyncMode; // 0: sync, 1: async
+ uint64_t subscribeInterval; // ms
int subscribeRestart;
int subscribeKeepProgress;
- int queryTimes;
- int childTblCount;
+ uint64_t queryTimes;
+ uint64_t childTblCount;
char childTblPrefix[MAX_TB_NAME_SIZE];
- int sqlCount;
+ uint64_t sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
char* childTblName;
- int totalQueried;
+ uint64_t totalQueried;
} SuperQueryInfo;
typedef struct SQueryMetaInfo_S {
char cfgDir[MAX_FILE_NAME_LEN+1];
char host[MAX_HOSTNAME_SIZE];
uint16_t port;
+ struct sockaddr_in serv_addr;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
char dbName[MAX_DB_NAME_SIZE+1];
- char queryMode[MAX_TB_NAME_SIZE]; // taosc, restful
+ char queryMode[MAX_TB_NAME_SIZE]; // taosc, rest
SpecifiedQueryInfo specifiedQueryInfo;
SuperQueryInfo superQueryInfo;
- int totalQueried;
+ uint64_t totalQueried;
} SQueryMetaInfo;
typedef struct SThreadInfo_S {
- TAOS *taos;
- int threadID;
- char db_name[MAX_DB_NAME_SIZE+1];
- uint32_t time_precision;
- char fp[4096];
- char tb_prefix[MAX_TB_NAME_SIZE];
- int start_table_from;
- int end_table_to;
- int ntables;
- int data_of_rate;
- uint64_t start_time;
- char* cols;
- bool use_metric;
+ TAOS * taos;
+ int threadID;
+ char db_name[MAX_DB_NAME_SIZE+1];
+ uint32_t time_precision;
+ char fp[4096];
+ char tb_prefix[MAX_TB_NAME_SIZE];
+ uint64_t start_table_from;
+ uint64_t end_table_to;
+ uint64_t ntables;
+ uint64_t data_of_rate;
+ int64_t start_time;
+ char* cols;
+ bool use_metric;
SSuperTable* superTblInfo;
// for async insert
- tsem_t lock_sem;
- int64_t counter;
+ tsem_t lock_sem;
+ int64_t counter;
uint64_t st;
uint64_t et;
- int64_t lastTs;
+ uint64_t lastTs;
// sample data
- int samplePos;
+ int64_t samplePos;
// statistics
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
// insert delay statistics
- int64_t cntDelay;
- int64_t totalDelay;
- int64_t avgDelay;
- int64_t maxDelay;
- int64_t minDelay;
+ uint64_t cntDelay;
+ uint64_t totalDelay;
+ uint64_t avgDelay;
+ uint64_t maxDelay;
+ uint64_t minDelay;
// query
- int querySeq; // sequence number of sql command
+ uint64_t querySeq; // sequence number of sql command
} threadInfo;
#ifdef WINDOWS
@@ -519,6 +519,8 @@ static int taosRandom()
static int createDatabasesAndStables();
static void createChildTables();
static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet);
+static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
+ char* sqlstr, char *resultFile);
/* ************ Global variables ************ */
@@ -530,50 +532,50 @@ char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)",
"max(col0)", "min(col0)", "first(col0)", "last(col0)"};
SArguments g_args = {
- NULL, // metaFile
- 0, // test_mode
- "127.0.0.1", // host
- 6030, // port
- "root", // user
-#ifdef _TD_POWER_
- "powerdb", // password
-#else
- "taosdata", // password
-#endif
- "test", // database
- 1, // replica
- "t", // tb_prefix
- NULL, // sqlFile
- true, // use_metric
- true, // drop_database
- true, // insert_only
- false, // debug_print
- false, // verbose_print
- false, // performance statistic print
- false, // answer_yes;
- "./output.txt", // output_file
- 0, // mode : sync or async
- {
- "INT", // datatype
- "INT", // datatype
- "INT", // datatype
- "INT", // datatype
- },
- 16, // len_of_binary
- 4, // num_of_CPR
- 10, // num_of_connections/thread
- 0, // insert_interval
- 1, // query_times
- 0, // interlace_rows;
- 30000, // num_of_RPR
- 1024000, // max_sql_len
- 10000, // num_of_tables
- 10000, // num_of_DPT
- 0, // abort
- 0, // disorderRatio
- 1000, // disorderRange
- 1, // method_of_delete
- NULL // arg_list
+ NULL, // metaFile
+ 0, // test_mode
+ "127.0.0.1", // host
+ 6030, // port
+ "root", // user
+ #ifdef _TD_POWER_
+ "powerdb", // password
+ #else
+ "taosdata", // password
+ #endif
+ "test", // database
+ 1, // replica
+ "t", // tb_prefix
+ NULL, // sqlFile
+ true, // use_metric
+ true, // drop_database
+ true, // insert_only
+ false, // debug_print
+ false, // verbose_print
+ false, // performance statistic print
+ false, // answer_yes;
+ "./output.txt", // output_file
+ 0, // mode : sync or async
+ {
+ "INT", // datatype
+ "INT", // datatype
+ "INT", // datatype
+ "INT", // datatype
+ },
+ 16, // len_of_binary
+ 4, // num_of_CPR
+ 10, // num_of_connections/thread
+ 0, // insert_interval
+ 1, // query_times
+ 0, // interlace_rows;
+ 30000, // num_of_RPR
+ (1024*1024), // max_sql_len
+ 10000, // num_of_tables
+ 10000, // num_of_DPT
+ 0, // abort
+ 0, // disorderRatio
+ 1000, // disorderRange
+ 1, // method_of_delete
+ NULL // arg_list
};
@@ -663,11 +665,11 @@ static void printHelp() {
printf("%s%s%s%s\n", indent, "-q", indent,
"Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC.");
printf("%s%s%s%s\n", indent, "-b", indent,
- "The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP.");
+ "The data_type of columns, default: INT,INT,INT,INT.");
printf("%s%s%s%s\n", indent, "-w", indent,
"The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
printf("%s%s%s%s\n", indent, "-l", indent,
- "The number of columns per record. Default is 10.");
+ "The number of columns per record. Default is 4.");
printf("%s%s%s%s\n", indent, "-T", indent,
"The number of threads. Default is 10.");
printf("%s%s%s%s\n", indent, "-i", indent,
@@ -722,7 +724,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
errorPrint("%s", "\n\t-c need a valid path following!\n");
exit(EXIT_FAILURE);
}
- tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
+ tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
} else if (strcmp(argv[i], "-h") == 0) {
if (argc == i+1) {
@@ -733,7 +735,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
arguments->host = argv[++i];
} else if (strcmp(argv[i], "-p") == 0) {
if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-p need a number following!\n");
exit(EXIT_FAILURE);
@@ -774,7 +776,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, 1: ASYNC. Default is SYNC.\n");
exit(EXIT_FAILURE);
}
- arguments->query_mode = atoi(argv[++i]);
+ arguments->async_mode = atoi(argv[++i]);
} else if (strcmp(argv[i], "-T") == 0) {
if ((argc == i+1) ||
(!isStringNumber(argv[i+1]))) {
@@ -793,7 +795,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
arguments->insert_interval = atoi(argv[++i]);
} else if (strcmp(argv[i], "-qt") == 0) {
if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-qt need a number following!\n");
exit(EXIT_FAILURE);
@@ -801,7 +803,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
arguments->query_times = atoi(argv[++i]);
} else if (strcmp(argv[i], "-B") == 0) {
if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-B need a number following!\n");
exit(EXIT_FAILURE);
@@ -852,14 +854,14 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
if (strstr(argv[i], ",") == NULL) {
// only one col
if (strcasecmp(argv[i], "INT")
- && strcasecmp(argv[i], "FLOAT")
- && strcasecmp(argv[i], "TINYINT")
- && strcasecmp(argv[i], "BOOL")
- && strcasecmp(argv[i], "SMALLINT")
- && strcasecmp(argv[i], "BIGINT")
- && strcasecmp(argv[i], "DOUBLE")
- && strcasecmp(argv[i], "BINARY")
- && strcasecmp(argv[i], "NCHAR")) {
+ && strcasecmp(argv[i], "FLOAT")
+ && strcasecmp(argv[i], "TINYINT")
+ && strcasecmp(argv[i], "BOOL")
+ && strcasecmp(argv[i], "SMALLINT")
+ && strcasecmp(argv[i], "BIGINT")
+ && strcasecmp(argv[i], "DOUBLE")
+ && strcasecmp(argv[i], "BINARY")
+ && strcasecmp(argv[i], "NCHAR")) {
printHelp();
errorPrint("%s", "-b: Invalid data_type!\n");
exit(EXIT_FAILURE);
@@ -873,14 +875,14 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
char *token = strsep(&running, ",");
while(token != NULL) {
if (strcasecmp(token, "INT")
- && strcasecmp(token, "FLOAT")
- && strcasecmp(token, "TINYINT")
- && strcasecmp(token, "BOOL")
- && strcasecmp(token, "SMALLINT")
- && strcasecmp(token, "BIGINT")
- && strcasecmp(token, "DOUBLE")
- && strcasecmp(token, "BINARY")
- && strcasecmp(token, "NCHAR")) {
+ && strcasecmp(token, "FLOAT")
+ && strcasecmp(token, "TINYINT")
+ && strcasecmp(token, "BOOL")
+ && strcasecmp(token, "SMALLINT")
+ && strcasecmp(token, "BIGINT")
+ && strcasecmp(token, "DOUBLE")
+ && strcasecmp(token, "BINARY")
+ && strcasecmp(token, "NCHAR")) {
printHelp();
free(dupstr);
errorPrint("%s", "-b: Invalid data_type!\n");
@@ -895,7 +897,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
} else if (strcmp(argv[i], "-w") == 0) {
if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-w need a number following!\n");
exit(EXIT_FAILURE);
@@ -903,7 +905,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
arguments->len_of_binary = atoi(argv[++i]);
} else if (strcmp(argv[i], "-m") == 0) {
if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-m need a number following!\n");
exit(EXIT_FAILURE);
@@ -964,9 +966,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
} else if (strcmp(argv[i], "-D") == 0) {
arguments->method_of_delete = atoi(argv[++i]);
- if (arguments->method_of_delete < 0
- || arguments->method_of_delete > 3) {
- arguments->method_of_delete = 0;
+ if (arguments->method_of_delete > 3) {
+ errorPrint("%s", "\n\t-D need a valid (0~3) number following!\n");
+ exit(EXIT_FAILURE);
}
} else if ((strcmp(argv[i], "--version") == 0) ||
(strcmp(argv[i], "-V") == 0)){
@@ -983,31 +985,36 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
if (((arguments->debug_print) && (arguments->metaFile == NULL))
- || arguments->verbose_print) {
+ || arguments->verbose_print) {
printf("###################################################################\n");
printf("# meta file: %s\n", arguments->metaFile);
printf("# Server IP: %s:%hu\n",
- arguments->host == NULL ? "localhost" : arguments->host,
- arguments->port );
+ arguments->host == NULL ? "localhost" : arguments->host,
+ arguments->port );
printf("# User: %s\n", arguments->user);
printf("# Password: %s\n", arguments->password);
printf("# Use metric: %s\n", arguments->use_metric ? "true" : "false");
if (*(arguments->datatype)) {
- printf("# Specified data type: ");
- for (int i = 0; i < MAX_NUM_DATATYPE; i++)
- if (arguments->datatype[i])
- printf("%s,", arguments->datatype[i]);
- else
- break;
- printf("\n");
+ printf("# Specified data type: ");
+ for (int i = 0; i < MAX_NUM_DATATYPE; i++)
+ if (arguments->datatype[i])
+ printf("%s,", arguments->datatype[i]);
+ else
+ break;
+ printf("\n");
}
- printf("# Insertion interval: %d\n", arguments->insert_interval);
- printf("# Number of records per req: %d\n", arguments->num_of_RPR);
- printf("# Max SQL length: %d\n", arguments->max_sql_len);
+ printf("# Insertion interval: %"PRIu64"\n",
+ arguments->insert_interval);
+ printf("# Number of records per req: %"PRIu64"\n",
+ arguments->num_of_RPR);
+ printf("# Max SQL length: %"PRIu64"\n",
+ arguments->max_sql_len);
printf("# Length of Binary: %d\n", arguments->len_of_binary);
printf("# Number of Threads: %d\n", arguments->num_of_threads);
- printf("# Number of Tables: %d\n", arguments->num_of_tables);
- printf("# Number of Data per Table: %d\n", arguments->num_of_DPT);
+ printf("# Number of Tables: %"PRIu64"\n",
+ arguments->num_of_tables);
+ printf("# Number of Data per Table: %"PRIu64"\n",
+ arguments->num_of_DPT);
printf("# Database name: %s\n", arguments->database);
printf("# Table prefix: %s\n", arguments->tb_prefix);
if (arguments->disorderRatio) {
@@ -1081,27 +1088,34 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
return 0;
}
-static void getResult(TAOS_RES *res, char* resultFileName) {
+static void appendResultBufToFile(char *resultBuf, char *resultFile)
+{
+ FILE *fp = NULL;
+ if (resultFile[0] != 0) {
+ fp = fopen(resultFile, "at");
+ if (fp == NULL) {
+ errorPrint(
+ "%s() LN%d, failed to open result file: %s, result will not save to file\n",
+ __func__, __LINE__, resultFile);
+ return;
+ }
+ }
+
+  if (fp == NULL) {
+    return;
+  }
+
+  fprintf(fp, "%s", resultBuf);
+ tmfclose(fp);
+}
+
+static void appendResultToFile(TAOS_RES *res, char* resultFile) {
TAOS_ROW row = NULL;
int num_rows = 0;
int num_fields = taos_field_count(res);
TAOS_FIELD *fields = taos_fetch_fields(res);
- FILE *fp = NULL;
- if (resultFileName[0] != 0) {
- fp = fopen(resultFileName, "at");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open result file: %s, result will not save to file\n",
- __func__, __LINE__, resultFileName);
- }
- }
-
char* databuf = (char*) calloc(1, 100*1024*1024);
if (databuf == NULL) {
errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
__func__, __LINE__);
- if (fp)
- fclose(fp);
return ;
}
@@ -1111,7 +1125,7 @@ static void getResult(TAOS_RES *res, char* resultFileName) {
// fetch the records row by row
while((row = taos_fetch_row(res))) {
if (totalLen >= 100*1024*1024 - 32000) {
- if (fp) fprintf(fp, "%s", databuf);
+ appendResultBufToFile(databuf, resultFile);
totalLen = 0;
memset(databuf, 0, 100*1024*1024);
}
@@ -1123,22 +1137,40 @@ static void getResult(TAOS_RES *res, char* resultFileName) {
totalLen += len;
}
- if (fp) fprintf(fp, "%s", databuf);
- tmfclose(fp);
+ verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", __func__, __LINE__, databuf, resultFile);
+ appendResultBufToFile(databuf, resultFile);
free(databuf);
}
-static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) {
- TAOS_RES *res = taos_query(taos, command);
- if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- return;
- }
+static void selectAndGetResult(threadInfo *pThreadInfo, char *command, char* resultFile)
+{
+ if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) {
+ TAOS_RES *res = taos_query(pThreadInfo->taos, command);
+ if (res == NULL || taos_errno(res) != 0) {
+ errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n",
+ __func__, __LINE__, command, taos_errstr(res));
+ taos_free_result(res);
+ return;
+ }
- getResult(res, resultFileName);
- taos_free_result(res);
+ if ((resultFile) && (strlen(resultFile))) {
+ appendResultToFile(res, resultFile);
+ }
+ taos_free_result(res);
+
+ } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
+ int retCode = postProceSql(
+ g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port,
+ command,
+ resultFile);
+ if (0 != retCode) {
+ printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
+ }
+
+ } else {
+ errorPrint("%s() LN%d, unknown query mode: %s\n",
+ __func__, __LINE__, g_queryInfo.queryMode);
+ }
}
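With this rework, the "rest" query mode bypasses libtaos entirely: selectAndGetResult hands the SQL to postProceSql, which writes a raw HTTP/1.1 request to the REST port (the native port plus TSDB_PORT_HTTP; 6041 with default settings) using the req_fmt template shown later in this patch. For the default root/taosdata login the request on the wire looks roughly like this (host and SQL are placeholders; cm9vdDp0YW9zZGF0YQ== is the Base64 of "root:taosdata", produced by the inline encoder):

    POST /rest/sql HTTP/1.1
    Host: 127.0.0.1:6041
    Accept: */*
    Authorization: Basic cm9vdDp0YW9zZGF0YQ==
    Content-Length: 26
    Content-Type: application/x-www-form-urlencoded

    select * from test.meters;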
static int32_t rand_bool(){
@@ -1183,13 +1215,31 @@ static float rand_float(){
return randfloat[cursor];
}
+#if 0
+static const char charNum[] = "0123456789";
+
+static void nonrand_string(char *, int) __attribute__ ((unused)); // reserve for debugging purpose
+static void nonrand_string(char *str, int size)
+{
+ str[0] = 0;
+ if (size > 0) {
+ int n;
+ for (n = 0; n < size; n++) {
+ str[n] = charNum[n % 10];
+ }
+ str[n] = 0;
+ }
+}
+#endif
+
static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";
+
static void rand_string(char *str, int size) {
str[0] = 0;
if (size > 0) {
//--size;
int n;
- for (n = 0; n < size - 1; n++) {
+ for (n = 0; n < size; n++) {
int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1);
str[n] = charset[key];
}
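Note the off-by-one change in rand_string: the loop bound moves from size - 1 to size, so the function now generates size random characters before the trailing NUL (assuming the unchanged tail of the function terminates at str[n], as nonrand_string above does). Callers must therefore provide size + 1 bytes:

    char buf[16 + 1];      /* 16 random characters plus the trailing '\0' */
    rand_string(buf, 16);  /* fills buf[0..15]; '\0' lands at buf[16]     */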
@@ -1235,7 +1285,7 @@ static void init_rand_data() {
g_args.metaFile); } while(0)
static int printfInsertMeta() {
- SHOW_PARSE_RESULT_START();
+ SHOW_PARSE_RESULT_START();
printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port);
printf("user: \033[33m%s\033[0m\n", g_Dbs.user);
@@ -1244,9 +1294,12 @@ static int printfInsertMeta() {
printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl);
- printf("top insert interval: \033[33m%d\033[0m\n", g_args.insert_interval);
- printf("number of records per req: \033[33m%d\033[0m\n", g_args.num_of_RPR);
- printf("max sql length: \033[33m%d\033[0m\n", g_args.max_sql_len);
+ printf("top insert interval: \033[33m%"PRIu64"\033[0m\n",
+ g_args.insert_interval);
+ printf("number of records per req: \033[33m%"PRIu64"\033[0m\n",
+ g_args.num_of_RPR);
+ printf("max sql length: \033[33m%"PRIu64"\033[0m\n",
+ g_args.max_sql_len);
printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
@@ -1297,23 +1350,23 @@ static int printfInsertMeta() {
}
if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
- || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
+ || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
printf(" precision: \033[33m%s\033[0m\n",
- g_Dbs.db[i].dbCfg.precision);
+ g_Dbs.db[i].dbCfg.precision);
} else {
printf("\033[1m\033[40;31m precision error: %s\033[0m\n",
- g_Dbs.db[i].dbCfg.precision);
+ g_Dbs.db[i].dbCfg.precision);
return -1;
}
}
- printf(" super table count: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTblCount);
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- printf(" super table[\033[33m%d\033[0m]:\n", j);
+ printf(" super table count: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTblCount);
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j);
printf(" stbName: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].sTblName);
+ g_Dbs.db[i].superTbls[j].sTblName);
if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
printf(" autoCreateTable: \033[33m%s\033[0m\n", "no");
@@ -1331,86 +1384,87 @@ static int printfInsertMeta() {
printf(" childTblExists: \033[33m%s\033[0m\n", "error");
}
- printf(" childTblCount: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].childTblCount);
+ printf(" childTblCount: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].childTblCount);
printf(" childTblPrefix: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].childTblPrefix);
+ g_Dbs.db[i].superTbls[j].childTblPrefix);
printf(" dataSource: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].dataSource);
+ g_Dbs.db[i].superTbls[j].dataSource);
printf(" insertMode: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].insertMode);
+ g_Dbs.db[i].superTbls[j].insertMode);
if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) {
- printf(" childTblLimit: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].childTblLimit);
+ printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].childTblLimit);
}
- if (g_Dbs.db[i].superTbls[j].childTblOffset >= 0) {
- printf(" childTblOffset: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].childTblOffset);
+ if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) {
+ printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].childTblOffset);
}
- printf(" insertRows: \033[33m%"PRId64"\033[0m\n",
- g_Dbs.db[i].superTbls[j].insertRows);
-
+ printf(" insertRows: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].insertRows);
+/*
if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n");
}else {
printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
}
- printf(" interlaceRows: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].interlaceRows);
+ */
+ printf(" interlaceRows: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].interlaceRows);
if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
- printf(" stable insert interval: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].insertInterval);
+ printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].insertInterval);
}
printf(" disorderRange: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].disorderRange);
+ g_Dbs.db[i].superTbls[j].disorderRange);
printf(" disorderRatio: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].disorderRatio);
- printf(" maxSqlLen: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].maxSqlLen);
- printf(" timeStampStep: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].timeStampStep);
+ g_Dbs.db[i].superTbls[j].disorderRatio);
+ printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].maxSqlLen);
+ printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].timeStampStep);
printf(" startTimestamp: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].startTimestamp);
+ g_Dbs.db[i].superTbls[j].startTimestamp);
printf(" sampleFormat: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].sampleFormat);
+ g_Dbs.db[i].superTbls[j].sampleFormat);
printf(" sampleFile: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].sampleFile);
+ g_Dbs.db[i].superTbls[j].sampleFile);
printf(" tagsFile: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].tagsFile);
+ g_Dbs.db[i].superTbls[j].tagsFile);
printf(" columnCount: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].columnCount);
+ g_Dbs.db[i].superTbls[j].columnCount);
for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
//printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
- "binary", 6))
- || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
- "nchar", 5))) {
+ "binary", 6))
+ || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ "nchar", 5))) {
printf("column[\033[33m%d\033[0m]:\033[33m%s(%d)\033[0m ", k,
- g_Dbs.db[i].superTbls[j].columns[k].dataType,
- g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ g_Dbs.db[i].superTbls[j].columns[k].dataLen);
} else {
printf("column[%d]:\033[33m%s\033[0m ", k,
- g_Dbs.db[i].superTbls[j].columns[k].dataType);
+ g_Dbs.db[i].superTbls[j].columns[k].dataType);
}
}
printf("\n");
printf(" tagCount: \033[33m%d\033[0m\n ",
- g_Dbs.db[i].superTbls[j].tagCount);
+ g_Dbs.db[i].superTbls[j].tagCount);
for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
//printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
- "binary", strlen("binary")))
- || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
- "nchar", strlen("nchar")))) {
+ "binary", strlen("binary")))
+ || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ "nchar", strlen("nchar")))) {
printf("tag[%d]:\033[33m%s(%d)\033[0m ", k,
- g_Dbs.db[i].superTbls[j].tags[k].dataType,
- g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ g_Dbs.db[i].superTbls[j].tags[k].dataLen);
} else {
printf("tag[%d]:\033[33m%s\033[0m ", k,
- g_Dbs.db[i].superTbls[j].tags[k].dataType);
+ g_Dbs.db[i].superTbls[j].tags[k].dataType);
}
}
printf("\n");
@@ -1433,8 +1487,8 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
- fprintf(fp, "number of records per req: %d\n", g_args.num_of_RPR);
- fprintf(fp, "max sql length: %d\n", g_args.max_sql_len);
+ fprintf(fp, "number of records per req: %"PRIu64"\n", g_args.num_of_RPR);
+ fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len);
fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
for (int i = 0; i < g_Dbs.dbCount; i++) {
@@ -1484,14 +1538,14 @@ static void printfInsertMetaToFile(FILE* fp) {
}
if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
- || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
+ || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
fprintf(fp, " precision: %s\n", g_Dbs.db[i].dbCfg.precision);
} else {
fprintf(fp, " precision error: %s\n", g_Dbs.db[i].dbCfg.precision);
}
}
- fprintf(fp, " super table count: %d\n", g_Dbs.db[i].superTblCount);
+ fprintf(fp, " super table count: %"PRIu64"\n", g_Dbs.db[i].superTblCount);
for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
fprintf(fp, " super table[%d]:\n", j);
@@ -1513,7 +1567,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " childTblExists: %s\n", "error");
}
- fprintf(fp, " childTblCount: %d\n",
+ fprintf(fp, " childTblCount: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].childTblCount);
fprintf(fp, " childTblPrefix: %s\n",
g_Dbs.db[i].superTbls[j].childTblPrefix);
@@ -1521,28 +1575,32 @@ static void printfInsertMetaToFile(FILE* fp) {
g_Dbs.db[i].superTbls[j].dataSource);
fprintf(fp, " insertMode: %s\n",
g_Dbs.db[i].superTbls[j].insertMode);
- fprintf(fp, " insertRows: %"PRId64"\n",
+ fprintf(fp, " insertRows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].insertRows);
- fprintf(fp, " interlace rows: %d\n",
+ fprintf(fp, " interlace rows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
- fprintf(fp, " stable insert interval: %d\n",
+ fprintf(fp, " stable insert interval: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].insertInterval);
}
-
+/*
if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
fprintf(fp, " multiThreadWriteOneTbl: no\n");
}else {
fprintf(fp, " multiThreadWriteOneTbl: yes\n");
}
- fprintf(fp, " interlaceRows: %d\n",
+ */
+ fprintf(fp, " interlaceRows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange);
fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio);
- fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen);
+ fprintf(fp, " maxSqlLen: %"PRIu64"\n",
+ g_Dbs.db[i].superTbls[j].maxSqlLen);
- fprintf(fp, " timeStampStep: %d\n", g_Dbs.db[i].superTbls[j].timeStampStep);
- fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp);
+ fprintf(fp, " timeStampStep: %"PRId64"\n",
+ g_Dbs.db[i].superTbls[j].timeStampStep);
+ fprintf(fp, " startTimestamp: %s\n",
+ g_Dbs.db[i].superTbls[j].startTimestamp);
fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat);
fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile);
fprintf(fp, " tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile);
@@ -1551,10 +1609,10 @@ static void printfInsertMetaToFile(FILE* fp) {
for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
//printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
if ((0 == strncasecmp(
- g_Dbs.db[i].superTbls[j].columns[k].dataType,
- "binary", strlen("binary")))
- || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
- "nchar", strlen("nchar")))) {
+ g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ "binary", strlen("binary")))
+ || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ "nchar", strlen("nchar")))) {
fprintf(fp, "column[%d]:%s(%d) ", k,
g_Dbs.db[i].superTbls[j].columns[k].dataType,
g_Dbs.db[i].superTbls[j].columns[k].dataLen);
@@ -1569,9 +1627,9 @@ static void printfInsertMetaToFile(FILE* fp) {
for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
//printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
- "binary", strlen("binary")))
- || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
- "nchar", strlen("nchar")))) {
+ "binary", strlen("binary")))
+ || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ "nchar", strlen("nchar")))) {
fprintf(fp, "tag[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType,
g_Dbs.db[i].superTbls[j].tags[k].dataLen);
} else {
@@ -1591,69 +1649,73 @@ static void printfQueryMeta() {
SHOW_PARSE_RESULT_START();
printf("host: \033[33m%s:%u\033[0m\n",
- g_queryInfo.host, g_queryInfo.port);
+ g_queryInfo.host, g_queryInfo.port);
printf("user: \033[33m%s\033[0m\n", g_queryInfo.user);
printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName);
printf("\n");
- printf("specified table query info: \n");
- printf("query interval: \033[33m%d ms\033[0m\n",
- g_queryInfo.specifiedQueryInfo.queryInterval);
- printf("top query times:\033[33m%d\033[0m\n", g_args.query_times);
- printf("concurrent: \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.concurrent);
- printf("sqlCount: \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.sqlCount);
- printf("specified tbl query times:\n");
- printf(" \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.queryTimes);
- if (SUBSCRIBE_TEST == g_args.test_mode) {
- printf("mod: \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.mode);
- printf("interval: \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.subscribeInterval);
- printf("restart: \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.subscribeRestart);
- printf("keepProgress: \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
- }
+ if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) {
+ printf("specified table query info: \n");
+ printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.sqlCount);
+ if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) {
+ printf("specified tbl query times:\n");
+ printf(" \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.queryTimes);
+ printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.queryInterval);
+ printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
+ printf("concurrent: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.concurrent);
+ printf("mod: \033[33m%s\033[0m\n",
+ (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync");
+ printf("interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ printf("restart: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.subscribeRestart);
+ printf("keepProgress: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- printf(" sql[%d]: \033[33m%s\033[0m\n",
- i, g_queryInfo.specifiedQueryInfo.sql[i]);
- }
- printf("\n");
- printf("super table query info:\n");
- printf("query interval: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.queryInterval);
- printf("threadCnt: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.threadCnt);
- printf("childTblCount: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.childTblCount);
- printf("stable name: \033[33m%s\033[0m\n",
- g_queryInfo.superQueryInfo.sTblName);
- printf("stb query times:\033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.queryTimes);
+ for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n",
+ i, g_queryInfo.specifiedQueryInfo.sql[i]);
+ }
+ printf("\n");
+ }
- if (SUBSCRIBE_TEST == g_args.test_mode) {
- printf("mod: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.mode);
- printf("interval: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.subscribeInterval);
- printf("restart: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.subscribeRestart);
- printf("keepProgress: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.subscribeKeepProgress);
- }
+ printf("super table query info:\n");
+ printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.sqlCount);
- printf("sqlCount: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.sqlCount);
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- printf(" sql[%d]: \033[33m%s\033[0m\n",
- i, g_queryInfo.superQueryInfo.sql[i]);
+ if (g_queryInfo.superQueryInfo.sqlCount > 0) {
+ printf("query interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.queryInterval);
+ printf("threadCnt: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.threadCnt);
+ printf("childTblCount: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.childTblCount);
+ printf("stable name: \033[33m%s\033[0m\n",
+ g_queryInfo.superQueryInfo.sTblName);
+ printf("stb query times:\033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.queryTimes);
+
+ printf("mod: \033[33m%s\033[0m\n",
+ (g_queryInfo.superQueryInfo.asyncMode)?"async":"sync");
+ printf("interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeInterval);
+ printf("restart: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeRestart);
+ printf("keepProgress: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeKeepProgress);
+
+ for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n",
+ i, g_queryInfo.superQueryInfo.sql[i]);
+ }
+ printf("\n");
+ }
}
- printf("\n");
SHOW_PARSE_RESULT_END();
}
@@ -1691,7 +1753,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}
static void xDumpFieldToFile(FILE* fp, const char* val,
- TAOS_FIELD* field, int32_t length, int precision) {
+ TAOS_FIELD* field, int32_t length, int precision) {
if (val == NULL) {
fprintf(fp, "%s", TSDB_DATA_NULL_STR);
@@ -1798,7 +1860,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
while((row = taos_fetch_row(res)) != NULL) {
// sys database name : 'log'
if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) {
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) {
continue;
}
@@ -1809,10 +1871,10 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
}
tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
formatTimestamp(dbInfos[count]->create_time,
- *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX],
- TSDB_TIME_PRECISION_MILLI);
+ *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX],
+ TSDB_TIME_PRECISION_MILLI);
dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
@@ -1820,7 +1882,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
- fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
+ fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
@@ -1829,19 +1891,19 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
dbInfos[count]->cachelast =
- (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
+ (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
tstrncpy(dbInfos[count]->precision,
- (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
- fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+ (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+ fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX],
- fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);
+ fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);
count++;
if (count > MAX_DATABASE_COUNT) {
errorPrint("%s() LN%d, The database count overflow than %d\n",
- __func__, __LINE__, MAX_DATABASE_COUNT);
+ __func__, __LINE__, MAX_DATABASE_COUNT);
break;
}
}
@@ -1850,10 +1912,10 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
}
static void printfDbInfoForQueryToFile(
- char* filename, SDbInfo* dbInfos, int index) {
+ char* filename, SDbInfo* dbInfos, int index) {
if (filename[0] == 0)
- return;
+ return;
FILE *fp = fopen(filename, "at");
if (fp == NULL) {
@@ -1896,18 +1958,18 @@ static void printfQuerySystemInfo(TAOS * taos) {
time(&t);
lt = localtime(&t);
snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d",
- lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min,
- lt->tm_sec);
+ lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min,
+ lt->tm_sec);
// show variables
res = taos_query(taos, "show variables;");
- //getResult(res, filename);
+ //appendResultToFile(res, filename);
xDumpResultToFile(filename, res);
// show dnodes
res = taos_query(taos, "show dnodes;");
xDumpResultToFile(filename, res);
- //getResult(res, filename);
+ //appendResultToFile(res, filename);
// show databases
res = taos_query(taos, "show databases;");
@@ -1918,8 +1980,8 @@ static void printfQuerySystemInfo(TAOS * taos) {
}
int dbCount = getDbFromServer(taos, dbInfos);
if (dbCount <= 0) {
- free(dbInfos);
- return;
+ free(dbInfos);
+ return;
}
for (int i = 0; i < dbCount; i++) {
@@ -1942,186 +2004,169 @@ static void printfQuerySystemInfo(TAOS * taos) {
free(dbInfos);
}
-static int postProceSql(char* host, uint16_t port, char* sqlstr)
+static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
+ char* sqlstr, char *resultFile)
{
- char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s";
+ char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s";
- char *url = "/rest/sql";
+ char *url = "/rest/sql";
- struct hostent *server;
- struct sockaddr_in serv_addr;
- int bytes, sent, received, req_str_len, resp_len;
- char *request_buf;
- char response_buf[RESP_BUF_LEN];
- uint16_t rest_port = port + TSDB_PORT_HTTP;
+ int bytes, sent, received, req_str_len, resp_len;
+ char *request_buf;
+ char response_buf[RESP_BUF_LEN];
+ uint16_t rest_port = port + TSDB_PORT_HTTP;
- int req_buf_len = strlen(sqlstr) + REQ_EXTRA_BUF_LEN;
+ int req_buf_len = strlen(sqlstr) + REQ_EXTRA_BUF_LEN;
- request_buf = malloc(req_buf_len);
- if (NULL == request_buf) {
- errorPrint("%s", "ERROR, cannot allocate memory.\n");
- exit(EXIT_FAILURE);
- }
+ request_buf = malloc(req_buf_len);
+ if (NULL == request_buf) {
+ errorPrint("%s", "ERROR, cannot allocate memory.\n");
+ exit(EXIT_FAILURE);
+ }
- char userpass_buf[INPUT_BUF_LEN];
- int mod_table[] = {0, 2, 1};
+ char userpass_buf[INPUT_BUF_LEN];
+ int mod_table[] = {0, 2, 1};
- static char base64[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
- 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
- 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
- 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
- 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
- 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
- 'w', 'x', 'y', 'z', '0', '1', '2', '3',
- '4', '5', '6', '7', '8', '9', '+', '/'};
+ static char base64[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
+ 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
+ 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
+ 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
+ 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
+ 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
+ 'w', 'x', 'y', 'z', '0', '1', '2', '3',
+ '4', '5', '6', '7', '8', '9', '+', '/'};
- snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s",
- g_Dbs.user, g_Dbs.password);
- size_t userpass_buf_len = strlen(userpass_buf);
- size_t encoded_len = 4 * ((userpass_buf_len +2) / 3);
+ snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s",
+ g_Dbs.user, g_Dbs.password);
+ size_t userpass_buf_len = strlen(userpass_buf);
+    size_t encoded_len = 4 * ((userpass_buf_len + 2) / 3);
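+    // base64 emits 4 output chars for every 3 input bytes, so the encoded
+    // length is 4 * ceil(len / 3); e.g. "root:taosdata" (13 bytes) encodes
+    // to the 20-char "cm9vdDp0YW9zZGF0YQ==", and 13 % 3 == 1 means
+    // mod_table[1] == 2 trailing '=' pads.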
- char base64_buf[INPUT_BUF_LEN];
+ char base64_buf[INPUT_BUF_LEN];
#ifdef WINDOWS
- WSADATA wsaData;
+ WSADATA wsaData;
WSAStartup(MAKEWORD(2, 2), &wsaData);
SOCKET sockfd;
#else
- int sockfd;
+ int sockfd;
#endif
- sockfd = socket(AF_INET, SOCK_STREAM, 0);
- if (sockfd < 0) {
+ sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (sockfd < 0) {
#ifdef WINDOWS
- errorPrint( "Could not create socket : %d" , WSAGetLastError());
+ errorPrint( "Could not create socket : %d" , WSAGetLastError());
#endif
- debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd);
- free(request_buf);
- ERROR_EXIT("ERROR opening socket");
- }
-
- server = gethostbyname(host);
- if (server == NULL) {
- free(request_buf);
- ERROR_EXIT("ERROR, no such host");
- }
-
- debugPrint("h_name: %s\nh_addretype: %s\nh_length: %d\n",
- server->h_name,
- (server->h_addrtype == AF_INET)?"ipv4":"ipv6",
- server->h_length);
-
- memset(&serv_addr, 0, sizeof(serv_addr));
- serv_addr.sin_family = AF_INET;
- serv_addr.sin_port = htons(rest_port);
-#ifdef WINDOWS
- serv_addr.sin_addr.s_addr = inet_addr(host);
-#else
- memcpy(&serv_addr.sin_addr.s_addr,server->h_addr,server->h_length);
-#endif
-
- int retConn = connect(sockfd,(struct sockaddr *)&serv_addr,sizeof(serv_addr));
- debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn);
- if (retConn < 0) {
- free(request_buf);
- ERROR_EXIT("ERROR connecting");
- }
-
- memset(base64_buf, 0, INPUT_BUF_LEN);
-
- for (int n = 0, m = 0; n < userpass_buf_len;) {
- uint32_t oct_a = n < userpass_buf_len ?
- (unsigned char) userpass_buf[n++]:0;
- uint32_t oct_b = n < userpass_buf_len ?
- (unsigned char) userpass_buf[n++]:0;
- uint32_t oct_c = n < userpass_buf_len ?
- (unsigned char) userpass_buf[n++]:0;
- uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c;
-
- base64_buf[m++] = base64[(triple >> 3* 6) & 0x3f];
- base64_buf[m++] = base64[(triple >> 2* 6) & 0x3f];
- base64_buf[m++] = base64[(triple >> 1* 6) & 0x3f];
- base64_buf[m++] = base64[(triple >> 0* 6) & 0x3f];
- }
-
- for (int l = 0; l < mod_table[userpass_buf_len % 3]; l++)
- base64_buf[encoded_len - 1 - l] = '=';
-
- debugPrint("%s() LN%d: auth string base64 encoded: %s\n",
- __func__, __LINE__, base64_buf);
- char *auth = base64_buf;
-
- int r = snprintf(request_buf,
- req_buf_len,
- req_fmt, url, host, rest_port,
- auth, strlen(sqlstr), sqlstr);
- if (r >= req_buf_len) {
- free(request_buf);
- ERROR_EXIT("ERROR too long request");
- }
- verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf);
-
- req_str_len = strlen(request_buf);
- sent = 0;
- do {
-#ifdef WINDOWS
- bytes = send(sockfd, request_buf + sent, req_str_len - sent, 0);
-#else
- bytes = write(sockfd, request_buf + sent, req_str_len - sent);
-#endif
- if (bytes < 0)
- ERROR_EXIT("ERROR writing message to socket");
- if (bytes == 0)
- break;
- sent+=bytes;
- } while(sent < req_str_len);
-
- memset(response_buf, 0, RESP_BUF_LEN);
- resp_len = sizeof(response_buf) - 1;
- received = 0;
- do {
-#ifdef WINDOWS
- bytes = recv(sockfd, response_buf + received, resp_len - received, 0);
-#else
- bytes = read(sockfd, response_buf + received, resp_len - received);
-#endif
- if (bytes < 0) {
- free(request_buf);
- ERROR_EXIT("ERROR reading response from socket");
+ debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd);
+ free(request_buf);
+ ERROR_EXIT("ERROR opening socket");
}
- if (bytes == 0)
- break;
- received += bytes;
- } while(received < resp_len);
- if (received == resp_len) {
- free(request_buf);
- ERROR_EXIT("ERROR storing complete response from socket");
- }
+ int retConn = connect(sockfd, (struct sockaddr *)pServAddr, sizeof(struct sockaddr));
+ debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn);
+ if (retConn < 0) {
+ free(request_buf);
+ ERROR_EXIT("ERROR connecting");
+ }
- response_buf[RESP_BUF_LEN - 1] = '\0';
- printf("Response:\n%s\n", response_buf);
+ memset(base64_buf, 0, INPUT_BUF_LEN);
- free(request_buf);
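+    // Standard base64: pack three octets into a 24-bit triple, then emit
+    // four 6-bit indexes into the alphabet above. Octets past the end of
+    // the input read as 0; the surplus output characters are overwritten
+    // by the '=' padding loop below.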
+ for (int n = 0, m = 0; n < userpass_buf_len;) {
+ uint32_t oct_a = n < userpass_buf_len ?
+ (unsigned char) userpass_buf[n++]:0;
+ uint32_t oct_b = n < userpass_buf_len ?
+ (unsigned char) userpass_buf[n++]:0;
+ uint32_t oct_c = n < userpass_buf_len ?
+ (unsigned char) userpass_buf[n++]:0;
+ uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c;
+
+        base64_buf[m++] = base64[(triple >> 3 * 6) & 0x3f];
+        base64_buf[m++] = base64[(triple >> 2 * 6) & 0x3f];
+        base64_buf[m++] = base64[(triple >> 1 * 6) & 0x3f];
+        base64_buf[m++] = base64[(triple >> 0 * 6) & 0x3f];
+ }
+
+ for (int l = 0; l < mod_table[userpass_buf_len % 3]; l++)
+ base64_buf[encoded_len - 1 - l] = '=';
+
+ debugPrint("%s() LN%d: auth string base64 encoded: %s\n",
+ __func__, __LINE__, base64_buf);
+ char *auth = base64_buf;
+
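+    // snprintf() returns the length the formatted request *would* have had,
+    // so r >= req_buf_len below means the request was truncated and is fatal.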
+ int r = snprintf(request_buf,
+ req_buf_len,
+ req_fmt, url, host, rest_port,
+ auth, strlen(sqlstr), sqlstr);
+ if (r >= req_buf_len) {
+ free(request_buf);
+ ERROR_EXIT("ERROR too long request");
+ }
+ verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf);
+
+ req_str_len = strlen(request_buf);
+ sent = 0;
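+    // send()/write() may accept fewer bytes than requested; loop until the
+    // whole request is on the wire. A zero-byte write ends the loop early.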
+ do {
#ifdef WINDOWS
- closesocket(sockfd);
+ bytes = send(sockfd, request_buf + sent, req_str_len - sent, 0);
+#else
+ bytes = write(sockfd, request_buf + sent, req_str_len - sent);
+#endif
+ if (bytes < 0)
+ ERROR_EXIT("ERROR writing message to socket");
+ if (bytes == 0)
+ break;
+      sent += bytes;
+ } while(sent < req_str_len);
+
+ memset(response_buf, 0, RESP_BUF_LEN);
+ resp_len = sizeof(response_buf) - 1;
+ received = 0;
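+    // Read until EOF or the buffer fills up. Filling the buffer completely
+    // (received == resp_len) means the response overflowed RESP_BUF_LEN and
+    // is rejected below.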
+ do {
+#ifdef WINDOWS
+ bytes = recv(sockfd, response_buf + received, resp_len - received, 0);
+#else
+ bytes = read(sockfd, response_buf + received, resp_len - received);
+#endif
+ if (bytes < 0) {
+ free(request_buf);
+ ERROR_EXIT("ERROR reading response from socket");
+ }
+ if (bytes == 0)
+ break;
+ received += bytes;
+ } while(received < resp_len);
+
+ if (received == resp_len) {
+ free(request_buf);
+ ERROR_EXIT("ERROR storing complete response from socket");
+ }
+
+ response_buf[RESP_BUF_LEN - 1] = '\0';
+ printf("Response:\n%s\n", response_buf);
+
+ if (resultFile) {
+ appendResultBufToFile(response_buf, resultFile);
+ }
+
+ free(request_buf);
+#ifdef WINDOWS
+ closesocket(sockfd);
WSACleanup();
#else
- close(sockfd);
+ close(sockfd);
#endif
- return 0;
+ return 0;
}
static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) {
char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
if (NULL == dataBuf) {
errorPrint("%s() LN%d, calloc failed! size:%d\n",
- __func__, __LINE__, TSDB_MAX_SQL_LEN+1);
+ __func__, __LINE__, TSDB_MAX_SQL_LEN+1);
return NULL;
}
int dataLen = 0;
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos);
+ "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos);
return dataBuf;
}
@@ -2137,10 +2182,10 @@ static char* generateTagVaulesForStb(SSuperTable* stbInfo, int32_t tableSeq) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "(");
for (int i = 0; i < stbInfo->tagCount; i++) {
if ((0 == strncasecmp(stbInfo->tags[i].dataType, "binary", strlen("binary")))
- || (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", strlen("nchar")))) {
+ || (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", strlen("nchar")))) {
if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) {
printf("binary or nchar length overflow, max size:%u\n",
- (uint32_t)TSDB_MAX_BINARY_LEN);
+ (uint32_t)TSDB_MAX_BINARY_LEN);
tmfree(dataBuf);
return NULL;
}
@@ -2160,40 +2205,40 @@ static char* generateTagVaulesForStb(SSuperTable* stbInfo, int32_t tableSeq) {
}
//rand_string(buf, stbInfo->tags[i].dataLen);
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "\'%s\', ", buf);
+ "\'%s\', ", buf);
tmfree(buf);
} else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "int", strlen("int"))) {
+ "int", strlen("int"))) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%d, ", tableSeq);
+ "%d, ", tableSeq);
} else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "bigint", strlen("bigint"))) {
+ "bigint", strlen("bigint"))) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%"PRId64", ", rand_bigint());
+ "%"PRId64", ", rand_bigint());
} else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "float", strlen("float"))) {
+ "float", strlen("float"))) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%f, ", rand_float());
+ "%f, ", rand_float());
} else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "double", strlen("double"))) {
+ "double", strlen("double"))) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%f, ", rand_double());
+ "%f, ", rand_double());
} else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "smallint", strlen("smallint"))) {
+ "smallint", strlen("smallint"))) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%d, ", rand_smallint());
+ "%d, ", rand_smallint());
} else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "tinyint", strlen("tinyint"))) {
+ "tinyint", strlen("tinyint"))) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%d, ", rand_tinyint());
+ "%d, ", rand_tinyint());
} else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "bool", strlen("bool"))) {
+ "bool", strlen("bool"))) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%d, ", rand_bool());
+ "%d, ", rand_bool());
} else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "timestamp", strlen("timestamp"))) {
+ "timestamp", strlen("timestamp"))) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%"PRId64", ", rand_bigint());
+ "%"PRId64", ", rand_bigint());
} else {
printf("No support data type: %s\n", stbInfo->tags[i].dataType);
tmfree(dataBuf);
@@ -2277,8 +2322,8 @@ static int calcRowLen(SSuperTable* superTbls) {
static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
- char* dbName, char* sTblName, char** childTblNameOfSuperTbl,
- int* childTblCountOfSuperTbl, int limit, int offset) {
+ char* dbName, char* sTblName, char** childTblNameOfSuperTbl,
+ uint64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) {
char command[BUFFER_SIZE] = "\0";
char limitBuf[100] = "\0";
@@ -2289,12 +2334,13 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* childTblName = *childTblNameOfSuperTbl;
if (offset >= 0) {
- snprintf(limitBuf, 100, " limit %d offset %d", limit, offset);
+ snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"",
+ limit, offset);
}
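  // NOTE: offset is unsigned after this change, so "offset >= 0" is always
  // true and the limit/offset clause is always appended; callers that want
  // every child table now pass limit -1 with offset 0.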
  // get all child table names with: select tbname from superTblName;
snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s",
- dbName, sTblName, limitBuf);
+ dbName, sTblName, limitBuf);
res = taos_query(taos, command);
int32_t code = taos_errno(res);
@@ -2302,7 +2348,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
taos_free_result(res);
taos_close(taos);
errorPrint("%s() LN%d, failed to run command %s\n",
- __func__, __LINE__, command);
+ __func__, __LINE__, command);
exit(-1);
}
@@ -2311,10 +2357,10 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
if (childTblName == NULL) {
childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
if (NULL == childTblName) {
- taos_free_result(res);
- taos_close(taos);
- errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__);
- exit(-1);
+ taos_free_result(res);
+ taos_close(taos);
+ errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__);
+ exit(-1);
}
}
@@ -2326,16 +2372,16 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
count++;
if (count >= childTblCount - 1) {
char *tmp = realloc(childTblName,
- (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1);
+ (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1);
if (tmp != NULL) {
childTblName = tmp;
childTblCount = (int)(childTblCount*1.5);
memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0,
- (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN));
+ (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN));
} else {
        // exit if allocating more memory failed
errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n",
- __func__, __LINE__, dbName, sTblName);
+ __func__, __LINE__, dbName, sTblName);
tmfree(childTblName);
taos_free_result(res);
taos_close(taos);
@@ -2353,16 +2399,16 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
}
static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
- char* sTblName, char** childTblNameOfSuperTbl,
- int* childTblCountOfSuperTbl) {
+ char* sTblName, char** childTblNameOfSuperTbl,
+ uint64_t* childTblCountOfSuperTbl) {
- return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName,
- childTblNameOfSuperTbl, childTblCountOfSuperTbl,
- -1, -1);
+ return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName,
+ childTblNameOfSuperTbl, childTblCountOfSuperTbl,
+ -1, 0);
}
static int getSuperTableFromServer(TAOS * taos, char* dbName,
- SSuperTable* superTbls) {
+ SSuperTable* superTbls) {
char command[BUFFER_SIZE] = "\0";
TAOS_RES * res;
@@ -2390,29 +2436,29 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) {
tstrncpy(superTbls->tags[tagIndex].field,
- (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
- fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
+ (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
tstrncpy(superTbls->tags[tagIndex].dataType,
- (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
+ (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
superTbls->tags[tagIndex].dataLen =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(superTbls->tags[tagIndex].note,
- (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
+ (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
tagIndex++;
} else {
tstrncpy(superTbls->columns[columnIndex].field,
- (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
- fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
+ (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
tstrncpy(superTbls->columns[columnIndex].dataType,
- (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
+ (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
superTbls->columns[columnIndex].dataLen =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(superTbls->columns[columnIndex].note,
- (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
+ (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
columnIndex++;
}
count++;
@@ -2443,8 +2489,8 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
}
static int createSuperTable(
- TAOS * taos, char* dbName,
- SSuperTable* superTbl) {
+ TAOS * taos, char* dbName,
+ SSuperTable* superTbl) {
char command[BUFFER_SIZE] = "\0";
@@ -2456,7 +2502,7 @@ static int createSuperTable(
if (superTbl->columnCount == 0) {
errorPrint("%s() LN%d, super table column count is %d\n",
- __func__, __LINE__, superTbl->columnCount);
+ __func__, __LINE__, superTbl->columnCount);
return -1;
}
@@ -2465,13 +2511,13 @@ static int createSuperTable(
if (strcasecmp(dataType, "BINARY") == 0) {
len += snprintf(cols + len, STRING_LEN - len,
- ", col%d %s(%d)", colIndex, "BINARY",
- superTbl->columns[colIndex].dataLen);
+ ", col%d %s(%d)", colIndex, "BINARY",
+ superTbl->columns[colIndex].dataLen);
lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
} else if (strcasecmp(dataType, "NCHAR") == 0) {
len += snprintf(cols + len, STRING_LEN - len,
- ", col%d %s(%d)", colIndex, "NCHAR",
- superTbl->columns[colIndex].dataLen);
+ ", col%d %s(%d)", colIndex, "NCHAR",
+ superTbl->columns[colIndex].dataLen);
lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
} else if (strcasecmp(dataType, "INT") == 0) {
len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT");
@@ -2500,7 +2546,7 @@ static int createSuperTable(
} else {
taos_close(taos);
errorPrint("%s() LN%d, config error data type : %s\n",
- __func__, __LINE__, dataType);
+ __func__, __LINE__, dataType);
exit(-1);
}
}
@@ -2512,18 +2558,18 @@ static int createSuperTable(
superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1);
if (NULL == superTbl->colsOfCreateChildTable) {
errorPrint("%s() LN%d, Failed when calloc, size:%d",
- __func__, __LINE__, len+1);
+ __func__, __LINE__, len+1);
taos_close(taos);
exit(-1);
}
snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols);
verbosePrint("%s() LN%d: %s\n",
- __func__, __LINE__, superTbl->colsOfCreateChildTable);
+ __func__, __LINE__, superTbl->colsOfCreateChildTable);
if (superTbl->tagCount == 0) {
errorPrint("%s() LN%d, super table tag count is %d\n",
- __func__, __LINE__, superTbl->tagCount);
+ __func__, __LINE__, superTbl->tagCount);
return -1;
}
@@ -2538,44 +2584,44 @@ static int createSuperTable(
if (strcasecmp(dataType, "BINARY") == 0) {
len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex,
- "BINARY", superTbl->tags[tagIndex].dataLen);
+ "BINARY", superTbl->tags[tagIndex].dataLen);
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
} else if (strcasecmp(dataType, "NCHAR") == 0) {
len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex,
- "NCHAR", superTbl->tags[tagIndex].dataLen);
+ "NCHAR", superTbl->tags[tagIndex].dataLen);
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
} else if (strcasecmp(dataType, "INT") == 0) {
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "INT");
+ "INT");
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11;
} else if (strcasecmp(dataType, "BIGINT") == 0) {
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "BIGINT");
+ "BIGINT");
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 21;
} else if (strcasecmp(dataType, "SMALLINT") == 0) {
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "SMALLINT");
+ "SMALLINT");
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6;
} else if (strcasecmp(dataType, "TINYINT") == 0) {
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "TINYINT");
+ "TINYINT");
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 4;
} else if (strcasecmp(dataType, "BOOL") == 0) {
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "BOOL");
+ "BOOL");
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6;
} else if (strcasecmp(dataType, "FLOAT") == 0) {
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "FLOAT");
+ "FLOAT");
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 22;
} else if (strcasecmp(dataType, "DOUBLE") == 0) {
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "DOUBLE");
+ "DOUBLE");
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42;
} else {
taos_close(taos);
errorPrint("%s() LN%d, config error tag type : %s\n",
- __func__, __LINE__, dataType);
+ __func__, __LINE__, dataType);
exit(-1);
}
}
@@ -2586,14 +2632,14 @@ static int createSuperTable(
superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow;
snprintf(command, BUFFER_SIZE,
- "create table if not exists %s.%s (ts timestamp%s) tags %s",
- dbName, superTbl->sTblName, cols, tags);
+ "create table if not exists %s.%s (ts timestamp%s) tags %s",
+ dbName, superTbl->sTblName, cols, tags);
verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, command);
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
- errorPrint( "create supertable %s failed!\n\n",
- superTbl->sTblName);
- return -1;
+ errorPrint( "create supertable %s failed!\n\n",
+ superTbl->sTblName);
+ return -1;
}
debugPrint("create supertable %s success!\n\n", superTbl->sTblName);
return 0;
@@ -2620,35 +2666,35 @@ static int createDatabasesAndStables() {
int dataLen = 0;
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName);
+ BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName);
if (g_Dbs.db[i].dbCfg.blocks > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks);
+ BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks);
}
if (g_Dbs.db[i].dbCfg.cache > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache);
+ BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache);
}
if (g_Dbs.db[i].dbCfg.days > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days);
+ BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days);
}
if (g_Dbs.db[i].dbCfg.keep > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep);
+ BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep);
}
if (g_Dbs.db[i].dbCfg.quorum > 1) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum);
+ BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum);
}
if (g_Dbs.db[i].dbCfg.replica > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica);
+ BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica);
}
if (g_Dbs.db[i].dbCfg.update > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update);
+ BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update);
}
//if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) {
// dataLen += snprintf(command + dataLen,
@@ -2656,33 +2702,33 @@ static int createDatabasesAndStables() {
//}
if (g_Dbs.db[i].dbCfg.minRows > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows);
+ BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows);
}
if (g_Dbs.db[i].dbCfg.maxRows > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows);
+ BUFFER_SIZE - dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows);
}
if (g_Dbs.db[i].dbCfg.comp > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp);
+ BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp);
}
if (g_Dbs.db[i].dbCfg.walLevel > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel);
+ BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel);
}
if (g_Dbs.db[i].dbCfg.cacheLast > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast);
+ BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast);
}
if (g_Dbs.db[i].dbCfg.fsync > 0) {
dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen,
- " fsync %d", g_Dbs.db[i].dbCfg.fsync);
+ " fsync %d", g_Dbs.db[i].dbCfg.fsync);
}
if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", strlen("ms")))
- || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision,
- "us", strlen("us")))) {
+ || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision,
+ "us", strlen("us")))) {
dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen,
- " precision \'%s\';", g_Dbs.db[i].dbCfg.precision);
+ " precision \'%s\';", g_Dbs.db[i].dbCfg.precision);
}
debugPrint("%s() %d command: %s\n", __func__, __LINE__, command);
@@ -2694,8 +2740,8 @@ static int createDatabasesAndStables() {
printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName);
}
- debugPrint("%s() %d supertbl count:%d\n",
- __func__, __LINE__, g_Dbs.db[i].superTblCount);
+ debugPrint("%s() LN%d supertbl count:%"PRIu64"\n",
+ __func__, __LINE__, g_Dbs.db[i].superTblCount);
int validStbCount = 0;
@@ -2708,7 +2754,7 @@ static int createDatabasesAndStables() {
if ((ret != 0) || (g_Dbs.db[i].drop)) {
ret = createSuperTable(taos, g_Dbs.db[i].dbName,
- &g_Dbs.db[i].superTbls[j]);
+ &g_Dbs.db[i].superTbls[j]);
if (0 != ret) {
errorPrint("create super table %d failed!\n\n", j);
@@ -2717,10 +2763,10 @@ static int createDatabasesAndStables() {
}
ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName,
- &g_Dbs.db[i].superTbls[j]);
+ &g_Dbs.db[i].superTbls[j]);
if (0 != ret) {
errorPrint("\nget super table %s.%s info failed!\n\n",
- g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName);
+ g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName);
continue;
}
@@ -2753,21 +2799,22 @@ static void* createTable(void *sarg)
int len = 0;
int batchNum = 0;
- verbosePrint("%s() LN%d: Creating table from %d to %d\n",
- __func__, __LINE__,
- pThreadInfo->start_table_from, pThreadInfo->end_table_to);
+ verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n",
+ __func__, __LINE__,
+ pThreadInfo->start_table_from, pThreadInfo->end_table_to);
- for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) {
+ for (uint64_t i = pThreadInfo->start_table_from;
+ i <= pThreadInfo->end_table_to; i++) {
if (0 == g_Dbs.use_metric) {
snprintf(buffer, buff_len,
- "create table if not exists %s.%s%d %s;",
- pThreadInfo->db_name,
- g_args.tb_prefix, i,
- pThreadInfo->cols);
+ "create table if not exists %s.%s%"PRIu64" %s;",
+ pThreadInfo->db_name,
+ g_args.tb_prefix, i,
+ pThreadInfo->cols);
} else {
if (superTblInfo == NULL) {
errorPrint("%s() LN%d, use metric, but super table info is NULL\n",
- __func__, __LINE__);
+ __func__, __LINE__);
free(buffer);
exit(-1);
} else {
@@ -2775,31 +2822,31 @@ static void* createTable(void *sarg)
batchNum = 0;
memset(buffer, 0, buff_len);
len += snprintf(buffer + len,
- buff_len - len, "create table ");
+ buff_len - len, "create table ");
}
char* tagsValBuf = NULL;
if (0 == superTblInfo->tagSource) {
tagsValBuf = generateTagVaulesForStb(superTblInfo, i);
} else {
tagsValBuf = getTagValueFromTagSample(
- superTblInfo,
- i % superTblInfo->tagSampleCount);
+ superTblInfo,
+ i % superTblInfo->tagSampleCount);
}
if (NULL == tagsValBuf) {
free(buffer);
return NULL;
}
len += snprintf(buffer + len,
- buff_len - len,
- "if not exists %s.%s%d using %s.%s tags %s ",
- pThreadInfo->db_name, superTblInfo->childTblPrefix,
- i, pThreadInfo->db_name,
- superTblInfo->sTblName, tagsValBuf);
+ buff_len - len,
+ "if not exists %s.%s%"PRIu64" using %s.%s tags %s ",
+ pThreadInfo->db_name, superTblInfo->childTblPrefix,
+ i, pThreadInfo->db_name,
+ superTblInfo->sTblName, tagsValBuf);
free(tagsValBuf);
batchNum++;
if ((batchNum < superTblInfo->batchCreateTableNum)
- && ((buff_len - len)
- >= (superTblInfo->lenOfTagOfOneRow + 256))) {
+ && ((buff_len - len)
+ >= (superTblInfo->lenOfTagOfOneRow + 256))) {
continue;
}
}
@@ -2815,8 +2862,8 @@ static void* createTable(void *sarg)
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] already create %d - %d tables\n",
- pThreadInfo->threadID, pThreadInfo->start_table_from, i);
+ printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n",
+ pThreadInfo->threadID, pThreadInfo->start_table_from, i);
lastPrintTime = currentPrintTime;
}
}
@@ -2833,8 +2880,8 @@ static void* createTable(void *sarg)
}
static int startMultiThreadCreateChildTable(
- char* cols, int threads, int startFrom, int ntables,
- char* db_name, SSuperTable* superTblInfo) {
+ char* cols, int threads, uint64_t startFrom, uint64_t ntables,
+ char* db_name, SSuperTable* superTblInfo) {
pthread_t *pids = malloc(threads * sizeof(pthread_t));
threadInfo *infos = malloc(threads * sizeof(threadInfo));
@@ -2848,30 +2895,30 @@ static int startMultiThreadCreateChildTable(
threads = 1;
}
- int a = ntables / threads;
+ uint64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
- int b = 0;
+ uint64_t b = 0;
b = ntables % threads;
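  // a is the base number of tables per thread; b is the remainder that the
  // loop below still spreads across the threads so every table gets an owner.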
- for (int i = 0; i < threads; i++) {
+ for (int64_t i = 0; i < threads; i++) {
threadInfo *t_info = infos + i;
t_info->threadID = i;
tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
t_info->superTblInfo = superTblInfo;
verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name);
t_info->taos = taos_connect(
- g_Dbs.host,
- g_Dbs.user,
- g_Dbs.password,
- db_name,
- g_Dbs.port);
+ g_Dbs.host,
+ g_Dbs.user,
+ g_Dbs.password,
+ db_name,
+ g_Dbs.port);
if (t_info->taos == NULL) {
errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n",
- __func__, __LINE__, taos_errstr(NULL));
+ __func__, __LINE__, taos_errstr(NULL));
free(pids);
free(infos);
return -1;
@@ -2883,7 +2930,7 @@ static int startMultiThreadCreateChildTable(
startFrom = t_info->end_table_to + 1;
t_info->use_metric = true;
t_info->cols = cols;
- t_info->minDelay = INT16_MAX;
+ t_info->minDelay = UINT64_MAX;
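+    // Seed the running minimum with UINT64_MAX so the first measured
+    // latency always replaces it (minDelay is unsigned after this change).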
pthread_create(pids + i, NULL, createTable, t_info);
}
@@ -2903,62 +2950,62 @@ static int startMultiThreadCreateChildTable(
}
static void createChildTables() {
- char tblColsBuf[MAX_SQL_SIZE];
- int len;
+ char tblColsBuf[MAX_SQL_SIZE];
+ int len;
for (int i = 0; i < g_Dbs.dbCount; i++) {
if (g_Dbs.use_metric) {
if (g_Dbs.db[i].superTblCount > 0) {
- // with super table
+ // with super table
for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable)
- || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) {
+ || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) {
continue;
}
verbosePrint("%s() LN%d: %s\n", __func__, __LINE__,
- g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
+ g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
int startFrom = 0;
g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
verbosePrint("%s() LN%d: create %d child tables from %d\n",
- __func__, __LINE__, g_totalChildTables, startFrom);
+ __func__, __LINE__, g_totalChildTables, startFrom);
startMultiThreadCreateChildTable(
- g_Dbs.db[i].superTbls[j].colsOfCreateChildTable,
- g_Dbs.threadCountByCreateTbl,
- startFrom,
- g_Dbs.db[i].superTbls[j].childTblCount,
- g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j]));
+ g_Dbs.db[i].superTbls[j].colsOfCreateChildTable,
+ g_Dbs.threadCountByCreateTbl,
+ startFrom,
+ g_Dbs.db[i].superTbls[j].childTblCount,
+ g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j]));
}
}
} else {
// normal table
len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP");
for (int j = 0; j < g_args.num_of_CPR; j++) {
- if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0)
- || (strncasecmp(g_args.datatype[j],
- "NCHAR", strlen("NCHAR")) == 0)) {
- snprintf(tblColsBuf + len, MAX_SQL_SIZE - len,
- ", COL%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary);
- } else {
- snprintf(tblColsBuf + len, MAX_SQL_SIZE - len,
- ", COL%d %s", j, g_args.datatype[j]);
- }
- len = strlen(tblColsBuf);
+ if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0)
+ || (strncasecmp(g_args.datatype[j],
+ "NCHAR", strlen("NCHAR")) == 0)) {
+ snprintf(tblColsBuf + len, MAX_SQL_SIZE - len,
+ ", COL%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary);
+ } else {
+ snprintf(tblColsBuf + len, MAX_SQL_SIZE - len,
+ ", COL%d %s", j, g_args.datatype[j]);
+ }
+ len = strlen(tblColsBuf);
}
snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")");
- verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n",
- __func__, __LINE__,
- g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf);
+ verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRIu64" schema: %s\n",
+ __func__, __LINE__,
+ g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf);
startMultiThreadCreateChildTable(
- tblColsBuf,
- g_Dbs.threadCountByCreateTbl,
- 0,
- g_args.num_of_tables,
- g_Dbs.db[i].dbName,
- NULL);
+ tblColsBuf,
+ g_Dbs.threadCountByCreateTbl,
+ 0,
+ g_args.num_of_tables,
+ g_Dbs.db[i].dbName,
+ NULL);
}
}
}
@@ -2974,7 +3021,7 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
FILE *fp = fopen(superTblInfo->tagsFile, "r");
if (fp == NULL) {
printf("Failed to open tags file: %s, reason:%s\n",
- superTblInfo->tagsFile, strerror(errno));
+ superTblInfo->tagsFile, strerror(errno));
return -1;
}
@@ -3006,12 +3053,12 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
if (count >= tagCount - 1) {
char *tmp = realloc(tagDataBuf,
- (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow);
+ (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow);
if (tmp != NULL) {
tagDataBuf = tmp;
tagCount = (int)(tagCount*1.5);
memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow,
- 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow));
+ 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow));
} else {
        // exit if allocating more memory failed
printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile);
@@ -3040,7 +3087,7 @@ int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) {
   Read at most 10000 lines. If the file has more than 10000 lines, continue reading after the buffered lines have been consumed.
*/
static int readSampleFromCsvFileToMem(
- SSuperTable* superTblInfo) {
+ SSuperTable* superTblInfo) {
size_t n = 0;
ssize_t readLen = 0;
char * line = NULL;
@@ -3048,20 +3095,20 @@ static int readSampleFromCsvFileToMem(
FILE* fp = fopen(superTblInfo->sampleFile, "r");
if (fp == NULL) {
- errorPrint( "Failed to open sample file: %s, reason:%s\n",
- superTblInfo->sampleFile, strerror(errno));
- return -1;
+ errorPrint( "Failed to open sample file: %s, reason:%s\n",
+ superTblInfo->sampleFile, strerror(errno));
+ return -1;
}
assert(superTblInfo->sampleDataBuf);
memset(superTblInfo->sampleDataBuf, 0,
- MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow);
+ MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow);
while(1) {
readLen = tgetline(&line, &n, fp);
if (-1 == readLen) {
if(0 != fseek(fp, 0, SEEK_SET)) {
errorPrint( "Failed to fseek file: %s, reason:%s\n",
- superTblInfo->sampleFile, strerror(errno));
+ superTblInfo->sampleFile, strerror(errno));
fclose(fp);
return -1;
}
@@ -3077,13 +3124,13 @@ static int readSampleFromCsvFileToMem(
}
if (readLen > superTblInfo->lenOfOneRow) {
- printf("sample row len[%d] overflow define schema len[%d], so discard this row\n",
- (int32_t)readLen, superTblInfo->lenOfOneRow);
+      printf("sample row len[%d] exceeds the schema-defined len[%"PRIu64"], so this row is discarded\n",
+ (int32_t)readLen, superTblInfo->lenOfOneRow);
continue;
}
memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow,
- line, readLen);
+ line, readLen);
getRows++;
if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) {
@@ -3097,7 +3144,7 @@ static int readSampleFromCsvFileToMem(
}
static bool getColumnAndTagTypeFromInsertJsonFile(
- cJSON* stbInfo, SSuperTable* superTbls) {
+ cJSON* stbInfo, SSuperTable* superTbls) {
bool ret = false;
// columns
@@ -3114,7 +3161,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
int columnSize = cJSON_GetArraySize(columns);
if ((columnSize + 1/* ts */) > MAX_COLUMN_COUNT) {
errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
- __func__, __LINE__, MAX_COLUMN_COUNT);
+ __func__, __LINE__, MAX_COLUMN_COUNT);
goto PARSE_OVER;
}
@@ -3133,7 +3180,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
count = countObj->valueint;
} else if (countObj && countObj->type != cJSON_Number) {
errorPrint("%s() LN%d, failed to read json, column count not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
} else {
count = 1;
@@ -3145,7 +3192,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (!dataType || dataType->type != cJSON_String
|| dataType->valuestring == NULL) {
errorPrint("%s() LN%d: failed to read json, column type not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
//tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
@@ -3156,7 +3203,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
columnCase.dataLen = dataLen->valueint;
} else if (dataLen && dataLen->type != cJSON_Number) {
debugPrint("%s() LN%d: failed to read json, column len not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
} else {
columnCase.dataLen = 8;
@@ -3164,7 +3211,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
for (int n = 0; n < count; ++n) {
tstrncpy(superTbls->columns[index].dataType,
- columnCase.dataType, MAX_TB_NAME_SIZE);
+ columnCase.dataType, MAX_TB_NAME_SIZE);
superTbls->columns[index].dataLen = columnCase.dataLen;
index++;
}
@@ -3172,7 +3219,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if ((index + 1 /* ts */) > MAX_COLUMN_COUNT) {
errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n",
- __func__, __LINE__, MAX_COLUMN_COUNT);
+ __func__, __LINE__, MAX_COLUMN_COUNT);
goto PARSE_OVER;
}
@@ -3184,14 +3231,14 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags");
if (!tags || tags->type != cJSON_Array) {
errorPrint("%s() LN%d, failed to read json, tags not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
int tagSize = cJSON_GetArraySize(tags);
if (tagSize > MAX_TAG_COUNT) {
errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n",
- __func__, __LINE__, MAX_TAG_COUNT);
+ __func__, __LINE__, MAX_TAG_COUNT);
goto PARSE_OVER;
}
@@ -3217,7 +3264,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (!dataType || dataType->type != cJSON_String
|| dataType->valuestring == NULL) {
errorPrint("%s() LN%d, failed to read json, tag type not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
@@ -3227,7 +3274,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
columnCase.dataLen = dataLen->valueint;
} else if (dataLen && dataLen->type != cJSON_Number) {
errorPrint("%s() LN%d, failed to read json, column len not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
} else {
columnCase.dataLen = 0;
@@ -3235,7 +3282,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
for (int n = 0; n < count; ++n) {
tstrncpy(superTbls->tags[index].dataType, columnCase.dataType,
- MAX_TB_NAME_SIZE);
+ MAX_TB_NAME_SIZE);
superTbls->tags[index].dataLen = columnCase.dataLen;
index++;
}
@@ -3243,7 +3290,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (index > MAX_TAG_COUNT) {
errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n",
- __func__, __LINE__, MAX_TAG_COUNT);
+ __func__, __LINE__, MAX_TAG_COUNT);
goto PARSE_OVER;
}
@@ -3251,12 +3298,12 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > MAX_COLUMN_COUNT) {
errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n",
- __func__, __LINE__, MAX_COLUMN_COUNT);
+ __func__, __LINE__, MAX_COLUMN_COUNT);
goto PARSE_OVER;
}
ret = true;
- PARSE_OVER:
+PARSE_OVER:
return ret;
}
@@ -3320,34 +3367,45 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (threads2 && threads2->type == cJSON_Number) {
g_Dbs.threadCountByCreateTbl = threads2->valueint;
} else if (!threads2) {
- g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
+ g_Dbs.threadCountByCreateTbl = 1;
} else {
errorPrint("%s() LN%d, failed to read json, threads2 not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval");
if (gInsertInterval && gInsertInterval->type == cJSON_Number) {
+      if (gInsertInterval->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_args.insert_interval = gInsertInterval->valueint;
} else if (!gInsertInterval) {
g_args.insert_interval = 0;
} else {
errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
+ if (interlaceRows->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+      }
g_args.interlace_rows = interlaceRows->valueint;
      // rows per table needs to be less than the insert batch
if (g_args.interlace_rows > g_args.num_of_RPR) {
- printf("NOTICE: interlace rows value %d > num_of_records_per_req %d\n\n",
- g_args.interlace_rows, g_args.num_of_RPR);
- printf(" interlace rows value will be set to num_of_records_per_req %d\n\n",
- g_args.num_of_RPR);
+ printf("NOTICE: interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
+ g_args.interlace_rows, g_args.num_of_RPR);
+ printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
+ g_args.num_of_RPR);
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
g_args.interlace_rows = g_args.num_of_RPR;
@@ -3356,36 +3414,48 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
    g_args.interlace_rows = 0; // 0 means progressive mode, > 0 means interlace mode; max value is less than or equal to num_of_records_per_req
} else {
errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len");
if (maxSqlLen && maxSqlLen->type == cJSON_Number) {
+ if (maxSqlLen->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_args.max_sql_len = maxSqlLen->valueint;
} else if (!maxSqlLen) {
- g_args.max_sql_len = 1024000;
+ g_args.max_sql_len = (1024*1024);
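+    // default when max_sql_len is absent: 1 MiB, replacing the old 1024000
+    // magic number with an exact power-of-two size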
} else {
errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req");
if (numRecPerReq && numRecPerReq->type == cJSON_Number) {
+ if (numRecPerReq->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) {
+ numRecPerReq->valueint = MAX_RECORDS_PER_REQ;
+ }
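+      // Oversized values are clamped to MAX_RECORDS_PER_REQ rather than
+      // rejected, so an overly large JSON setting degrades gracefully.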
g_args.num_of_RPR = numRecPerReq->valueint;
} else if (!numRecPerReq) {
- g_args.num_of_RPR = INT32_MAX;
+ g_args.num_of_RPR = MAX_RECORDS_PER_REQ;
} else {
errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
if (answerPrompt
- && answerPrompt->type == cJSON_String
- && answerPrompt->valuestring != NULL) {
+ && answerPrompt->type == cJSON_String
+ && answerPrompt->valuestring != NULL) {
if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) {
g_args.answer_yes = false;
} else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) {
@@ -3409,8 +3479,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
int dbSize = cJSON_GetArraySize(dbs);
if (dbSize > MAX_DB_COUNT) {
errorPrint(
- "ERROR: failed to read json, databases size overflow, max database is %d\n",
- MAX_DB_COUNT);
+ "ERROR: failed to read json, databases size overflow, max database is %d\n",
+ MAX_DB_COUNT);
goto PARSE_OVER;
}
@@ -3444,15 +3514,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_Dbs.db[i].drop = g_args.drop_database;
} else {
errorPrint("%s() LN%d, failed to read json, drop input mistake\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision");
if (precision && precision->type == cJSON_String
- && precision->valuestring != NULL) {
+ && precision->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring,
- MAX_DB_NAME_SIZE);
+ MAX_DB_NAME_SIZE);
} else if (!precision) {
//tstrncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE);
@@ -3487,8 +3557,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!keep) {
g_Dbs.db[i].dbCfg.keep = -1;
} else {
- printf("ERROR: failed to read json, keep not found\n");
- goto PARSE_OVER;
+ printf("ERROR: failed to read json, keep not found\n");
+ goto PARSE_OVER;
}
cJSON* days = cJSON_GetObjectItem(dbinfo, "days");
@@ -3497,8 +3567,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!days) {
g_Dbs.db[i].dbCfg.days = -1;
} else {
- printf("ERROR: failed to read json, days not found\n");
- goto PARSE_OVER;
+ printf("ERROR: failed to read json, days not found\n");
+ goto PARSE_OVER;
}
cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache");
@@ -3507,8 +3577,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!cache) {
g_Dbs.db[i].dbCfg.cache = -1;
} else {
- printf("ERROR: failed to read json, cache not found\n");
- goto PARSE_OVER;
+ printf("ERROR: failed to read json, cache not found\n");
+ goto PARSE_OVER;
}
cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks");
@@ -3517,8 +3587,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!blocks) {
g_Dbs.db[i].dbCfg.blocks = -1;
} else {
- printf("ERROR: failed to read json, block not found\n");
- goto PARSE_OVER;
+ printf("ERROR: failed to read json, block not found\n");
+ goto PARSE_OVER;
}
//cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode");
@@ -3535,20 +3605,20 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (minRows && minRows->type == cJSON_Number) {
g_Dbs.db[i].dbCfg.minRows = minRows->valueint;
} else if (!minRows) {
- g_Dbs.db[i].dbCfg.minRows = -1;
+ g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default
} else {
- printf("ERROR: failed to read json, minRows not found\n");
- goto PARSE_OVER;
+ printf("ERROR: failed to read json, minRows not found\n");
+ goto PARSE_OVER;
}
cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows");
if (maxRows && maxRows->type == cJSON_Number) {
g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint;
} else if (!maxRows) {
- g_Dbs.db[i].dbCfg.maxRows = -1;
+ g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default
} else {
- printf("ERROR: failed to read json, maxRows not found\n");
- goto PARSE_OVER;
+ printf("ERROR: failed to read json, maxRows not found\n");
+ goto PARSE_OVER;
}
cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp");
@@ -3557,8 +3627,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!comp) {
g_Dbs.db[i].dbCfg.comp = -1;
} else {
- printf("ERROR: failed to read json, comp not found\n");
- goto PARSE_OVER;
+ printf("ERROR: failed to read json, comp not found\n");
+ goto PARSE_OVER;
}
cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel");
@@ -3567,8 +3637,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!walLevel) {
g_Dbs.db[i].dbCfg.walLevel = -1;
} else {
- printf("ERROR: failed to read json, walLevel not found\n");
- goto PARSE_OVER;
+ printf("ERROR: failed to read json, walLevel not found\n");
+ goto PARSE_OVER;
}
cJSON* cacheLast= cJSON_GetObjectItem(dbinfo, "cachelast");
@@ -3577,8 +3647,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!cacheLast) {
g_Dbs.db[i].dbCfg.cacheLast = -1;
} else {
- printf("ERROR: failed to read json, cacheLast not found\n");
- goto PARSE_OVER;
+ printf("ERROR: failed to read json, cacheLast not found\n");
+ goto PARSE_OVER;
}
cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum");
@@ -3587,8 +3657,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!quorum) {
g_Dbs.db[i].dbCfg.quorum = 1;
} else {
- printf("failed to read json, quorum input mistake");
- goto PARSE_OVER;
+ printf("failed to read json, quorum input mistake");
+ goto PARSE_OVER;
}
cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync");
@@ -3598,7 +3668,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_Dbs.db[i].dbCfg.fsync = -1;
} else {
errorPrint("%s() LN%d, failed to read json, fsync input mistake\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
@@ -3606,15 +3676,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables");
if (!stables || stables->type != cJSON_Array) {
errorPrint("%s() LN%d, failed to read json, super_tables not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
int stbSize = cJSON_GetArraySize(stables);
if (stbSize > MAX_SUPER_TABLE_COUNT) {
errorPrint(
- "%s() LN%d, failed to read json, supertable size overflow, max supertable is %d\n",
- __func__, __LINE__, MAX_SUPER_TABLE_COUNT);
+ "%s() LN%d, failed to read json, supertable size overflow, max supertable is %d\n",
+ __func__, __LINE__, MAX_SUPER_TABLE_COUNT);
goto PARSE_OVER;
}
@@ -3627,7 +3697,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name");
if (!stbName || stbName->type != cJSON_String || stbName->valuestring == NULL) {
errorPrint("%s() LN%d, failed to read json, stb name not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, MAX_TB_NAME_SIZE);
@@ -3641,8 +3711,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); // yes, no, null
if (autoCreateTbl
- && autoCreateTbl->type == cJSON_String
- && autoCreateTbl->valuestring != NULL) {
+ && autoCreateTbl->type == cJSON_String
+ && autoCreateTbl->valuestring != NULL) {
if (0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) {
g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL;
} else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) {
@@ -3657,7 +3727,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
- /*
cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num");
if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;
@@ -3667,17 +3736,16 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
printf("ERROR: failed to read json, batch_create_tbl_num not found\n");
goto PARSE_OVER;
}
- */
cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no
if (childTblExists
- && childTblExists->type == cJSON_String
- && childTblExists->valuestring != NULL) {
+ && childTblExists->type == cJSON_String
+ && childTblExists->valuestring != NULL) {
if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3))
&& (g_Dbs.db[i].drop == false)) {
g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS;
} else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2)
- || (g_Dbs.db[i].drop == true))) {
+ || (g_Dbs.db[i].drop == true))) {
g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
} else {
g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
@@ -3686,36 +3754,36 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
} else {
errorPrint("%s() LN%d, failed to read json, child_table_exists not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
- errorPrint("%s() LN%d, failed to read json, childtable_count not found\n",
- __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblCount = count->valueint;
cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source");
if (dataSource && dataSource->type == cJSON_String
- && dataSource->valuestring != NULL) {
+ && dataSource->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource,
- dataSource->valuestring, MAX_DB_NAME_SIZE);
+ dataSource->valuestring, MAX_DB_NAME_SIZE);
} else if (!dataSource) {
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE);
} else {
errorPrint("%s() LN%d, failed to read json, data_source not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
- cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , restful
+ cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest
if (insertMode && insertMode->type == cJSON_String
- && insertMode->valuestring != NULL) {
+ && insertMode->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].insertMode,
- insertMode->valuestring, MAX_DB_NAME_SIZE);
+ insertMode->valuestring, MAX_DB_NAME_SIZE);
} else if (!insertMode) {
tstrncpy(g_Dbs.db[i].superTbls[j].insertMode, "taosc", MAX_DB_NAME_SIZE);
} else {
@@ -3727,8 +3795,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if ((childTbl_limit) && (g_Dbs.db[i].drop != true)
&& (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
if (childTbl_limit->type != cJSON_Number) {
- printf("ERROR: failed to read json, childtable_limit\n");
- goto PARSE_OVER;
+ printf("ERROR: failed to read json, childtable_limit\n");
+ goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint;
} else {
@@ -3739,8 +3807,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if ((childTbl_offset) && (g_Dbs.db[i].drop != true)
&& (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
if (childTbl_offset->type != cJSON_Number || 0 > childTbl_offset->valueint) {
- printf("ERROR: failed to read json, childtable_offset\n");
- goto PARSE_OVER;
+ printf("ERROR: failed to read json, childtable_offset\n");
+ goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblOffset = childTbl_offset->valueint;
} else {
@@ -3750,10 +3818,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp");
if (ts && ts->type == cJSON_String && ts->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
- ts->valuestring, MAX_DB_NAME_SIZE);
+ ts->valuestring, MAX_DB_NAME_SIZE);
} else if (!ts) {
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
- "now", MAX_DB_NAME_SIZE);
+ "now", MAX_DB_NAME_SIZE);
} else {
printf("ERROR: failed to read json, start_timestamp not found\n");
goto PARSE_OVER;
@@ -3771,9 +3839,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format");
if (sampleFormat && sampleFormat->type
- == cJSON_String && sampleFormat->valuestring != NULL) {
+ == cJSON_String && sampleFormat->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat,
- sampleFormat->valuestring, MAX_DB_NAME_SIZE);
+ sampleFormat->valuestring, MAX_DB_NAME_SIZE);
} else if (!sampleFormat) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE);
} else {
@@ -3785,7 +3853,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (sampleFile && sampleFile->type == cJSON_String
&& sampleFile->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile,
- sampleFile->valuestring, MAX_FILE_NAME_LEN);
+ sampleFile->valuestring, MAX_FILE_NAME_LEN);
} else if (!sampleFile) {
memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN);
} else {
@@ -3796,7 +3864,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file");
if (tagsFile && tagsFile->type == cJSON_String && tagsFile->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].tagsFile,
- tagsFile->valuestring, MAX_FILE_NAME_LEN);
+ tagsFile->valuestring, MAX_FILE_NAME_LEN);
if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) {
g_Dbs.db[i].superTbls[j].tagSource = 0;
} else {
@@ -3823,7 +3891,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len;
} else {
errorPrint("%s() LN%d, failed to read json, maxSqlLen input mistake\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
/*
@@ -3846,13 +3914,18 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
*/
cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
+ if (interlaceRows->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint;
      // rows per table must be less than the insert batch
if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) {
- printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %d > num_of_records_per_req %d\n\n",
- i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR);
- printf(" interlace rows value will be set to num_of_records_per_req %d\n\n",
- g_args.num_of_RPR);
+ printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
+ i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR);
+ printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
+ g_args.num_of_RPR);
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
g_Dbs.db[i].superTbls[j].interlaceRows = g_args.num_of_RPR;
@@ -3861,8 +3934,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
      g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 means interlace mode; the max value must be less than or equal to num_of_records_per_req
} else {
errorPrint(
- "%s() LN%d, failed to read json, interlace rows input mistake\n",
- __func__, __LINE__);
+ "%s() LN%d, failed to read json, interlace rows input mistake\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
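
The guard added above (reject a negative cJSON number before it is stored) recurs for several fields in this parser. A minimal standalone sketch of the pattern, with a hypothetical helper name, assuming the bundled cJSON whose valueint is 64-bit:

    /* Hypothetical helper, not part of this patch: validate a cJSON number
     * before it lands in an unsigned field. */
    #include <stdbool.h>
    #include <stdint.h>
    #include "cJSON.h"

    static bool readNonNegative(cJSON *obj, const char *key,
                                int64_t defaultVal, int64_t *out) {
        cJSON *item = cJSON_GetObjectItem(obj, key);
        if (!item) {                  /* absent: fall back to the default */
            *out = defaultVal;
            return true;
        }
        if (item->type != cJSON_Number || item->valueint < 0)
            return false;             /* wrong type or negative: reject */
        *out = item->valueint;
        return true;
    }
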
@@ -3894,30 +3967,40 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
if (insertRows && insertRows->type == cJSON_Number) {
+ if (insertRows->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
} else if (!insertRows) {
g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
} else {
errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
cJSON* insertInterval = cJSON_GetObjectItem(stbInfo, "insert_interval");
if (insertInterval && insertInterval->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint;
+ if (insertInterval->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
} else if (!insertInterval) {
- verbosePrint("%s() LN%d: stable insert interval be overrided by global %d.\n",
- __func__, __LINE__, g_args.insert_interval);
+ verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRIu64".\n",
+ __func__, __LINE__, g_args.insert_interval);
g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval;
} else {
errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
int retVal = getColumnAndTagTypeFromInsertJsonFile(
- stbInfo, &g_Dbs.db[i].superTbls[j]);
+ stbInfo, &g_Dbs.db[i].superTbls[j]);
if (false == retVal) {
goto PARSE_OVER;
}
@@ -3926,7 +4009,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
ret = true;
- PARSE_OVER:
+PARSE_OVER:
return ret;
}
@@ -3971,7 +4054,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
if (answerPrompt && answerPrompt->type == cJSON_String
- && answerPrompt->valuestring != NULL) {
+ && answerPrompt->valuestring != NULL) {
if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) {
g_args.answer_yes = false;
} else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) {
@@ -3988,12 +4071,17 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times");
if (gQueryTimes && gQueryTimes->type == cJSON_Number) {
+ if (gQueryTimes->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_args.query_times = gQueryTimes->valueint;
} else if (!gQueryTimes) {
g_args.query_times = 1;
} else {
errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
@@ -4015,10 +4103,10 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
- // super_table_query
+ // specified_table_query
cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query");
if (!specifiedQuery) {
- g_queryInfo.specifiedQueryInfo.concurrent = 0;
+ g_queryInfo.specifiedQueryInfo.concurrent = 1;
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (specifiedQuery->type != cJSON_Object) {
printf("ERROR: failed to read json, super_table_query not found\n");
@@ -4032,44 +4120,51 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
}
cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery,
- "query_times");
+ "query_times");
if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
+ if (specifiedQueryTimes->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+
+ }
g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint;
} else if (!specifiedQueryTimes) {
g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times;
} else {
errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent");
if (concurrent && concurrent->type == cJSON_Number) {
- g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint;
- if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
- errorPrint("%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n",
- __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount,
- g_queryInfo.specifiedQueryInfo.concurrent);
+ if (concurrent->valueint <= 0) {
+ errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount,
+ g_queryInfo.specifiedQueryInfo.concurrent);
goto PARSE_OVER;
}
+ g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint;
} else if (!concurrent) {
g_queryInfo.specifiedQueryInfo.concurrent = 1;
}
- cJSON* mode = cJSON_GetObjectItem(specifiedQuery, "mode");
- if (mode && mode->type == cJSON_String
- && mode->valuestring != NULL) {
- if (0 == strcmp("sync", mode->valuestring)) {
- g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE;
- } else if (0 == strcmp("async", mode->valuestring)) {
- g_queryInfo.specifiedQueryInfo.mode = ASYNC_QUERY_MODE;
+ cJSON* specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode");
+ if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String
+ && specifiedAsyncMode->valuestring != NULL) {
+ if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE;
+ } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE;
} else {
- errorPrint("%s() LN%d, failed to read json, query mode input error\n",
- __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, async mode input error\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
} else {
- g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE;
+ g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE;
}
cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval");
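
Reordering the concurrent check ahead of the assignment is not cosmetic: the field is unsigned, so once a negative valueint is stored the old `<= 0` test can never fire. A standalone demo of that failure mode (types chosen to mirror the patch):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        int64_t valueint = -3;                     /* what cJSON parsed */
        uint64_t concurrent = (uint64_t)valueint;  /* old order: store first */
        if (concurrent <= 0)                       /* cannot fire: -3 wrapped */
            puts("rejected");
        else
            printf("accepted as %" PRIu64 "\n", concurrent);
        return 0;
    }

The insert_interval hunk above still assigns before validating, but its goto on a negative value means the stored value is never used.
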
@@ -4097,8 +4192,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress");
if (keepProgress
- && keepProgress->type == cJSON_String
- && keepProgress->valuestring != NULL) {
+ && keepProgress->type == cJSON_String
+ && keepProgress->valuestring != NULL) {
if (0 == strcmp("yes", keepProgress->valuestring)) {
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1;
} else if (0 == strcmp("no", keepProgress->valuestring)) {
@@ -4117,13 +4212,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (superSqls->type != cJSON_Array) {
errorPrint("%s() LN%d, failed to read json, super sqls not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
} else {
int superSqlSize = cJSON_GetArraySize(superSqls);
if (superSqlSize > MAX_QUERY_SQL_COUNT) {
errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
- __func__, __LINE__, MAX_QUERY_SQL_COUNT);
+ __func__, __LINE__, MAX_QUERY_SQL_COUNT);
goto PARSE_OVER;
}
@@ -4152,10 +4247,10 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
}
}
- // sub_table_query
+ // super_table_query
cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query");
if (!superQuery) {
- g_queryInfo.superQueryInfo.threadCnt = 0;
+ g_queryInfo.superQueryInfo.threadCnt = 1;
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (superQuery->type != cJSON_Object) {
printf("ERROR: failed to read json, sub_table_query not found\n");
@@ -4171,17 +4266,28 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times");
if (superQueryTimes && superQueryTimes->type == cJSON_Number) {
+ if (superQueryTimes->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint;
} else if (!superQueryTimes) {
g_queryInfo.superQueryInfo.queryTimes = g_args.query_times;
} else {
errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
cJSON* threads = cJSON_GetObjectItem(superQuery, "threads");
if (threads && threads->type == cJSON_Number) {
+ if (threads->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, threads input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+
+ }
g_queryInfo.superQueryInfo.threadCnt = threads->valueint;
} else if (!threads) {
g_queryInfo.superQueryInfo.threadCnt = 1;
@@ -4198,33 +4304,38 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (stblname && stblname->type == cJSON_String
&& stblname->valuestring != NULL) {
tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring,
- MAX_TB_NAME_SIZE);
+ MAX_TB_NAME_SIZE);
} else {
errorPrint("%s() LN%d, failed to read json, super table name input error\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
- cJSON* submode = cJSON_GetObjectItem(superQuery, "mode");
- if (submode && submode->type == cJSON_String
- && submode->valuestring != NULL) {
- if (0 == strcmp("sync", submode->valuestring)) {
- g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE;
- } else if (0 == strcmp("async", submode->valuestring)) {
- g_queryInfo.superQueryInfo.mode = ASYNC_QUERY_MODE;
+ cJSON* superAsyncMode = cJSON_GetObjectItem(superQuery, "mode");
+ if (superAsyncMode && superAsyncMode->type == cJSON_String
+ && superAsyncMode->valuestring != NULL) {
+ if (0 == strcmp("sync", superAsyncMode->valuestring)) {
+ g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE;
+ } else if (0 == strcmp("async", superAsyncMode->valuestring)) {
+ g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE;
} else {
- errorPrint("%s() LN%d, failed to read json, query mode input error\n",
- __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, async mode input error\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
} else {
- g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE;
+ g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE;
}
- cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval");
- if (subinterval && subinterval->type == cJSON_Number) {
- g_queryInfo.superQueryInfo.subscribeInterval = subinterval->valueint;
- } else if (!subinterval) {
+ cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval");
+ if (superInterval && superInterval->type == cJSON_Number) {
+ if (superInterval->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, interval input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+ g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint;
+ } else if (!superInterval) {
//printf("failed to read json, subscribe interval no found\n");
//goto PARSE_OVER;
g_queryInfo.superQueryInfo.subscribeInterval = 10000;
@@ -4247,8 +4358,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* subkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress");
if (subkeepProgress &&
- subkeepProgress->type == cJSON_String
- && subkeepProgress->valuestring != NULL) {
+ subkeepProgress->type == cJSON_String
+ && subkeepProgress->valuestring != NULL) {
if (0 == strcmp("yes", subkeepProgress->valuestring)) {
g_queryInfo.superQueryInfo.subscribeKeepProgress = 1;
} else if (0 == strcmp("no", subkeepProgress->valuestring)) {
@@ -4267,13 +4378,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (subsqls->type != cJSON_Array) {
errorPrint("%s() LN%d: failed to read json, super sqls not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
} else {
int superSqlSize = cJSON_GetArraySize(subsqls);
if (superSqlSize > MAX_QUERY_SQL_COUNT) {
errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
- __func__, __LINE__, MAX_QUERY_SQL_COUNT);
+ __func__, __LINE__, MAX_QUERY_SQL_COUNT);
goto PARSE_OVER;
}
@@ -4286,22 +4397,22 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (!sqlStr || sqlStr->type != cJSON_String
|| sqlStr->valuestring == NULL) {
errorPrint("%s() LN%d, failed to read json, sql not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
- MAX_QUERY_SQL_LENGTH);
+ MAX_QUERY_SQL_LENGTH);
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (result != NULL && result->type == cJSON_String
&& result->valuestring != NULL){
tstrncpy(g_queryInfo.superQueryInfo.result[j],
- result->valuestring, MAX_FILE_NAME_LEN);
+ result->valuestring, MAX_FILE_NAME_LEN);
} else if (NULL == result) {
memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
} else {
errorPrint("%s() LN%d, failed to read json, sub query result file not found\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
}
@@ -4310,12 +4421,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
ret = true;
- PARSE_OVER:
+PARSE_OVER:
return ret;
}
static bool getInfoFromJsonFile(char* file) {
- debugPrint("%s %d %s\n", __func__, __LINE__, file);
+ debugPrint("%s %d %s\n", __func__, __LINE__, file);
FILE *fp = fopen(file, "r");
if (!fp) {
@@ -4363,15 +4474,15 @@ static bool getInfoFromJsonFile(char* file) {
if (INSERT_TEST == g_args.test_mode) {
ret = getMetaFromInsertJsonFile(root);
} else if ((QUERY_TEST == g_args.test_mode)
- || (SUBSCRIBE_TEST == g_args.test_mode)) {
+ || (SUBSCRIBE_TEST == g_args.test_mode)) {
ret = getMetaFromQueryJsonFile(root);
} else {
errorPrint("%s() LN%d, input json file type error! please input correct file type: insert or query or subscribe\n",
- __func__, __LINE__);
+ __func__, __LINE__);
goto PARSE_OVER;
}
- PARSE_OVER:
+PARSE_OVER:
free(content);
cJSON_Delete(root);
fclose(fp);
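
For reference, the surrounding function follows the usual read-whole-file-then-parse shape. A self-contained sketch of that shape (hypothetical parseJsonFile, minimal error handling):

    #include <stdio.h>
    #include <stdlib.h>
    #include "cJSON.h"

    static cJSON *parseJsonFile(const char *path) {
        FILE *fp = fopen(path, "r");
        if (!fp) return NULL;
        fseek(fp, 0, SEEK_END);
        long len = ftell(fp);
        rewind(fp);
        if (len < 0) { fclose(fp); return NULL; }
        char *content = calloc(1, (size_t)len + 1);
        cJSON *root = NULL;
        if (content && fread(content, 1, (size_t)len, fp) == (size_t)len)
            root = cJSON_Parse(content);   /* NULL on malformed JSON */
        free(content);
        fclose(fp);
        return root;                       /* caller frees with cJSON_Delete() */
    }
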
@@ -4412,8 +4523,9 @@ static void postFreeResource() {
}
}
-static int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp,
- SSuperTable* superTblInfo, int* sampleUsePos) {
+static int getRowDataFromSample(
+ char* dataBuf, int64_t maxLen, int64_t timestamp,
+ SSuperTable* superTblInfo, int64_t* sampleUsePos) {
if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) {
/* int ret = readSampleFromCsvFileToMem(superTblInfo);
if (0 != ret) {
@@ -4428,9 +4540,9 @@ static int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp,
int dataLen = 0;
dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen,
- "(%" PRId64 ", ", timestamp);
+ "(%" PRId64 ", ", timestamp);
dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen,
- "%s", superTblInfo->sampleDataBuf + superTblInfo->lenOfOneRow * (*sampleUsePos));
+ "%s", superTblInfo->sampleDataBuf + superTblInfo->lenOfOneRow * (*sampleUsePos));
dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")");
(*sampleUsePos)++;
@@ -4438,19 +4550,19 @@ static int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp,
return dataLen;
}
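
The dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ...) idiom above appends at the current offset with the remaining budget. One caveat worth remembering: snprintf returns the length it would have written, so the running total must stay below maxLen or the size argument wraps. A tiny demo of the idiom in the safe case:

    #include <stdio.h>

    int main(void) {
        char buf[64];
        int len = 0;
        len += snprintf(buf + len, sizeof(buf) - len, "(%lld, ", 1500000000000LL);
        len += snprintf(buf + len, sizeof(buf) - len, "%s", "1,2.5,'abc'");
        len += snprintf(buf + len, sizeof(buf) - len, ")");
        printf("%s\n", buf);   /* (1500000000000, 1,2.5,'abc') */
        return 0;
    }
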
-static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo) {
- int dataLen = 0;
+static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo) {
+ int64_t dataLen = 0;
char *pstr = recBuf;
- int maxLen = MAX_DATA_SIZE;
+ int64_t maxLen = MAX_DATA_SIZE;
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp);
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ",", timestamp);
for (int i = 0; i < stbInfo->columnCount; i++) {
- if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6))
- || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) {
+ if ((0 == strncasecmp(stbInfo->columns[i].dataType, "BINARY", strlen("BINARY")))
+ || (0 == strncasecmp(stbInfo->columns[i].dataType, "NCHAR", strlen("NCHAR")))) {
if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
errorPrint( "binary or nchar length overflow, max size:%u\n",
- (uint32_t)TSDB_MAX_BINARY_LEN);
+ (uint32_t)TSDB_MAX_BINARY_LEN);
return -1;
}
@@ -4460,47 +4572,47 @@ static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo
return -1;
}
rand_string(buf, stbInfo->columns[i].dataLen);
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\', ", buf);
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf);
tmfree(buf);
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "int", 3)) {
+ "INT", 3)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d, ", rand_int());
+ "%d,", rand_int());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "bigint", 6)) {
+ "BIGINT", 6)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%"PRId64", ", rand_bigint());
+ "%"PRId64",", rand_bigint());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "float", 5)) {
+ "FLOAT", 5)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%f, ", rand_float());
+ "%f,", rand_float());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "double", 6)) {
+ "DOUBLE", 6)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%f, ", rand_double());
+ "%f,", rand_double());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "smallint", 8)) {
+ "SMALLINT", 8)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d, ", rand_smallint());
+ "%d,", rand_smallint());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "tinyint", strlen("tinyint"))) {
+ "TINYINT", strlen("TINYINT"))) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d, ", rand_tinyint());
+ "%d,", rand_tinyint());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "bool", strlen("bool"))) {
+ "BOOL", strlen("BOOL"))) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d, ", rand_bool());
+ "%d,", rand_bool());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "timestamp", strlen("timestamp"))) {
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%"PRId64", ", rand_bigint());
+ "%"PRId64",", rand_bigint());
} else {
errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType);
return -1;
}
}
- dataLen -= 2;
+ dataLen -= 1;
dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")");
verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf);
@@ -4508,8 +4620,8 @@ static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo
return strlen(recBuf);
}
-static int32_t generateData(char *recBuf, char **data_type,
- int num_of_cols, int64_t timestamp, int lenOfBinary) {
+static int64_t generateData(char *recBuf, char **data_type,
+ int num_of_cols, int64_t timestamp, int lenOfBinary) {
memset(recBuf, 0, MAX_DATA_SIZE);
char *pstr = recBuf;
pstr += sprintf(pstr, "(%" PRId64, timestamp);
@@ -4527,31 +4639,31 @@ static int32_t generateData(char *recBuf, char **data_type,
}
for (int i = 0; i < c; i++) {
- if (strcasecmp(data_type[i % c], "tinyint") == 0) {
- pstr += sprintf(pstr, ", %d", rand_tinyint() );
- } else if (strcasecmp(data_type[i % c], "smallint") == 0) {
- pstr += sprintf(pstr, ", %d", rand_smallint());
- } else if (strcasecmp(data_type[i % c], "int") == 0) {
- pstr += sprintf(pstr, ", %d", rand_int());
- } else if (strcasecmp(data_type[i % c], "bigint") == 0) {
- pstr += sprintf(pstr, ", %" PRId64, rand_bigint());
- } else if (strcasecmp(data_type[i % c], "float") == 0) {
- pstr += sprintf(pstr, ", %10.4f", rand_float());
- } else if (strcasecmp(data_type[i % c], "double") == 0) {
+ if (strcasecmp(data_type[i % c], "TINYINT") == 0) {
+ pstr += sprintf(pstr, ",%d", rand_tinyint() );
+ } else if (strcasecmp(data_type[i % c], "SMALLINT") == 0) {
+ pstr += sprintf(pstr, ",%d", rand_smallint());
+ } else if (strcasecmp(data_type[i % c], "INT") == 0) {
+ pstr += sprintf(pstr, ",%d", rand_int());
+ } else if (strcasecmp(data_type[i % c], "BIGINT") == 0) {
+ pstr += sprintf(pstr, ",%" PRId64, rand_bigint());
+ } else if (strcasecmp(data_type[i % c], "FLOAT") == 0) {
+ pstr += sprintf(pstr, ",%10.4f", rand_float());
+ } else if (strcasecmp(data_type[i % c], "DOUBLE") == 0) {
double t = rand_double();
- pstr += sprintf(pstr, ", %20.8f", t);
- } else if (strcasecmp(data_type[i % c], "bool") == 0) {
+ pstr += sprintf(pstr, ",%20.8f", t);
+ } else if (strcasecmp(data_type[i % c], "BOOL") == 0) {
bool b = taosRandom() & 1;
- pstr += sprintf(pstr, ", %s", b ? "true" : "false");
- } else if (strcasecmp(data_type[i % c], "binary") == 0) {
+ pstr += sprintf(pstr, ",%s", b ? "true" : "false");
+ } else if (strcasecmp(data_type[i % c], "BINARY") == 0) {
char *s = malloc(lenOfBinary);
rand_string(s, lenOfBinary);
- pstr += sprintf(pstr, ", \"%s\"", s);
+ pstr += sprintf(pstr, ",\"%s\"", s);
free(s);
- } else if (strcasecmp(data_type[i % c], "nchar") == 0) {
+ } else if (strcasecmp(data_type[i % c], "NCHAR") == 0) {
char *s = malloc(lenOfBinary);
rand_string(s, lenOfBinary);
- pstr += sprintf(pstr, ", \"%s\"", s);
+ pstr += sprintf(pstr, ",\"%s\"", s);
free(s);
}
@@ -4572,47 +4684,52 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) {
char* sampleDataBuf = NULL;
sampleDataBuf = calloc(
- superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1);
+ superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1);
if (sampleDataBuf == NULL) {
- errorPrint("%s() LN%d, Failed to calloc %d Bytes, reason:%s\n",
- __func__, __LINE__,
- superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE,
- strerror(errno));
- return -1;
+ errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
+ __func__, __LINE__,
+ superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE,
+ strerror(errno));
+ return -1;
}
superTblInfo->sampleDataBuf = sampleDataBuf;
int ret = readSampleFromCsvFileToMem(superTblInfo);
if (0 != ret) {
- errorPrint("%s() LN%d, read sample from csv file failed.\n",
- __func__, __LINE__);
- tmfree(sampleDataBuf);
- superTblInfo->sampleDataBuf = NULL;
- return -1;
+ errorPrint("%s() LN%d, read sample from csv file failed.\n",
+ __func__, __LINE__);
+ tmfree(sampleDataBuf);
+ superTblInfo->sampleDataBuf = NULL;
+ return -1;
}
return 0;
}
-static int execInsert(threadInfo *pThreadInfo, char *buffer, int k)
+static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, uint64_t k)
{
int affectedRows;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
- __func__, __LINE__, buffer);
+ __func__, __LINE__, buffer);
if (superTblInfo) {
if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) {
affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false);
- } else {
- if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) {
+ } else if (0 == strncasecmp(superTblInfo->insertMode, "rest", strlen("rest"))) {
+ if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port,
+ buffer, NULL /* not set result file */)) {
affectedRows = -1;
printf("========restful return fail, threadID[%d]\n",
- pThreadInfo->threadID);
+ pThreadInfo->threadID);
} else {
affectedRows = k;
}
+ } else {
+ errorPrint("%s() LN%d: unknown insert mode: %s\n",
+ __func__, __LINE__, superTblInfo->insertMode);
+ affectedRows = 0;
}
} else {
affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false);
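
The rewritten execInsert replaces the old implicit rule (anything that is not taosc goes to REST) with an explicit three-way dispatch that reports unknown modes. A sketch of the classification, with hypothetical names:

    #include <string.h>
    #include <strings.h>   /* strncasecmp */

    typedef enum { MODE_TAOSC, MODE_REST, MODE_UNKNOWN } InsertMode;

    static InsertMode classifyInsertMode(const char *s) {
        if (0 == strncasecmp(s, "taosc", strlen("taosc"))) return MODE_TAOSC;
        if (0 == strncasecmp(s, "rest",  strlen("rest")))  return MODE_REST;
        return MODE_UNKNOWN;   /* now reported instead of treated as REST */
    }
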
@@ -4621,83 +4738,82 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k)
return affectedRows;
}
-static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq)
+static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq)
{
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
if (superTblInfo) {
- if ((superTblInfo->childTblOffset >= 0)
- && (superTblInfo->childTblLimit > 0)) {
- snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
- superTblInfo->childTblName +
- (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN);
+ if (superTblInfo->childTblLimit > 0) {
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
+ superTblInfo->childTblName +
+ (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN);
} else {
- verbosePrint("[%d] %s() LN%d: from=%d count=%d seq=%d\n",
- pThreadInfo->threadID, __func__, __LINE__,
- pThreadInfo->start_table_from,
- pThreadInfo->ntables, tableSeq);
- snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
- superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN);
+ verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRIu64" seq=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ pThreadInfo->start_table_from,
+ pThreadInfo->ntables, tableSeq);
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
+ superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN);
}
} else {
- snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%d",
- g_args.tb_prefix, tableSeq);
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"",
+ g_args.tb_prefix, tableSeq);
}
}
-static int generateDataTail(
- SSuperTable* superTblInfo,
- int batch, char* buffer, int remainderBufLen, int64_t insertRows,
- int64_t startFrom, uint64_t startTime, int *pSamplePos, int *dataLen) {
- int len = 0;
- int ncols_per_record = 1; // count first col ts
+static int64_t generateDataTail(
+ SSuperTable* superTblInfo,
+ uint64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows,
+ int64_t startFrom, int64_t startTime, int64_t *pSamplePos, int64_t *dataLen) {
+ uint64_t len = 0;
+ uint32_t ncols_per_record = 1; // count first col ts
char *pstr = buffer;
if (superTblInfo == NULL) {
- int datatypeSeq = 0;
+ uint32_t datatypeSeq = 0;
while(g_args.datatype[datatypeSeq]) {
- datatypeSeq ++;
- ncols_per_record ++;
+ datatypeSeq ++;
+ ncols_per_record ++;
}
}
- verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch);
+ verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch);
- int k = 0;
+ uint64_t k = 0;
for (k = 0; k < batch;) {
char data[MAX_DATA_SIZE];
memset(data, 0, MAX_DATA_SIZE);
- int retLen = 0;
+ int64_t retLen = 0;
if (superTblInfo) {
if (0 == strncasecmp(superTblInfo->dataSource,
- "sample", strlen("sample"))) {
- retLen = getRowDataFromSample(
- data,
- remainderBufLen,
- startTime + superTblInfo->timeStampStep * k,
- superTblInfo,
- pSamplePos);
+ "sample", strlen("sample"))) {
+ retLen = getRowDataFromSample(
+ data,
+ remainderBufLen,
+ startTime + superTblInfo->timeStampStep * k,
+ superTblInfo,
+ pSamplePos);
} else if (0 == strncasecmp(superTblInfo->dataSource,
- "rand", strlen("rand"))) {
+ "rand", strlen("rand"))) {
- int randTail = superTblInfo->timeStampStep * k;
+ int64_t randTail = superTblInfo->timeStampStep * k;
if (superTblInfo->disorderRatio > 0) {
int rand_num = taosRandom() % 100;
if(rand_num < superTblInfo->disorderRatio) {
randTail = (randTail + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1);
- debugPrint("rand data generated, back %d\n", randTail);
+ debugPrint("rand data generated, back %"PRId64"\n", randTail);
}
}
- uint64_t d = startTime
- + randTail;
+ int64_t d = startTime
+ + randTail;
retLen = generateRowData(
- data,
- d,
- superTblInfo);
+ data,
+ d,
+ superTblInfo);
}
if (retLen > remainderBufLen) {
@@ -4712,22 +4828,23 @@ static int generateDataTail(
char **data_type = g_args.datatype;
int lenOfBinary = g_args.len_of_binary;
- int rand_num = taosRandom() % 100;
- int randTail;
+ int64_t randTail = DEFAULT_TIMESTAMP_STEP * k;
- if ((g_args.disorderRatio != 0)
- && (rand_num < g_args.disorderRatio)) {
- randTail = (DEFAULT_TIMESTAMP_STEP * k
- + (taosRandom() % g_args.disorderRange + 1)) * (-1);
- debugPrint("rand data generated, back %d\n", randTail);
+ if (g_args.disorderRatio != 0) {
+ int rand_num = taosRandom() % 100;
+ if (rand_num < g_args.disorderRatio) {
+ randTail = (randTail + (taosRandom() % g_args.disorderRange + 1)) * (-1);
+
+ debugPrint("rand data generated, back %"PRId64"\n", randTail);
+ }
} else {
randTail = DEFAULT_TIMESTAMP_STEP * k;
}
retLen = generateData(data, data_type,
- ncols_per_record,
- startTime + randTail,
- lenOfBinary);
+ ncols_per_record,
+ startTime + randTail,
+ lenOfBinary);
if (len > remainderBufLen)
break;
@@ -4738,8 +4855,8 @@ static int generateDataTail(
remainderBufLen -= retLen;
}
- verbosePrint("%s() LN%d len=%d k=%d \nbuffer=%s\n",
- __func__, __LINE__, len, k, buffer);
+ verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n",
+ __func__, __LINE__, len, k, buffer);
startFrom ++;
@@ -4753,8 +4870,8 @@ static int generateDataTail(
}
static int generateSQLHead(char *tableName, int32_t tableSeq,
- threadInfo* pThreadInfo, SSuperTable* superTblInfo,
- char *buffer, int remainderBufLen)
+ threadInfo* pThreadInfo, SSuperTable* superTblInfo,
+ char *buffer, int remainderBufLen)
{
int len;
@@ -4765,50 +4882,50 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
char* tagsValBuf = NULL;
if (0 == superTblInfo->tagSource) {
- tagsValBuf = generateTagVaulesForStb(superTblInfo, tableSeq);
+ tagsValBuf = generateTagVaulesForStb(superTblInfo, tableSeq);
} else {
- tagsValBuf = getTagValueFromTagSample(
- superTblInfo,
- tableSeq % superTblInfo->tagSampleCount);
+ tagsValBuf = getTagValueFromTagSample(
+ superTblInfo,
+ tableSeq % superTblInfo->tagSampleCount);
}
if (NULL == tagsValBuf) {
errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
- __func__, __LINE__);
+ __func__, __LINE__);
return -1;
}
len = snprintf(
headBuf,
- HEAD_BUFF_LEN,
- "%s.%s using %s.%s tags %s values",
- pThreadInfo->db_name,
- tableName,
- pThreadInfo->db_name,
- superTblInfo->sTblName,
- tagsValBuf);
+ HEAD_BUFF_LEN,
+ "%s.%s using %s.%s tags %s values",
+ pThreadInfo->db_name,
+ tableName,
+ pThreadInfo->db_name,
+ superTblInfo->sTblName,
+ tagsValBuf);
tmfree(tagsValBuf);
} else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) {
len = snprintf(
headBuf,
- HEAD_BUFF_LEN,
- "%s.%s values",
- pThreadInfo->db_name,
- tableName);
+ HEAD_BUFF_LEN,
+ "%s.%s values",
+ pThreadInfo->db_name,
+ tableName);
} else {
len = snprintf(
headBuf,
- HEAD_BUFF_LEN,
- "%s.%s values",
- pThreadInfo->db_name,
- tableName);
+ HEAD_BUFF_LEN,
+ "%s.%s values",
+ pThreadInfo->db_name,
+ tableName);
}
} else {
- len = snprintf(
- headBuf,
- HEAD_BUFF_LEN,
- "%s.%s values",
- pThreadInfo->db_name,
- tableName);
+ len = snprintf(
+ headBuf,
+ HEAD_BUFF_LEN,
+ "%s.%s values",
+ pThreadInfo->db_name,
+ tableName);
}
if (len > remainderBufLen)
@@ -4819,55 +4936,57 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
return len;
}
-static int generateInterlaceDataBuffer(
- char *tableName, int batchPerTbl, int i, int batchPerTblTimes,
- int32_t tableSeq,
- threadInfo *pThreadInfo, char *buffer,
- int64_t insertRows,
- int64_t startTime,
- int *pRemainderBufLen)
+static int64_t generateInterlaceDataBuffer(
+ char *tableName, uint64_t batchPerTbl, uint64_t i, uint64_t batchPerTblTimes,
+ uint64_t tableSeq,
+ threadInfo *pThreadInfo, char *buffer,
+ uint64_t insertRows,
+ int64_t startTime,
+ uint64_t *pRemainderBufLen)
{
assert(buffer);
char *pstr = buffer;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo,
- superTblInfo, pstr, *pRemainderBufLen);
+ superTblInfo, pstr, *pRemainderBufLen);
if (headLen <= 0) {
return 0;
}
// generate data buffer
- verbosePrint("[%d] %s() LN%d i=%d buffer:\n%s\n",
- pThreadInfo->threadID, __func__, __LINE__, i, buffer);
+ verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n",
+ pThreadInfo->threadID, __func__, __LINE__, i, buffer);
pstr += headLen;
*pRemainderBufLen -= headLen;
- int dataLen = 0;
+ int64_t dataLen = 0;
- verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n",
- pThreadInfo->threadID, __func__, __LINE__,
- i, batchPerTblTimes, batchPerTbl);
+ verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%"PRIu64" batchPerTbl = %"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ i, batchPerTblTimes, batchPerTbl);
if (superTblInfo) {
if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
startTime = taosGetTimestamp(pThreadInfo->time_precision);
}
} else {
- startTime = 1500000000000;
+ startTime = 1500000000000;
}
- int k = generateDataTail(
- superTblInfo,
- batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
- startTime,
- &(pThreadInfo->samplePos), &dataLen);
+ int64_t k = generateDataTail(
+ superTblInfo,
+ batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
+ startTime,
+ &(pThreadInfo->samplePos), &dataLen);
if (k == batchPerTbl) {
pstr += dataLen;
*pRemainderBufLen -= dataLen;
} else {
+ debugPrint("%s() LN%d, generated data tail: %"PRIu64", not equal batch per table: %"PRIu64"\n",
+ __func__, __LINE__, k, batchPerTbl);
pstr -= headLen;
pstr[0] = '\0';
k = 0;
@@ -4876,13 +4995,13 @@ static int generateInterlaceDataBuffer(
return k;
}
-static int generateProgressiveDataBuffer(
- char *tableName,
- int32_t tableSeq,
- threadInfo *pThreadInfo, char *buffer,
- int64_t insertRows,
- int64_t startFrom, int64_t startTime, int *pSamplePos,
- int *pRemainderBufLen)
+static int64_t generateProgressiveDataBuffer(
+ char *tableName,
+ int64_t tableSeq,
+ threadInfo *pThreadInfo, char *buffer,
+ int64_t insertRows,
+ int64_t startFrom, int64_t startTime, int64_t *pSamplePos,
+ int64_t *pRemainderBufLen)
{
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
@@ -4891,20 +5010,20 @@ static int generateProgressiveDataBuffer(
if (superTblInfo == NULL) {
int datatypeSeq = 0;
while(g_args.datatype[datatypeSeq]) {
- datatypeSeq ++;
- ncols_per_record ++;
+ datatypeSeq ++;
+ ncols_per_record ++;
}
}
assert(buffer != NULL);
char *pstr = buffer;
- int k = 0;
+ int64_t k = 0;
memset(buffer, 0, *pRemainderBufLen);
- int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo,
- buffer, *pRemainderBufLen);
+ int64_t headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo,
+ buffer, *pRemainderBufLen);
if (headLen <= 0) {
return 0;
@@ -4912,23 +5031,46 @@ static int generateProgressiveDataBuffer(
pstr += headLen;
*pRemainderBufLen -= headLen;
- int dataLen;
+ int64_t dataLen;
k = generateDataTail(superTblInfo,
- g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom,
- startTime,
- pSamplePos, &dataLen);
+ g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom,
+ startTime,
+ pSamplePos, &dataLen);
return k;
}
+static void printStatPerThread(threadInfo *pThreadInfo)
+{
+ fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows,
+ (double)(pThreadInfo->totalAffectedRows / (pThreadInfo->totalDelay/1000.0)));
+}
+
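One caveat with the new helper: the records/second figure divides by totalDelay, which is 0 when nothing was inserted, yielding inf. A defensive variant (an assumption, not what the patch does) would guard the zero case:

    #include <stdint.h>

    /* Hypothetical guard, not in the patch. */
    static double recordsPerSecond(uint64_t affectedRows, uint64_t totalDelayMs) {
        return (totalDelayMs > 0) ? affectedRows / (totalDelayMs / 1000.0) : 0.0;
    }
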
static void* syncWriteInterlace(threadInfo *pThreadInfo) {
debugPrint("[%d] %s() LN%d: ### interlace write\n",
- pThreadInfo->threadID, __func__, __LINE__);
+ pThreadInfo->threadID, __func__, __LINE__);
+
+ uint64_t insertRows;
+ uint64_t interlaceRows;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
- int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows;
+ if (superTblInfo) {
+ insertRows = superTblInfo->insertRows;
+
+ if ((superTblInfo->interlaceRows == 0)
+ && (g_args.interlace_rows > 0)) {
+ interlaceRows = g_args.interlace_rows;
+ } else {
+ interlaceRows = superTblInfo->interlaceRows;
+ }
+ } else {
+ insertRows = g_args.num_of_DPT;
+ interlaceRows = g_args.interlace_rows;
+ }
if (interlaceRows > insertRows)
interlaceRows = insertRows;
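
The selection logic above lets a per-stable interlace_rows of 0 fall back to the global setting, and the chosen value is then clamped to insertRows. A compact sketch of the rule (hypothetical helper):

    #include <stdint.h>

    static uint64_t resolveInterlaceRows(uint64_t stableRows, uint64_t globalRows,
                                         uint64_t insertRows) {
        uint64_t rows = (stableRows == 0 && globalRows > 0) ? globalRows
                                                            : stableRows;
        return (rows > insertRows) ? insertRows : rows;   /* clamp to total */
    }
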
@@ -4947,11 +5089,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
  // TODO: prompt tbl count multiple interlace rows and batch
//
- int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+ uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
char* buffer = calloc(maxSqlLen, 1);
if (NULL == buffer) {
- errorPrint( "%s() LN%d, Failed to alloc %d Bytes, reason:%s\n",
- __func__, __LINE__, maxSqlLen, strerror(errno));
+ errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
+ __func__, __LINE__, maxSqlLen, strerror(errno));
return NULL;
}
@@ -4960,30 +5102,30 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->totalInsertRows = 0;
pThreadInfo->totalAffectedRows = 0;
- int nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
+ int64_t nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
- int insert_interval =
+ uint64_t insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
uint64_t st = 0;
- uint64_t et = 0xffffffff;
+ uint64_t et = UINT64_MAX;
- int64_t lastPrintTime = taosGetTimestampMs();
- int64_t startTs = taosGetTimestampMs();
- int64_t endTs;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
- int tableSeq = pThreadInfo->start_table_from;
+ uint64_t tableSeq = pThreadInfo->start_table_from;
- debugPrint("[%d] %s() LN%d: start_table_from=%d ntables=%d insertRows=%"PRId64"\n",
- pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from,
- pThreadInfo->ntables, insertRows);
+ debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRIu64" insertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from,
+ pThreadInfo->ntables, insertRows);
int64_t startTime = pThreadInfo->start_time;
assert(pThreadInfo->ntables > 0);
- int batchPerTbl = interlaceRows;
+ uint64_t batchPerTbl = interlaceRows;
+ uint64_t batchPerTblTimes;
- int batchPerTblTimes;
if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
batchPerTblTimes =
g_args.num_of_RPR / interlaceRows;
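
The arithmetic above sizes one request: each visited table receives batchPerTbl (= interlaceRows) rows, and a request holds at most num_of_RPR rows, so a single round can serve num_of_RPR / interlaceRows tables. As a sketch (hypothetical helper):

    #include <stdint.h>

    static uint64_t tablesPerRequest(uint64_t interlaceRows, uint64_t ntables,
                                     uint64_t numOfRPR) {
        if (interlaceRows > 0 && ntables > 1)
            return numOfRPR / interlaceRows;   /* tables served per round */
        return 1;
    }
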
@@ -4991,21 +5133,21 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
batchPerTblTimes = 1;
}
- int generatedRecPerTbl = 0;
+ uint64_t generatedRecPerTbl = 0;
bool flagSleep = true;
- int sleepTimeTotal = 0;
+ uint64_t sleepTimeTotal = 0;
char *strInsertInto = "insert into ";
int nInsertBufLen = strlen(strInsertInto);
while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
if ((flagSleep) && (insert_interval)) {
- st = taosGetTimestampMs();
- flagSleep = false;
+ st = taosGetTimestampMs();
+ flagSleep = false;
}
// generate data
memset(buffer, 0, maxSqlLen);
- int remainderBufLen = maxSqlLen;
+ uint64_t remainderBufLen = maxSqlLen;
char *pstr = buffer;
@@ -5013,30 +5155,32 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pstr += len;
remainderBufLen -= len;
- int recOfBatch = 0;
+ uint64_t recOfBatch = 0;
- for (int i = 0; i < batchPerTblTimes; i ++) {
+ for (uint64_t i = 0; i < batchPerTblTimes; i ++) {
getTableName(tableName, pThreadInfo, tableSeq);
if (0 == strlen(tableName)) {
errorPrint("[%d] %s() LN%d, getTableName return null\n",
- pThreadInfo->threadID, __func__, __LINE__);
+ pThreadInfo->threadID, __func__, __LINE__);
free(buffer);
return NULL;
}
- int oldRemainderLen = remainderBufLen;
- int generated = generateInterlaceDataBuffer(
- tableName, batchPerTbl, i, batchPerTblTimes,
- tableSeq,
- pThreadInfo, pstr,
- insertRows,
- startTime,
- &remainderBufLen);
+ uint64_t oldRemainderLen = remainderBufLen;
+ int64_t generated = generateInterlaceDataBuffer(
+ tableName, batchPerTbl, i, batchPerTblTimes,
+ tableSeq,
+ pThreadInfo, pstr,
+ insertRows,
+ startTime,
+ &remainderBufLen);
+ debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
if (generated < 0) {
- debugPrint("[%d] %s() LN%d, generated data is %d\n",
- pThreadInfo->threadID, __func__, __LINE__, generated);
- goto free_and_statistics_interlace;
+ errorPrint("[%d] %s() LN%d, generated records is %"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ goto free_of_interlace;
} else if (generated == 0) {
break;
}
@@ -5046,77 +5190,78 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pstr += (oldRemainderLen - remainderBufLen);
// startTime += batchPerTbl * superTblInfo->timeStampStep;
pThreadInfo->totalInsertRows += batchPerTbl;
- verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n",
- pThreadInfo->threadID, __func__, __LINE__,
- batchPerTbl, recOfBatch);
+ verbosePrint("[%d] %s() LN%d batchPerTbl=%"PRId64" recOfBatch=%"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl, recOfBatch);
if (insertMode == INTERLACE_INSERT_MODE) {
- if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) {
- // turn to first table
- tableSeq = pThreadInfo->start_table_from;
- generatedRecPerTbl += batchPerTbl;
+ if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) {
+ // turn to first table
+ tableSeq = pThreadInfo->start_table_from;
+ generatedRecPerTbl += batchPerTbl;
- startTime = pThreadInfo->start_time
- + generatedRecPerTbl * nTimeStampStep;
+ startTime = pThreadInfo->start_time
+ + generatedRecPerTbl * nTimeStampStep;
- flagSleep = true;
- if (generatedRecPerTbl >= insertRows)
- break;
+ flagSleep = true;
+ if (generatedRecPerTbl >= insertRows)
+ break;
- if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR)
- break;
- }
+ int remainRows = insertRows - generatedRecPerTbl;
+ if ((remainRows > 0) && (batchPerTbl > remainRows))
+ batchPerTbl = remainRows;
+
+ if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR)
+ break;
+ }
}
- int remainRows = insertRows - generatedRecPerTbl;
- if ((remainRows > 0) && (batchPerTbl > remainRows))
- batchPerTbl = remainRows;
-
- verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%d insertRows=%"PRId64"\n",
- pThreadInfo->threadID, __func__, __LINE__,
- generatedRecPerTbl, insertRows);
+ verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ generatedRecPerTbl, insertRows);
if ((g_args.num_of_RPR - recOfBatch) < batchPerTbl)
break;
}
- verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRId64"\n",
- pThreadInfo->threadID, __func__, __LINE__, recOfBatch,
- pThreadInfo->totalInsertRows);
+ verbosePrint("[%d] %s() LN%d recOfBatch=%"PRIu64" totalInsertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, recOfBatch,
+ pThreadInfo->totalInsertRows);
verbosePrint("[%d] %s() LN%d, buffer=%s\n",
- pThreadInfo->threadID, __func__, __LINE__, buffer);
+ pThreadInfo->threadID, __func__, __LINE__, buffer);
startTs = taosGetTimestampMs();
- int affectedRows = execInsert(pThreadInfo, buffer, recOfBatch);
+ int64_t affectedRows = execInsert(pThreadInfo, buffer, recOfBatch);
endTs = taosGetTimestampMs();
- int64_t delay = endTs - startTs;
- performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n",
- __func__, __LINE__, delay);
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %"PRIu64"ms\n",
+ __func__, __LINE__, delay);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
pThreadInfo->cntDelay++;
pThreadInfo->totalDelay += delay;
- verbosePrint("[%d] %s() LN%d affectedRows=%d\n", pThreadInfo->threadID,
- __func__, __LINE__, affectedRows);
- if ((affectedRows < 0) || (recOfBatch != affectedRows)) {
- errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %d\n%s\n",
- pThreadInfo->threadID, __func__, __LINE__,
- recOfBatch, affectedRows, buffer);
- goto free_and_statistics_interlace;
+ if (recOfBatch != affectedRows) {
+ errorPrint("[%d] %s() LN%d execInsert insert %"PRIu64", affected rows: %"PRId64"\n%s\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ recOfBatch, affectedRows, buffer);
+ goto free_of_interlace;
}
pThreadInfo->totalAffectedRows += affectedRows;
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n",
- pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows);
+ printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
lastPrintTime = currentPrintTime;
}
@@ -5126,20 +5271,16 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
if (insert_interval > (et - st) ) {
int sleepTime = insert_interval - (et -st);
performancePrint("%s() LN%d sleep: %d ms for insert interval\n",
- __func__, __LINE__, sleepTime);
+ __func__, __LINE__, sleepTime);
taosMsleep(sleepTime); // ms
sleepTimeTotal += insert_interval;
}
}
}
- free_and_statistics_interlace:
+free_of_interlace:
tmfree(buffer);
-
- printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n",
- pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows);
+ printStatPerThread(pThreadInfo);
return NULL;
}
@@ -5155,21 +5296,21 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__);
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+ uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
char* buffer = calloc(maxSqlLen, 1);
if (NULL == buffer) {
- errorPrint( "Failed to alloc %d Bytes, reason:%s\n",
- maxSqlLen,
- strerror(errno));
+ errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n",
+ maxSqlLen,
+ strerror(errno));
return NULL;
}
- int64_t lastPrintTime = taosGetTimestampMs();
- int64_t startTs = taosGetTimestampMs();
- int64_t endTs;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
- int timeStampStep =
+ int64_t timeStampStep =
superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
/* int insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
@@ -5182,28 +5323,28 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->samplePos = 0;
- for (uint32_t tableSeq =
- pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to;
- tableSeq ++) {
+ for (uint64_t tableSeq =
+ pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to;
+ tableSeq ++) {
int64_t start_time = pThreadInfo->start_time;
- int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
+ uint64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows);
- for (int64_t i = 0; i < insertRows;) {
- /*
- if (insert_interval) {
- st = taosGetTimestampMs();
- }
- */
+ for (uint64_t i = 0; i < insertRows;) {
+ /*
+ if (insert_interval) {
+ st = taosGetTimestampMs();
+ }
+ */
char tableName[TSDB_TABLE_NAME_LEN];
getTableName(tableName, pThreadInfo, tableSeq);
- verbosePrint("%s() LN%d: tid=%d seq=%d tableName=%s\n",
- __func__, __LINE__,
- pThreadInfo->threadID, tableSeq, tableName);
+ verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n",
+ __func__, __LINE__,
+ pThreadInfo->threadID, tableSeq, tableName);
- int remainderBufLen = maxSqlLen;
+ int64_t remainderBufLen = maxSqlLen;
char *pstr = buffer;
int nInsertBufLen = strlen("insert into ");
@@ -5212,44 +5353,50 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pstr += len;
remainderBufLen -= len;
- int generated = generateProgressiveDataBuffer(
- tableName, tableSeq, pThreadInfo, pstr, insertRows,
- i, start_time,
- &(pThreadInfo->samplePos),
- &remainderBufLen);
+ int64_t generated = generateProgressiveDataBuffer(
+ tableName, tableSeq, pThreadInfo, pstr, insertRows,
+ i, start_time,
+ &(pThreadInfo->samplePos),
+ &remainderBufLen);
if (generated > 0)
i += generated;
else
- goto free_and_statistics_2;
+ goto free_of_progressive;
start_time += generated * timeStampStep;
pThreadInfo->totalInsertRows += generated;
startTs = taosGetTimestampMs();
- int affectedRows = execInsert(pThreadInfo, buffer, generated);
+ int64_t affectedRows = execInsert(pThreadInfo, buffer, generated);
endTs = taosGetTimestampMs();
- int64_t delay = endTs - startTs;
+ uint64_t delay = endTs - startTs;
performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n",
- __func__, __LINE__, delay);
+ __func__, __LINE__, delay);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
pThreadInfo->cntDelay++;
pThreadInfo->totalDelay += delay;
- if (affectedRows < 0)
- goto free_and_statistics_2;
+ if (affectedRows < 0) {
+ errorPrint("%s() LN%d, affected rows: %"PRId64"\n",
+ __func__, __LINE__, affectedRows);
+ goto free_of_progressive;
+ }
pThreadInfo->totalAffectedRows += affectedRows;
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n",
- pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows);
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
lastPrintTime = currentPrintTime;
}
@@ -5271,21 +5418,17 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
if (g_args.verbose_print) {
if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo &&
- (0 == strncasecmp(
- superTblInfo->dataSource, "sample", strlen("sample")))) {
- verbosePrint("%s() LN%d samplePos=%d\n",
- __func__, __LINE__, pThreadInfo->samplePos);
+ (0 == strncasecmp(
+ superTblInfo->dataSource, "sample", strlen("sample")))) {
+ verbosePrint("%s() LN%d samplePos=%"PRId64"\n",
+ __func__, __LINE__, pThreadInfo->samplePos);
}
}
} // tableSeq
- free_and_statistics_2:
+free_of_progressive:
tmfree(buffer);
-
- printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n",
- pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows);
+ printStatPerThread(pThreadInfo);
return NULL;
}
@@ -5294,7 +5437,18 @@ static void* syncWrite(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows;
+ int interlaceRows;
+
+ if (superTblInfo) {
+ if ((superTblInfo->interlaceRows == 0)
+ && (g_args.interlace_rows > 0)) {
+ interlaceRows = g_args.interlace_rows;
+ } else {
+ interlaceRows = superTblInfo->interlaceRows;
+ }
+ } else {
+ interlaceRows = g_args.interlace_rows;
+ }
if (interlaceRows > 0) {
// interlace mode
@@ -5303,6 +5457,7 @@ static void* syncWrite(void *sarg) {
// progressive mode
return syncWriteProgressive(pThreadInfo);
}
+
}
static void callBack(void *param, TAOS_RES *res, int code) {
@@ -5321,8 +5476,9 @@ static void callBack(void *param, TAOS_RES *res, int code) {
char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen);
char data[MAX_DATA_SIZE];
char *pstr = buffer;
- pstr += sprintf(pstr, "insert into %s.%s%d values", pThreadInfo->db_name, pThreadInfo->tb_prefix,
- pThreadInfo->start_table_from);
+ pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values",
+ pThreadInfo->db_name, pThreadInfo->tb_prefix,
+ pThreadInfo->start_table_from);
// if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) {
if (pThreadInfo->counter >= g_args.num_of_RPR) {
pThreadInfo->start_table_from++;
@@ -5338,7 +5494,7 @@ static void callBack(void *param, TAOS_RES *res, int code) {
for (int i = 0; i < g_args.num_of_RPR; i++) {
int rand_num = taosRandom() % 100;
if (0 != pThreadInfo->superTblInfo->disorderRatio
- && rand_num < pThreadInfo->superTblInfo->disorderRatio) {
+ && rand_num < pThreadInfo->superTblInfo->disorderRatio) {
int64_t d = pThreadInfo->lastTs - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1);
generateRowData(data, d, pThreadInfo->superTblInfo);
} else {
@@ -5381,8 +5537,34 @@ static void *asyncWrite(void *sarg) {
return NULL;
}
+static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *serv_addr)
+{
+ uint16_t rest_port = port + TSDB_PORT_HTTP;
+ struct hostent *server = gethostbyname(host);
+ if ((server == NULL) || (server->h_addr == NULL)) {
+ errorPrint("%s", "ERROR, no such host");
+ return -1;
+ }
+
+ debugPrint("h_name: %s\nh_addr=%p\nh_addretype: %s\nh_length: %d\n",
+ server->h_name,
+ server->h_addr,
+ (server->h_addrtype == AF_INET)?"ipv4":"ipv6",
+ server->h_length);
+
+ memset(serv_addr, 0, sizeof(struct sockaddr_in));
+ serv_addr->sin_family = AF_INET;
+ serv_addr->sin_port = htons(rest_port);
+#ifdef WINDOWS
+ serv_addr->sin_addr.s_addr = inet_addr(host);
+#else
+ memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length);
+#endif
+ return 0;
+}
+
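A usage sketch for the new helper (placeholder host and port; assumes the surrounding includes): resolve once, then reuse the sockaddr for every REST request the worker threads issue.

    struct sockaddr_in serv_addr;
    if (convertHostToServAddr("127.0.0.1", 6030, &serv_addr) != 0) {
        fprintf(stderr, "failed to resolve REST endpoint\n");
        exit(EXIT_FAILURE);
    }
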
static void startMultiThreadInsertData(int threads, char* db_name,
- char* precision,SSuperTable* superTblInfo) {
+ char* precision,SSuperTable* superTblInfo) {
pthread_t *pids = malloc(threads * sizeof(pthread_t));
assert(pids != NULL);
@@ -5417,48 +5599,48 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int64_t start_time;
if (superTblInfo) {
if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
- start_time = taosGetTimestamp(timePrec);
+ start_time = taosGetTimestamp(timePrec);
} else {
if (TSDB_CODE_SUCCESS != taosParseTime(
- superTblInfo->startTimestamp,
- &start_time,
- strlen(superTblInfo->startTimestamp),
- timePrec, 0)) {
- ERROR_EXIT("failed to parse time!\n");
+ superTblInfo->startTimestamp,
+ &start_time,
+ strlen(superTblInfo->startTimestamp),
+ timePrec, 0)) {
+ ERROR_EXIT("failed to parse time!\n");
}
}
} else {
- start_time = 1500000000000;
+ start_time = 1500000000000;
}
int64_t start = taosGetTimestampMs();
// read sample data from file first
if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
- "sample", strlen("sample")))) {
+ "sample", strlen("sample")))) {
if (0 != prepareSampleDataForSTable(superTblInfo)) {
errorPrint("%s() LN%d, prepare sample data for stable failed!\n",
- __func__, __LINE__);
+ __func__, __LINE__);
exit(-1);
}
}
// read sample data from file first
if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
- "sample", strlen("sample")))) {
+ "sample", strlen("sample")))) {
if (0 != prepareSampleDataForSTable(superTblInfo)) {
errorPrint("%s() LN%d, prepare sample data for stable failed!\n",
- __func__, __LINE__);
+ __func__, __LINE__);
exit(-1);
}
}
TAOS* taos = taos_connect(
- g_Dbs.host, g_Dbs.user,
- g_Dbs.password, db_name, g_Dbs.port);
+ g_Dbs.host, g_Dbs.user,
+ g_Dbs.password, db_name, g_Dbs.port);
if (NULL == taos) {
errorPrint("%s() LN%d, connect to server fail , reason: %s\n",
- __func__, __LINE__, taos_errstr(NULL));
+ __func__, __LINE__, taos_errstr(NULL));
exit(-1);
}
@@ -5466,18 +5648,18 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int startFrom;
if (superTblInfo) {
- int limit, offset;
+ int64_t limit;
+ uint64_t offset;
if ((NULL != g_args.sqlFile) && (superTblInfo->childTblExists == TBL_NO_EXISTS) &&
- ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) {
+ ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) {
printf("WARNING: offset and limit will not be used since the child tables not exists!\n");
}
- if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS)
- && (superTblInfo->childTblOffset >= 0)) {
+ if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) {
if ((superTblInfo->childTblLimit < 0)
|| ((superTblInfo->childTblOffset + superTblInfo->childTblLimit)
- > (superTblInfo->childTblCount))) {
+ > (superTblInfo->childTblCount))) {
superTblInfo->childTblLimit =
superTblInfo->childTblCount - superTblInfo->childTblOffset;
}
@@ -5503,7 +5685,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
if ((superTblInfo->childTblExists != TBL_NO_EXISTS)
- && (0 == superTblInfo->childTblLimit)) {
+ && (0 == superTblInfo->childTblLimit)) {
printf("WARNING: specified limit = 0, which cannot find table name to insert or query! \n");
if (!g_args.answer_yes) {
printf(" Press enter key to continue or Ctrl-C to stop\n\n");
@@ -5512,14 +5694,14 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
superTblInfo->childTblName = (char*)calloc(1,
- limit * TSDB_TABLE_NAME_LEN);
+ limit * TSDB_TABLE_NAME_LEN);
if (superTblInfo->childTblName == NULL) {
errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
taos_close(taos);
exit(-1);
}
- int childTblCount;
+ uint64_t childTblCount;
getChildNameOfSuperTableWithLimitAndOffset(
taos,
db_name, superTblInfo->sTblName,
@@ -5533,17 +5715,23 @@ static void startMultiThreadInsertData(int threads, char* db_name,
taos_close(taos);
- int a = ntables / threads;
+ uint64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
- int b = 0;
+ uint64_t b = 0;
if (threads != 0) {
b = ntables % threads;
}
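Note: the a/b arithmetic is the usual contiguous-range split of ntables across threads: every thread gets a = ntables / threads tables, and the first b = ntables % threads threads take one extra, so range sizes differ by at most one. A self-contained sketch (names illustrative):

```c
#include <inttypes.h>
#include <stdio.h>

/* Sketch: split n tables over k threads the way the patch does. */
int main(void) {
    uint64_t n = 10, k = 4;
    uint64_t a = n / k, b = n % k, from = 0;
    for (uint64_t i = 0; i < k; i++) {
        uint64_t cnt = (i < b) ? a + 1 : a;  /* first b threads get a+1 */
        printf("thread %" PRIu64 ": tables [%" PRIu64 ", %" PRIu64 "]\n",
               i, from, from + cnt - 1);
        from += cnt;
    }
    return 0;
}
```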
+ if ((superTblInfo)
+ && (0 == strncasecmp(superTblInfo->insertMode, "rest", strlen("rest")))) {
+ if (convertHostToServAddr(g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0)
+ exit(-1);
+ }
+
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infos + i;
t_info->threadID = i;
@@ -5552,41 +5740,42 @@ static void startMultiThreadInsertData(int threads, char* db_name,
t_info->superTblInfo = superTblInfo;
t_info->start_time = start_time;
- t_info->minDelay = INT16_MAX;
+ t_info->minDelay = UINT64_MAX;
if ((NULL == superTblInfo) ||
- (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) {
+ (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) {
//t_info->taos = taos;
t_info->taos = taos_connect(
- g_Dbs.host, g_Dbs.user,
- g_Dbs.password, db_name, g_Dbs.port);
+ g_Dbs.host, g_Dbs.user,
+ g_Dbs.password, db_name, g_Dbs.port);
if (NULL == t_info->taos) {
errorPrint(
- "connect to server fail from insert sub thread, reason: %s\n",
- taos_errstr(NULL));
+ "connect to server fail from insert sub thread, reason: %s\n",
+ taos_errstr(NULL));
exit(-1);
}
} else {
t_info->taos = NULL;
}
- if ((NULL == superTblInfo)
- || (0 == superTblInfo->multiThreadWriteOneTbl)) {
+/* if ((NULL == superTblInfo)
+ || (0 == superTblInfo->multiThreadWriteOneTbl)) {
+ */
t_info->start_table_from = startFrom;
t_info->ntables = i < b ? a + 1 : a;
t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
startFrom = t_info->end_table_to + 1;
- } else {
+/* } else {
t_info->start_table_from = 0;
t_info->ntables = superTblInfo->childTblCount;
t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint();
}
-
+*/
tsem_init(&(t_info->lock_sem), 0, 0);
- if (SYNC == g_Dbs.queryMode) {
- pthread_create(pids + i, NULL, syncWrite, t_info);
- } else {
+ if (ASYNC_MODE == g_Dbs.asyncMode) {
pthread_create(pids + i, NULL, asyncWrite, t_info);
+ } else {
+ pthread_create(pids + i, NULL, syncWrite, t_info);
}
}
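Note: thread creation now keys off g_Dbs.asyncMode (renamed from queryMode), with ASYNC_MODE selecting asyncWrite. The same dispatch can be written once with a function pointer; a small sketch assuming the patch's names:

```c
/* Sketch: choose the worker once, then create the thread. */
void *(*worker)(void *) =
    (ASYNC_MODE == g_Dbs.asyncMode) ? asyncWrite : syncWrite;
pthread_create(pids + i, NULL, worker, t_info);
```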
@@ -5594,10 +5783,10 @@ static void startMultiThreadInsertData(int threads, char* db_name,
pthread_join(pids[i], NULL);
}
- int64_t totalDelay = 0;
- int64_t maxDelay = 0;
- int64_t minDelay = INT16_MAX;
- int64_t cntDelay = 1;
+ uint64_t totalDelay = 0;
+ uint64_t maxDelay = 0;
+ uint64_t minDelay = UINT64_MAX;
+ uint64_t cntDelay = 1;
double avgDelay = 0;
for (int i = 0; i < threads; i++) {
@@ -5606,16 +5795,16 @@ static void startMultiThreadInsertData(int threads, char* db_name,
tsem_destroy(&(t_info->lock_sem));
taos_close(t_info->taos);
- debugPrint("%s() LN%d, [%d] totalInsert=%"PRId64" totalAffected=%"PRId64"\n",
- __func__, __LINE__,
- t_info->threadID, t_info->totalInsertRows,
- t_info->totalAffectedRows);
+ debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n",
+ __func__, __LINE__,
+ t_info->threadID, t_info->totalInsertRows,
+ t_info->totalAffectedRows);
if (superTblInfo) {
- superTblInfo->totalAffectedRows += t_info->totalAffectedRows;
- superTblInfo->totalInsertRows += t_info->totalInsertRows;
+ superTblInfo->totalAffectedRows += t_info->totalAffectedRows;
+ superTblInfo->totalInsertRows += t_info->totalInsertRows;
} else {
- g_args.totalAffectedRows += t_info->totalAffectedRows;
- g_args.totalInsertRows += t_info->totalInsertRows;
+ g_args.totalAffectedRows += t_info->totalAffectedRows;
+ g_args.totalInsertRows += t_info->totalInsertRows;
}
totalDelay += t_info->totalDelay;
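Note: widening the delay accumulators to uint64_t and seeding the minimum with UINT64_MAX (INT16_MAX caps at 32767, which a single slow insert can exceed) keeps the fold over per-thread stats correct. In sketch form, the aggregation amounts to:

```c
#include <stdint.h>

/* Sketch: fold one thread's delay counters into the process totals. */
typedef struct {
    uint64_t totalDelay, cntDelay, maxDelay, minDelay;
} DelayStats;

static void mergeDelay(DelayStats *acc, const DelayStats *t) {
    acc->totalDelay += t->totalDelay;
    acc->cntDelay   += t->cntDelay;
    if (t->maxDelay > acc->maxDelay) acc->maxDelay = t->maxDelay;
    if (t->minDelay < acc->minDelay) acc->minDelay = t->minDelay;
    /* afterwards: avg = (double)acc->totalDelay / acc->cntDelay,
     * guarding cntDelay > 0 */
}
```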
@@ -5632,35 +5821,42 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int64_t t = end - start;
if (superTblInfo) {
- printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n",
- t / 1000.0, superTblInfo->totalInsertRows,
- superTblInfo->totalAffectedRows,
- threads, db_name, superTblInfo->sTblName,
- (double)superTblInfo->totalInsertRows / (t / 1000.0));
- fprintf(g_fpOfInsertResult,
- "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n",
- t / 1000.0, superTblInfo->totalInsertRows,
- superTblInfo->totalAffectedRows,
- threads, db_name, superTblInfo->sTblName,
- (double)superTblInfo->totalInsertRows / (t / 1000.0));
+ fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
+ t / 1000.0, superTblInfo->totalInsertRows,
+ superTblInfo->totalAffectedRows,
+ threads, db_name, superTblInfo->sTblName,
+ (double)superTblInfo->totalInsertRows / (t / 1000.0));
+
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
+ t / 1000.0, superTblInfo->totalInsertRows,
+ superTblInfo->totalAffectedRows,
+ threads, db_name, superTblInfo->sTblName,
+ (double)superTblInfo->totalInsertRows / (t / 1000.0));
+ }
} else {
- printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n",
- t / 1000.0, g_args.totalInsertRows,
- g_args.totalAffectedRows,
- threads, db_name,
- (double)g_args.totalInsertRows / (t / 1000.0));
- fprintf(g_fpOfInsertResult,
- "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n",
- t * 1000.0, g_args.totalInsertRows,
- g_args.totalAffectedRows,
- threads, db_name,
- (double)g_args.totalInsertRows / (t / 1000.0));
+ fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
+ t / 1000.0, g_args.totalInsertRows,
+ g_args.totalAffectedRows,
+ threads, db_name,
+ (double)g_args.totalInsertRows / (t / 1000.0));
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
+ t * 1000.0, g_args.totalInsertRows,
+ g_args.totalAffectedRows,
+ threads, db_name,
+ (double)g_args.totalInsertRows / (t / 1000.0));
+ }
}
- printf("insert delay, avg: %10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n",
- avgDelay, maxDelay, minDelay);
- fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n",
+ fprintf(stderr, "insert delay, avg: %10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n",
avgDelay, maxDelay, minDelay);
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n",
+ avgDelay, maxDelay, minDelay);
+ }
//taos_close(taos);
@@ -5681,12 +5877,12 @@ static void *readTable(void *sarg) {
return NULL;
}
- int num_of_DPT;
+ int num_of_DPT;
/* if (rinfo->superTblInfo) {
num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table;
} else {
*/
- num_of_DPT = g_args.num_of_DPT;
+ num_of_DPT = g_args.num_of_DPT;
// }
int num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
@@ -5700,11 +5896,11 @@ static void *readTable(void *sarg) {
printf("%d records:\n", totalData);
fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n");
- for (int j = 0; j < n; j++) {
+ for (uint64_t j = 0; j < n; j++) {
double totalT = 0;
- int count = 0;
- for (int i = 0; i < num_of_tables; i++) {
- sprintf(command, "select %s from %s%d where ts>= %" PRId64,
+ uint64_t count = 0;
+ for (uint64_t i = 0; i < num_of_tables; i++) {
+ sprintf(command, "select %s from %s%"PRIu64" where ts>= %" PRIu64,
aggreFunc[j], tb_prefix, i, sTime);
double t = taosGetTimestampMs();
@@ -5764,7 +5960,7 @@ static void *readMetric(void *sarg) {
fprintf(fp, "Querying On %d records:\n", totalData);
for (int j = 0; j < n; j++) {
- char condition[BUFFER_SIZE - 30] = "\0";
+ char condition[COND_BUF_LEN] = "\0";
char tempS[64] = "\0";
int m = 10 < num_of_tables ? 10 : num_of_tables;
@@ -5775,7 +5971,7 @@ static void *readMetric(void *sarg) {
} else {
sprintf(tempS, " or t1 = %d ", i);
}
- strcat(condition, tempS);
+ strncat(condition, tempS, COND_BUF_LEN - 1);
sprintf(command, "select %s from meters where %s", aggreFunc[j], condition);
@@ -5830,7 +6026,8 @@ static int insertTestProcess() {
return -1;
}
- printfInsertMetaToFile(g_fpOfInsertResult);
+ if (g_fpOfInsertResult)
+ printfInsertMetaToFile(g_fpOfInsertResult);
if (!g_args.answer_yes) {
printf("Press enter key to continue\n\n");
@@ -5841,7 +6038,8 @@ static int insertTestProcess() {
// create database and super tables
if(createDatabasesAndStables() != 0) {
- fclose(g_fpOfInsertResult);
+ if (g_fpOfInsertResult)
+ fclose(g_fpOfInsertResult);
return -1;
}
@@ -5857,11 +6055,13 @@ static int insertTestProcess() {
end = taosGetTimestampMs();
if (g_totalChildTables > 0) {
- printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
- (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
- fprintf(g_fpOfInsertResult,
+ fprintf(stderr, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
+ (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
"Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
(end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
+ }
}
taosMsleep(1000);
@@ -5876,15 +6076,15 @@ static int insertTestProcess() {
if (superTblInfo && (superTblInfo->insertRows > 0)) {
startMultiThreadInsertData(
- g_Dbs.threadCount,
- g_Dbs.db[i].dbName,
- g_Dbs.db[i].dbCfg.precision,
- superTblInfo);
+ g_Dbs.threadCount,
+ g_Dbs.db[i].dbName,
+ g_Dbs.db[i].dbCfg.precision,
+ superTblInfo);
}
}
}
} else {
- startMultiThreadInsertData(
+ startMultiThreadInsertData(
g_Dbs.threadCount,
g_Dbs.db[i].dbName,
g_Dbs.db[i].dbCfg.precision,
@@ -5912,13 +6112,13 @@ static void *specifiedTableQuery(void *sarg) {
if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
- g_queryInfo.user,
- g_queryInfo.password,
- NULL,
- g_queryInfo.port);
+ g_queryInfo.user,
+ g_queryInfo.password,
+ NULL,
+ g_queryInfo.port);
if (taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
- pThreadInfo->threadID, taos_errstr(NULL));
+ pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
} else {
pThreadInfo->taos = taos;
@@ -5934,67 +6134,51 @@ static void *specifiedTableQuery(void *sarg) {
return NULL;
}
- int64_t st = 0;
- int64_t et = 0;
+ uint64_t st = 0;
+ uint64_t et = 0;
- int queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
+ uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
- int totalQueried = 0;
- int64_t lastPrintTime = taosGetTimestampMs();
- int64_t startTs = taosGetTimestampMs();
+ uint64_t totalQueried = 0;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
while(queryTimes --) {
if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) <
- (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) {
+ (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) {
taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms
}
+ char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
+ if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ sprintf(tmpFile, "%s-%d",
+ g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ }
+
st = taosGetTimestampMs();
- if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
- int64_t t1 = taosGetTimestampMs();
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
- sprintf(tmpFile, "%s-%d",
- g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
- pThreadInfo->threadID);
- }
- selectAndGetResult(pThreadInfo->taos,
- g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile);
- int64_t t2 = taosGetTimestampMs();
- printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %10.3f s\n",
- taosGetSelfPthreadId(), (t2 - t1)/1000.0);
- } else {
- int64_t t1 = taosGetTimestampMs();
- int retCode = postProceSql(g_queryInfo.host,
- g_queryInfo.port,
- g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]);
- if (0 != retCode) {
- printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
- return NULL;
- }
- int64_t t2 = taosGetTimestampMs();
- printf("=[restful] thread[%"PRId64"] complete one sql, Spent %10.3f s\n",
- taosGetSelfPthreadId(), (t2 - t1)/1000.0);
+ selectAndGetResult(pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile);
+
+ et = taosGetTimestampMs();
+ printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n",
+ taosGetSelfPthreadId(), g_queryInfo.queryMode, (et - st)/1000.0);
- }
totalQueried ++;
g_queryInfo.specifiedQueryInfo.totalQueried ++;
-
- et = taosGetTimestampMs();
-
- int64_t currentPrintTime = taosGetTimestampMs();
- int64_t endTs = taosGetTimestampMs();
+ uint64_t currentPrintTime = taosGetTimestampMs();
+ uint64_t endTs = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- debugPrint("%s() LN%d, endTs=%"PRId64"ms, startTs=%"PRId64"ms\n",
+ debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n",
__func__, __LINE__, endTs, startTs);
- printf("thread[%d] has currently completed queries: %d, QPS: %10.6f\n",
+ printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n",
pThreadInfo->threadID,
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
+ lastPrintTime = currentPrintTime;
}
- lastPrintTime = currentPrintTime;
}
return NULL;
}
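Note: moving lastPrintTime = currentPrintTime inside the 30-second check is the real fix in this loop; updating it on every iteration measured the gap between consecutive iterations rather than the time since the last report, so the progress line effectively never fired. The throttled-progress idiom in isolation:

```c
#include <inttypes.h>
#include <stdio.h>

/* Sketch: report at most once per 30s; reset `last` only on print. */
static void maybeReport(uint64_t nowMs, uint64_t *lastMs,
                        uint64_t done, uint64_t startMs) {
    if (nowMs - *lastMs > 30 * 1000) {
        printf("completed queries: %" PRIu64 ", QPS: %10.6f\n",
               done, (double)done / ((nowMs - startMs) / 1000.0));
        *lastMs = nowMs;
    }
}
```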
@@ -6015,14 +6199,14 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {
tstrncpy(outSql, inSql, pos - inSql + 1);
//printf("1: %s\n", outSql);
- strcat(outSql, subTblName);
+ strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1);
//printf("2: %s\n", outSql);
- strcat(outSql, pos+strlen(sourceString));
+ strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1);
//printf("3: %s\n", outSql);
}
static void *superTableQuery(void *sarg) {
- char sqlstr[1024];
+ char sqlstr[MAX_QUERY_SQL_LENGTH];
threadInfo *pThreadInfo = (threadInfo *)sarg;
if (pThreadInfo->taos == NULL) {
@@ -6034,21 +6218,21 @@ static void *superTableQuery(void *sarg) {
g_queryInfo.port);
if (taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
- pThreadInfo->threadID, taos_errstr(NULL));
+ pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
} else {
pThreadInfo->taos = taos;
}
}
- int64_t st = 0;
- int64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval;
+ uint64_t st = 0;
+ uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval;
- int queryTimes = g_queryInfo.superQueryInfo.queryTimes;
- int totalQueried = 0;
- int64_t startTs = taosGetTimestampMs();
+ uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes;
+ uint64_t totalQueried = 0;
+ uint64_t startTs = taosGetTimestampMs();
- int64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t lastPrintTime = taosGetTimestampMs();
while(queryTimes --) {
if (g_queryInfo.superQueryInfo.queryInterval
&& (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) {
@@ -6067,7 +6251,7 @@ static void *superTableQuery(void *sarg) {
g_queryInfo.superQueryInfo.result[j],
pThreadInfo->threadID);
}
- selectAndGetResult(pThreadInfo->taos, sqlstr, tmpFile);
+ selectAndGetResult(pThreadInfo, sqlstr, tmpFile);
totalQueried++;
g_queryInfo.superQueryInfo.totalQueried ++;
@@ -6075,16 +6259,16 @@ static void *superTableQuery(void *sarg) {
int64_t currentPrintTime = taosGetTimestampMs();
int64_t endTs = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] has currently completed queries: %d, QPS: %10.3f\n",
+ printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.3f\n",
pThreadInfo->threadID,
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
+ lastPrintTime = currentPrintTime;
}
- lastPrintTime = currentPrintTime;
}
}
et = taosGetTimestampMs();
- printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n",
+ printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRIu64" - %"PRIu64"] once queries duration:%.4fs\n\n",
taosGetSelfPthreadId(),
pThreadInfo->start_table_from,
pThreadInfo->end_table_to,
@@ -6127,13 +6311,19 @@ static int queryTestProcess() {
printfQuerySystemInfo(taos);
+ if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
+ if (convertHostToServAddr(
+ g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0)
+ exit(-1);
+ }
+
pthread_t *pids = NULL;
threadInfo *infos = NULL;
//==== create sub threads for query from specify table
int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
int nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;
- int64_t startTs = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
if ((nSqlCount > 0) && (nConcurrent > 0)) {
@@ -6193,21 +6383,21 @@ static int queryTestProcess() {
ERROR_EXIT("memory allocation failed for create threads\n");
}
- int ntables = g_queryInfo.superQueryInfo.childTblCount;
+ uint64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
int threads = g_queryInfo.superQueryInfo.threadCnt;
- int a = ntables / threads;
+ uint64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
- int b = 0;
+ uint64_t b = 0;
if (threads != 0) {
b = ntables % threads;
}
- int startFrom = 0;
+ uint64_t startFrom = 0;
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infosOfSub + i;
t_info->threadID = i;
@@ -6244,12 +6434,12 @@ static int queryTestProcess() {
tmfree((char*)infosOfSub);
// taos_close(taos);// TODO: workaround to use separate taos connection;
- int64_t endTs = taosGetTimestampMs();
+ uint64_t endTs = taosGetTimestampMs();
- int totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
+ uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
g_queryInfo.superQueryInfo.totalQueried;
- printf("==== completed total queries: %d, the QPS of all threads: %10.3f====\n",
+ fprintf(stderr, "==== completed total queries: %"PRIu64", the QPS of all threads: %10.3f====\n",
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
return 0;
@@ -6262,14 +6452,16 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c
return;
}
- getResult(res, (char*)param);
- taos_free_result(res);
+ if (param)
+ appendResultToFile(res, (char*)param);
+ // taos_unsubscribe() will free the result.
}
-static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) {
+static TAOS_SUB* subscribeImpl(
+ TAOS *taos, char *sql, char* topic, char* resultFileName) {
TAOS_SUB* tsub = NULL;
- if (g_queryInfo.specifiedQueryInfo.mode) {
+ if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
tsub = taos_subscribe(taos,
g_queryInfo.specifiedQueryInfo.subscribeRestart,
topic, sql, subscribe_callback, (void*)resultFileName,
@@ -6290,9 +6482,12 @@ static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultF
static void *superSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
- char subSqlstr[1024];
+ char subSqlstr[MAX_QUERY_SQL_LENGTH];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
+ if (g_queryInfo.superQueryInfo.sqlCount == 0)
+ return NULL;
+
if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
@@ -6318,59 +6513,63 @@ static void *superSubscribe(void *sarg) {
return NULL;
}
- //int64_t st = 0;
- //int64_t et = 0;
- do {
- //if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < g_queryInfo.specifiedQueryInfo.queryInterval) {
- // taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval- (et - st)); // ms
- // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
- //}
-
- //st = taosGetTimestampMs();
- char topic[32] = {0};
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- sprintf(topic, "taosdemo-subscribe-%d", i);
+ char topic[32] = {0};
+ for (uint64_t i = pThreadInfo->start_table_from;
+ i <= pThreadInfo->end_table_to; i++) {
+ for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
+ sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%d", i, j);
memset(subSqlstr,0,sizeof(subSqlstr));
- replaceChildTblName(g_queryInfo.superQueryInfo.sql[i], subSqlstr, i);
+ replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], subSqlstr, i);
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
+ if (g_queryInfo.superQueryInfo.result[j][0] != 0) {
sprintf(tmpFile, "%s-%d",
- g_queryInfo.superQueryInfo.result[i], pThreadInfo->threadID);
+ g_queryInfo.superQueryInfo.result[j], pThreadInfo->threadID);
}
- tsub[i] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, tmpFile);
- if (NULL == tsub[i]) {
+
+ uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j;
+ debugPrint("%s() LN%d, subSeq=%"PRIu64" subSqlstr: %s\n",
+ __func__, __LINE__, subSeq, subSqlstr);
+ tsub[subSeq] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, tmpFile);
+ if (NULL == tsub[subSeq]) {
taos_close(pThreadInfo->taos);
return NULL;
}
}
- //et = taosGetTimestampMs();
- //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
- } while(0);
+ }
// start loop to consume result
TAOS_RES* res = NULL;
while(1) {
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- if (1 == g_queryInfo.superQueryInfo.mode) {
- continue;
- }
-
- res = taos_consume(tsub[i]);
- if (res) {
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
- sprintf(tmpFile, "%s-%d",
- g_queryInfo.superQueryInfo.result[i],
- pThreadInfo->threadID);
+ for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) {
+ for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
+ if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) {
+ continue;
+ }
+
+ uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j;
+ taosMsleep(100); // ms
+ res = taos_consume(tsub[subSeq]);
+ if (res) {
+ char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
+ if (g_queryInfo.superQueryInfo.result[j][0] != 0) {
+ sprintf(tmpFile, "%s-%d",
+ g_queryInfo.superQueryInfo.result[j],
+ pThreadInfo->threadID);
+ appendResultToFile(res, tmpFile);
+ }
}
- getResult(res, tmpFile);
}
}
}
taos_free_result(res);
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- taos_unsubscribe(tsub[i], g_queryInfo.superQueryInfo.subscribeKeepProgress);
+ for (uint64_t i = pThreadInfo->start_table_from;
+ i <= pThreadInfo->end_table_to; i++) {
+ for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
+ uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j;
+ taos_unsubscribe(tsub[subSeq],
+ g_queryInfo.superQueryInfo.subscribeKeepProgress);
+ }
}
taos_close(pThreadInfo->taos);
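Note: superSubscribe() now keeps one subscription per (child table, sql) pair, addressed with the row-major flattening subSeq = i * sqlCount + j. One caveat: tsub[] is still declared with MAX_QUERY_SQL_COUNT entries, so subSeq can run past the array when the thread's table range is large; a bound check (not present in the patch) would look like:

```c
/* Sketch: guard the flattened (table i, sql j) index before use. */
uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j;
if (subSeq >= MAX_QUERY_SQL_COUNT) {
    errorPrint("subSeq %" PRIu64 " exceeds tsub[] capacity\n", subSeq);
    return NULL;
}
tsub[subSeq] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, tmpFile);
```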
@@ -6381,6 +6580,9 @@ static void *specifiedSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
+ if (g_queryInfo.specifiedQueryInfo.sqlCount == 0)
+ return NULL;
+
if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
@@ -6405,50 +6607,38 @@ static void *specifiedSubscribe(void *sarg) {
return NULL;
}
- //int64_t st = 0;
- //int64_t et = 0;
- do {
- //if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < g_queryInfo.specifiedQueryInfo.queryInterval) {
- // taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval- (et - st)); // ms
- // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
- //}
-
- //st = taosGetTimestampMs();
- char topic[32] = {0};
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ char topic[32] = {0};
+ for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
sprintf(topic, "taosdemo-subscribe-%d", i);
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
+ if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID);
}
tsub[i] = subscribeImpl(pThreadInfo->taos,
g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile);
- if (NULL == g_queryInfo.specifiedQueryInfo.tsub[i]) {
+ if (NULL == tsub[i]) {
taos_close(pThreadInfo->taos);
return NULL;
}
- }
- //et = taosGetTimestampMs();
- //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
- } while(0);
-
+ }
// start loop to consume result
TAOS_RES* res = NULL;
while(1) {
for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- if (SYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) {
+ if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
continue;
}
+ taosMsleep(1000); // ms
res = taos_consume(tsub[i]);
if (res) {
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID);
+ appendResultToFile(res, tmpFile);
}
- getResult(res, tmpFile);
}
}
}
@@ -6497,30 +6687,35 @@ static int subscribeTestProcess() {
pthread_t *pids = NULL;
threadInfo *infos = NULL;
- //==== create sub threads for query from super table
- if ((g_queryInfo.specifiedQueryInfo.sqlCount <= 0) ||
- (g_queryInfo.specifiedQueryInfo.concurrent <= 0)) {
- errorPrint("%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n",
- __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount,
- g_queryInfo.specifiedQueryInfo.concurrent);
- exit(-1);
- }
+ //==== create sub threads for specified-table queries
+ if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) {
+ printf("%s() LN%d, sepcified query sqlCount %"PRIu64".\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount);
+ } else {
+ if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
+ errorPrint("%s() LN%d, sepcified query sqlCount %"PRIu64".\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount);
+ exit(-1);
+ }
- pids = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t));
- infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo));
- if ((NULL == pids) || (NULL == infos)) {
- errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
- exit(-1);
- }
+ pids = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t));
+ infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo));
+ if ((NULL == pids) || (NULL == infos)) {
+ errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
+ exit(-1);
+ }
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) {
+ for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) {
threadInfo *t_info = infos + i;
t_info->threadID = i;
t_info->taos = NULL; // TODO: workaround to use separate taos connection;
pthread_create(pids + i, NULL, specifiedSubscribe, t_info);
+ }
}
- //==== create sub threads for query from sub table
+ //==== create sub threads for super table query
pthread_t *pidsOfSub = NULL;
threadInfo *infosOfSub = NULL;
if ((g_queryInfo.superQueryInfo.sqlCount > 0)
@@ -6536,21 +6731,21 @@ static int subscribeTestProcess() {
exit(-1);
}
- int ntables = g_queryInfo.superQueryInfo.childTblCount;
+ uint64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
int threads = g_queryInfo.superQueryInfo.threadCnt;
- int a = ntables / threads;
+ uint64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
- int b = 0;
+ uint64_t b = 0;
if (threads != 0) {
b = ntables % threads;
}
- int startFrom = 0;
+ uint64_t startFrom = 0;
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infosOfSub + i;
t_info->threadID = i;
@@ -6659,7 +6854,7 @@ static void setParaFromArg(){
g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
g_Dbs.threadCount = g_args.num_of_threads;
g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
- g_Dbs.queryMode = g_args.query_mode;
+ g_Dbs.asyncMode = g_args.async_mode;
g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL;
g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS;
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 96a1cd16f8..f80ac069a0 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -72,7 +72,8 @@ enum _show_db_index {
TSDB_SHOW_DB_WALLEVEL_INDEX,
TSDB_SHOW_DB_FSYNC_INDEX,
TSDB_SHOW_DB_COMP_INDEX,
- TSDB_SHOW_DB_PRECISION_INDEX,
+ TSDB_SHOW_DB_CACHELAST_INDEX,
+ TSDB_SHOW_DB_PRECISION_INDEX,
TSDB_SHOW_DB_UPDATE_INDEX,
TSDB_SHOW_DB_STATUS_INDEX,
TSDB_MAX_SHOW_DB
@@ -83,10 +84,10 @@ enum _show_tables_index {
TSDB_SHOW_TABLES_NAME_INDEX,
TSDB_SHOW_TABLES_CREATED_TIME_INDEX,
TSDB_SHOW_TABLES_COLUMNS_INDEX,
- TSDB_SHOW_TABLES_METRIC_INDEX,
- TSDB_SHOW_TABLES_UID_INDEX,
+ TSDB_SHOW_TABLES_METRIC_INDEX,
+ TSDB_SHOW_TABLES_UID_INDEX,
TSDB_SHOW_TABLES_TID_INDEX,
- TSDB_SHOW_TABLES_VGID_INDEX,
+ TSDB_SHOW_TABLES_VGID_INDEX,
TSDB_MAX_SHOW_TABLES
};
@@ -99,22 +100,24 @@ enum _describe_table_index {
TSDB_MAX_DESCRIBE_METRIC
};
+#define COL_NOTE_LEN 128
+
typedef struct {
char field[TSDB_COL_NAME_LEN + 1];
char type[16];
int length;
- char note[128];
+ char note[COL_NOTE_LEN];
} SColDes;
typedef struct {
- char name[TSDB_COL_NAME_LEN + 1];
+ char name[TSDB_TABLE_NAME_LEN];
SColDes cols[];
} STableDef;
extern char version[];
typedef struct {
- char name[TSDB_DB_NAME_LEN + 1];
+ char name[TSDB_DB_NAME_LEN];
char create_time[32];
int32_t ntables;
int32_t vgroups;
@@ -132,14 +135,15 @@ typedef struct {
int8_t wallevel;
int32_t fsync;
int8_t comp;
+ int8_t cachelast;
char precision[8]; // time resolution
int8_t update;
char status[16];
} SDbInfo;
typedef struct {
- char name[TSDB_TABLE_NAME_LEN + 1];
- char metric[TSDB_TABLE_NAME_LEN + 1];
+ char name[TSDB_TABLE_NAME_LEN];
+ char metric[TSDB_TABLE_NAME_LEN];
} STableRecord;
typedef struct {
@@ -151,7 +155,7 @@ typedef struct {
pthread_t threadID;
int32_t threadIndex;
int32_t totalThreads;
- char dbName[TSDB_TABLE_NAME_LEN + 1];
+ char dbName[TSDB_DB_NAME_LEN];
void *taosCon;
int64_t rowsOfDumpOut;
int64_t tablesOfDumpOut;
@@ -210,13 +214,13 @@ static struct argp_option options[] = {
{"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
// dump unit options
{"all-databases", 'A', 0, 0, "Dump all databases.", 2},
- {"databases", 'B', 0, 0, "Dump assigned databases", 2},
+ {"databases", 'D', 0, 0, "Dump assigned databases", 2},
// dump format options
{"schemaonly", 's', 0, 0, "Only dump schema.", 3},
- {"with-property", 'M', 0, 0, "Dump schema with properties.", 3},
+ {"without-property", 'N', 0, 0, "Dump schema without properties.", 3},
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
- {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
+ {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
@@ -337,15 +341,15 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'A':
arguments->all_databases = true;
break;
- case 'B':
+ case 'D':
arguments->databases = true;
break;
// dump format option
case 's':
arguments->schemaonly = true;
break;
- case 'M':
- arguments->with_property = true;
+ case 'N':
+ arguments->with_property = false;
break;
case 'S':
// parse time here.
@@ -354,23 +358,23 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'E':
arguments->end_time = atol(arg);
break;
- case 'N':
+ case 'B':
arguments->data_batch = atoi(arg);
if (arguments->data_batch >= INT16_MAX) {
arguments->data_batch = INT16_MAX - 1;
- }
+ }
break;
- case 'L':
+ case 'L':
{
int32_t len = atoi(arg);
if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
len = TSDB_MAX_ALLOWED_SQL_LEN;
} else if (len < TSDB_MAX_SQL_LEN) {
len = TSDB_MAX_SQL_LEN;
- }
+ }
arguments->max_sql_len = len;
break;
- }
+ }
case 't':
arguments->table_batch = atoi(arg);
break;
@@ -398,27 +402,27 @@ static resultStatistics g_resultStatistics = {0};
static FILE *g_fpOfResult = NULL;
static int g_numOfCores = 1;
-int taosDumpOut(struct arguments *arguments);
-int taosDumpIn(struct arguments *arguments);
-void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp);
-int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon);
-int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName);
-void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName);
-void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName);
-int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName);
-int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName);
-int taosCheckParam(struct arguments *arguments);
-void taosFreeDbInfos();
+static int taosDumpOut(struct arguments *arguments);
+static int taosDumpIn(struct arguments *arguments);
+static void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp);
+static int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon);
+static int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName);
+static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName);
+static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName);
+static int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName);
+static int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName);
+static int taosCheckParam(struct arguments *arguments);
+static void taosFreeDbInfos();
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName);
struct arguments g_args = {
// connection option
- NULL,
- "root",
+ NULL,
+ "root",
#ifdef _TD_POWER_
- "powerdb",
+ "powerdb",
#else
- "taosdata",
+ "taosdata",
#endif
0,
"",
@@ -432,8 +436,8 @@ struct arguments g_args = {
false,
false,
// dump format option
- false,
- false,
+ false, // schemeonly
+ true, // with_property
0,
INT64_MAX,
1,
@@ -523,7 +527,7 @@ int main(int argc, char *argv[]) {
/* Parse our arguments; every option seen by parse_opt will be
reflected in arguments. */
- if (argc > 1)
+ if (argc > 2)
parse_args(argc, argv, &g_args);
argp_parse(&argp, argc, argv, 0, 0, &g_args);
@@ -675,10 +679,10 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS
}
sprintf(tempCommand, "show tables like %s", table);
-
- result = taos_query(taosCon, tempCommand);
+
+ result = taos_query(taosCon, tempCommand);
int32_t code = taos_errno(result);
-
+
if (code != 0) {
fprintf(stderr, "failed to run command %s\n", tempCommand);
free(tempCommand);
@@ -705,12 +709,12 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS
free(tempCommand);
return 0;
}
-
+
sprintf(tempCommand, "show stables like %s", table);
-
- result = taos_query(taosCon, tempCommand);
+
+ result = taos_query(taosCon, tempCommand);
code = taos_errno(result);
-
+
if (code != 0) {
fprintf(stderr, "failed to run command %s\n", tempCommand);
free(tempCommand);
@@ -748,7 +752,7 @@ int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric
return -1;
}
}
-
+
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN);
tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
@@ -770,7 +774,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
}
sprintf(tmpCommand, "select tbname from %s", metric);
-
+
TAOS_RES *res = taos_query(taosCon, tmpCommand);
int32_t code = taos_errno(res);
if (code != 0) {
@@ -792,20 +796,20 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
}
TAOS_FIELD *fields = taos_fetch_fields(res);
-
+
int32_t numOfTable = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
+ while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes);
tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
-
- taosWrite(fd, &tableRecord, sizeof(STableRecord));
+
+ taosWrite(fd, &tableRecord, sizeof(STableRecord));
numOfTable++;
}
taos_free_result(res);
lseek(fd, 0, SEEK_SET);
-
+
int maxThreads = arguments->thread_num;
int tableOfPerFile ;
if (numOfTable <= arguments->thread_num) {
@@ -815,16 +819,16 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
tableOfPerFile = numOfTable / arguments->thread_num;
if (0 != numOfTable % arguments->thread_num) {
tableOfPerFile += 1;
- }
+ }
}
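Note: the else branch is a ceiling division: each temp file receives numOfTable / thread_num tables, rounded up when there is a remainder so no table is dropped. The same result in one expression:

```c
/* Sketch: ceiling division without the explicit remainder test. */
int tableOfPerFile = (numOfTable + arguments->thread_num - 1)
                         / arguments->thread_num;
```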
char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
if (NULL == tblBuf){
- fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
+ fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
close(fd);
return -1;
}
-
+
int32_t numOfThread = *totalNumOfThread;
int subFd = -1;
for (; numOfThread < maxThreads; numOfThread++) {
@@ -838,7 +842,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
(void)remove(tmpBuf);
}
sprintf(tmpBuf, ".select-tbname.tmp");
- (void)remove(tmpBuf);
+ (void)remove(tmpBuf);
free(tblBuf);
close(fd);
return -1;
@@ -856,11 +860,11 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
sprintf(tmpBuf, ".select-tbname.tmp");
(void)remove(tmpBuf);
-
+
if (fd >= 0) {
close(fd);
fd = -1;
- }
+ }
*totalNumOfThread = numOfThread;
@@ -884,7 +888,7 @@ int taosDumpOut(struct arguments *arguments) {
} else {
sprintf(tmpBuf, "dbs.sql");
}
-
+
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@@ -916,9 +920,9 @@ int taosDumpOut(struct arguments *arguments) {
taosDumpCharset(fp);
sprintf(command, "show databases");
- result = taos_query(taos, command);
+ result = taos_query(taos, command);
int32_t code = taos_errno(result);
-
+
if (code != 0) {
fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result));
goto _exit_failure;
@@ -955,15 +959,17 @@ int taosDumpOut(struct arguments *arguments) {
goto _exit_failure;
}
- strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
+ strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
if (arguments->with_property) {
dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
- dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
+ dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
- dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
+ dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
- strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
+ strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
+ fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
//dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
//dbInfos[count]->daysToKeep1;
//dbInfos[count]->daysToKeep2;
@@ -974,8 +980,10 @@ int taosDumpOut(struct arguments *arguments) {
dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+ dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
- strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+ strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+ fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
//dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
@@ -1007,8 +1015,8 @@ int taosDumpOut(struct arguments *arguments) {
g_resultStatistics.totalDatabasesOfDumpOut++;
sprintf(command, "use %s", dbInfos[0]->name);
-
- result = taos_query(taos, command);
+
+ result = taos_query(taos, command);
int32_t code = taos_errno(result);
if (code != 0) {
fprintf(stderr, "invalid database %s\n", dbInfos[0]->name);
@@ -1038,7 +1046,7 @@ int taosDumpOut(struct arguments *arguments) {
int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name);
if (0 == ret) {
superTblCnt++;
- }
+ }
}
retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd);
}
@@ -1050,7 +1058,7 @@ int taosDumpOut(struct arguments *arguments) {
goto _clean_tmp_file;
}
}
-
+
// TODO: save dump super table into result_output.txt
fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt);
g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
@@ -1076,7 +1084,7 @@ int taosDumpOut(struct arguments *arguments) {
taos_close(taos);
taos_free_result(result);
tfree(command);
- taosFreeDbInfos();
+ taosFreeDbInfos();
fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows);
return 0;
@@ -1090,15 +1098,17 @@ _exit_failure:
return -1;
}
-int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) {
+int taosGetTableDes(
+ char* dbName, char *table,
+ STableDef *tableDes, TAOS* taosCon, bool isSuperTable) {
TAOS_ROW row = NULL;
TAOS_RES* res = NULL;
int count = 0;
char sqlstr[COMMAND_SIZE];
sprintf(sqlstr, "describe %s.%s;", dbName, table);
-
- res = taos_query(taosCon, sqlstr);
+
+ res = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
@@ -1108,7 +1118,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
TAOS_FIELD *fields = taos_fetch_fields(res);
- tstrncpy(tableDes->name, table, TSDB_COL_NAME_LEN);
+ tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
while ((row = taos_fetch_row(res)) != NULL) {
strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
@@ -1128,23 +1138,23 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
if (isSuperTable) {
return count;
}
-
+
// if the child table has tags, use "select tagName from table" to get tag values
for (int i = 0 ; i < count; i++) {
if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
sprintf(sqlstr, "select %s from %s.%s", tableDes->cols[i].field, dbName, table);
-
- res = taos_query(taosCon, sqlstr);
+
+ res = taos_query(taosCon, sqlstr);
code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
taos_free_result(res);
return -1;
}
-
- fields = taos_fetch_fields(res);
+
+ fields = taos_fetch_fields(res);
row = taos_fetch_row(res);
if (NULL == row) {
@@ -1159,7 +1169,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
res = NULL;
continue;
}
-
+
int32_t* length = taos_fetch_lengths(res);
//int32_t* length = taos_fetch_lengths(tmpResult);
@@ -1188,16 +1198,16 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
case TSDB_DATA_TYPE_BINARY: {
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
tableDes->cols[i].note[0] = '\'';
- char tbuf[COMMAND_SIZE];
- converStringToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE);
+ char tbuf[COL_NOTE_LEN];
+ converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf);
*(pstr++) = '\'';
break;
}
case TSDB_DATA_TYPE_NCHAR: {
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
- char tbuf[COMMAND_SIZE];
- convertNCharToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE);
+ char tbuf[COL_NOTE_LEN-2]; // reserve 2 bytes for the enclosing quotes
+ convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN-2);
sprintf(tableDes->cols[i].note, "\'%s\'", tbuf);
break;
}
@@ -1219,15 +1229,17 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
default:
break;
}
-
+
taos_free_result(res);
- res = NULL;
+ res = NULL;
}
return count;
}
-int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName) {
+int32_t taosDumpTable(
+ char *table, char *metric, struct arguments *arguments,
+ FILE *fp, TAOS* taosCon, char* dbName) {
int count = 0;
STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
@@ -1280,9 +1292,10 @@ void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
if (isDumpProperty) {
pstr += sprintf(pstr,
- "TABLES %d VGROUPS %d REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION '%s' UPDATE %d",
- dbInfo->ntables, dbInfo->vgroups, dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache,
- dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->wallevel, dbInfo->fsync, dbInfo->comp, dbInfo->precision, dbInfo->update);
+ "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
+ dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache,
+ dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->fsync, dbInfo->cachelast,
+ dbInfo->comp, dbInfo->precision, dbInfo->update);
}
pstr += sprintf(pstr, ";");
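Note: the rewritten clause drops TABLES/VGROUPS and WALLEVEL (not settable on CREATE DATABASE), fixes the FYNC typo to FSYNC, and adds the new CACHELAST property. With properties enabled it now emits a line shaped like this (values illustrative):

```
CREATE DATABASE IF NOT EXISTS db1 REPLICA 1 QUORUM 1 DAYS 10 KEEP 3650 CACHE 16 BLOCKS 6 MINROWS 100 MAXROWS 4096 FSYNC 3000 CACHELAST 0 COMP 2 PRECISION 'ms' UPDATE 0;
```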
@@ -1293,8 +1306,8 @@ void* taosDumpOutWorkThreadFp(void *arg)
{
SThreadParaObj *pThread = (SThreadParaObj*)arg;
STableRecord tableRecord;
- int fd;
-
+ int fd;
+
char tmpBuf[TSDB_FILENAME_LEN*4] = {0};
sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex);
fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
@@ -1305,13 +1318,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
FILE *fp = NULL;
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
-
+
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
}
-
+
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@@ -1321,13 +1334,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
memset(tmpBuf, 0, TSDB_FILENAME_LEN);
sprintf(tmpBuf, "use %s", pThread->dbName);
-
- TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
+
+ TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
int32_t code = taos_errno(tmpResult);
if (code != 0) {
fprintf(stderr, "invalid database %s\n", pThread->dbName);
taos_free_result(tmpResult);
- fclose(fp);
+ fclose(fp);
close(fd);
return NULL;
}
@@ -1340,14 +1353,17 @@ void* taosDumpOutWorkThreadFp(void *arg)
ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
if (readLen <= 0) break;
- int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &g_args, fp, pThread->taosCon, pThread->dbName);
+ int ret = taosDumpTable(
+ tableRecord.name, tableRecord.metric, &g_args,
+ fp, pThread->taosCon, pThread->dbName);
if (ret >= 0) {
// TODO: sum table count and table rows by self
pThread->tablesOfDumpOut++;
pThread->rowsOfDumpOut += ret;
-
+
if (pThread->rowsOfDumpOut >= lastRowsPrint) {
- printf(" %"PRId64 " rows already be dumpout from database %s\n", pThread->rowsOfDumpOut, pThread->dbName);
+ printf(" %"PRId64 " rows already be dumpout from database %s\n",
+ pThread->rowsOfDumpOut, pThread->dbName);
lastRowsPrint += 5000000;
}
@@ -1355,15 +1371,18 @@ void* taosDumpOutWorkThreadFp(void *arg)
if (tablesInOneFile >= g_args.table_batch) {
fclose(fp);
tablesInOneFile = 0;
-
- memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
+
+ memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
+ sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql",
+ g_args.outpath, pThread->dbName,
+ pThread->threadIndex, fileNameIndex);
} else {
- sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex);
+ sprintf(tmpBuf, "%s.tables.%d-%d.sql",
+ pThread->dbName, pThread->threadIndex, fileNameIndex);
}
fileNameIndex++;
-
+
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@@ -1377,7 +1396,7 @@ void* taosDumpOutWorkThreadFp(void *arg)
taos_free_result(tmpResult);
close(fd);
- fclose(fp);
+ fclose(fp);
return NULL;
}
@@ -1385,15 +1404,16 @@ void* taosDumpOutWorkThreadFp(void *arg)
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName)
{
pthread_attr_t thattr;
- SThreadParaObj *threadObj = (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
+ SThreadParaObj *threadObj =
+ (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
for (int t = 0; t < numOfThread; ++t) {
SThreadParaObj *pThread = threadObj + t;
pThread->rowsOfDumpOut = 0;
pThread->tablesOfDumpOut = 0;
pThread->threadIndex = t;
pThread->totalThreads = numOfThread;
- tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN);
- pThread->taosCon = taosCon;
+ tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN);
+ pThread->taosCon = taosCon;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
@@ -1408,7 +1428,7 @@ static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, i
pthread_join(threadObj[t].threadID, NULL);
}
- // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt
+ // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt
int64_t totalRowsOfDumpOut = 0;
int64_t totalChildTblsOfDumpOut = 0;
for (int32_t t = 0; t < numOfThread; ++t) {
@@ -1449,7 +1469,7 @@ int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName) {
}
-int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
+int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
{
TAOS_ROW row;
int fd = -1;
@@ -1457,8 +1477,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
char sqlstr[TSDB_MAX_SQL_LEN] = {0};
sprintf(sqlstr, "show %s.stables", dbName);
-
- TAOS_RES* res = taos_query(taosCon, sqlstr);
+
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason: %s\n", sqlstr, taos_errstr(res));
@@ -1478,13 +1498,14 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
(void)remove(".stables.tmp");
exit(-1);
}
-
- while ((row = taos_fetch_row(res)) != NULL) {
+
+ while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
- strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
taosWrite(fd, &tableRecord, sizeof(STableRecord));
- }
-
+ }
+
taos_free_result(res);
(void)lseek(fd, 0, SEEK_SET);
@@ -1492,7 +1513,7 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
while (1) {
ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
if (readLen <= 0) break;
-
+
int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName);
if (0 == ret) {
superTblCnt++;
@@ -1505,8 +1526,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
close(fd);
(void)remove(".stables.tmp");
-
- return 0;
+
+ return 0;
}
@@ -1516,19 +1537,19 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
STableRecord tableRecord;
taosDumpCreateDbClause(dbInfo, arguments->with_property, fp);
-
+
fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfo->name);
g_resultStatistics.totalDatabasesOfDumpOut++;
char sqlstr[TSDB_MAX_SQL_LEN] = {0};
fprintf(fp, "USE %s;\n\n", dbInfo->name);
-
+
(void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp);
sprintf(sqlstr, "show %s.tables", dbInfo->name);
-
- TAOS_RES* res = taos_query(taosCon, sqlstr);
+
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
int code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
@@ -1547,15 +1568,17 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
}
TAOS_FIELD *fields = taos_fetch_fields(res);
-
+
int32_t numOfTable = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
+ while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
- tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
- tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
-
+ tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
+ fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
+
taosWrite(fd, &tableRecord, sizeof(STableRecord));
-
+
numOfTable++;
}
taos_free_result(res);
@@ -1570,7 +1593,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
tableOfPerFile = numOfTable / g_args.thread_num;
if (0 != numOfTable % g_args.thread_num) {
tableOfPerFile += 1;
- }
+ }
}
char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
@@ -1579,7 +1602,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
close(fd);
return -1;
}
-
+
int32_t numOfThread = 0;
int subFd = -1;
for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) {
@@ -1616,7 +1639,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
close(fd);
fd = -1;
}
-
+
taos_free_result(res);
// start multi threads to dumpout
@@ -1624,7 +1647,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
(void)remove(tmpBuf);
- }
+ }
free(tblBuf);
return 0;
@@ -1637,15 +1660,18 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha
char* pstr = sqlstr;
- pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", dbName, tableDes->name);
+ pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
+ dbName, tableDes->name);
for (; counter < numOfCols; counter++) {
if (tableDes->cols[counter].note[0] != '\0') break;
if (counter == 0) {
- pstr += sprintf(pstr, " (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
+ pstr += sprintf(pstr, " (%s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
- pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
+ pstr += sprintf(pstr, ", %s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
}
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
@@ -1658,9 +1684,11 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha
for (; counter < numOfCols; counter++) {
if (counter == count_temp) {
- pstr += sprintf(pstr, ") TAGS (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
+ pstr += sprintf(pstr, ") TAGS (%s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
- pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
+ pstr += sprintf(pstr, ", %s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
}
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
@@ -1687,7 +1715,8 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols
char *pstr = NULL;
pstr = tmpBuf;
- pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", dbName, tableDes->name, dbName, metric);
+ pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (",
+ dbName, tableDes->name, dbName, metric);
for (; counter < numOfCols; counter++) {
if (tableDes->cols[counter].note[0] != '\0') break;
@@ -1735,7 +1764,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
char *pstr = NULL;
TAOS_ROW row = NULL;
int numFields = 0;
-
+
if (arguments->schemaonly) {
return 0;
}
@@ -1750,11 +1779,11 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
pstr = tmpBuffer;
char sqlstr[1024] = {0};
- sprintf(sqlstr,
- "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
+ sprintf(sqlstr,
+ "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
dbName, tbname, arguments->start_time, arguments->end_time);
-
- TAOS_RES* tmpResult = taos_query(taosCon, sqlstr);
+
+ TAOS_RES* tmpResult = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(tmpResult);
if (code != 0) {
fprintf(stderr, "failed to run command %s, reason: %s\n", sqlstr, taos_errstr(tmpResult));
@@ -1774,7 +1803,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
while ((row = taos_fetch_row(tmpResult)) != NULL) {
pstr = tmpBuffer;
curr_sqlstr_len = 0;
-
+
int32_t* length = taos_fetch_lengths(tmpResult); // act len
if (count == 0) {
@@ -1829,7 +1858,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
//pstr = stpcpy(pstr, tbuf);
//*(pstr++) = '\'';
- pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
+ pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
break;
}
case TSDB_DATA_TYPE_NCHAR: {
@@ -1857,10 +1886,10 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") ");
- totalRows++;
+ totalRows++;
count++;
fprintf(fp, "%s", tmpBuffer);
-
+
if (totalRows >= lastRowsPrint) {
printf(" %"PRId64 " rows already be dumpout from %s.%s\n", totalRows, dbName, tbname);
lastRowsPrint += 5000000;
@@ -2206,7 +2235,7 @@ static FILE* taosOpenDumpInFile(char *fptr) {
}
char *fname = full_path.we_wordv[0];
-
+
FILE *f = fopen(fname, "r");
if (f == NULL) {
fprintf(stderr, "ERROR: failed to open file %s\n", fname);
@@ -2240,7 +2269,7 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c
line[--read_len] = '\0';
//if (read_len == 0 || isCommentLine(line)) { // line starts with #
- if (read_len == 0 ) {
+ if (read_len == 0 ) {
continue;
}
@@ -2259,8 +2288,8 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c
}
memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
- cmd_len = 0;
-
+ cmd_len = 0;
+
if (lineNo >= lastRowsPrint) {
printf(" %d lines already be executed from file %s\n", lineNo, fileName);
lastRowsPrint += 5000000;
@@ -2300,7 +2329,7 @@ static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
if (totalThreads > tsSqlFileNum) {
totalThreads = tsSqlFileNum;
}
-
+
SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj));
for (int32_t t = 0; t < totalThreads; ++t) {
pThread = threadObj + t;
@@ -2330,7 +2359,7 @@ static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
int taosDumpIn(struct arguments *arguments) {
assert(arguments->isDumpIn);
-
+
TAOS *taos = NULL;
FILE *fp = NULL;
@@ -2345,22 +2374,22 @@ int taosDumpIn(struct arguments *arguments) {
int32_t tsSqlFileNumOfTbls = tsSqlFileNum;
if (tsDbSqlFile[0] != 0) {
tsSqlFileNumOfTbls--;
-
+
fp = taosOpenDumpInFile(tsDbSqlFile);
if (NULL == fp) {
fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile);
return -1;
}
fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile);
-
+
taosLoadFileCharset(fp, tsfCharset);
-
+
taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile);
}
if (0 != tsSqlFileNumOfTbls) {
taosStartDumpInWorkThreads(taos, arguments);
- }
+ }
taos_close(taos);
taosFreeSQLFiles();
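
The dumpout path above stages table names as fixed-size STableRecord structs in a `.tables.tmp` file and then replays them with read(2) until EOF. A minimal self-contained sketch of that replay loop follows; the struct layout and field sizes here are illustrative assumptions, not the definitions from taosdump.c.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

typedef struct {
  char name[193];    /* assumed size; stands in for the real field */
  char metric[193];  /* assumed size; stands in for the real field */
} STableRecordSketch;

static int replayRecords(const char *path) {
  int fd = open(path, O_RDONLY);
  if (fd < 0) return -1;

  STableRecordSketch rec;
  int count = 0;
  while (1) {
    ssize_t n = read(fd, &rec, sizeof(rec));
    if (n <= 0) break;               /* EOF (0) or error (-1) ends the loop */
    printf("dump table: %s\n", rec.name);
    count++;
  }
  close(fd);
  return count;
}
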
diff --git a/src/mnode/inc/mnodeDb.h b/src/mnode/inc/mnodeDb.h
index d03ba8d717..da0865833d 100644
--- a/src/mnode/inc/mnodeDb.h
+++ b/src/mnode/inc/mnodeDb.h
@@ -31,6 +31,7 @@ enum _TSDB_DB_STATUS {
int32_t mnodeInitDbs();
void mnodeCleanupDbs();
int64_t mnodeGetDbNum();
+int32_t mnodeGetDbMaxReplica();
SDbObj *mnodeGetDb(char *db);
SDbObj *mnodeGetDbByTableName(char *db);
void * mnodeGetNextDb(void *pIter, SDbObj **pDb);
diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h
index a80463233b..33cc443c63 100644
--- a/src/mnode/inc/mnodeDef.h
+++ b/src/mnode/inc/mnodeDef.h
@@ -266,7 +266,7 @@ typedef struct SAcctObj {
} SAcctObj;
typedef struct {
- char db[TSDB_DB_NAME_LEN];
+ char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
int8_t type;
int16_t numOfColumns;
int32_t index;
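
The widened `db` field above appears to exist so the show path can hold an account-qualified name ("acct.dbname") rather than a bare database name; the matching tstrncpy bound shows up in mnodeShow.c below. A tiny sketch of the sizing logic, with stand-in lengths that are assumptions for illustration:

#define ACCT_ID_LEN_SKETCH  11  /* stand-in for TSDB_ACCT_ID_LEN */
#define DB_NAME_LEN_SKETCH  33  /* stand-in for TSDB_DB_NAME_LEN */

/* room for "acct" + '.' + "dbname" + '\0', assuming (as is common in
 * this codebase) each *_LEN constant already includes one byte of
 * terminator slack, which also covers the separator */
char db[ACCT_ID_LEN_SKETCH + DB_NAME_LEN_SKETCH];
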
diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c
index bf5b6f8c5c..aac16693a5 100644
--- a/src/mnode/src/mnodeDb.c
+++ b/src/mnode/src/mnodeDb.c
@@ -74,6 +74,24 @@ int64_t mnodeGetDbNum() {
return sdbGetNumOfRows(tsDbSdb);
}
+int32_t mnodeGetDbMaxReplica() {
+ int32_t maxReplica = 0;
+ SDbObj *pDb = NULL;
+ void *pIter = NULL;
+
+ while (1) {
+ pIter = mnodeGetNextDb(pIter, &pDb);
+ if (pDb == NULL) break;
+
+ if (pDb->cfg.replications > maxReplica)
+ maxReplica = pDb->cfg.replications;
+
+ mnodeDecDbRef(pDb);
+ }
+
+ return maxReplica;
+}
+
static int32_t mnodeDbActionInsert(SSdbRow *pRow) {
SDbObj *pDb = pRow->pObj;
SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index 85d9f94b88..b513da29f4 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -29,6 +29,7 @@
#include "mnodeDef.h"
#include "mnodeInt.h"
#include "mnodeDnode.h"
+#include "mnodeDb.h"
#include "mnodeMnode.h"
#include "mnodeSdb.h"
#include "mnodeShow.h"
@@ -745,6 +746,14 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) {
return TSDB_CODE_MND_NO_REMOVE_MASTER;
}
+ int32_t maxReplica = mnodeGetDbMaxReplica();
+ int32_t dnodesNum = mnodeGetDnodesNum();
+ if (dnodesNum <= maxReplica) {
+ mError("dnode:%d, can't drop dnode:%s, #dnodes: %d, replica: %d", pDnode->dnodeId, ep, dnodesNum, maxReplica);
+ mnodeDecDnodeRef(pDnode);
+ return TSDB_CODE_MND_NO_ENOUGH_DNODES;
+ }
+
mInfo("dnode:%d, start to drop it", pDnode->dnodeId);
int32_t code = bnDropDnode(pDnode);
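
The new guard rejects the drop whenever dnodesNum <= maxReplica: with a database at replica 3 and exactly 3 dnodes, removing one would leave only 2 nodes to host 3 replicas. A condensed restatement of that arithmetic, as a sketch rather than the patch's own code:

/* after dropping one dnode, (dnodesNum - 1) nodes remain; every replica
 * of the most-replicated database still needs its own node */
static int canDropDnode(int dnodesNum, int maxReplica) {
  return (dnodesNum - 1) >= maxReplica;  /* same as !(dnodesNum <= maxReplica) */
}
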
diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c
index 03772f2724..c0fa6368f3 100644
--- a/src/mnode/src/mnodeShow.c
+++ b/src/mnode/src/mnodeShow.c
@@ -129,7 +129,7 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) {
SShowObj *pShow = calloc(1, showObjSize);
pShow->type = pShowMsg->type;
pShow->payloadLen = htons(pShowMsg->payloadLen);
- tstrncpy(pShow->db, pShowMsg->db, TSDB_DB_NAME_LEN);
+ tstrncpy(pShow->db, pShowMsg->db, TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN);
memcpy(pShow->payload, pShowMsg->payload, pShow->payloadLen);
pShow = mnodePutShowObj(pShow);
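
The copy above passes tstrncpy the full field size. A stand-in with the semantics this call appears to rely on (strncpy plus forced NUL termination) looks like the sketch below; treat it as an assumption about the helper, not its actual definition in this tree.

#include <string.h>

static void tstrncpy_sketch(char *dst, const char *src, size_t size) {
  strncpy(dst, src, size - 1);
  dst[size - 1] = '\0';   /* raw strncpy may leave dst unterminated */
}
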
diff --git a/src/mnode/src/mnodeUser.c b/src/mnode/src/mnodeUser.c
index 55ee39b6bc..e77c1b3e59 100644
--- a/src/mnode/src/mnodeUser.c
+++ b/src/mnode/src/mnodeUser.c
@@ -123,7 +123,7 @@ static void mnodePrintUserAuth() {
mnodeDecUserRef(pUser);
}
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
}
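
Routing the flush through taosFsync instead of calling fsync directly keeps mnodeUser.c portable, since Windows has no fsync(2). A plausible shape for such a wrapper, purely to illustrate the motivation and not the actual osFile implementation:

#if defined(_WIN32)
#include <io.h>
static int fsync_sketch(int fd) { return _commit(fd); }  /* Windows flush-to-disk */
#else
#include <unistd.h>
static int fsync_sketch(int fd) { return fsync(fd); }
#endif
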
diff --git a/src/os/CMakeLists.txt b/src/os/CMakeLists.txt
index ab8b0f7678..4472c683c7 100644
--- a/src/os/CMakeLists.txt
+++ b/src/os/CMakeLists.txt
@@ -2,7 +2,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
IF (TD_LINUX)
- ADD_SUBDIRECTORY(src/linux)
+ ADD_SUBDIRECTORY(src/linux)
ELSEIF (TD_DARWIN)
ADD_SUBDIRECTORY(src/darwin)
ELSEIF (TD_WINDOWS)
diff --git a/src/os/inc/os.h b/src/os/inc/os.h
index ff3427dae3..4306382bff 100644
--- a/src/os/inc/os.h
+++ b/src/os/inc/os.h
@@ -20,45 +20,9 @@
extern "C" {
#endif
-#ifdef _TD_DARWIN_64
-#include "osDarwin.h"
-#endif
-
-#ifdef _TD_ARM_64
-#include "osArm64.h"
-#endif
-
-#ifdef _TD_ARM_32
-#include "osArm32.h"
-#endif
-
-#ifdef _TD_MIPS_64
-#include "osMips64.h"
-#endif
-
-#ifdef _TD_LINUX_64
-#include "osLinux64.h"
-#endif
-
-#ifdef _TD_LINUX_32
-#include "osLinux32.h"
-#endif
-
-#ifdef _ALPINE
-#include "osAlpine.h"
-#endif
-
-#ifdef _TD_NINGSI_60
-#include "osNingsi.h"
-#endif
-
-#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
-#include "osWindows.h"
-#endif
-
+#include "osInc.h"
#include "osDef.h"
#include "osAtomic.h"
-#include "osCommon.h"
#include "osDir.h"
#include "osFile.h"
#include "osLz4.h"
@@ -67,6 +31,7 @@ extern "C" {
#include "osRand.h"
#include "osSemphone.h"
#include "osSignal.h"
+#include "osSleep.h"
#include "osSocket.h"
#include "osString.h"
#include "osSysinfo.h"
diff --git a/src/os/inc/osAlpine.h b/src/os/inc/osAlpine.h
deleted file mode 100644
index eba9459395..0000000000
--- a/src/os/inc/osAlpine.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef TDENGINE_OS_ALPINE_H
-#define TDENGINE_OS_ALPINE_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-typedef int(*__compar_fn_t)(const void *, const void *);
-void error (int, int, const char *);
-#ifndef PTHREAD_MUTEX_RECURSIVE_NP
- #define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/src/os/inc/osArm32.h b/src/os/inc/osArm32.h
index e7c307dd9d..e69de29bb2 100644
--- a/src/os/inc/osArm32.h
+++ b/src/os/inc/osArm32.h
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef TDENGINE_OS_ARM32_H
-#define TDENGINE_OS_ARM32_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define TAOS_OS_FUNC_LZ4
- #define BUILDIN_CLZL(val) __builtin_clzll(val)
- #define BUILDIN_CTZL(val) __builtin_ctzll(val)
- #define BUILDIN_CLZ(val) __builtin_clz(val)
- #define BUILDIN_CTZ(val) __builtin_ctz(val)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/src/os/inc/osArm64.h b/src/os/inc/osArm64.h
index 494dba751a..e69de29bb2 100644
--- a/src/os/inc/osArm64.h
+++ b/src/os/inc/osArm64.h
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef TDENGINE_OS_ARM64_H
-#define TDENGINE_OS_ARM64_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/src/os/inc/osAtomic.h b/src/os/inc/osAtomic.h
index 803c351400..7affa444ee 100644
--- a/src/os/inc/osAtomic.h
+++ b/src/os/inc/osAtomic.h
@@ -20,7 +20,252 @@
extern "C" {
#endif
-#ifndef TAOS_OS_FUNC_ATOMIC
+#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
+ #define atomic_load_8(ptr) (*(char volatile*)(ptr))
+ #define atomic_load_16(ptr) (*(short volatile*)(ptr))
+ #define atomic_load_32(ptr) (*(long volatile*)(ptr))
+ #define atomic_load_64(ptr) (*(__int64 volatile*)(ptr))
+ #define atomic_load_ptr(ptr) (*(void* volatile*)(ptr))
+
+ #define atomic_store_8(ptr, val) ((*(char volatile*)(ptr)) = (char)(val))
+ #define atomic_store_16(ptr, val) ((*(short volatile*)(ptr)) = (short)(val))
+ #define atomic_store_32(ptr, val) ((*(long volatile*)(ptr)) = (long)(val))
+ #define atomic_store_64(ptr, val) ((*(__int64 volatile*)(ptr)) = (__int64)(val))
+ #define atomic_store_ptr(ptr, val) ((*(void* volatile*)(ptr)) = (void*)(val))
+
+ #define atomic_exchange_8(ptr, val) _InterlockedExchange8((char volatile*)(ptr), (char)(val))
+ #define atomic_exchange_16(ptr, val) _InterlockedExchange16((short volatile*)(ptr), (short)(val))
+ #define atomic_exchange_32(ptr, val) _InterlockedExchange((long volatile*)(ptr), (long)(val))
+ #define atomic_exchange_64(ptr, val) _InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val))
+ #ifdef _WIN64
+ #define atomic_exchange_ptr(ptr, val) _InterlockedExchangePointer((void* volatile*)(ptr), (void*)(val))
+ #else
+ #define atomic_exchange_ptr(ptr, val) _InlineInterlockedExchangePointer((void* volatile*)(ptr), (void*)(val))
+ #endif
+
+ #ifdef _TD_GO_DLL_
+ #define atomic_val_compare_exchange_8 __sync_val_compare_and_swap
+ #else
+ #define atomic_val_compare_exchange_8(ptr, oldval, newval) _InterlockedCompareExchange8((char volatile*)(ptr), (char)(newval), (char)(oldval))
+ #endif
+ #define atomic_val_compare_exchange_16(ptr, oldval, newval) _InterlockedCompareExchange16((short volatile*)(ptr), (short)(newval), (short)(oldval))
+ #define atomic_val_compare_exchange_32(ptr, oldval, newval) _InterlockedCompareExchange((long volatile*)(ptr), (long)(newval), (long)(oldval))
+ #define atomic_val_compare_exchange_64(ptr, oldval, newval) _InterlockedCompareExchange64((__int64 volatile*)(ptr), (__int64)(newval), (__int64)(oldval))
+ #define atomic_val_compare_exchange_ptr(ptr, oldval, newval) _InterlockedCompareExchangePointer((void* volatile*)(ptr), (void*)(newval), (void*)(oldval))
+
+ char interlocked_add_fetch_8(char volatile *ptr, char val);
+ short interlocked_add_fetch_16(short volatile *ptr, short val);
+ long interlocked_add_fetch_32(long volatile *ptr, long val);
+ __int64 interlocked_add_fetch_64(__int64 volatile *ptr, __int64 val);
+
+ char interlocked_and_fetch_8(char volatile* ptr, char val);
+ short interlocked_and_fetch_16(short volatile* ptr, short val);
+ long interlocked_and_fetch_32(long volatile* ptr, long val);
+ __int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val);
+
+ __int64 interlocked_fetch_and_64(__int64 volatile* ptr, __int64 val);
+
+ char interlocked_or_fetch_8(char volatile* ptr, char val);
+ short interlocked_or_fetch_16(short volatile* ptr, short val);
+ long interlocked_or_fetch_32(long volatile* ptr, long val);
+ __int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val);
+
+ char interlocked_xor_fetch_8(char volatile* ptr, char val);
+ short interlocked_xor_fetch_16(short volatile* ptr, short val);
+ long interlocked_xor_fetch_32(long volatile* ptr, long val);
+ __int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val);
+
+ __int64 interlocked_fetch_xor_64(__int64 volatile* ptr, __int64 val);
+
+ #define atomic_add_fetch_8(ptr, val) interlocked_add_fetch_8((char volatile*)(ptr), (char)(val))
+ #define atomic_add_fetch_16(ptr, val) interlocked_add_fetch_16((short volatile*)(ptr), (short)(val))
+ #define atomic_add_fetch_32(ptr, val) interlocked_add_fetch_32((long volatile*)(ptr), (long)(val))
+ #define atomic_add_fetch_64(ptr, val) interlocked_add_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
+ #ifdef _TD_GO_DLL_
+ #define atomic_fetch_add_8 __sync_fetch_and_add
+ #define atomic_fetch_add_16 __sync_fetch_and_add
+ #else
+ #define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val))
+ #endif
+ #define atomic_fetch_add_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), (long)(val))
+ #define atomic_fetch_add_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_sub_fetch_8(ptr, val) interlocked_add_fetch_8((char volatile*)(ptr), -(char)(val))
+ #define atomic_sub_fetch_16(ptr, val) interlocked_add_fetch_16((short volatile*)(ptr), -(short)(val))
+ #define atomic_sub_fetch_32(ptr, val) interlocked_add_fetch_32((long volatile*)(ptr), -(long)(val))
+ #define atomic_sub_fetch_64(ptr, val) interlocked_add_fetch_64((__int64 volatile*)(ptr), -(__int64)(val))
+
+ #define atomic_fetch_sub_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), -(char)(val))
+ #define atomic_fetch_sub_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), -(short)(val))
+ #define atomic_fetch_sub_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), -(long)(val))
+ #define atomic_fetch_sub_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), -(__int64)(val))
+
+ #define atomic_and_fetch_8(ptr, val) interlocked_and_fetch_8((char volatile*)(ptr), (char)(val))
+ #define atomic_and_fetch_16(ptr, val) interlocked_and_fetch_16((short volatile*)(ptr), (short)(val))
+ #define atomic_and_fetch_32(ptr, val) interlocked_and_fetch_32((long volatile*)(ptr), (long)(val))
+ #define atomic_and_fetch_64(ptr, val) interlocked_and_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_fetch_and_8(ptr, val) _InterlockedAnd8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_and_16(ptr, val) _InterlockedAnd16((short volatile*)(ptr), (short)(val))
+ #define atomic_fetch_and_32(ptr, val) _InterlockedAnd((long volatile*)(ptr), (long)(val))
+ #define atomic_fetch_and_64(ptr, val) interlocked_fetch_and_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_or_fetch_8(ptr, val) interlocked_or_fetch_8((char volatile*)(ptr), (char)(val))
+ #define atomic_or_fetch_16(ptr, val) interlocked_or_fetch_16((short volatile*)(ptr), (short)(val))
+ #define atomic_or_fetch_32(ptr, val) interlocked_or_fetch_32((long volatile*)(ptr), (long)(val))
+ #define atomic_or_fetch_64(ptr, val) interlocked_or_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_fetch_or_8(ptr, val) _InterlockedOr8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_or_16(ptr, val) _InterlockedOr16((short volatile*)(ptr), (short)(val))
+ #define atomic_fetch_or_32(ptr, val) _InterlockedOr((long volatile*)(ptr), (long)(val))
+ #define atomic_fetch_or_64(ptr, val) interlocked_fetch_or_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_xor_fetch_8(ptr, val) interlocked_xor_fetch_8((char volatile*)(ptr), (char)(val))
+ #define atomic_xor_fetch_16(ptr, val) interlocked_xor_fetch_16((short volatile*)(ptr), (short)(val))
+ #define atomic_xor_fetch_32(ptr, val) interlocked_xor_fetch_32((long volatile*)(ptr), (long)(val))
+ #define atomic_xor_fetch_64(ptr, val) interlocked_xor_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_fetch_xor_8(ptr, val) _InterlockedXor8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_xor_16(ptr, val) _InterlockedXor16((short volatile*)(ptr), (short)(val))
+ #define atomic_fetch_xor_32(ptr, val) _InterlockedXor((long volatile*)(ptr), (long)(val))
+ #define atomic_fetch_xor_64(ptr, val) interlocked_fetch_xor_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #ifdef _WIN64
+ #define atomic_add_fetch_ptr atomic_add_fetch_64
+ #define atomic_fetch_add_ptr atomic_fetch_add_64
+ #define atomic_sub_fetch_ptr atomic_sub_fetch_64
+ #define atomic_fetch_sub_ptr atomic_fetch_sub_64
+ #define atomic_and_fetch_ptr atomic_and_fetch_64
+ #define atomic_fetch_and_ptr atomic_fetch_and_64
+ #define atomic_or_fetch_ptr atomic_or_fetch_64
+ #define atomic_fetch_or_ptr atomic_fetch_or_64
+ #define atomic_xor_fetch_ptr atomic_xor_fetch_64
+ #define atomic_fetch_xor_ptr atomic_fetch_xor_64
+ #else
+ #define atomic_add_fetch_ptr atomic_add_fetch_32
+ #define atomic_fetch_add_ptr atomic_fetch_add_32
+ #define atomic_sub_fetch_ptr atomic_sub_fetch_32
+ #define atomic_fetch_sub_ptr atomic_fetch_sub_32
+ #define atomic_and_fetch_ptr atomic_and_fetch_32
+ #define atomic_fetch_and_ptr atomic_fetch_and_32
+ #define atomic_or_fetch_ptr atomic_or_fetch_32
+ #define atomic_fetch_or_ptr atomic_fetch_or_32
+ #define atomic_xor_fetch_ptr atomic_xor_fetch_32
+ #define atomic_fetch_xor_ptr atomic_fetch_xor_32
+ #endif
+#elif defined(_TD_NINGSI_60)
+ /*
+ * type __sync_fetch_and_add (type *ptr, type value);
+ * type __sync_fetch_and_sub (type *ptr, type value);
+ * type __sync_fetch_and_or (type *ptr, type value);
+ * type __sync_fetch_and_and (type *ptr, type value);
+ * type __sync_fetch_and_xor (type *ptr, type value);
+ * type __sync_fetch_and_nand (type *ptr, type value);
+ * type __sync_add_and_fetch (type *ptr, type value);
+ * type __sync_sub_and_fetch (type *ptr, type value);
+ * type __sync_or_and_fetch (type *ptr, type value);
+ * type __sync_and_and_fetch (type *ptr, type value);
+ * type __sync_xor_and_fetch (type *ptr, type value);
+ * type __sync_nand_and_fetch (type *ptr, type value);
+ *
+ * bool __sync_bool_compare_and_swap (type *ptr, type oldval, type newval, ...)
+ * type __sync_val_compare_and_swap (type *ptr, type oldval, type newval, ...)
+ * */
+
+ #define atomic_load_8(ptr) __sync_fetch_and_add((ptr), 0)
+ #define atomic_load_16(ptr) __sync_fetch_and_add((ptr), 0)
+ #define atomic_load_32(ptr) __sync_fetch_and_add((ptr), 0)
+ #define atomic_load_64(ptr) __sync_fetch_and_add((ptr), 0)
+ #define atomic_load_ptr(ptr) __sync_fetch_and_add((ptr), 0)
+
+ #define atomic_store_8(ptr, val) (*(ptr)=(val))
+ #define atomic_store_16(ptr, val) (*(ptr)=(val))
+ #define atomic_store_32(ptr, val) (*(ptr)=(val))
+ #define atomic_store_64(ptr, val) (*(ptr)=(val))
+ #define atomic_store_ptr(ptr, val) (*(ptr)=(val))
+
+ int8_t atomic_exchange_8_impl(int8_t* ptr, int8_t val );
+ int16_t atomic_exchange_16_impl(int16_t* ptr, int16_t val );
+ int32_t atomic_exchange_32_impl(int32_t* ptr, int32_t val );
+ int64_t atomic_exchange_64_impl(int64_t* ptr, int64_t val );
+ void* atomic_exchange_ptr_impl( void **ptr, void *val );
+
+ #define atomic_exchange_8(ptr, val) atomic_exchange_8_impl((int8_t*)ptr, (int8_t)val)
+ #define atomic_exchange_16(ptr, val) atomic_exchange_16_impl((int16_t*)ptr, (int16_t)val)
+ #define atomic_exchange_32(ptr, val) atomic_exchange_32_impl((int32_t*)ptr, (int32_t)val)
+ #define atomic_exchange_64(ptr, val) atomic_exchange_64_impl((int64_t*)ptr, (int64_t)val)
+ #define atomic_exchange_ptr(ptr, val) atomic_exchange_ptr_impl((void **)ptr, (void*)val)
+
+ #define atomic_val_compare_exchange_8 __sync_val_compare_and_swap
+ #define atomic_val_compare_exchange_16 __sync_val_compare_and_swap
+ #define atomic_val_compare_exchange_32 __sync_val_compare_and_swap
+ #define atomic_val_compare_exchange_64 __sync_val_compare_and_swap
+ #define atomic_val_compare_exchange_ptr __sync_val_compare_and_swap
+
+ #define atomic_add_fetch_8(ptr, val) __sync_add_and_fetch((ptr), (val))
+ #define atomic_add_fetch_16(ptr, val) __sync_add_and_fetch((ptr), (val))
+ #define atomic_add_fetch_32(ptr, val) __sync_add_and_fetch((ptr), (val))
+ #define atomic_add_fetch_64(ptr, val) __sync_add_and_fetch((ptr), (val))
+ #define atomic_add_fetch_ptr(ptr, val) __sync_add_and_fetch((ptr), (val))
+
+ #define atomic_fetch_add_8(ptr, val) __sync_fetch_and_add((ptr), (val))
+ #define atomic_fetch_add_16(ptr, val) __sync_fetch_and_add((ptr), (val))
+ #define atomic_fetch_add_32(ptr, val) __sync_fetch_and_add((ptr), (val))
+ #define atomic_fetch_add_64(ptr, val) __sync_fetch_and_add((ptr), (val))
+ #define atomic_fetch_add_ptr(ptr, val) __sync_fetch_and_add((ptr), (val))
+
+ #define atomic_sub_fetch_8(ptr, val) __sync_sub_and_fetch((ptr), (val))
+ #define atomic_sub_fetch_16(ptr, val) __sync_sub_and_fetch((ptr), (val))
+ #define atomic_sub_fetch_32(ptr, val) __sync_sub_and_fetch((ptr), (val))
+ #define atomic_sub_fetch_64(ptr, val) __sync_sub_and_fetch((ptr), (val))
+ #define atomic_sub_fetch_ptr(ptr, val) __sync_sub_and_fetch((ptr), (val))
+
+ #define atomic_fetch_sub_8(ptr, val) __sync_fetch_and_sub((ptr), (val))
+ #define atomic_fetch_sub_16(ptr, val) __sync_fetch_and_sub((ptr), (val))
+ #define atomic_fetch_sub_32(ptr, val) __sync_fetch_and_sub((ptr), (val))
+ #define atomic_fetch_sub_64(ptr, val) __sync_fetch_and_sub((ptr), (val))
+ #define atomic_fetch_sub_ptr(ptr, val) __sync_fetch_and_sub((ptr), (val))
+
+ #define atomic_and_fetch_8(ptr, val) __sync_and_and_fetch((ptr), (val))
+ #define atomic_and_fetch_16(ptr, val) __sync_and_and_fetch((ptr), (val))
+ #define atomic_and_fetch_32(ptr, val) __sync_and_and_fetch((ptr), (val))
+ #define atomic_and_fetch_64(ptr, val) __sync_and_and_fetch((ptr), (val))
+ #define atomic_and_fetch_ptr(ptr, val) __sync_and_and_fetch((ptr), (val))
+
+ #define atomic_fetch_and_8(ptr, val) __sync_fetch_and_and((ptr), (val))
+ #define atomic_fetch_and_16(ptr, val) __sync_fetch_and_and((ptr), (val))
+ #define atomic_fetch_and_32(ptr, val) __sync_fetch_and_and((ptr), (val))
+ #define atomic_fetch_and_64(ptr, val) __sync_fetch_and_and((ptr), (val))
+ #define atomic_fetch_and_ptr(ptr, val) __sync_fetch_and_and((ptr), (val))
+
+ #define atomic_or_fetch_8(ptr, val) __sync_or_and_fetch((ptr), (val))
+ #define atomic_or_fetch_16(ptr, val) __sync_or_and_fetch((ptr), (val))
+ #define atomic_or_fetch_32(ptr, val) __sync_or_and_fetch((ptr), (val))
+ #define atomic_or_fetch_64(ptr, val) __sync_or_and_fetch((ptr), (val))
+ #define atomic_or_fetch_ptr(ptr, val) __sync_or_and_fetch((ptr), (val))
+
+ #define atomic_fetch_or_8(ptr, val) __sync_fetch_and_or((ptr), (val))
+ #define atomic_fetch_or_16(ptr, val) __sync_fetch_and_or((ptr), (val))
+ #define atomic_fetch_or_32(ptr, val) __sync_fetch_and_or((ptr), (val))
+ #define atomic_fetch_or_64(ptr, val) __sync_fetch_and_or((ptr), (val))
+ #define atomic_fetch_or_ptr(ptr, val) __sync_fetch_and_or((ptr), (val))
+
+ #define atomic_xor_fetch_8(ptr, val) __sync_xor_and_fetch((ptr), (val))
+ #define atomic_xor_fetch_16(ptr, val) __sync_xor_and_fetch((ptr), (val))
+ #define atomic_xor_fetch_32(ptr, val) __sync_xor_and_fetch((ptr), (val))
+ #define atomic_xor_fetch_64(ptr, val) __sync_xor_and_fetch((ptr), (val))
+ #define atomic_xor_fetch_ptr(ptr, val) __sync_xor_and_fetch((ptr), (val))
+
+ #define atomic_fetch_xor_8(ptr, val) __sync_fetch_and_xor((ptr), (val))
+ #define atomic_fetch_xor_16(ptr, val) __sync_fetch_and_xor((ptr), (val))
+ #define atomic_fetch_xor_32(ptr, val) __sync_fetch_and_xor((ptr), (val))
+ #define atomic_fetch_xor_64(ptr, val) __sync_fetch_and_xor((ptr), (val))
+ #define atomic_fetch_xor_ptr(ptr, val) __sync_fetch_and_xor((ptr), (val))
+
+#else
#define atomic_load_8(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_16(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_32(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
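
Since the three branches above (Windows intrinsics, Ningsi __sync builtins, and the default __atomic builtins) all surface the same atomic_* macro names, callers stay platform-agnostic. A small usage sketch, assuming the consolidated os.h from this patch is on the include path:

#include <stdint.h>
#include "os.h"   /* provides the atomic_* macros defined above */

typedef struct {
  volatile int32_t ref;
} SRefSketch;

static int32_t refAcquire(SRefSketch *p) {
  return atomic_add_fetch_32(&p->ref, 1);   /* returns the new count */
}

static int32_t refRelease(SRefSketch *p) {
  int32_t n = atomic_sub_fetch_32(&p->ref, 1);
  return n;   /* caller may free the object when this reaches 0 */
}
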
diff --git a/src/os/inc/osDarwin.h b/src/os/inc/osDarwin.h
deleted file mode 100644
index 7c206afe7a..0000000000
--- a/src/os/inc/osDarwin.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef TDENGINE_OS_DARWIN_H
-#define TDENGINE_OS_DARWIN_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include