diff --git a/.appveyor.yml b/.appveyor.yml
index fe4816688b..ee1dc91767 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -1,30 +1,49 @@
version: 1.0.{build}
-os: Visual Studio 2015
+image:
+ - Visual Studio 2015
+ - macos
environment:
matrix:
- ARCH: amd64
- ARCH: x86
+matrix:
+ exclude:
+ - image: macos
+ ARCH: x86
+for:
+ -
+ matrix:
+ only:
+ - image: Visual Studio 2015
+ clone_folder: c:\dev\TDengine
+ clone_depth: 1
-clone_folder: c:\dev\TDengine
-clone_depth: 1
+ init:
+ - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%
-init:
- - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%
+ before_build:
+ - cd c:\dev\TDengine
+ - md build
-before_build:
- - cd c:\dev\TDengine
- - md build
-
-build_script:
- - cd build
- - cmake -G "NMake Makefiles" ..
- - nmake install
+ build_script:
+ - cd build
+ - cmake -G "NMake Makefiles" ..
+ - nmake install
+ -
+ matrix:
+ only:
+ - image: macos
+ clone_depth: 1
+ build_script:
+ - mkdir debug
+ - cd debug
+ - cmake .. > /dev/null
+ - make > /dev/null
notifications:
- provider: Email
to:
- sangshuduo@gmail.com
-
on_build_success: true
on_build_failure: true
on_build_status_changed: true
diff --git a/.drone.yml b/.drone.yml
new file mode 100644
index 0000000000..e7ae6ebbda
--- /dev/null
+++ b/.drone.yml
@@ -0,0 +1,180 @@
+---
+kind: pipeline
+name: test_amd64
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: smoke_test
+ image: python:3.8
+ commands:
+ - apt-get update
+ - apt-get install -y cmake build-essential gcc
+ - pip3 install psutil
+ - pip3 install guppy3
+ - pip3 install src/connector/python/linux/python3/
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ - cd ../tests
+ - ./test-all.sh smoke
+ when:
+ branch:
+ - develop
+ - master
+
+
+- name: crash_gen
+ image: python:3.8
+ commands:
+ - pip3 install requests
+ - pip3 install src/connector/python/linux/python3/
+ - pip3 install psutil
+ - pip3 install guppy3
+ - cd tests/pytest
+ - ./crash_gen.sh -a -p -t 4 -s 2000
+ when:
+ branch:
+ - develop
+ - master
+
+
+---
+kind: pipeline
+name: test_arm64
+
+platform:
+ os: linux
+ arch: arm64
+
+steps:
+- name: build
+ image: gcc
+ commands:
+ - apt-get update
+ - apt-get install -y cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake .. -DCPUTYPE=aarch64 > /dev/null
+ - make
+ when:
+ branch:
+ - develop
+ - master
+---
+kind: pipeline
+name: test_arm
+
+platform:
+ os: linux
+ arch: arm
+
+steps:
+- name: build
+ image: arm32v7/ubuntu:bionic
+ commands:
+ - apt-get update
+ - apt-get install -y cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake .. -DCPUTYPE=aarch32 > /dev/null
+ - make
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: build_trusty
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ubuntu:trusty
+ commands:
+ - apt-get update
+ - apt-get install -y gcc cmake3 build-essential git binutils-2.26
+
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: build_xenial
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ubuntu:xenial
+ commands:
+ - apt-get update
+ - apt-get install -y gcc cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: build_bionic
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ubuntu:bionic
+ commands:
+ - apt-get update
+ - apt-get install -y gcc cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: goodbye
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: 64-bit
+ image: alpine
+ commands:
+ - echo 64-bit is good.
+ when:
+ branch:
+ - develop
+ - master
+
+
+depends_on:
+- test_arm64
+- test_amd64
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 0617d75976..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,296 +0,0 @@
-#
-# Configuration
-#
-#
-# Build Matrix
-#
-branches:
- only:
- - master
- - develop
- - coverity_scan
- - /^.*ci-.*$/
-
-matrix:
- - os: linux
- dist: focal
- language: c
-
- git:
- - depth: 1
-
- compiler: gcc
- env: DESC="linux/gcc build and test"
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - net-tools
- - python3-pip
- - python3-setuptools
- - valgrind
- - psmisc
- - unixodbc
- - unixodbc-dev
- - mono-complete
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - cmake .. > /dev/null
- - make > /dev/null
-
- after_success:
- - travis_wait 20
- - |-
- case $TRAVIS_OS_NAME in
- linux)
- cd ${TRAVIS_BUILD_DIR}/debug
- make install > /dev/null || travis_terminate $?
-
- py3ver=`python3 --version|awk '{print $2}'|cut -d "." -f 1,2` && apt install python$py3ver-dev
- pip3 install psutil
- pip3 install guppy3
- pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
-
- cd ${TRAVIS_BUILD_DIR}/tests/examples/C#/taosdemo
- mcs -out:taosdemo *.cs || travis_terminate $?
- pkill -TERM -x taosd
- fuser -k -n tcp 6030
- sleep 1
- ${TRAVIS_BUILD_DIR}/debug/build/bin/taosd -c ${TRAVIS_BUILD_DIR}/debug/test/cfg > /dev/null &
- sleep 5
- mono taosdemo -Q DEFAULT -y || travis_terminate $?
- pkill -KILL -x taosd
- fuser -k -n tcp 6030
- sleep 1
-
- cd ${TRAVIS_BUILD_DIR}/tests
- ./test-all.sh smoke || travis_terminate $?
- sleep 1
-
- cd ${TRAVIS_BUILD_DIR}/tests/pytest
- pkill -TERM -x taosd
- fuser -k -n tcp 6030
- sleep 1
- ./crash_gen.sh -a -p -t 4 -s 2000|| travis_terminate $?
- sleep 1
-
- cd ${TRAVIS_BUILD_DIR}/tests/pytest
- ./valgrind-test.sh 2>&1 > mem-error-out.log
- sleep 1
-
-
- # Color setting
- RED='\033[0;31m'
- GREEN='\033[1;32m'
- GREEN_DARK='\033[0;32m'
- GREEN_UNDERLINE='\033[4;32m'
- NC='\033[0m'
-
- grep 'start to execute\|ERROR SUMMARY' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-mem-error-out.log
-
- for memError in `grep 'ERROR SUMMARY' uniq-mem-error-out.log | awk '{print $4}'`
- do
- if [ -n "$memError" ]; then
- if [ "$memError" -gt 12 ]; then
- echo -e "${RED} ## Memory errors number valgrind reports is $memError.\
- More than our threshold! ## ${NC}"
- travis_terminate $memError
- fi
- fi
- done
-
- grep 'start to execute\|definitely lost:' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-definitely-lost-out.log
- for defiMemError in `grep 'definitely lost:' uniq-definitely-lost-out.log | awk '{print $7}'`
- do
- if [ -n "$defiMemError" ]; then
- if [ "$defiMemError" -gt 13 ]; then
- echo -e "${RED} ## Memory errors number valgrind reports \
- Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
- travis_terminate $defiMemError
- fi
- fi
- done
-
- ;;
- esac
-
- - os: linux
- dist: bionic
- language: c
- compiler: gcc
- env: COVERITY_SCAN=true
- git:
- - depth: 1
-
- script:
- - echo "this job is for coverity scan"
-
- addons:
- coverity_scan:
- # GitHub project metadata
- # ** specific to your project **
- project:
- name: TDengine
- version: 2.x
- description: TDengine
-
- # Where email notification of build analysis results will be sent
- notification_email: sdsang@taosdata.com, slguan@taosdata.com
-
- # Commands to prepare for build_command
- # ** likely specific to your build **
- build_command_prepend: cmake . > /dev/null
-
- # The command that will be added as an argument to "cov-build" to compile your project for analysis,
- # ** likely specific to your build **
- build_command: make
-
- # Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'.
- # Take care in resource usage, and consider the build frequency allowances per
- # https://scan.coverity.com/faq#frequency
- branch_pattern: coverity_scan
-
- - os: linux
- dist: trusty
- language: c
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - binutils-2.26
- - unixodbc
- - unixodbc-dev
- env:
- - DESC="trusty/gcc-4.8/bintuils-2.26 build"
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - cmake .. > /dev/null
- - export PATH=/usr/lib/binutils-2.26/bin:$PATH && make
-
- - os: linux
- dist: bionic
- language: c
- compiler: clang
- env: DESC="linux/clang build"
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - unixodbc
- - unixodbc-dev
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - cmake .. > /dev/null
- - make > /dev/null
-
- - os: linux
- arch: arm64
- dist: bionic
- language: c
- compiler: clang
- env: DESC="arm64 linux/clang build"
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
- cmake .. -DCPUTYPE=aarch64 > /dev/null;
- else
- cmake .. > /dev/null;
- fi
- - make > /dev/null
-
- - os: linux
- arch: arm64
- dist: xenial
- language: c
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - unixodbc
- - unixodbc-dev
- env:
- - DESC="arm64 xenial build"
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
- cmake .. -DCPUTYPE=aarch64 > /dev/null;
- else
- cmake .. > /dev/null;
- fi
- - make > /dev/null
-
- - os: osx
- osx_image: xcode11.4
- language: c
- compiler: clang
- env: DESC="mac/clang build"
- git:
- - depth: 1
- addons:
- homebrew:
- - cmake
- - unixodbc
-
- script:
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
- - cmake .. > /dev/null
- - make > /dev/null
diff --git a/Jenkinsfile b/Jenkinsfile
index dfe9ed4389..33ce784bce 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -94,6 +94,7 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
+ pip3 install ${WKC}/src/connector/python/linux/python3/
'''
return 1
}
diff --git a/README.md b/README.md
index 45a955f458..78f902babe 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[](https://travis-ci.org/taosdata/TDengine)
+[](https://cloud.drone.io/taosdata/TDengine)
[](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[](https://bestpractices.coreinfrastructure.org/projects/4201)
diff --git a/cmake/define.inc b/cmake/define.inc
index e825dce024..4115dd0c41 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -57,7 +57,7 @@ IF (TD_LINUX_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
ENDIF ()
@@ -65,7 +65,7 @@ IF (TD_LINUX_32)
ADD_DEFINITIONS(-D_TD_LINUX_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "linux32 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_64)
@@ -73,7 +73,7 @@ IF (TD_ARM_64)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_32)
@@ -81,7 +81,7 @@ IF (TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm32 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
ENDIF ()
IF (TD_MIPS_64)
@@ -89,7 +89,7 @@ IF (TD_MIPS_64)
ADD_DEFINITIONS(-D_TD_MIPS_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_MIPS_32)
@@ -97,7 +97,7 @@ IF (TD_MIPS_32)
ADD_DEFINITIONS(-D_TD_MIPS_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips32 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_APLHINE)
@@ -138,7 +138,7 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
SET(RELEASE_FLAGS "-Og")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
diff --git a/cmake/env.inc b/cmake/env.inc
index efcc996176..3989993953 100755
--- a/cmake/env.inc
+++ b/cmake/env.inc
@@ -32,6 +32,7 @@ ENDIF ()
#
# Set compiler options
+SET(COMMON_C_FLAGS "${COMMON_FLAGS} -std=gnu99")
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_FLAGS} ${RELEASE_FLAGS}")
diff --git a/cmake/version.inc b/cmake/version.inc
index 8035b31cc7..0ee23f319a 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "2.0.20.0")
+ SET(TD_VER_NUMBER "2.1.0.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/documentation20/cn/06.queries/docs.md b/documentation20/cn/06.queries/docs.md
index a161778a72..5557134aac 100644
--- a/documentation20/cn/06.queries/docs.md
+++ b/documentation20/cn/06.queries/docs.md
@@ -12,7 +12,7 @@ TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, G
- 时间戳对齐的连接查询(Join Query: 隐式连接)操作
- 多种聚合/计算函数: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff等
-例如:在TAOS Shell中,从表d1001中查询出vlotage > 215的记录,按时间降序排列,仅仅输出2条。
+例如:在TAOS Shell中,从表d1001中查询出voltage > 215的记录,按时间降序排列,仅仅输出2条。
```mysql
taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
ts | current | voltage | phase |
diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md
index 3442a2248c..5eec33e2f1 100644
--- a/documentation20/cn/08.connector/01.java/docs.md
+++ b/documentation20/cn/08.connector/01.java/docs.md
@@ -16,7 +16,6 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
* TDengine 目前不支持针对单条数据记录的删除操作。
* 目前不支持事务操作。
-* 目前不支持表间的 union 操作。
* 目前不支持嵌套查询(nested query)。
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。
@@ -447,7 +446,7 @@ Query OK, 1 row(s) in set (0.000141s)
-## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
+## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index 4da66ab486..59f80b0a55 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -349,7 +349,7 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
* param:是应用提供的用于回调的一个参数,回调时,提供给应用
* callback: 第二个回调函数,会在连续查询自动停止时被调用。
- 返回值为NULL,表示创建成功,返回值不为空,表示成功。
+ 返回值为NULL,表示创建失败;返回值不为空,表示成功。
- `void taos_close_stream (TAOS_STREAM *tstr)`
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index 72fcd05d52..bfa0456c7d 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -144,7 +144,7 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数
- numOfMnodes:系统中管理节点个数。默认值:3。
- balance:是否启动负载均衡。0:否,1:是。默认值:1。
- mnodeEqualVnodeNum: 一个mnode等同于vnode消耗的个数。默认值:4。
-- offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*100(即100天)。
+- offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*10(即10天)。
- statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。
- maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。
- maxVgroupsPerDb: 每个数据库中能够使用的最大vgroup个数。
@@ -462,31 +462,31 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
| 关键字列表 | | | | |
| ---------- | ----------- | ------------ | ---------- | --------- |
-| ABLOCKS | CONNECTIONS | HAVING | MODULES | SLIMIT |
-| ABORT | COPY | ID | NCHAR | SMALLINT |
-| ACCOUNT | COUNT | IF | NE | SPREAD |
-| ACCOUNTS | CREATE | IGNORE | NONE | STABLE |
-| ADD | CTIME | IMMEDIATE | NOT | STABLES |
-| AFTER | DATABASE | IMPORT | NOTNULL | STAR |
-| ALL | DATABASES | IN | NOW | STATEMENT |
-| ALTER | DAYS | INITIALLY | OF | STDDEV |
-| AND | DEFERRED | INSERT | OFFSET | STREAM |
-| AS | DELIMITERS | INSTEAD | OR | STREAMS |
-| ASC | DESC | INTEGER | ORDER | STRING |
-| ATTACH | DESCRIBE | INTERVAL | PASS | SUM |
-| AVG | DETACH | INTO | PERCENTILE | TABLE |
-| BEFORE | DIFF | IP | PLUS | TABLES |
-| BEGIN | DISTINCT | IS | PRAGMA | TAG |
-| BETWEEN | DIVIDE | ISNULL | PREV | TAGS |
-| BIGINT | DNODE | JOIN | PRIVILEGE | TBLOCKS |
-| BINARY | DNODES | KEEP | QUERIES | TBNAME |
-| BITAND | DOT | KEY | QUERY | TIMES |
-| BITNOT | DOUBLE | KILL | RAISE | TIMESTAMP |
-| BITOR | DROP | LAST | REM | TINYINT |
-| BOOL | EACH | LE | REPLACE | TOP |
-| BOTTOM | END | LEASTSQUARES | REPLICA | TOPIC |
-| BY | EQ | LIKE | RESET | TRIGGER |
-| CACHE | EXISTS | LIMIT | RESTRICT | UMINUS |
+| ABLOCKS | CONNECTIONS | HAVING | MODULES | SMALLINT |
+| ABORT | COPY | ID | NCHAR | SPREAD |
+| ACCOUNT | COUNT | IF | NE | STABLE |
+| ACCOUNTS | CREATE | IGNORE | NONE | STABLES |
+| ADD | CTIME | IMMEDIATE | NOT | STAR |
+| AFTER | DATABASE | IMPORT | NOTNULL | STATEMENT |
+| ALL | DATABASES | IN | NOW | STDDEV |
+| ALTER | DAYS | INITIALLY | OF | STREAM |
+| AND | DEFERRED | INSERT | OFFSET | STREAMS |
+| AS | DELIMITERS | INSTEAD | OR | STRING |
+| ASC | DESC | INTEGER | ORDER | SUM |
+| ATTACH | DESCRIBE | INTERVAL | PASS | TABLE |
+| AVG | DETACH | INTO | PERCENTILE | TABLES |
+| BEFORE | DIFF | IP | PLUS | TAG |
+| BEGIN | DISTINCT | IS | PRAGMA | TAGS |
+| BETWEEN | DIVIDE | ISNULL | PREV | TBLOCKS |
+| BIGINT | DNODE | JOIN | PRIVILEGE | TBNAME |
+| BINARY | DNODES | KEEP | QUERIES | TIMES |
+| BITAND | DOT | KEY | QUERY | TIMESTAMP |
+| BITNOT | DOUBLE | KILL | RAISE | TINYINT |
+| BITOR | DROP | LAST | REM | TOP |
+| BOOL | EACH | LE | REPLACE | TOPIC |
+| BOTTOM | END | LEASTSQUARES | REPLICA | TRIGGER |
+| BY | EQ | LIKE | RESET | UMINUS |
+| CACHE | EXISTS | LIMIT | RESTRICT | UNION |
| CASCADE | EXPLAIN | LINEAR | ROW | UPLUS |
| CHANGE | FAIL | LOCAL | ROWS | USE |
| CLOG | FILL | LP | RP | USER |
@@ -498,5 +498,5 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
| CONCAT | GLOB | METRICS | SHOW | VIEW |
| CONFIGS | GRANTS | MIN | SLASH | WAVG |
| CONFLICT | GROUP | MINUS | SLIDING | WHERE |
-| CONNECTION | GT | MNODES | | |
+| CONNECTION | GT | MNODES | SLIMIT | |
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 04c90748f2..112ad99391 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -407,7 +407,7 @@ SELECT select_expr [, select_expr ...]
[INTERVAL (interval_val [, interval_offset])]
[SLIDING sliding_val]
[FILL fill_val]
- [GROUP BY col_list ]
+ [GROUP BY col_list]
[ORDER BY col_list { DESC | ASC }]
[SLIMIT limit_val [SOFFSET offset_val]]
[LIMIT limit_val [OFFSET offset_val]]
@@ -647,7 +647,7 @@ Query OK, 1 row(s) in set (0.001091s)
3. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
+### UNION ALL 操作符
+
+```mysql
+SELECT ...
+UNION ALL SELECT ...
+[UNION ALL SELECT ...]
+```
+
+TDengine 支持 UNION ALL 操作符。也就是说,如果多个 SELECT 子句返回结果集的结构完全相同(列名、列类型、列数、顺序),那么可以通过 UNION ALL 把这些结果集合并到一起。目前只支持 UNION ALL 模式,也即在结果集的合并过程中是不去重的。
+
### SQL 示例
- 对于下面的例子,表tb1用以下语句创建
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 31343ed293..43006928a6 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,7 @@
name: tdengine
base: core18
-version: '2.0.20.0'
+
+version: '2.1.0.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@@ -72,7 +73,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- - usr/lib/libtaos.so.2.0.20.0
+ - usr/lib/libtaos.so.2.1.0.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h
index 15ef54b7b1..f0349c2b3d 100644
--- a/src/client/inc/tscSubquery.h
+++ b/src/client/inc/tscSubquery.h
@@ -48,6 +48,8 @@ void tscLockByThread(int64_t *lockedBy);
void tscUnlockByThread(int64_t *lockedBy);
+int tsInsertInitialCheck(SSqlObj *pSql);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index 9220754330..56d595ff1f 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -174,7 +174,8 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo);
static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
-int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
+int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize);
+int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
@@ -306,7 +307,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
-int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
+int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index ec3b0c4421..bf41449e13 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -84,6 +84,7 @@ typedef struct STableMeta {
typedef struct STableMetaInfo {
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
+ uint32_t tableMetaSize;
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray
@@ -154,13 +155,12 @@ typedef struct STagCond {
typedef struct SParamInfo {
int32_t idx;
- char type;
+ uint8_t type;
uint8_t timePrec;
int16_t bytes;
uint32_t offset;
} SParamInfo;
-
typedef struct SBoundColumn {
bool hasVal; // denote if current column has bound or not
int32_t offset; // all column offset value
@@ -372,7 +372,8 @@ typedef struct SSqlObj {
tsem_t rspSem;
SSqlCmd cmd;
SSqlRes res;
-
+ bool isBind;
+
SSubqueryState subState;
struct SSqlObj **pSubs;
diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
index b3060e2c82..04bccc1a4a 100644
--- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
+++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
@@ -100,7 +100,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: isUpdateQueryImp
- * Signature: (J)J
+ * Signature: (JJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp
(JNIEnv *env, jobject jobj, jlong con, jlong tres);
@@ -185,6 +185,44 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp
(JNIEnv *, jobject, jlong, jbyteArray);
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: prepareStmtImp
+ * Signature: ([BJ)J
+ */
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp
+ (JNIEnv *, jobject, jbyteArray, jlong);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: setBindTableNameImp
+ * Signature: (JLjava/lang/String;J)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp
+ (JNIEnv *, jobject, jlong, jstring, jlong);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: bindColDataImp
+ * Signature: (J[B[B[BIIIIJ)J
+ */
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp
+(JNIEnv *, jobject, jlong, jbyteArray, jbyteArray, jbyteArray, jint, jint, jint, jint, jlong);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: executeBatchImp
+ * Signature: (JJ)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method:    closeStmt
+ * Signature: (JJ)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c
index 7447e36ac9..da7da17aa3 100644
--- a/src/client/src/TSDBJNIConnector.c
+++ b/src/client/src/TSDBJNIConnector.c
@@ -687,4 +687,194 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrec
}
return taos_result_precision(result);
-}
\ No newline at end of file
+}
+
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(JNIEnv *env, jobject jobj, jbyteArray jsql, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ if (jsql == NULL) {
+ jniError("jobj:%p, conn:%p, empty sql string", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ jsize len = (*env)->GetArrayLength(env, jsql);
+
+ char *str = (char *) calloc(1, sizeof(char) * (len + 1));
+ if (str == NULL) {
+ jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon);
+ return JNI_OUT_OF_MEMORY;
+ }
+
+ (*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str);
+ if ((*env)->ExceptionCheck(env)) {
+ // todo handle error
+ }
+
+ TAOS_STMT* pStmt = taos_stmt_init(tscon);
+ int32_t code = taos_stmt_prepare(pStmt, str, len);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ free(str);
+ return (jlong) pStmt;
+}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp(JNIEnv *env, jobject jobj, jlong stmt, jstring jname, jlong conn) {
+ TAOS *tsconn = (TAOS *)conn;
+ if (tsconn == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT* pStmt = (TAOS_STMT*) stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn);
+ return JNI_SQL_NULL;
+ }
+
+ const char *name = (*env)->GetStringUTFChars(env, jname, NULL);
+
+ int32_t code = taos_stmt_set_tbname((void*)stmt, name);
+ if (code != TSDB_CODE_SUCCESS) {
+ (*env)->ReleaseStringUTFChars(env, jname, name);
+
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ jniDebug("jobj:%p, conn:%p, set stmt bind table name:%s", jobj, tsconn, name);
+
+ (*env)->ReleaseStringUTFChars(env, jname, name);
+ return JNI_SUCCESS;
+}
+
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(JNIEnv *env, jobject jobj, jlong stmt,
+ jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList, jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT* pStmt = (TAOS_STMT*) stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ // todo refactor
+ jsize len = (*env)->GetArrayLength(env, colDataList);
+ char *colBuf = (char *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, colDataList, 0, len, (jbyte *)colBuf);
+ if ((*env)->ExceptionCheck(env)) {
+ // todo handle error
+ }
+
+ len = (*env)->GetArrayLength(env, lengthList);
+ char *lengthArray = (char*) calloc(1, len);
+ (*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte*) lengthArray);
+ if ((*env)->ExceptionCheck(env)) {
+ }
+
+ len = (*env)->GetArrayLength(env, nullList);
+ char *nullArray = (char*) calloc(1, len);
+ (*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte*) nullArray);
+ if ((*env)->ExceptionCheck(env)) {
+ }
+
+ // bind multi-rows with only one invoke.
+ TAOS_MULTI_BIND* b = calloc(1, sizeof(TAOS_MULTI_BIND));
+
+ b->num = numOfRows;
+ b->buffer_type = dataType; // todo check data type
+ b->buffer_length = IS_VAR_DATA_TYPE(dataType)? dataBytes:tDataTypes[dataType].bytes;
+ b->is_null = nullArray;
+ b->buffer = colBuf;
+ b->length = (int32_t*)lengthArray;
+
+ // set the length and is_null array
+ switch(dataType) {
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ case TSDB_DATA_TYPE_BIGINT: {
+ int32_t bytes = tDataTypes[dataType].bytes;
+ for(int32_t i = 0; i < numOfRows; ++i) {
+ b->length[i] = bytes;
+ }
+ break;
+ }
+
+ case TSDB_DATA_TYPE_NCHAR:
+ case TSDB_DATA_TYPE_BINARY: {
+ // do nothing
+ }
+ }
+
+ int32_t code = taos_stmt_bind_single_param_batch(pStmt, b, colIndex);
+ tfree(b->length);
+ tfree(b->buffer);
+ tfree(b->is_null);
+ tfree(b);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ return JNI_SUCCESS;
+}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT *pStmt = (TAOS_STMT*) stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ taos_stmt_add_batch(pStmt);
+ int32_t code = taos_stmt_execute(pStmt);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ jniDebug("jobj:%p, conn:%p, batch execute", jobj, tscon);
+ return JNI_SUCCESS;
+}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT *pStmt = (TAOS_STMT*) stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ int32_t code = taos_stmt_close(pStmt);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon);
+ return JNI_SUCCESS;
+}
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index a7882ffa61..6b55780af9 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -326,6 +326,7 @@ TAOS_ROW tscFetchRow(void *param) {
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
+ pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
@@ -679,6 +680,9 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
assert(pTableMetaInfo->pTableMeta != NULL);
const char* tableName = tNameGetTableName(&pTableMetaInfo->name);
+ if (pSql->cmd.command == TSDB_SQL_SHOW_CREATE_STABLE && !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
int32_t code = TSDB_CODE_SUCCESS;
@@ -907,7 +911,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
*/
pRes->qId = 0x1;
pRes->numOfRows = 0;
- } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE) {
+ } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE || pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE) {
pRes->code = tscProcessShowCreateTable(pSql);
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) {
pRes->code = tscProcessShowCreateDatabase(pSql);
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index 6b88c90747..bade9bb66a 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -68,7 +68,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
} else if (strncmp(pToken->z, "0", 1) == 0 && pToken->n == 1) {
// do nothing
} else if (pToken->type == TK_INTEGER) {
- useconds = tsosStr2int64(pToken->z);
+ useconds = taosStr2int64(pToken->z);
} else {
// strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm);
if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
@@ -386,7 +386,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
* The server time/client time should not be mixed up in one sql string
* Do not employ sort operation is not involved if server time is used.
*/
-static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
+int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
// once the data block is disordered, we do NOT keep previous timestamp any more
if (!pDataBlocks->ordered) {
return TSDB_CODE_SUCCESS;
@@ -411,6 +411,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
if (k <= pDataBlocks->prevTS && (pDataBlocks->tsSource == TSDB_USE_CLI_TS)) {
pDataBlocks->ordered = false;
+ tscWarn("NOT ordered input timestamp");
}
pDataBlocks->prevTS = k;
@@ -693,6 +694,8 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
pBlocks->numOfRows = i + 1;
dataBuf->size = sizeof(SSubmitBlk) + dataBuf->rowSize * pBlocks->numOfRows;
}
+
+ dataBuf->prevTS = INT64_MIN;
}
static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
@@ -705,19 +708,11 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
}
code = TSDB_CODE_TSC_INVALID_SQL;
- char *tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \"
- if (NULL == tmpTokenBuf) {
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
+ char tmpTokenBuf[16*1024] = {0}; // used for deleting Escape character: \\, \', \"
int32_t numOfRows = 0;
code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);
- free(tmpTokenBuf);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
SParamInfo *param = dataBuf->params + i;
if (param->idx == -1) {
@@ -934,6 +929,42 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
}
+ /* parse columns after super table tags values.
+ * insert into table_name using super_table(tag_name1, tag_name2) tags(tag_val1, tag_val2)
+ * (normal_col1, normal_col2) values(normal_col1_val, normal_col2_val);
+ * */
+ index = 0;
+ sToken = tStrGetToken(sql, &index, false);
+ sql += index;
+ int numOfColsAfterTags = 0;
+ if (sToken.type == TK_LP) {
+ if (*boundColumn != NULL) {
+ return tscSQLSyntaxErrMsg(pCmd->payload, "bind columns again", sToken.z);
+ } else {
+ *boundColumn = &sToken.z[0];
+ }
+
+ while (1) {
+ index = 0;
+ sToken = tStrGetToken(sql, &index, false);
+
+ if (sToken.type == TK_RP) {
+ break;
+ }
+
+ sql += index;
+ ++numOfColsAfterTags;
+ }
+
+ if (numOfColsAfterTags == 0 && (*boundColumn) != NULL) {
+ return TSDB_CODE_TSC_INVALID_SQL;
+ }
+
+ sToken = tStrGetToken(sql, &index, false);
+ }
+
+ sql = sToken.z;
+
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
}
@@ -1262,7 +1293,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean;
}
- if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
+ if ((pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) && taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
goto _clean;
}
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index c3c8986e2f..611cb604c4 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -24,6 +24,7 @@
#include "tscSubquery.h"
int tsParseInsertSql(SSqlObj *pSql);
+int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start);
////////////////////////////////////////////////////////////////////////////////
// functions for normal statement preparation
@@ -43,10 +44,32 @@ typedef struct SNormalStmt {
tVariant* params;
} SNormalStmt;
+typedef struct SMultiTbStmt {
+ bool nameSet;
+ uint64_t currentUid;
+ uint32_t tbNum;
+ SStrToken tbname;
+ SHashObj *pTableHash;
+ SHashObj *pTableBlockHashList; // data block for each table
+} SMultiTbStmt;
+
+typedef enum {
+ STMT_INIT = 1,
+ STMT_PREPARE,
+ STMT_SETTBNAME,
+ STMT_BIND,
+ STMT_BIND_COL,
+ STMT_ADD_BATCH,
+ STMT_EXECUTE
+} STMT_ST;
+
typedef struct STscStmt {
bool isInsert;
+ bool multiTbInsert;
+ int16_t last;
STscObj* taos;
SSqlObj* pSql;
+ SMultiTbStmt mtb;
SNormalStmt normal;
} STscStmt;
@@ -135,7 +158,7 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
break;
default:
- tscDebug("param %d: type mismatch or invalid", i);
+ tscDebug("0x%"PRIx64" bind column%d: type mismatch or invalid", stmt->pSql->self, i);
return TSDB_CODE_TSC_INVALID_VALUE;
}
}
@@ -255,12 +278,13 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
////////////////////////////////////////////////////////////////////////////////
// functions for insertion statement preparation
-static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
+static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) {
if (bind->is_null != NULL && *(bind->is_null)) {
setNull(data + param->offset, param->type, param->bytes);
return TSDB_CODE_SUCCESS;
}
+#if 0
if (0) {
// allow user bind param data with different type
union {
@@ -641,6 +665,7 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
}
}
}
+#endif
if (bind->buffer_type != param->type) {
return TSDB_CODE_TSC_INVALID_VALUE;
@@ -690,29 +715,106 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
}
memcpy(data + param->offset, bind->buffer, size);
+ if (param->offset == 0) {
+ if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
+ tscError("invalid timestamp");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) {
+ if (bind->buffer_type != param->type || !isValidDataType(param->type)) {
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ if (IS_VAR_DATA_TYPE(param->type) && bind->length == NULL) {
+ tscError("BINARY/NCHAR no length");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ for (int i = 0; i < bind->num; ++i) {
+ char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (rowNum + i);
+
+ if (bind->is_null != NULL && bind->is_null[i]) {
+ setNull(data + param->offset, param->type, param->bytes);
+ continue;
+ }
+
+ if (!IS_VAR_DATA_TYPE(param->type)) {
+ memcpy(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, tDataTypes[param->type].bytes);
+
+ if (param->offset == 0) {
+ if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
+ tscError("invalid timestamp");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ }
+ } else if (param->type == TSDB_DATA_TYPE_BINARY) {
+ if (bind->length[i] > (uintptr_t)param->bytes) {
+ tscError("binary length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ int16_t bsize = (short)bind->length[i];
+ STR_WITH_SIZE_TO_VARSTR(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, bsize);
+ } else if (param->type == TSDB_DATA_TYPE_NCHAR) {
+ if (bind->length[i] > (uintptr_t)param->bytes) {
+ tscError("nchar string length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ int32_t output = 0;
+ if (!taosMbsToUcs4((char *)bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
+ tscError("convert nchar string to UCS4_LE failed:%s", (char*)((char *)bind->buffer + bind->buffer_length * i));
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ varDataSetLen(data + param->offset, output);
+ }
+ }
+
return TSDB_CODE_SUCCESS;
}
static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
SSqlCmd* pCmd = &stmt->pSql->cmd;
-
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
-
- STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- if (pCmd->pTableBlockHashList == NULL) {
- pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
- }
-
+ STscStmt* pStmt = (STscStmt*)stmt;
+
STableDataBlocks* pBlock = NULL;
+
+ if (pStmt->multiTbInsert) {
+ if (pCmd->pTableBlockHashList == NULL) {
+ tscError("0x%"PRIx64" Table block hash list is empty", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
+ if (t1 == NULL) {
+ tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pStmt->pSql->self, pStmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
- int32_t ret =
- tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
- pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
- if (ret != 0) {
- // todo handle error
+ pBlock = *t1;
+ } else {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
+
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ if (pCmd->pTableBlockHashList == NULL) {
+ pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ }
+
+ int32_t ret =
+ tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ if (ret != 0) {
+ return ret;
+ }
}
- uint32_t totalDataSize = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
+ uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + 1) * pBlock->rowSize;
if (totalDataSize > pBlock->nAllocSize) {
const double factor = 1.5;
@@ -729,9 +831,9 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
SParamInfo* param = &pBlock->params[j];
- int code = doBindParam(data, param, &bind[param->idx]);
+ int code = doBindParam(pBlock, data, param, &bind[param->idx], 1);
if (code != TSDB_CODE_SUCCESS) {
- tscDebug("param %d: type mismatch or invalid", param->idx);
+ tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
return code;
}
}
@@ -739,9 +841,135 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
return TSDB_CODE_SUCCESS;
}
+
+static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
+ SSqlCmd* pCmd = &stmt->pSql->cmd;
+ STscStmt* pStmt = (STscStmt*)stmt;
+ int rowNum = bind->num;
+
+ STableDataBlocks* pBlock = NULL;
+
+ if (pStmt->multiTbInsert) {
+ if (pCmd->pTableBlockHashList == NULL) {
+ tscError("0x%"PRIx64" Table block hash list is empty", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
+ if (t1 == NULL) {
+ tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pStmt->pSql->self, pStmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pBlock = *t1;
+ } else {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
+
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ if (pCmd->pTableBlockHashList == NULL) {
+ pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ }
+
+ int32_t ret =
+ tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ assert(colIdx == -1 || (colIdx >= 0 && colIdx < pBlock->numOfParams));
+
+ uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + rowNum) * pBlock->rowSize;
+ if (totalDataSize > pBlock->nAllocSize) {
+ const double factor = 1.5;
+
+ void* tmp = realloc(pBlock->pData, (uint32_t)(totalDataSize * factor));
+ if (tmp == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ pBlock->pData = (char*)tmp;
+ pBlock->nAllocSize = (uint32_t)(totalDataSize * factor);
+ }
+
+ if (colIdx == -1) {
+ for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
+ SParamInfo* param = &pBlock->params[j];
+ if (bind[param->idx].num != rowNum) {
+ tscError("0x%"PRIx64" param %d: num[%d:%d] not match", pStmt->pSql->self, param->idx, rowNum, bind[param->idx].num);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
+ return code;
+ }
+ }
+
+ pCmd->batchSize += rowNum - 1;
+ } else {
+ SParamInfo* param = &pBlock->params[colIdx];
+
+ int code = doBindBatchParam(pBlock, param, bind, pCmd->batchSize);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
+ return code;
+ }
+
+ if (colIdx == (pBlock->numOfParams - 1)) {
+ pCmd->batchSize += rowNum - 1;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+static int insertStmtUpdateBatch(STscStmt* stmt) {
+ SSqlObj* pSql = stmt->pSql;
+ SSqlCmd* pCmd = &pSql->cmd;
+ STableDataBlocks* pBlock = NULL;
+
+ if (pCmd->batchSize > INT16_MAX) {
+ tscError("too many record:%d", pCmd->batchSize);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ assert(pCmd->numOfClause == 1);
+ if (taosHashGetSize(pCmd->pTableBlockHashList) == 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&stmt->mtb.currentUid, sizeof(stmt->mtb.currentUid));
+ if (t1 == NULL) {
+ tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pSql->self, stmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pBlock = *t1;
+
+ STableMeta* pTableMeta = pBlock->pTableMeta;
+
+ pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
+ SSubmitBlk* pBlk = (SSubmitBlk*) pBlock->pData;
+ pBlk->numOfRows = pCmd->batchSize;
+ pBlk->dataLen = 0;
+ pBlk->uid = pTableMeta->id.uid;
+ pBlk->tid = pTableMeta->id.tid;
+
+ return TSDB_CODE_SUCCESS;
+}
+
static int insertStmtAddBatch(STscStmt* stmt) {
SSqlCmd* pCmd = &stmt->pSql->cmd;
++pCmd->batchSize;
+
+ if (stmt->multiTbInsert) {
+ return insertStmtUpdateBatch(stmt);
+ }
+
return TSDB_CODE_SUCCESS;
}
@@ -835,6 +1063,83 @@ static int insertStmtExecute(STscStmt* stmt) {
return pSql->res.code;
}
+static void insertBatchClean(STscStmt* pStmt) {
+ SSqlCmd *pCmd = &pStmt->pSql->cmd;
+ SSqlObj *pSql = pStmt->pSql;
+ int32_t size = taosHashGetSize(pCmd->pTableBlockHashList);
+
+ // data block reset
+ pCmd->batchSize = 0;
+
+ for(int32_t i = 0; i < size; ++i) {
+ if (pCmd->pTableNameList && pCmd->pTableNameList[i]) {
+ tfree(pCmd->pTableNameList[i]);
+ }
+ }
+
+ tfree(pCmd->pTableNameList);
+
+/*
+ STableDataBlocks** p = taosHashIterate(pCmd->pTableBlockHashList, NULL);
+
+ STableDataBlocks* pOneTableBlock = *p;
+
+ while (1) {
+ SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
+
+ pOneTableBlock->size = sizeof(SSubmitBlk);
+
+ pBlocks->numOfRows = 0;
+
+ p = taosHashIterate(pCmd->pTableBlockHashList, p);
+ if (p == NULL) {
+ break;
+ }
+
+ pOneTableBlock = *p;
+ }
+*/
+
+ pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
+ pCmd->numOfTables = 0;
+
+ taosHashEmpty(pCmd->pTableBlockHashList);
+ tscFreeSqlResult(pSql);
+ tscFreeSubobj(pSql);
+ tfree(pSql->pSubs);
+ pSql->subState.numOfSub = 0;
+}
+
+static int insertBatchStmtExecute(STscStmt* pStmt) {
+ int32_t code = 0;
+
+ if(pStmt->mtb.nameSet == false) {
+ tscError("0x%"PRIx64" no table name set", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->pSql->retry = pStmt->pSql->maxRetry + 1; //no retry
+
+ if (taosHashGetSize(pStmt->pSql->cmd.pTableBlockHashList) > 0) { // merge according to vgId
+ if ((code = tscMergeTableDataBlocks(pStmt->pSql, false)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ }
+
+ code = tscHandleMultivnodeInsert(pStmt->pSql);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ // wait for the callback function to post the semaphore
+ tsem_wait(&pStmt->pSql->rspSem);
+
+ insertBatchClean(pStmt);
+
+ return pStmt->pSql->res.code;
+}
+
////////////////////////////////////////////////////////////////////////////////
// interface functions
@@ -866,7 +1171,9 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
pSql->signature = pSql;
pSql->pTscObj = pObj;
pSql->maxRetry = TSDB_MAX_REPLICA;
+ pSql->isBind = true;
pStmt->pSql = pSql;
+ pStmt->last = STMT_INIT;
return pStmt;
}
@@ -879,6 +1186,13 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
return TSDB_CODE_TSC_DISCONNECTED;
}
+ if (pStmt->last != STMT_INIT) {
+ tscError("prepare status error, last:%d", pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->last = STMT_PREPARE;
+
SSqlObj* pSql = pStmt->pSql;
size_t sqlLen = strlen(sql);
@@ -917,6 +1231,36 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
registerSqlObj(pSql);
+ int32_t ret = TSDB_CODE_SUCCESS;
+
+ if ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS) {
+ return ret;
+ }
+
+ int32_t index = 0;
+ SStrToken sToken = tStrGetToken(pCmd->curSql, &index, false);
+
+ if (sToken.n == 0) {
+ return TSDB_CODE_TSC_INVALID_SQL;
+ }
+
+ if (sToken.n == 1 && sToken.type == TK_QUESTION) {
+ pStmt->multiTbInsert = true;
+ pStmt->mtb.tbname = sToken;
+ pStmt->mtb.nameSet = false;
+ if (pStmt->mtb.pTableHash == NULL) {
+ pStmt->mtb.pTableHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+ }
+ if (pStmt->mtb.pTableBlockHashList == NULL) {
+ pStmt->mtb.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ }
+
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pStmt->multiTbInsert = false;
+ memset(&pStmt->mtb, 0, sizeof(pStmt->mtb));
+
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
// wait for the callback function to post the semaphore
@@ -931,6 +1275,105 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
return normalStmtPrepare(pStmt);
}
+
+int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+ SSqlObj* pSql = pStmt->pSql;
+ SSqlCmd* pCmd = &pSql->cmd;
+
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
+ if (name == NULL) {
+ terrno = TSDB_CODE_TSC_APP_ERROR;
+ tscError("0x%"PRIx64" name is NULL", pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pStmt->multiTbInsert == false || !tscIsInsertData(pSql->sqlstr)) {
+ terrno = TSDB_CODE_TSC_APP_ERROR;
+ tscError("0x%"PRIx64" not multi table insert", pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pStmt->last == STMT_INIT || pStmt->last == STMT_BIND || pStmt->last == STMT_BIND_COL) {
+ tscError("0x%"PRIx64" settbname status error, last:%d", pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->last = STMT_SETTBNAME;
+
+ uint64_t* uid = (uint64_t*)taosHashGet(pStmt->mtb.pTableHash, name, strlen(name));
+ if (uid != NULL) {
+ pStmt->mtb.currentUid = *uid;
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pStmt->mtb.pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
+ if (t1 == NULL) {
+ tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pSql->self, pStmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData;
+ pCmd->batchSize = pBlk->numOfRows;
+
+ taosHashPut(pCmd->pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)t1, POINTER_BYTES);
+
+ tscDebug("0x%"PRIx64" table:%s is already prepared, uid:%" PRIu64, pSql->self, name, pStmt->mtb.currentUid);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pStmt->mtb.tbname = tscReplaceStrToken(&pSql->sqlstr, &pStmt->mtb.tbname, name);
+ pStmt->mtb.nameSet = true;
+
+ tscDebug("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
+
+ pSql->cmd.parseFinished = 0;
+ pSql->cmd.numOfParams = 0;
+ pSql->cmd.batchSize = 0;
+
+ if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) {
+ SHashObj* hashList = pCmd->pTableBlockHashList;
+ pCmd->pTableBlockHashList = NULL;
+ tscResetSqlCmd(pCmd, true);
+ pCmd->pTableBlockHashList = hashList;
+ }
+
+ int32_t code = tsParseSql(pStmt->pSql, true);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ // wait for the callback function to post the semaphore
+ tsem_wait(&pStmt->pSql->rspSem);
+
+ code = pStmt->pSql->res.code;
+ }
+
+ if (code == TSDB_CODE_SUCCESS) {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ STableDataBlocks* pBlock = NULL;
+ code = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ SSubmitBlk* blk = (SSubmitBlk*)pBlock->pData;
+ blk->numOfRows = 0;
+
+ pStmt->mtb.currentUid = pTableMeta->id.uid;
+ pStmt->mtb.tbNum++;
+
+ taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
+
+ taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid));
+
+ tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid);
+ }
+
+ return code;
+}
+
int taos_stmt_close(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
if (!pStmt->isInsert) {
@@ -943,6 +1386,13 @@ int taos_stmt_close(TAOS_STMT* stmt) {
}
free(normal->parts);
free(normal->sql);
+ } else {
+ if (pStmt->multiTbInsert) {
+ taosHashCleanup(pStmt->mtb.pTableHash);
+ pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, true);
+ taosHashCleanup(pStmt->pSql->cmd.pTableBlockHashList);
+ pStmt->pSql->cmd.pTableBlockHashList = NULL;
+ }
}
taos_free_result(pStmt->pSql);
@@ -952,18 +1402,122 @@ int taos_stmt_close(TAOS_STMT* stmt) {
int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
STscStmt* pStmt = (STscStmt*)stmt;
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
if (pStmt->isInsert) {
+ if (pStmt->multiTbInsert) {
+ if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ } else {
+ if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ }
+
+ pStmt->last = STMT_BIND;
+
return insertStmtBindParam(pStmt, bind);
} else {
return normalStmtBindParam(pStmt, bind);
}
}
+
+int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
+ if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
+ tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (!pStmt->isInsert) {
+ tscError("0x%"PRIx64" not or invalid batch insert", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pStmt->multiTbInsert) {
+ if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ } else {
+ if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ }
+
+ pStmt->last = STMT_BIND;
+
+ return insertStmtBindParamBatch(pStmt, bind, -1);
+}
+
+int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
+ if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
+ tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (!pStmt->isInsert) {
+ tscError("0x%"PRIx64" not or invalid batch insert", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pStmt->multiTbInsert) {
+ if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ } else {
+ if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL && pStmt->last != STMT_EXECUTE) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ }
+
+ pStmt->last = STMT_BIND_COL;
+
+ return insertStmtBindParamBatch(pStmt, bind, colIdx);
+}
+
+
+
int taos_stmt_add_batch(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
if (pStmt->isInsert) {
+ if (pStmt->last != STMT_BIND && pStmt->last != STMT_BIND_COL) {
+ tscError("0x%"PRIx64" add batch status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->last = STMT_ADD_BATCH;
+
return insertStmtAddBatch(pStmt);
}
+
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
}
@@ -978,8 +1532,24 @@ int taos_stmt_reset(TAOS_STMT* stmt) {
int taos_stmt_execute(TAOS_STMT* stmt) {
int ret = 0;
STscStmt* pStmt = (STscStmt*)stmt;
+ if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
if (pStmt->isInsert) {
- ret = insertStmtExecute(pStmt);
+ if (pStmt->last != STMT_ADD_BATCH) {
+ tscError("0x%"PRIx64" exec status error, last:%d", pStmt->pSql->self, pStmt->last);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pStmt->last = STMT_EXECUTE;
+
+ if (pStmt->multiTbInsert) {
+ ret = insertBatchStmtExecute(pStmt);
+ } else {
+ ret = insertStmtExecute(pStmt);
+ }
} else { // normal stmt query
char* sql = normalStmtBuildSql(pStmt);
if (sql == NULL) {
@@ -1074,7 +1644,7 @@ int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
}
if (idx<0 || idx>=pBlock->numOfParams) {
- tscError("param %d: out of range", idx);
+ tscError("0x%"PRIx64" param %d: out of range", pStmt->pSql->self, idx);
abort();
}
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 87b4669a04..22b0ed30a9 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -64,7 +64,7 @@ static char* getAccountId(SSqlObj* pSql);
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
static char* cloneCurrentDBName(SSqlObj* pSql);
-static bool hasSpecifyDB(SStrToken* pTableName);
+static int32_t getDelimiterIndex(SStrToken* pTableName);
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
@@ -427,17 +427,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_DESCRIBE_TABLE: {
const char* msg1 = "invalid table name";
- const char* msg2 = "table name too long";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if (!tscValidateTableNameLength(pToken->n)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
- }
-
// additional msg has been attached already
code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
@@ -446,19 +441,15 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return tscGetTableMeta(pSql, pTableMetaInfo);
}
+ case TSDB_SQL_SHOW_CREATE_STABLE:
case TSDB_SQL_SHOW_CREATE_TABLE: {
const char* msg1 = "invalid table name";
- const char* msg2 = "table name is too long";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if (!tscValidateTableNameLength(pToken->n)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
- }
-
code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -645,18 +636,26 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// set the command/global limit parameters from the first subclause to the sqlcmd object
SQueryInfo* pQueryInfo1 = tscGetQueryInfo(pCmd, 0);
pCmd->command = pQueryInfo1->command;
-
+ int32_t diffSize = 0;
+
// if there is only one element, the limit of clause is the limit of global result.
// validate the select node for "UNION ALL" subclause
for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
SQueryInfo* pQueryInfo2 = tscGetQueryInfo(pCmd, i);
- int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo);
+ int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo, &diffSize);
if (ret != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
+ if (diffSize) {
+ for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
+ SQueryInfo* pQueryInfo2 = tscGetQueryInfo(pCmd, i);
+ tscFieldInfoSetSize(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo);
+ }
+ }
+
pCmd->parseFinished = 1;
return TSDB_CODE_SUCCESS; // do not build query message here
}
@@ -983,11 +982,14 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam
const char* msg1 = "name too long";
const char* msg2 = "acctId too long";
const char* msg3 = "no acctId";
+ const char* msg4 = "db name too long";
+ const char* msg5 = "table name too long";
+
SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS;
-
- if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path
+ int32_t idx = getDelimiterIndex(pTableName);
+ if (idx != -1) { // db has been specified in sql string so we ignore current db path
char* acctId = getAccountId(pSql);
if (acctId == NULL || strlen(acctId) <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
@@ -997,6 +999,13 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam
if (code != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
+ if (idx >= TSDB_DB_NAME_LEN) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+
+ if (pTableName->n - 1 - idx >= TSDB_TABLE_NAME_LEN) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ }
char name[TSDB_TABLE_FNAME_LEN] = {0};
strncpy(name, pTableName->z, pTableName->n);
@@ -1341,14 +1350,13 @@ static char* cloneCurrentDBName(SSqlObj* pSql) {
}
/* length limitation, strstr cannot be applied */
-static bool hasSpecifyDB(SStrToken* pTableName) {
+static int32_t getDelimiterIndex(SStrToken* pTableName) {
for (uint32_t i = 0; i < pTableName->n; ++i) {
if (pTableName->z[i] == TS_PATH_DELIMITER[0]) {
- return true;
+ return i;
}
}
-
- return false;
+ return -1;
}
int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* xlen) {
@@ -1607,11 +1615,27 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
return false;
}
+static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
+
+ if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) {
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelNodeList, bool isSTable, bool joinQuery,
bool timeWindowQuery) {
assert(pSelNodeList != NULL && pCmd != NULL);
const char* msg1 = "too many items in selection clause";
+
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "not support query expression";
const char* msg4 = "only support distinct one tag";
@@ -1676,7 +1700,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
// there is only one user-defined column in the final result field, add the timestamp column.
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
- if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
+ if ((numOfSrcCols <= 0 || !hasNoneUserDefineExpr(pQueryInfo)) && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
addPrimaryTsColIntoResult(pQueryInfo);
}
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 9528a553b2..bc1207e80b 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -2441,10 +2441,22 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
assert(tIsValidName(&pTableMetaInfo->name));
- tfree(pTableMetaInfo->pTableMeta);
-
uint32_t size = tscGetTableMetaMaxSize();
- pTableMetaInfo->pTableMeta = calloc(1, size);
+ if (pTableMetaInfo->pTableMeta == NULL) {
+ pTableMetaInfo->pTableMeta = calloc(1, size);
+ pTableMetaInfo->tableMetaSize = size;
+ } else if (pTableMetaInfo->tableMetaSize < size) {
+ char *tmp = realloc(pTableMetaInfo->pTableMeta, size);
+ if (tmp == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ pTableMetaInfo->pTableMeta = (STableMeta *)tmp;
+ pTableMetaInfo->tableMetaSize = size;
+ } else {
+ //uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta);
+ memset(pTableMetaInfo->pTableMeta, 0, size);
+ pTableMetaInfo->tableMetaSize = size;
+ }
pTableMetaInfo->pTableMeta->tableType = -1;
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
@@ -2456,10 +2468,13 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
// TODO resize the tableMeta
+ char buf[80*1024] = {0};
+ assert(size < 80*1024);
+
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
if (pMeta->tableType == TSDB_CHILD_TABLE) {
- int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name);
+ int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name, buf);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@@ -2641,6 +2656,7 @@ void tscInitMsgsFp() {
tscProcessMsgRsp[TSDB_SQL_ALTER_DB] = tscProcessAlterDbMsgRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_TABLE] = tscProcessShowCreateRsp;
+ tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_STABLE] = tscProcessShowCreateRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_DATABASE] = tscProcessShowCreateRsp;
tscKeepConn[TSDB_SQL_SHOW] = 1;
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index 8dbb1c0a52..364af4e8b1 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -457,6 +457,7 @@ static bool needToFetchNewBlock(SSqlObj* pSql) {
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
+ pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 420b78f64d..71d7dc1b73 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -1038,7 +1038,8 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
tfree(pTableMetaInfo->pTableMeta);
}
- pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
+ pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
+ pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pDataBlock->pTableMeta);
}
/*
@@ -1255,67 +1256,73 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
STableDataBlocks* pOneTableBlock = *p;
while(pOneTableBlock) {
- // the maximum expanded size in byte when a row-wise data is converted to SDataRow format
- int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
- STableDataBlocks* dataBuf = NULL;
-
- int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
- INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
- if (ret != TSDB_CODE_SUCCESS) {
- tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret);
- taosHashCleanup(pVnodeDataBlockHashList);
- tscDestroyBlockArrayList(pVnodeDataBlockList);
- return ret;
- }
-
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
- int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
-
- if (dataBuf->nAllocSize < destSize) {
- while (dataBuf->nAllocSize < destSize) {
- dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5);
- }
-
- char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize);
- if (tmp != NULL) {
- dataBuf->pData = tmp;
- memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
- } else { // failed to allocate memory, free already allocated memory and return error code
- tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize);
-
+ if (pBlocks->numOfRows > 0) {
+ // the maximum expanded size in byte when a row-wise data is converted to SDataRow format
+ int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
+ STableDataBlocks* dataBuf = NULL;
+
+ int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
+ INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
+ if (ret != TSDB_CODE_SUCCESS) {
+ tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret);
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
- tfree(dataBuf->pData);
-
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ return ret;
}
+
+ int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
+
+ if (dataBuf->nAllocSize < destSize) {
+ while (dataBuf->nAllocSize < destSize) {
+ dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5);
+ }
+
+ char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize);
+ if (tmp != NULL) {
+ dataBuf->pData = tmp;
+ memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
+ } else { // failed to allocate memory, free already allocated memory and return error code
+ tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize);
+
+ taosHashCleanup(pVnodeDataBlockHashList);
+ tscDestroyBlockArrayList(pVnodeDataBlockList);
+ tfree(dataBuf->pData);
+
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ }
+
+ tscSortRemoveDataBlockDupRows(pOneTableBlock);
+ char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
+
+ tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName),
+ pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
+
+ int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
+
+ pBlocks->tid = htonl(pBlocks->tid);
+ pBlocks->uid = htobe64(pBlocks->uid);
+ pBlocks->sversion = htonl(pBlocks->sversion);
+ pBlocks->numOfRows = htons(pBlocks->numOfRows);
+ pBlocks->schemaLen = 0;
+
+ // erase the empty space reserved for binary data
+ int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->submitSchema);
+ assert(finalLen <= len);
+
+ dataBuf->size += (finalLen + sizeof(SSubmitBlk));
+ assert(dataBuf->size <= dataBuf->nAllocSize);
+
+ // the length does not include the SSubmitBlk structure
+ pBlocks->dataLen = htonl(finalLen);
+ dataBuf->numOfTables += 1;
+
+ pBlocks->numOfRows = 0;
+ }else {
+ tscDebug("0x%"PRIx64" table %s data block is empty", pSql->self, pOneTableBlock->tableName.tname);
}
-
- tscSortRemoveDataBlockDupRows(pOneTableBlock);
- char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
-
- tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName),
- pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
-
- int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
-
- pBlocks->tid = htonl(pBlocks->tid);
- pBlocks->uid = htobe64(pBlocks->uid);
- pBlocks->sversion = htonl(pBlocks->sversion);
- pBlocks->numOfRows = htons(pBlocks->numOfRows);
- pBlocks->schemaLen = 0;
-
- // erase the empty space reserved for binary data
- int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->submitSchema);
- assert(finalLen <= len);
-
- dataBuf->size += (finalLen + sizeof(SSubmitBlk));
- assert(dataBuf->size <= dataBuf->nAllocSize);
-
- // the length does not include the SSubmitBlk structure
- pBlocks->dataLen = htonl(finalLen);
- dataBuf->numOfTables += 1;
-
+
p = taosHashIterate(pCmd->pTableBlockHashList, p);
if (p == NULL) {
break;
@@ -1437,7 +1444,7 @@ int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) {
return pInfo->pExpr->base.offset;
}
-int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) {
+int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize) {
assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
if (pFieldInfo1->numOfOutput != pFieldInfo2->numOfOutput) {
@@ -1449,15 +1456,36 @@ int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFi
TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
if (pField1->type != pField2->type ||
- pField1->bytes != pField2->bytes ||
strcasecmp(pField1->name, pField2->name) != 0) {
return 1;
}
+
+ if (pField1->bytes != pField2->bytes) {
+ *diffSize = 1;
+
+ if (pField2->bytes > pField1->bytes) {
+ pField1->bytes = pField2->bytes;
+ }
+ }
}
return 0;
}
+int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) {
+ assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
+
+ for (int32_t i = 0; i < pFieldInfo1->numOfOutput; ++i) {
+ TAOS_FIELD* pField1 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo1, i);
+ TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
+
+ pField2->bytes = pField1->bytes;
+ }
+
+ return 0;
+}
+
+
int32_t tscGetResRowLength(SArray* pExprList) {
size_t num = taosArrayGetSize(pExprList);
if (num == 0) {
@@ -2431,6 +2459,11 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
}
pTableMetaInfo->pTableMeta = pTableMeta;
+ if (pTableMetaInfo->pTableMeta == NULL) {
+ pTableMetaInfo->tableMetaSize = 0;
+ } else {
+ pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta);
+ }
if (vgroupList != NULL) {
pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList);
@@ -2706,6 +2739,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
+
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) {
@@ -3100,7 +3134,13 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) {
//backup the total number of result first
int64_t num = pRes->numOfTotal + pRes->numOfClauseTotal;
+
+
+ // DON't free final since it may be recorded and used later in APP
+ TAOS_FIELD* finalBk = pRes->final;
+ pRes->final = NULL;
tscFreeSqlResult(pSql);
+ pRes->final = finalBk;
pRes->numOfTotal = num;
@@ -3333,11 +3373,11 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
return cMeta;
}
-int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
- assert(pChild != NULL);
+int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf) {
+ assert(pChild != NULL && buf != NULL);
- uint32_t size = tscGetTableMetaMaxSize();
- STableMeta* p = calloc(1, size);
+// uint32_t size = tscGetTableMetaMaxSize();
+ STableMeta* p = buf;//calloc(1, size);
taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1);
if (p->id.uid > 0) { // tableMeta exists, build child table meta and return
@@ -3349,12 +3389,12 @@ int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
memcpy(pChild->schema, p->schema, sizeof(SSchema) *total);
- tfree(p);
+// tfree(p);
return TSDB_CODE_SUCCESS;
} else { // super table has been removed, current tableMeta is also expired. remove it here
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
- tfree(p);
+// tfree(p);
return -1;
}
}
diff --git a/src/common/inc/tcmdtype.h b/src/common/inc/tcmdtype.h
index be16e80124..adf210cfeb 100644
--- a/src/common/inc/tcmdtype.h
+++ b/src/common/inc/tcmdtype.h
@@ -80,6 +80,7 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_TABLE_JOIN_RETRIEVE, "join-retrieve" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_TABLE, "show-create-table")
+ TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_STABLE, "show-create-stable")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database")
/*
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index e8c0760997..88d5b85010 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -15,10 +15,7 @@
#ifndef _TD_DATA_FORMAT_H_
#define _TD_DATA_FORMAT_H_
-#include <stdint.h>
-#include <stdlib.h>
-#include <string.h>
-
+#include "os.h"
#include "talgo.h"
#include "ttype.h"
#include "tutil.h"
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 26475834d5..2f4aa4c2b2 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -44,6 +44,7 @@ extern int32_t tsDnodeId;
// common
extern int tsRpcTimer;
extern int tsRpcMaxTime;
+extern int tsRpcForceTcp; // all commands go to tcp protocol if this is enabled
extern int32_t tsMaxConnections;
extern int32_t tsMaxShellConns;
extern int32_t tsShellActivityTimer;
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index 1524f15b7d..db97c3a5af 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -48,6 +48,7 @@ int32_t tsDnodeId = 0;
// common
int32_t tsRpcTimer = 1000;
int32_t tsRpcMaxTime = 600; // seconds;
+int32_t tsRpcForceTcp = 0; //disable this, means query, show command use udp protocol as default
int32_t tsMaxShellConns = 50000;
int32_t tsMaxConnections = 5000;
int32_t tsShellActivityTimer = 3; // second
@@ -625,6 +626,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_MS;
taosInitConfigOption(cfg);
+ cfg.option = "rpcForceTcp";
+ cfg.ptr = &tsRpcForceTcp;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
+ cfg.minValue = 0;
+ cfg.maxValue = 1;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
cfg.option = "rpcMaxTime";
cfg.ptr = &tsRpcMaxTime;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -921,7 +932,7 @@ static void doInitGlobalConfig(void) {
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = -1;
- cfg.maxValue = 10000000000.0f;
+ cfg.maxValue = 100000000.0f;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java
index 4b5b88d93b..f8ea9af423 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java
@@ -84,10 +84,12 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
}
@Override
+ @Deprecated
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
-
+ }
+
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@@ -171,6 +173,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
}
@Override
+ @Deprecated
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
return getUnicodeStream(findColumn(columnLabel));
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
index c8ab9fb15a..02fee74eb5 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
@@ -49,7 +49,7 @@ public class TSDBConnection extends AbstractConnection {
this.databaseMetaData.setConnection(this);
}
- public TSDBJNIConnector getConnection() {
+ public TSDBJNIConnector getConnector() {
return this.connector;
}
@@ -58,7 +58,7 @@ public class TSDBConnection extends AbstractConnection {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
- return new TSDBStatement(this, this.connector);
+ return new TSDBStatement(this);
}
public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException {
@@ -74,14 +74,18 @@ public class TSDBConnection extends AbstractConnection {
}
public PreparedStatement prepareStatement(String sql) throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
- return new TSDBPreparedStatement(this, this.connector, sql);
+ }
+
+ return new TSDBPreparedStatement(this, sql);
}
public void close() throws SQLException {
- if (isClosed)
+ if (isClosed) {
return;
+ }
+
this.connector.closeConnection();
this.isClosed = true;
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
index bbd8519a03..55533bd28c 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
@@ -104,7 +104,7 @@ public class TSDBDriver extends AbstractDriver {
static {
try {
- java.sql.DriverManager.registerDriver(new TSDBDriver());
+ DriverManager.registerDriver(new TSDBDriver());
} catch (SQLException e) {
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_CANNOT_REGISTER_JNI_DRIVER, e);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
index 5e3ffffa4f..2111ab2743 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
@@ -18,6 +18,7 @@ package com.taosdata.jdbc;
import com.taosdata.jdbc.utils.TaosInfo;
+import java.nio.ByteBuffer;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
@@ -29,10 +30,13 @@ public class TSDBJNIConnector {
private static volatile Boolean isInitialized = false;
private TaosInfo taosInfo = TaosInfo.getInstance();
+
// Connection pointer used in C
private long taos = TSDBConstants.JNI_NULL_POINTER;
+
// result set status in current connection
private boolean isResultsetClosed = true;
+
private int affectedRows = -1;
static {
@@ -75,7 +79,6 @@ public class TSDBJNIConnector {
public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException {
if (this.taos != TSDBConstants.JNI_NULL_POINTER) {
-// this.closeConnectionImp(this.taos);
closeConnection();
this.taos = TSDBConstants.JNI_NULL_POINTER;
}
@@ -97,12 +100,6 @@ public class TSDBJNIConnector {
* @throws SQLException
*/
public long executeQuery(String sql) throws SQLException {
- // close previous result set if the user forgets to invoke the
- // free method to close previous result set.
-// if (!this.isResultsetClosed) {
-// freeResultSet(taosResultSetPointer);
-// }
-
Long pSql = 0l;
try {
pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos);
@@ -169,37 +166,14 @@ public class TSDBJNIConnector {
private native long isUpdateQueryImp(long connection, long pSql);
/**
- * Free resultset operation from C to release resultset pointer by JNI
+ * Free result set operation from C to release result set pointer by JNI
*/
public int freeResultSet(long pSql) {
- int res = TSDBConstants.JNI_SUCCESS;
-// if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
-// throw new RuntimeException("Invalid result set pointer");
-// }
-
-// if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
- res = this.freeResultSetImp(this.taos, pSql);
-// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER;
-// }
-
+ int res = this.freeResultSetImp(this.taos, pSql);
isResultsetClosed = true;
return res;
}
- /**
- * Close the open result set which is associated to the current connection. If the result set is already
- * closed, return 0 for success.
- */
-// public int freeResultSet() {
-// int resCode = TSDBConstants.JNI_SUCCESS;
-// if (!isResultsetClosed) {
-// resCode = this.freeResultSetImp(this.taos, this.taosResultSetPointer);
-// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER;
-// isResultsetClosed = true;
-// }
-// return resCode;
-// }
-
private native int freeResultSetImp(long connection, long result);
/**
@@ -246,6 +220,7 @@ public class TSDBJNIConnector {
*/
public void closeConnection() throws SQLException {
int code = this.closeConnectionImp(this.taos);
+
if (code < 0) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
} else if (code == 0) {
@@ -253,6 +228,7 @@ public class TSDBJNIConnector {
} else {
throw new SQLException("Undefined error code returned by TDengine when closing a connection");
}
+
// invoke closeConnectionImpl only here
taosInfo.connect_close_increment();
}
@@ -289,7 +265,7 @@ public class TSDBJNIConnector {
private native void unsubscribeImp(long subscription, boolean isKeep);
/**
- * Validate if a create table sql statement is correct without actually creating that table
+ * Validate if a create table SQL statement is correct without actually creating that table
*/
public boolean validateCreateTableSql(String sql) {
int res = validateCreateTableSqlImp(taos, sql.getBytes());
@@ -297,4 +273,66 @@ public class TSDBJNIConnector {
}
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
+
+ public long prepareStmt(String sql) throws SQLException {
+ Long stmt = 0L;
+ try {
+ stmt = prepareStmtImp(sql.getBytes(), this.taos);
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
+ }
+
+ if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
+ }
+
+ if (stmt == TSDBConstants.JNI_SQL_NULL) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
+ }
+
+ if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
+ }
+
+ return stmt;
+ }
+
+ private native long prepareStmtImp(byte[] sql, long con);
+
+ public void setBindTableName(long stmt, String tableName) throws SQLException {
+ int code = setBindTableNameImp(stmt, tableName, this.taos);
+ if (code != TSDBConstants.JNI_SUCCESS) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to set table name");
+ }
+ }
+
+ private native int setBindTableNameImp(long stmt, String name, long conn);
+
+ public void bindColumnDataArray(long stmt, ByteBuffer colDataList, ByteBuffer lengthList, ByteBuffer isNullList, int type, int bytes, int numOfRows,int columnIndex) throws SQLException {
+ int code = bindColDataImp(stmt, colDataList.array(), lengthList.array(), isNullList.array(), type, bytes, numOfRows, columnIndex, this.taos);
+ if (code != TSDBConstants.JNI_SUCCESS) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to bind column data");
+ }
+ }
+
+ private native int bindColDataImp(long stmt, byte[] colDataList, byte[] lengthList, byte[] isNullList, int type, int bytes, int numOfRows, int columnIndex, long conn);
+
+ public void executeBatch(long stmt) throws SQLException {
+ int code = executeBatchImp(stmt, this.taos);
+ if (code != TSDBConstants.JNI_SUCCESS) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to execute batch bind");
+ }
+ }
+
+ private native int executeBatchImp(long stmt, long con);
+
+ public void closeBatch(long stmt) throws SQLException {
+ int code = closeStmt(stmt, this.taos);
+ if (code != TSDBConstants.JNI_SUCCESS) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to close batch bind");
+ }
+ }
+
+ private native int closeStmt(long stmt, long con);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
index e545bbc8f2..71e07252a3 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
@@ -14,36 +14,44 @@
*****************************************************************************/
package com.taosdata.jdbc;
+import com.taosdata.jdbc.utils.Utils;
+
import java.io.InputStream;
import java.io.Reader;
+import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
import java.net.URL;
-import java.nio.charset.Charset;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
import java.sql.*;
import java.util.ArrayList;
import java.util.Calendar;
+import java.util.Collections;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/*
- * TDengine only supports a subset of the standard SQL, thus this implemetation of the
+ * TDengine only supports a subset of the standard SQL, thus this implementation of the
* standard JDBC API contains more or less some adjustments customized for certain
* compatibility needs.
*/
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
-
private String rawSql;
private Object[] parameters;
private boolean isPrepared;
-
+
+ private ArrayList colData;
+ private String tableName;
+ private long nativeStmtHandle = 0;
+
private volatile TSDBParameterMetaData parameterMetaData;
- TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) {
- super(connection, connecter);
+ TSDBPreparedStatement(TSDBConnection connection, String sql) {
+ super(connection);
init(sql);
+ int parameterCnt = 0;
if (sql.contains("?")) {
- int parameterCnt = 0;
for (int i = 0; i < sql.length(); i++) {
if ('?' == sql.charAt(i)) {
parameterCnt++;
@@ -52,6 +60,12 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
parameters = new Object[parameterCnt];
this.isPrepared = true;
}
+
+ if (parameterCnt > 1) {
+ // the table name is also a parameter, so ignore it.
+ this.colData = new ArrayList(parameterCnt - 1);
+ this.colData.addAll(Collections.nCopies(parameterCnt - 1, null));
+ }
}
private void init(String sql) {
@@ -126,28 +140,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
* @return a string of the native sql statement for TSDB
*/
private String getNativeSql(String rawSql) throws SQLException {
- String sql = rawSql;
- for (int i = 0; i < parameters.length; ++i) {
- Object para = parameters[i];
- if (para != null) {
- String paraStr;
- if (para instanceof byte[]) {
- paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
- } else {
- paraStr = para.toString();
- }
- // if para is timestamp or String or byte[] need to translate ' character
- if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
- paraStr = paraStr.replaceAll("'", "\\\\\\\\'");
- paraStr = "'" + paraStr + "'";
- }
- sql = sql.replaceFirst("[?]", paraStr);
- } else {
- sql = sql.replaceFirst("[?]", "NULL");
- }
- }
- clearParameters();
- return sql;
+ return Utils.getNativeSql(rawSql, this.parameters);
}
@Override
@@ -275,15 +268,19 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- setObject(parameterIndex,x);
+ setObject(parameterIndex, x);
}
@Override
public void setObject(int parameterIndex, Object x) throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- if (parameterIndex < 1 && parameterIndex >= parameters.length)
+ }
+
+ if (parameterIndex < 1 && parameterIndex >= parameters.length) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
+ }
+
parameters[parameterIndex - 1] = x;
}
@@ -320,9 +317,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override
public void setRef(int parameterIndex, Ref x) throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-
+ }
+
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@@ -535,4 +533,276 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
+
+ ///////////////////////////////////////////////////////////////////////
+ // NOTE: the following APIs are not JDBC compatible
+ // set the bind table name
+ private static class ColumnInfo {
+ @SuppressWarnings("rawtypes")
+ private ArrayList data;
+ private int type;
+ private int bytes;
+ private boolean typeIsSet;
+
+ public ColumnInfo() {
+ this.typeIsSet = false;
+ }
+
+ public void setType(int type) throws SQLException {
+ if (this.isTypeSet()) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type has been set");
+ }
+
+ this.typeIsSet = true;
+ this.type = type;
+ }
+
+ public boolean isTypeSet() {
+ return this.typeIsSet;
+ }
+ };
+
+ public void setTableName(String name) {
+ this.tableName = name;
+ }
+
+ public void setValueImpl(int columnIndex, ArrayList list, int type, int bytes) throws SQLException {
+ ColumnInfo col = (ColumnInfo) this.colData.get(columnIndex);
+ if (col == null) {
+ ColumnInfo p = new ColumnInfo();
+ p.setType(type);
+ p.bytes = bytes;
+ p.data = (ArrayList<?>) list.clone();
+ this.colData.set(columnIndex, p);
+ } else {
+ if (col.type != type) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type mismatch");
+ }
+ col.data.addAll(list);
+ }
+ }
+
+ public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_INT, Integer.BYTES);
+ }
+
+ public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_FLOAT, Float.BYTES);
+ }
+
+ public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP, Long.BYTES);
+ }
+
+ public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BIGINT, Long.BYTES);
+ }
+
+ public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_DOUBLE, Double.BYTES);
+ }
+
+ public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BOOL, Byte.BYTES);
+ }
+
+ public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TINYINT, Byte.BYTES);
+ }
+
+ public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_SMALLINT, Short.BYTES);
+ }
+
+ public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BINARY, size);
+ }
+
+ // note: expand the required space for each NChar character
+ public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException {
+ setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_NCHAR, size * Integer.BYTES);
+ }
+
+ public void columnDataAddBatch() throws SQLException {
+ // pass the data block to native code
+ if (rawSql == null) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "sql statement not set yet");
+ }
+
+ // table name is not set yet, abort
+ if (this.tableName == null) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "table name not set yet");
+ }
+
+ int numOfCols = this.colData.size();
+ if (numOfCols == 0) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
+ }
+
+ TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
+ this.nativeStmtHandle = connector.prepareStmt(rawSql);
+ connector.setBindTableName(this.nativeStmtHandle, this.tableName);
+
+ ColumnInfo colInfo = (ColumnInfo) this.colData.get(0);
+ if (colInfo == null) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
+ }
+
+ int rows = colInfo.data.size();
+ for (int i = 0; i < numOfCols; ++i) {
+ ColumnInfo col1 = (ColumnInfo) this.colData.get(i);
+ if (col1 == null || !col1.isTypeSet()) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
+ }
+
+ if (rows != col1.data.size()) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "the rows in column data not identical");
+ }
+
+ ByteBuffer colDataList = ByteBuffer.allocate(rows * col1.bytes);
+ colDataList.order(ByteOrder.LITTLE_ENDIAN);
+
+ ByteBuffer lengthList = ByteBuffer.allocate(rows * Integer.BYTES);
+ lengthList.order(ByteOrder.LITTLE_ENDIAN);
+
+ ByteBuffer isNullList = ByteBuffer.allocate(rows * Byte.BYTES);
+ isNullList.order(ByteOrder.LITTLE_ENDIAN);
+
+ switch (col1.type) {
+ case TSDBConstants.TSDB_DATA_TYPE_INT: {
+ for (int j = 0; j < rows; ++j) {
+ Integer val = (Integer) col1.data.get(j);
+ colDataList.putInt(val == null? Integer.MIN_VALUE:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_TINYINT: {
+ for (int j = 0; j < rows; ++j) {
+ Byte val = (Byte) col1.data.get(j);
+ colDataList.put(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_BOOL: {
+ for (int j = 0; j < rows; ++j) {
+ Boolean val = (Boolean) col1.data.get(j);
+ if (val == null) {
+ colDataList.put((byte) 0);
+ } else {
+ colDataList.put((byte) (val? 1:0));
+ }
+
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: {
+ for (int j = 0; j < rows; ++j) {
+ Short val = (Short) col1.data.get(j);
+ colDataList.putShort(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
+ case TSDBConstants.TSDB_DATA_TYPE_BIGINT: {
+ for (int j = 0; j < rows; ++j) {
+ Long val = (Long) col1.data.get(j);
+ colDataList.putLong(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_FLOAT: {
+ for (int j = 0; j < rows; ++j) {
+ Float val = (Float) col1.data.get(j);
+ colDataList.putFloat(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
+ for (int j = 0; j < rows; ++j) {
+ Double val = (Double) col1.data.get(j);
+ colDataList.putDouble(val == null? 0:val);
+ isNullList.put((byte) (val == null? 1:0));
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
+ case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
+ String charset = TaosGlobalConfig.getCharset();
+ for (int j = 0; j < rows; ++j) {
+ String val = (String) col1.data.get(j);
+
+ colDataList.position(j * col1.bytes); // seek to the correct position
+ if (val != null) {
+ byte[] b = null;
+ try {
+ if (col1.type == TSDBConstants.TSDB_DATA_TYPE_BINARY) {
+ b = val.getBytes();
+ } else {
+ b = val.getBytes(charset);
+ }
+ } catch (UnsupportedEncodingException e) {
+ e.printStackTrace();
+ }
+
+ if (val.length() > col1.bytes) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "string data too long");
+ }
+
+ colDataList.put(b);
+ lengthList.putInt(b.length);
+ isNullList.put((byte) 0);
+ } else {
+ lengthList.putInt(0);
+ isNullList.put((byte) 1);
+ }
+ }
+ break;
+ }
+
+ case TSDBConstants.TSDB_DATA_TYPE_UTINYINT:
+ case TSDBConstants.TSDB_DATA_TYPE_USMALLINT:
+ case TSDBConstants.TSDB_DATA_TYPE_UINT:
+ case TSDBConstants.TSDB_DATA_TYPE_UBIGINT: {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "not support data types");
+ }
+ };
+
+ connector.bindColumnDataArray(this.nativeStmtHandle, colDataList, lengthList, isNullList, col1.type, col1.bytes, rows, i);
+ }
+ }
+
+ public void columnDataExecuteBatch() throws SQLException {
+ TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
+ connector.executeBatch(this.nativeStmtHandle);
+ this.columnDataClearBatch();
+ }
+
+ public void columnDataClearBatch() {
+ int size = this.colData.size();
+ this.colData.clear();
+
+ this.colData.addAll(Collections.nCopies(size, null));
+ this.tableName = null; // clear the table name
+ }
+
+ public void columnDataCloseBatch() throws SQLException {
+ TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
+ connector.closeBatch(this.nativeStmtHandle);
+
+ this.nativeStmtHandle = 0L;
+ this.tableName = null;
+ }
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
index ce5290de66..7b3be5d263 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
@@ -29,6 +29,8 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import com.taosdata.jdbc.utils.NullType;
+
public class TSDBResultSetBlockData {
private int numOfRows = 0;
private int rowIndex = 0;
@@ -164,59 +166,7 @@ public class TSDBResultSetBlockData {
}
}
- private static class NullType {
- private static final byte NULL_BOOL_VAL = 0x2;
- private static final String NULL_STR = "null";
-
- public String toString() {
- return NullType.NULL_STR;
- }
-
- public static boolean isBooleanNull(byte val) {
- return val == NullType.NULL_BOOL_VAL;
- }
-
- private static boolean isTinyIntNull(byte val) {
- return val == Byte.MIN_VALUE;
- }
-
- private static boolean isSmallIntNull(short val) {
- return val == Short.MIN_VALUE;
- }
-
- private static boolean isIntNull(int val) {
- return val == Integer.MIN_VALUE;
- }
-
- private static boolean isBigIntNull(long val) {
- return val == Long.MIN_VALUE;
- }
-
- private static boolean isFloatNull(float val) {
- return Float.isNaN(val);
- }
-
- private static boolean isDoubleNull(double val) {
- return Double.isNaN(val);
- }
-
- private static boolean isBinaryNull(byte[] val, int length) {
- if (length != Byte.BYTES) {
- return false;
- }
-
- return val[0] == 0xFF;
- }
-
- private static boolean isNcharNull(byte[] val, int length) {
- if (length != Integer.BYTES) {
- return false;
- }
-
- return (val[0] & val[1] & val[2] & val[3]) == 0xFF;
- }
-
- }
+
/**
* The original type may not be a string type, but will be converted to by
@@ -488,8 +438,8 @@ public class TSDBResultSetBlockData {
}
try {
- String ss = TaosGlobalConfig.getCharset();
- return new String(dest, ss);
+ String charset = TaosGlobalConfig.getCharset();
+ return new String(dest, charset);
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
index 34470fbc4e..618e896a6d 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
@@ -84,7 +84,8 @@ public class TSDBResultSetRowData {
data.set(col, value);
}
- public int getInt(int col, int srcType) throws SQLException {
+ @SuppressWarnings("deprecation")
+ public int getInt(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch (srcType) {
@@ -128,7 +129,7 @@ public class TSDBResultSetRowData {
long value = (long) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
- return new Long(value).intValue();
+ return Long.valueOf(value).intValue();
}
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
index fb20a621b0..d8ba67576d 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
@@ -19,8 +19,6 @@ import java.sql.ResultSet;
import java.sql.SQLException;
public class TSDBStatement extends AbstractStatement {
-
- private TSDBJNIConnector connector;
/**
* Status of current statement
*/
@@ -29,29 +27,26 @@ public class TSDBStatement extends AbstractStatement {
private TSDBConnection connection;
private TSDBResultSet resultSet;
- public void setConnection(TSDBConnection connection) {
+ TSDBStatement(TSDBConnection connection) {
this.connection = connection;
}
- TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) {
- this.connection = connection;
- this.connector = connector;
- }
-
public ResultSet executeQuery(String sql) throws SQLException {
// check if closed
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
+ }
+
//TODO: 如果在executeQuery方法中执行insert语句,那么先执行了SQL,再通过pSql来检查是否为一个insert语句,但这个insert SQL已经执行成功了
// execute query
- long pSql = this.connector.executeQuery(sql);
+ long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
- if (this.connector.isUpdateQuery(pSql)) {
- this.connector.freeResultSet(pSql);
+ if (this.connection.getConnector().isUpdateQuery(pSql)) {
+ this.connection.getConnector().freeResultSet(pSql);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY);
}
- TSDBResultSet res = new TSDBResultSet(this, this.connector, pSql);
+ TSDBResultSet res = new TSDBResultSet(this, this.connection.getConnector(), pSql);
res.setBatchFetch(this.connection.getBatchFetch());
return res;
}
@@ -60,14 +55,14 @@ public class TSDBStatement extends AbstractStatement {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- long pSql = this.connector.executeQuery(sql);
+ long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
- if (!this.connector.isUpdateQuery(pSql)) {
- this.connector.freeResultSet(pSql);
+ if (!this.connection.getConnector().isUpdateQuery(pSql)) {
+ this.connection.getConnector().freeResultSet(pSql);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE);
}
- int affectedRows = this.connector.getAffectedRows(pSql);
- this.connector.freeResultSet(pSql);
+ int affectedRows = this.connection.getConnector().getAffectedRows(pSql);
+ this.connection.getConnector().freeResultSet(pSql);
return affectedRows;
}
@@ -81,30 +76,29 @@ public class TSDBStatement extends AbstractStatement {
public boolean execute(String sql) throws SQLException {
// check if closed
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
+ }
+
// execute query
- long pSql = this.connector.executeQuery(sql);
+ long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
- if (this.connector.isUpdateQuery(pSql)) {
- this.affectedRows = this.connector.getAffectedRows(pSql);
- this.connector.freeResultSet(pSql);
+ if (this.connection.getConnector().isUpdateQuery(pSql)) {
+ this.affectedRows = this.connection.getConnector().getAffectedRows(pSql);
+ this.connection.getConnector().freeResultSet(pSql);
return false;
}
- this.resultSet = new TSDBResultSet(this, this.connector, pSql);
+ this.resultSet = new TSDBResultSet(this, this.connection.getConnector(), pSql);
this.resultSet.setBatchFetch(this.connection.getBatchFetch());
return true;
}
public ResultSet getResultSet() throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-// long resultSetPointer = connector.getResultSet();
-// TSDBResultSet resSet = null;
-// if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
-// resSet = new TSDBResultSet(connector, resultSetPointer);
-// }
+ }
+
return this.resultSet;
}
@@ -115,12 +109,20 @@ public class TSDBStatement extends AbstractStatement {
}
public Connection getConnection() throws SQLException {
- if (isClosed())
+ if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- if (this.connector == null)
+ }
+
+ if (this.connection.getConnector() == null) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
+ }
+
return this.connection;
}
+
+ public void setConnection(TSDBConnection connection) {
+ this.connection = connection;
+ }
public boolean isClosed() throws SQLException {
return isClosed;
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
index 6efe13561d..a94cfa6e07 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
@@ -17,7 +17,7 @@ public class RestfulDriver extends AbstractDriver {
static {
try {
- java.sql.DriverManager.registerDriver(new RestfulDriver());
+ DriverManager.registerDriver(new RestfulDriver());
} catch (SQLException e) {
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_URL_NOT_SET, e);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java
index f82955ca9d..f58e3f8cd2 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java
@@ -2,12 +2,12 @@ package com.taosdata.jdbc.rs;
import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;
+import com.taosdata.jdbc.utils.Utils;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
-import java.nio.charset.Charset;
import java.sql.*;
import java.util.Calendar;
@@ -21,6 +21,7 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
public RestfulPreparedStatement(RestfulConnection conn, String database, String sql) {
super(conn, database);
this.rawSql = sql;
+
if (sql.contains("?")) {
int parameterCnt = 0;
for (int i = 0; i < sql.length(); i++) {
@@ -58,29 +59,14 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
return executeUpdate(sql);
}
- private String getNativeSql(String rawSql) throws SQLException {
- String sql = rawSql;
- for (int i = 0; i < parameters.length; ++i) {
- Object para = parameters[i];
- if (para != null) {
- String paraStr;
- if (para instanceof byte[]) {
- paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
- } else {
- paraStr = para.toString();
- }
- // if para is timestamp or String or byte[] need to translate ' character
- if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
- paraStr = paraStr.replaceAll("'", "\\\\\\\\'");
- paraStr = "'" + paraStr + "'";
- }
- sql = sql.replaceFirst("[?]", paraStr);
- } else {
- sql = sql.replaceFirst("[?]", "NULL");
- }
- }
- clearParameters();
- return sql;
+ /****
+ * 将rawSql转换成一条可执行的sql语句,使用属性parameters中的变量进行替换
+ * 对于insert into ?.? (?,?,?) using ?.? (?,?,?) tags(?, ?, ?) values(?, ?, ?)
+ * @param rawSql,可能是insert、select或其他,使用?做占位符
+ * @return
+ */
+ private String getNativeSql(String rawSql) {
+ return Utils.getNativeSql(rawSql, this.parameters);
}
@Override
@@ -220,8 +206,8 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-
- setObject(parameterIndex,x);
+
+ setObject(parameterIndex, x);
}
@Override
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
index e9cc3a009f..fbc3a50a27 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
@@ -136,21 +136,21 @@ public class RestfulStatement extends AbstractStatement {
throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc"));
}
this.resultSet = null;
- this.affectedRows = checkJsonResultSet(jsonObject);
+ this.affectedRows = getAffectedRows(jsonObject);
return this.affectedRows;
}
- private int checkJsonResultSet(JSONObject jsonObject) {
+ private int getAffectedRows(JSONObject jsonObject) throws SQLException {
// create ... SQLs should return 0 , and Restful result is this:
// {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1}
JSONArray head = jsonObject.getJSONArray("head");
+ if (head.size() != 1 || !"affected_rows".equals(head.getString(0)))
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
JSONArray data = jsonObject.getJSONArray("data");
- int rows = Integer.parseInt(jsonObject.getString("rows"));
- if (head.size() == 1 && "affected_rows".equals(head.getString(0))
- && data.size() == 1 && data.getJSONArray(0).getInteger(0) == 0 && rows == 1) {
- return 0;
- }
- return rows;
+ if (data != null)
+ return data.getJSONArray(0).getInteger(0);
+
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
@Override
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java
new file mode 100755
index 0000000000..0e05aeeee7
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java
@@ -0,0 +1,91 @@
+package com.taosdata.jdbc.utils;
+
+public class NullType {
+ private static final byte NULL_BOOL_VAL = 0x2;
+ private static final String NULL_STR = "null";
+
+ public String toString() {
+ return NullType.NULL_STR;
+ }
+
+ public static boolean isBooleanNull(byte val) {
+ return val == NullType.NULL_BOOL_VAL;
+ }
+
+ public static boolean isTinyIntNull(byte val) {
+ return val == Byte.MIN_VALUE;
+ }
+
+ public static boolean isSmallIntNull(short val) {
+ return val == Short.MIN_VALUE;
+ }
+
+ public static boolean isIntNull(int val) {
+ return val == Integer.MIN_VALUE;
+ }
+
+ public static boolean isBigIntNull(long val) {
+ return val == Long.MIN_VALUE;
+ }
+
+ public static boolean isFloatNull(float val) {
+ return Float.isNaN(val);
+ }
+
+ public static boolean isDoubleNull(double val) {
+ return Double.isNaN(val);
+ }
+
+ public static boolean isBinaryNull(byte[] val, int length) {
+ if (length != Byte.BYTES) {
+ return false;
+ }
+
+ return val[0] == 0xFF;
+ }
+
+ public static boolean isNcharNull(byte[] val, int length) {
+ if (length != Integer.BYTES) {
+ return false;
+ }
+
+ return (val[0] & val[1] & val[2] & val[3]) == 0xFF;
+ }
+
+ public static byte getBooleanNull() {
+ return NullType.NULL_BOOL_VAL;
+ }
+
+ public static byte getTinyintNull() {
+ return Byte.MIN_VALUE;
+ }
+
+ public static int getIntNull() {
+ return Integer.MIN_VALUE;
+ }
+
+ public static short getSmallIntNull() {
+ return Short.MIN_VALUE;
+ }
+
+ public static long getBigIntNull() {
+ return Long.MIN_VALUE;
+ }
+
+ public static int getFloatNull() {
+ return 0x7FF00000;
+ }
+
+ public static long getDoubleNull() {
+ return 0x7FFFFF0000000000L;
+ }
+
+ public static byte getBinaryNull() {
+ return (byte) 0xFF;
+ }
+
+ public static byte[] getNcharNull() {
+ return new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF};
+ }
+
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java
new file mode 100644
index 0000000000..052f34858d
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java
@@ -0,0 +1,136 @@
+package com.taosdata.jdbc.utils;
+
+import com.google.common.collect.Range;
+import com.google.common.collect.RangeSet;
+import com.google.common.collect.TreeRangeSet;
+
+import java.nio.charset.Charset;
+import java.sql.Timestamp;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+public class Utils {
+
+ private static Pattern ptn = Pattern.compile(".*?'");
+
+ public static String escapeSingleQuota(String origin) {
+ Matcher m = ptn.matcher(origin);
+ StringBuffer sb = new StringBuffer();
+ int end = 0;
+ while (m.find()) {
+ end = m.end();
+ String seg = origin.substring(m.start(), end);
+ int len = seg.length();
+ if (len == 1) {
+ if ('\'' == seg.charAt(0)) {
+ sb.append("\\'");
+ } else {
+ sb.append(seg);
+ }
+ } else { // len > 1
+ sb.append(seg.substring(0, seg.length() - 2));
+ char lastcSec = seg.charAt(seg.length() - 2);
+ if (lastcSec == '\\') {
+ sb.append("\\'");
+ } else {
+ sb.append(lastcSec);
+ sb.append("\\'");
+ }
+ }
+ }
+
+ if (end < origin.length()) {
+ sb.append(origin.substring(end));
+ }
+ return sb.toString();
+ }
+
+ public static String getNativeSql(String rawSql, Object[] parameters) {
+ // toLowerCase
+ String preparedSql = rawSql.trim().toLowerCase();
+
+ String[] clause = new String[0];
+ if (SqlSyntaxValidator.isInsertSql(preparedSql)) {
+ // insert or import
+ clause = new String[]{"values\\s*\\(.*?\\)", "tags\\s*\\(.*?\\)"};
+ }
+ if (SqlSyntaxValidator.isSelectSql(preparedSql)) {
+ // select
+ clause = new String[]{"where\\s*.*"};
+ }
+ Map<Integer, Integer> placeholderPositions = new HashMap<>();
+ RangeSet<Integer> clauseRangeSet = TreeRangeSet.create();
+ findPlaceholderPosition(preparedSql, placeholderPositions);
+ findClauseRangeSet(preparedSql, clause, clauseRangeSet);
+
+ return transformSql(rawSql, parameters, placeholderPositions, clauseRangeSet);
+ }
+
+ private static void findClauseRangeSet(String preparedSql, String[] regexArr, RangeSet<Integer> clauseRangeSet) {
+ clauseRangeSet.clear();
+ for (String regex : regexArr) {
+ Matcher matcher = Pattern.compile(regex).matcher(preparedSql);
+ while (matcher.find()) {
+ int start = matcher.start();
+ int end = matcher.end();
+ clauseRangeSet.add(Range.closed(start, end));
+ }
+ }
+ }
+
+ private static void findPlaceholderPosition(String preparedSql, Map<Integer, Integer> placeholderPosition) {
+ placeholderPosition.clear();
+ Matcher matcher = Pattern.compile("\\?").matcher(preparedSql);
+ int index = 0;
+ while (matcher.find()) {
+ int pos = matcher.start();
+ placeholderPosition.put(index, pos);
+ index++;
+ }
+ }
+
+ /***
+ *
+ * @param rawSql
+ * @param paramArr
+ * @param placeholderPosition
+ * @param clauseRangeSet
+ * @return
+ */
+ private static String transformSql(String rawSql, Object[] paramArr, Map<Integer, Integer> placeholderPosition, RangeSet<Integer> clauseRangeSet) {
+ String[] sqlArr = rawSql.split("\\?");
+
+ return IntStream.range(0, sqlArr.length).mapToObj(index -> {
+ if (index == paramArr.length)
+ return sqlArr[index];
+
+ Object para = paramArr[index];
+ String paraStr;
+ if (para != null) {
+ if (para instanceof byte[]) {
+ paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
+ } else {
+ paraStr = para.toString();
+ }
+ // if para is timestamp or String or byte[] need to translate ' character
+ if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
+ paraStr = Utils.escapeSingleQuota(paraStr);
+
+ Integer pos = placeholderPosition.get(index);
+ boolean contains = clauseRangeSet.contains(pos);
+ if (contains) {
+ paraStr = "'" + paraStr + "'";
+ }
+ }
+ } else {
+ paraStr = "NULL";
+ }
+ return sqlArr[index] + paraStr;
+ }).collect(Collectors.joining());
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
index 11c3de3052..3a223ed981 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
@@ -12,6 +12,7 @@ import java.util.Properties;
import java.util.concurrent.TimeUnit;
public class SubscribeTest {
+
Connection connection;
Statement statement;
String dbName = "test";
@@ -19,62 +20,53 @@ public class SubscribeTest {
String host = "127.0.0.1";
String topic = "test";
- @Before
- public void createDatabase() {
- try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
- Properties properties = new Properties();
- properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
- properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
- properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
-
- statement = connection.createStatement();
- statement.execute("drop database if exists " + dbName);
- statement.execute("create database if not exists " + dbName);
- statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
- long ts = System.currentTimeMillis();
- for (int i = 0; i < 2; i++) {
- ts += i;
- String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")";
- statement.executeUpdate(sql);
- }
-
- } catch (ClassNotFoundException | SQLException e) {
- return;
- }
- }
-
@Test
public void subscribe() {
try {
String rawSql = "select * from " + dbName + "." + tName + ";";
- System.out.println(rawSql);
-// TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);
+ TSDBConnection conn = connection.unwrap(TSDBConnection.class);
+ TSDBSubscribe subscribe = conn.subscribe(topic, rawSql, false);
-// int a = 0;
-// while (true) {
-// TimeUnit.MILLISECONDS.sleep(1000);
-// TSDBResultSet resSet = subscribe.consume();
-// while (resSet.next()) {
-// for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
-// System.out.printf(i + ": " + resSet.getString(i) + "\t");
-// }
-// System.out.println("\n======" + a + "==========");
-// }
-// a++;
-// if (a >= 2) {
-// break;
-// }
-// resSet.close();
-// }
-//
-// subscribe.close(true);
+ int a = 0;
+ while (true) {
+ TimeUnit.MILLISECONDS.sleep(1000);
+ TSDBResultSet resSet = subscribe.consume();
+ while (resSet.next()) {
+ for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
+ System.out.printf(i + ": " + resSet.getString(i) + "\t");
+ }
+ System.out.println("\n======" + a + "==========");
+ }
+ a++;
+ if (a >= 2) {
+ break;
+ }
+ resSet.close();
+ }
+
+ subscribe.close(true);
} catch (Exception e) {
e.printStackTrace();
}
}
+ @Before
+ public void createDatabase() throws SQLException {
+ Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
+
+ statement = connection.createStatement();
+ statement.execute("drop database if exists " + dbName);
+ statement.execute("create database if not exists " + dbName);
+ statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
+ long ts = System.currentTimeMillis();
+ statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 100, 1)");
+ statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + (ts + 1) + ", 101, 2)");
+ }
+
@After
public void close() {
try {
@@ -86,6 +78,5 @@ public class SubscribeTest {
} catch (SQLException e) {
e.printStackTrace();
}
-
}
}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java
index c5c6f7bca5..f304fd6874 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java
@@ -3,7 +3,6 @@ package com.taosdata.jdbc;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import com.google.common.primitives.Shorts;
-import com.taosdata.jdbc.rs.RestfulResultSet;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
@@ -177,7 +176,8 @@ public class TSDBResultSetTest {
rs.getAsciiStream("f1");
}
- @Test(expected = SQLFeatureNotSupportedException.class)
+ @SuppressWarnings("deprecation")
+ @Test(expected = SQLFeatureNotSupportedException.class)
public void getUnicodeStream() throws SQLException {
rs.getUnicodeStream("f1");
}
@@ -326,7 +326,7 @@ public class TSDBResultSetTest {
@Test(expected = SQLFeatureNotSupportedException.class)
public void getRow() throws SQLException {
- int row = rs.getRow();
+ rs.getRow();
}
@Test(expected = SQLFeatureNotSupportedException.class)
@@ -405,12 +405,12 @@ public class TSDBResultSetTest {
@Test(expected = SQLFeatureNotSupportedException.class)
public void updateByte() throws SQLException {
- rs.updateByte(1, new Byte("0"));
+ rs.updateByte(1, (byte) 0);
}
@Test(expected = SQLFeatureNotSupportedException.class)
public void updateShort() throws SQLException {
- rs.updateShort(1, new Short("0"));
+ rs.updateShort(1, (short) 0);
}
@Test(expected = SQLFeatureNotSupportedException.class)
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java
new file mode 100644
index 0000000000..efc83a6df1
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java
@@ -0,0 +1,401 @@
+package com.taosdata.jdbc.cases;
+
+import org.junit.*;
+
+import java.sql.*;
+
+public class InsertSpecialCharacterJniTest {
+
+ private static final String host = "127.0.0.1";
+ private static Connection conn;
+ private static String dbName = "spec_char_test";
+ private static String tbname1 = "test";
+ private static String tbname2 = "weather";
+ private static String special_character_str_1 = "$asd$$fsfsf$";
+ private static String special_character_str_2 = "\\asdfsfsf\\\\";
+ private static String special_character_str_3 = "\\\\asdfsfsf\\";
+ private static String special_character_str_4 = "?asd??fsf?sf?";
+ private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
+
+ @Test
+ public void testCase01() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_1.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from ?";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, tbname1);
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_1, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+
+ @Test
+ public void testCase02() throws SQLException {
+ //TODO:
+ // Expected :\asdfsfsf\\
+ // Actual :\asdfsfsf\
+
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_2.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ //TODO: bug to be fixed
+// Assert.assertEquals(special_character_str_2, f1);
+ Assert.assertEquals(special_character_str_2.substring(0, special_character_str_1.length() - 2), f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase03() throws SQLException {
+ //TODO:
+ // TDengine ERROR (216): Syntax error in SQL
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_3.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_3, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase04() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_4.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase05() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_5.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase06() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, special_character_str_4);
+ pstmt.setTimestamp(3, new Timestamp(now));
+ pstmt.setBytes(4, special_character_str_4.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query t1
+ final String query = "select * from t1";
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase07() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, ?, ?) ; ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_4.getBytes());
+ pstmt.setString(3, special_character_str_4);
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertEquals(special_character_str_4, f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase08() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?) ? ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, special_character_str_5);
+ pstmt.setTimestamp(3, new Timestamp(now));
+ pstmt.setBytes(4, special_character_str_5.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ }
+
+ @Test
+ public void testCase09() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into ?.t? using " + tbname2 + " tags(?) values(?, ?, ?) t? using weather tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+ // query t1
+ String query = "select * from t?";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setInt(1, 1);
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ // query t2
+ query = "select * from t2";
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ byte[] f1 = rs.getBytes(2);
+ Assert.assertNull(f1);
+ String f2 = new String(rs.getBytes(3));
+ Assert.assertEquals(special_character_str_5, f2);
+ }
+ }
+
+ @Test
+ public void testCase10() throws SQLException {
+ final long now = System.currentTimeMillis();
+
+ // insert
+ final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, tbname2);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+ //query t1
+ String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(4, new Timestamp(0));
+ pstmt.setString(5, "f1");
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ byte[] f2 = rs.getBytes(3);
+ Assert.assertNull(f2);
+ }
+ // query t2
+ query = "select * from t? where ts < ? and ts >= ? and ? is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setInt(1, 2);
+ pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(3, new Timestamp(0));
+ pstmt.setString(4, "f2");
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ byte[] f1 = rs.getBytes(2);
+ Assert.assertNull(f1);
+ String f2 = new String(rs.getBytes(3));
+ Assert.assertEquals(special_character_str_5, f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase11() throws SQLException {
+ final String specialCharacterStr = "?#sd@$f(((s[P)){]}f?s[]{}%vs^a&d*jhg)(j))(f@~!?$";
+ final long now = System.currentTimeMillis();
+
+ final String sql = "insert into t? using " + tbname2 + " values(?, ?, 'abc?abc') ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setTimestamp(2, new Timestamp(now));
+ pstmt.setBytes(3, specialCharacterStr.getBytes());
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ }
+
+
+ @Test
+ public void testCase12() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setString(2, special_character_str_4);
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals("HelloTDengine", f1);
+ String f2 = rs.getString(3);
+ Assert.assertEquals(special_character_str_4, f2);
+ }
+ }
+
+ @Before
+ public void before() throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists " + tbname1 + "");
+ stmt.execute("create table " + tbname1 + "(ts timestamp,f1 binary(64),f2 nchar(64))");
+ stmt.execute("drop table if exists " + tbname2);
+ stmt.execute("create table " + tbname2 + "(ts timestamp, f1 binary(64), f2 nchar(64)) tags(loc nchar(64))");
+ }
+ }
+
+ @BeforeClass
+ public static void beforeClass() throws SQLException {
+ String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+ conn = DriverManager.getConnection(url);
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop database if exists " + dbName);
+ stmt.execute("create database if not exists " + dbName);
+ stmt.execute("use " + dbName);
+ }
+ }
+
+ @AfterClass
+ public static void afterClass() throws SQLException {
+ if (conn != null)
+ conn.close();
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java
new file mode 100644
index 0000000000..0cbbe76716
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java
@@ -0,0 +1,401 @@
+package com.taosdata.jdbc.cases;
+
+import org.junit.*;
+
+import java.sql.*;
+
+public class InsertSpecialCharacterRestfulTest {
+
+ private static final String host = "127.0.0.1";
+ // private static final String host = "master";
+ private static Connection conn;
+ private static String dbName = "spec_char_test";
+ private static String tbname1 = "test";
+ private static String tbname2 = "weather";
+ private static String special_character_str_1 = "$asd$$fsfsf$";
+ private static String special_character_str_2 = "\\asdfsfsf\\\\";
+ private static String special_character_str_3 = "\\\\asdfsfsf\\";
+ private static String special_character_str_4 = "?asd??fsf?sf?";
+ private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
+
+ @Test
+ public void testCase01() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_1.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from ?";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, tbname1);
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_1, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+
+ @Test
+ public void testCase02() throws SQLException {
+ //TODO:
+ // Expected :\asdfsfsf\\
+ // Actual :\asdfsfsf\
+
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_2.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ //TODO: bug to be fixed
+// Assert.assertEquals(special_character_str_2, f1);
+ Assert.assertEquals(special_character_str_2.substring(0, special_character_str_1.length() - 2), f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase03() throws SQLException {
+ //TODO:
+ // TDengine ERROR (216): Syntax error in SQL
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_3.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_3, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase04() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_4.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase05() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_5.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase06() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?)";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, special_character_str_4);
+ pstmt.setTimestamp(3, new Timestamp(now));
+ pstmt.setBytes(4, special_character_str_4.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query t1
+ final String query = "select * from t1";
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ }
+
+ @Test
+ public void testCase07() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, ?, ?) ; ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setBytes(2, special_character_str_4.getBytes());
+ pstmt.setString(3, special_character_str_4);
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_4, f1);
+ String f2 = rs.getString(3);
+ Assert.assertEquals(special_character_str_4, f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase08() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?) ? ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, special_character_str_5);
+ pstmt.setTimestamp(3, new Timestamp(now));
+ pstmt.setBytes(4, special_character_str_5.getBytes());
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ }
+
+ @Test
+ public void testCase09() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into ?.t? using " + tbname2 + " tags(?) values(?, ?, ?) t? using weather tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+ // query t1
+ String query = "select * from t?";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setInt(1, 1);
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ String f2 = rs.getString(3);
+ Assert.assertNull(f2);
+ }
+ // query t2
+ query = "select * from t2";
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ byte[] f1 = rs.getBytes(2);
+ Assert.assertNull(f1);
+ String f2 = new String(rs.getBytes(3));
+ Assert.assertEquals(special_character_str_5, f2);
+ }
+ }
+
+ @Test
+ public void testCase10() throws SQLException {
+ final long now = System.currentTimeMillis();
+
+ // insert
+ final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, tbname2);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+ //query t1
+ String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(4, new Timestamp(0));
+ pstmt.setString(5, "f1");
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals(special_character_str_5, f1);
+ byte[] f2 = rs.getBytes(3);
+ Assert.assertNull(f2);
+ }
+ // query t2
+ query = "select * from t? where ts < ? and ts >= ? and ? is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setInt(1, 2);
+ pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(3, new Timestamp(0));
+ pstmt.setString(4, "f2");
+
+ ResultSet rs = pstmt.executeQuery();
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ byte[] f1 = rs.getBytes(2);
+ Assert.assertNull(f1);
+ String f2 = new String(rs.getBytes(3));
+ Assert.assertEquals(special_character_str_5, f2);
+ }
+ }
+
+ @Test(expected = SQLException.class)
+ public void testCase11() throws SQLException {
+ final String specialCharacterStr = "?#sd@$f(((s[P)){]}f?s[]{}%vs^a&d*jhg)(j))(f@~!?$";
+ final long now = System.currentTimeMillis();
+
+ final String sql = "insert into t? using " + tbname2 + " values(?, ?, 'abc?abc') ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setInt(1, 1);
+ pstmt.setTimestamp(2, new Timestamp(now));
+ pstmt.setBytes(3, specialCharacterStr.getBytes());
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ }
+
+ @Test
+ public void testCase12() throws SQLException {
+ final long now = System.currentTimeMillis();
+ // insert
+ final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ pstmt.setTimestamp(1, new Timestamp(now));
+ pstmt.setString(2, special_character_str_4);
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(1, ret);
+ }
+ // query
+ final String query = "select * from " + tbname1;
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery(query);
+ rs.next();
+ long timestamp = rs.getTimestamp(1).getTime();
+ Assert.assertEquals(now, timestamp);
+ String f1 = new String(rs.getBytes(2));
+ Assert.assertEquals("HelloTDengine", f1);
+ String f2 = rs.getString(3);
+ Assert.assertEquals(special_character_str_4, f2);
+ }
+ }
+
+ @Before
+ public void before() throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists " + tbname1 + "");
+ stmt.execute("create table " + tbname1 + "(ts timestamp,f1 binary(64),f2 nchar(64))");
+ stmt.execute("drop table if exists " + tbname2);
+ stmt.execute("create table " + tbname2 + "(ts timestamp, f1 binary(64), f2 nchar(64)) tags(loc nchar(64))");
+ }
+ }
+
+ @BeforeClass
+ public static void beforeClass() throws SQLException {
+ String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
+ conn = DriverManager.getConnection(url);
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop database if exists " + dbName);
+ stmt.execute("create database if not exists " + dbName);
+ stmt.execute("use " + dbName);
+ }
+ }
+
+ @AfterClass
+ public static void afterClass() throws SQLException {
+ if (conn != null)
+ conn.close();
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java
index 40956a601f..e4dd6384f9 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java
@@ -6,11 +6,11 @@ import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
-import java.io.Serializable;
import java.sql.*;
public class RestfulPreparedStatementTest {
private static final String host = "127.0.0.1";
+ // private static final String host = "master";
private static Connection conn;
private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
private static PreparedStatement pstmt_insert;
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java
new file mode 100644
index 0000000000..c861ef2966
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java
@@ -0,0 +1,24 @@
+package com.taosdata.jdbc.utils;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class UtilsTest {
+
+ @Test
+ public void escapeSingleQuota() {
+ String s = "'''''a\\'";
+ String news = Utils.escapeSingleQuota(s);
+ Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
+
+ s = "\'''''a\\'";
+ news = Utils.escapeSingleQuota(s);
+ Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
+
+ s = "\'\'\'\''a\\'";
+ news = Utils.escapeSingleQuota(s);
+ Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
+ }
+}
\ No newline at end of file
diff --git a/src/connector/python/.gitignore b/src/connector/python/.gitignore
new file mode 100644
index 0000000000..228a0b4530
--- /dev/null
+++ b/src/connector/python/.gitignore
@@ -0,0 +1,154 @@
+
+# Created by https://www.toptal.com/developers/gitignore/api/python
+# Edit at https://www.toptal.com/developers/gitignore?templates=python
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+pytestdebug.log
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+doc/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+#poetry.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+# .env
+.env/
+.venv/
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+pythonenv*
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# operating system-related files
+# file properties cache/storage on macOS
+*.DS_Store
+# thumbnail cache on Windows
+Thumbs.db
+
+# profiling data
+.prof
+
+
+# End of https://www.toptal.com/developers/gitignore/api/python
diff --git a/src/connector/python/linux/python2/LICENSE b/src/connector/python/LICENSE
similarity index 100%
rename from src/connector/python/linux/python2/LICENSE
rename to src/connector/python/LICENSE
diff --git a/src/connector/python/README.md b/src/connector/python/README.md
new file mode 100644
index 0000000000..9151e9b8f0
--- /dev/null
+++ b/src/connector/python/README.md
@@ -0,0 +1,17 @@
+# TDengine Connector for Python
+
+[TDengine] connector for Python enables python programs to access TDengine, using an API which is compliant with the Python DB API 2.0 (PEP-249). It uses TDengine C client library for client server communications.
+
+## Install
+
+```sh
+pip install git+https://github.com/taosdata/TDengine-connector-python
+```
+
+## Source Code
+
+[TDengine] connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine-connector-python).
+
+## License - AGPL
+
+Keep same with [TDengine](https://github.com/taosdata/TDengine).
diff --git a/src/connector/python/examples/demo.py b/src/connector/python/examples/demo.py
new file mode 100644
index 0000000000..6c7c03f3e2
--- /dev/null
+++ b/src/connector/python/examples/demo.py
@@ -0,0 +1,12 @@
+import taos
+
+conn = taos.connect(host='127.0.0.1',
+ user='root',
+ passworkd='taodata',
+ database='log')
+cursor = conn.cursor()
+
+sql = "select * from log.log limit 10"
+cursor.execute(sql)
+for row in cursor:
+ print(row)
diff --git a/src/connector/python/linux/python2 b/src/connector/python/linux/python2
new file mode 120000
index 0000000000..b870225aa0
--- /dev/null
+++ b/src/connector/python/linux/python2
@@ -0,0 +1 @@
+../
\ No newline at end of file
diff --git a/src/connector/python/linux/python2/README.md b/src/connector/python/linux/python2/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/linux/python2/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/linux/python2/setup.py b/src/connector/python/linux/python2/setup.py
deleted file mode 100644
index ff2d90fcb3..0000000000
--- a/src/connector/python/linux/python2/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.8",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 2",
- "Operating System :: Linux",
- ],
-)
diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py
deleted file mode 100644
index 4367947341..0000000000
--- a/src/connector/python/linux/python2/taos/cinterface.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
-
-
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
-
-
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
-
-
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
-
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
-
-
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
-
-
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
-
-
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
-
-
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
-
-
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
-
-
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
-
-
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
-
-
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
-
-
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
-
-
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
-
-
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
-
- return res
-
-
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
-
-# C interface class
-
-
-class CTaosInterface(object):
-
- libtaos = ctypes.CDLL('libtaos.so')
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
-
- def __init__(self, config=None):
- '''
- Function to initialize the class
- @host : str, hostname to connect
- @user : str, username to connect to server
- @password : str, password to connect to server
- @db : str, default db to use when log in
- @config : str, config directory
-
- @rtype : None
- '''
- if config is None:
- self._config = ctypes.c_char_p(None)
- else:
- try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
- except AttributeError:
- raise AttributeError("config is expected as a str")
-
- if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
-
- CTaosInterface.libtaos.taos_init()
-
- @property
- def config(self):
- """ Get current config
- """
- return self._config
-
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
- Function to connect to server
-
- @rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
- @rtype: 0 on success and -1 on failure
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
- @restart boolean,
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
- """
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
-
-
-if __name__ == '__main__':
- cinter = CTaosInterface()
- conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
-
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
-
- fields = CTaosInterface.useResult(result)
-
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
-
- print(data)
-
- cinter.freeResult(result)
- cinter.close(conn)
diff --git a/src/connector/python/linux/python2/taos/cursor.py b/src/connector/python/linux/python2/taos/cursor.py
deleted file mode 100644
index 4c0456b503..0000000000
--- a/src/connector/python/linux/python2/taos/cursor.py
+++ /dev/null
@@ -1,278 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-from .constants import FieldType
-
-
-class TDengineCursor(object):
- """Database cursor which is used to manage the context of a fetch operation.
-
- Attributes:
- .description: Read-only attribute consists of 7-item sequences:
-
- > name (mondatory)
- > type_code (mondatory)
- > display_size
- > internal_size
- > precision
- > scale
- > null_ok
-
- This attribute will be None for operations that do not return rows or
- if the cursor has not had an operation invoked via the .execute*() method yet.
-
- .rowcount:This read-only attribute specifies the number of rows that the last
- .execute*() produced (for DQL statements like SELECT) or affected
- """
-
- def __init__(self, connection=None):
- self._description = []
- self._rowcount = -1
- self._connection = None
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
- self._logfile = ""
-
- if connection is not None:
- self._connection = connection
-
- def __iter__(self):
- return self
-
- def next(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetch iterator")
-
- if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
- if self._block_rows == 0:
- raise StopIteration
- self._block = list(map(tuple, zip(*block)))
- self._block_iter = 0
-
- data = self._block[self._block_iter]
- self._block_iter += 1
-
- return data
-
- @property
- def description(self):
- """Return the description of the object.
- """
- return self._description
-
- @property
- def rowcount(self):
- """Return the rowcount of the object
- """
- return self._rowcount
-
- @property
- def affected_rows(self):
- """Return the affected_rows of the object
- """
- return self._affected_rows
-
- def callproc(self, procname, *args):
- """Call a stored database procedure with the given name.
-
- Void functionality since no stored procedures.
- """
- pass
-
- def log(self, logfile):
- self._logfile = logfile
-
- def close(self):
- """Close the cursor.
- """
- if self._connection is None:
- return False
-
- self._reset_result()
- self._connection = None
-
- return True
-
- def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
- if not operation:
- return None
-
- if not self._connection:
- # TODO : change the exception raised here
- raise ProgrammingError("Cursor is not connected")
-
- self._reset_result()
-
- stmt = operation
- if params is not None:
- pass
-
- # global querySeqNum
- # querySeqNum += 1
- # localSeqNum = querySeqNum # avoid raice condition
- # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
- self._result = CTaosInterface.query(self._connection._conn, stmt)
- # print(" << Query ({}) Exec Done".format(localSeqNum))
- if (self._logfile):
- with open(self._logfile, "a") as logfile:
- logfile.write("%s;\n" % operation)
-
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(
- self._result)
- return self._handle_result()
- else:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
-
- def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
- pass
-
- def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
- pass
-
- def fetchmany(self):
- pass
-
- def istype(self, col, dataType):
- if (dataType.upper() == "BOOL"):
- if (self._description[col][1] == FieldType.C_BOOL):
- return True
- if (dataType.upper() == "TINYINT"):
- if (self._description[col][1] == FieldType.C_TINYINT):
- return True
- if (dataType.upper() == "TINYINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
- return True
- if (dataType.upper() == "SMALLINT"):
- if (self._description[col][1] == FieldType.C_SMALLINT):
- return True
- if (dataType.upper() == "SMALLINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
- return True
- if (dataType.upper() == "INT"):
- if (self._description[col][1] == FieldType.C_INT):
- return True
- if (dataType.upper() == "INT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
- return True
- if (dataType.upper() == "BIGINT"):
- if (self._description[col][1] == FieldType.C_BIGINT):
- return True
- if (dataType.upper() == "BIGINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
- return True
- if (dataType.upper() == "FLOAT"):
- if (self._description[col][1] == FieldType.C_FLOAT):
- return True
- if (dataType.upper() == "DOUBLE"):
- if (self._description[col][1] == FieldType.C_DOUBLE):
- return True
- if (dataType.upper() == "BINARY"):
- if (self._description[col][1] == FieldType.C_BINARY):
- return True
- if (dataType.upper() == "TIMESTAMP"):
- if (self._description[col][1] == FieldType.C_TIMESTAMP):
- return True
- if (dataType.upper() == "NCHAR"):
- if (self._description[col][1] == FieldType.C_NCHAR):
- return True
-
- return False
-
- def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def fetchall(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def nextset(self):
- """
- """
- pass
-
- def setinputsize(self, sizes):
- pass
-
- def setutputsize(self, size, column=None):
- pass
-
- def _reset_result(self):
- """Reset the result to unused version.
- """
- self._description = []
- self._rowcount = -1
- if self._result is not None:
- CTaosInterface.freeResult(self._result)
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
-
- def _handle_result(self):
- """Handle the return result from query.
- """
- self._description = []
- for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
-
- return self._result
diff --git a/src/connector/python/linux/python3 b/src/connector/python/linux/python3
new file mode 120000
index 0000000000..b870225aa0
--- /dev/null
+++ b/src/connector/python/linux/python3
@@ -0,0 +1 @@
+../
\ No newline at end of file
diff --git a/src/connector/python/linux/python3/LICENSE b/src/connector/python/linux/python3/LICENSE
deleted file mode 100644
index 79a9d73086..0000000000
--- a/src/connector/python/linux/python3/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
- Copyright (c) 2019 TAOS Data, Inc.
-
-This program is free software: you can use, redistribute, and/or modify
-it under the terms of the GNU Affero General Public License, version 3
-or later ("AGPL"), as published by the Free Software Foundation.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
diff --git a/src/connector/python/linux/python3/README.md b/src/connector/python/linux/python3/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/linux/python3/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/linux/python3/setup.py b/src/connector/python/linux/python3/setup.py
deleted file mode 100644
index 296e79b973..0000000000
--- a/src/connector/python/linux/python3/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.7",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 3",
- "Operating System :: Linux",
- ],
-)
diff --git a/src/connector/python/linux/python3/taos/__init__.py b/src/connector/python/linux/python3/taos/__init__.py
deleted file mode 100644
index 9732635738..0000000000
--- a/src/connector/python/linux/python3/taos/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
-
-# Globals
-threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
-
-
-def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
-
- Current supporting keyword parameters:
- @dsn: Data source name as string
- @user: Username as string(optional)
- @password: Password as string(optional)
- @host: Hostname(optional)
- @database: Database name(optional)
-
- @rtype: TDengineConnector
- """
- return TDengineConnection(*args, **kwargs)
diff --git a/src/connector/python/linux/python3/taos/connection.py b/src/connector/python/linux/python3/taos/connection.py
deleted file mode 100644
index f6c395342c..0000000000
--- a/src/connector/python/linux/python3/taos/connection.py
+++ /dev/null
@@ -1,95 +0,0 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
-
-
-class TDengineConnection(object):
- """ TDengine connection object
- """
-
- def __init__(self, *args, **kwargs):
- self._conn = None
- self._host = None
- self._user = "root"
- self._password = "taosdata"
- self._database = None
- self._port = 0
- self._config = None
- self._chandle = None
-
- self.config(**kwargs)
-
- def config(self, **kwargs):
- # host
- if 'host' in kwargs:
- self._host = kwargs['host']
-
- # user
- if 'user' in kwargs:
- self._user = kwargs['user']
-
- # password
- if 'password' in kwargs:
- self._password = kwargs['password']
-
- # database
- if 'database' in kwargs:
- self._database = kwargs['database']
-
- # port
- if 'port' in kwargs:
- self._port = kwargs['port']
-
- # config
- if 'config' in kwargs:
- self._config = kwargs['config']
-
- self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
-
- def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
-
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
- if self._conn is None:
- return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
-
- def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
-
- def commit(self):
- """Commit any pending transaction to the database.
-
- Since TDengine do not support transactions, the implement is void functionality.
- """
- pass
-
- def rollback(self):
- """Void functionality
- """
- pass
-
- def clear_result_set(self):
- """Clear unused result set on this connection.
- """
- pass
-
-
-if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
- conn.close()
- print("Hello world")
diff --git a/src/connector/python/linux/python3/taos/constants.py b/src/connector/python/linux/python3/taos/constants.py
deleted file mode 100644
index 93466f5184..0000000000
--- a/src/connector/python/linux/python3/taos/constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Constants in TDengine python
-"""
-
-from .dbapi import *
-
-
-class FieldType(object):
- """TDengine Field Types
- """
- # type_code
- C_NULL = 0
- C_BOOL = 1
- C_TINYINT = 2
- C_SMALLINT = 3
- C_INT = 4
- C_BIGINT = 5
- C_FLOAT = 6
- C_DOUBLE = 7
- C_BINARY = 8
- C_TIMESTAMP = 9
- C_NCHAR = 10
- C_TINYINT_UNSIGNED = 11
- C_SMALLINT_UNSIGNED = 12
- C_INT_UNSIGNED = 13
- C_BIGINT_UNSIGNED = 14
- # NULL value definition
- # NOTE: These values should change according to C definition in tsdb.h
- C_BOOL_NULL = 0x02
- C_TINYINT_NULL = -128
- C_TINYINT_UNSIGNED_NULL = 255
- C_SMALLINT_NULL = -32768
- C_SMALLINT_UNSIGNED_NULL = 65535
- C_INT_NULL = -2147483648
- C_INT_UNSIGNED_NULL = 4294967295
- C_BIGINT_NULL = -9223372036854775808
- C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
- # Timestamp precision definition
- C_TIMESTAMP_MILLI = 0
- C_TIMESTAMP_MICRO = 1
diff --git a/src/connector/python/linux/python3/taos/dbapi.py b/src/connector/python/linux/python3/taos/dbapi.py
deleted file mode 100644
index 594681ada9..0000000000
--- a/src/connector/python/linux/python3/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
- def __com__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DataFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
diff --git a/src/connector/python/linux/python3/taos/error.py b/src/connector/python/linux/python3/taos/error.py
deleted file mode 100644
index c584badce8..0000000000
--- a/src/connector/python/linux/python3/taos/error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Python exceptions
-"""
-
-
-class Error(Exception):
- def __init__(self, msg=None, errno=None):
- self.msg = msg
- self._full_msg = self.msg
- self.errno = errno
-
- def __str__(self):
- return self._full_msg
-
-
-class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
- pass
-
-
-class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
- pass
-
-
-class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
- pass
-
-
-class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
- pass
-
-
-class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
- pass
-
-
-class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
- pass
-
-
-class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
- pass
-
-
-class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
- pass
-
-
-class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
- pass
diff --git a/src/connector/python/linux/python3/taos/subscription.py b/src/connector/python/linux/python3/taos/subscription.py
deleted file mode 100644
index 270d9de092..0000000000
--- a/src/connector/python/linux/python3/taos/subscription.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-
-
-class TDengineSubscription(object):
- """TDengine subscription object
- """
-
- def __init__(self, sub):
- self._sub = sub
-
- def consume(self):
- """Consume rows of a subscription
- """
- if self._sub is None:
- raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
-
- def close(self, keepProgress=True):
- """Close the Subscription.
- """
- if self._sub is None:
- return False
-
- CTaosInterface.unsubscribe(self._sub, keepProgress)
- return True
-
-
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(True, "test", "select * from meters;", 1000)
-
- for i in range(0, 10):
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
diff --git a/src/connector/python/osx/python3 b/src/connector/python/osx/python3
new file mode 120000
index 0000000000..b870225aa0
--- /dev/null
+++ b/src/connector/python/osx/python3
@@ -0,0 +1 @@
+../
\ No newline at end of file
diff --git a/src/connector/python/osx/python3/LICENSE b/src/connector/python/osx/python3/LICENSE
deleted file mode 100644
index 79a9d73086..0000000000
--- a/src/connector/python/osx/python3/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
- Copyright (c) 2019 TAOS Data, Inc.
-
-This program is free software: you can use, redistribute, and/or modify
-it under the terms of the GNU Affero General Public License, version 3
-or later ("AGPL"), as published by the Free Software Foundation.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
diff --git a/src/connector/python/osx/python3/README.md b/src/connector/python/osx/python3/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/osx/python3/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/osx/python3/setup.py b/src/connector/python/osx/python3/setup.py
deleted file mode 100644
index 9bce1a976f..0000000000
--- a/src/connector/python/osx/python3/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.7",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 3",
- "Operating System :: MacOS X",
- ],
-)
diff --git a/src/connector/python/osx/python3/taos/__init__.py b/src/connector/python/osx/python3/taos/__init__.py
deleted file mode 100644
index 9732635738..0000000000
--- a/src/connector/python/osx/python3/taos/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
-
-# Globals
-threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
-
-
-def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
-
- Current supporting keyword parameters:
- @dsn: Data source name as string
- @user: Username as string(optional)
- @password: Password as string(optional)
- @host: Hostname(optional)
- @database: Database name(optional)
-
- @rtype: TDengineConnector
- """
- return TDengineConnection(*args, **kwargs)
diff --git a/src/connector/python/osx/python3/taos/cinterface.py b/src/connector/python/osx/python3/taos/cinterface.py
deleted file mode 100644
index dca9bd42e8..0000000000
--- a/src/connector/python/osx/python3/taos/cinterface.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
-
-
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
-
-
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
-
-
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
-
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
-
-
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
-
-
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
-
-
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
-
-
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
-
-
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
-
-
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
-
-
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
-
-
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
-
-
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
-
-
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
-
-
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
-
- return res
-
-
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
-
-# C interface class
-
-
-class CTaosInterface(object):
-
- libtaos = ctypes.CDLL('libtaos.dylib')
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
-
- def __init__(self, config=None):
- '''
- Function to initialize the class
- @host : str, hostname to connect
- @user : str, username to connect to server
- @password : str, password to connect to server
- @db : str, default db to use when log in
- @config : str, config directory
-
- @rtype : None
- '''
- if config is None:
- self._config = ctypes.c_char_p(None)
- else:
- try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
- except AttributeError:
- raise AttributeError("config is expected as a str")
-
- if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
-
- CTaosInterface.libtaos.taos_init()
-
- @property
- def config(self):
- """ Get current config
- """
- return self._config
-
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
- Function to connect to server
-
- @rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
- @rtype: 0 on success and -1 on failure
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
- @restart boolean,
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
- """
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
-
-
-if __name__ == '__main__':
- cinter = CTaosInterface()
- conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
-
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
-
- fields = CTaosInterface.useResult(result)
-
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
-
- print(data)
-
- cinter.freeResult(result)
- cinter.close(conn)
diff --git a/src/connector/python/osx/python3/taos/connection.py b/src/connector/python/osx/python3/taos/connection.py
deleted file mode 100644
index f6c395342c..0000000000
--- a/src/connector/python/osx/python3/taos/connection.py
+++ /dev/null
@@ -1,95 +0,0 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
-
-
-class TDengineConnection(object):
- """ TDengine connection object
- """
-
- def __init__(self, *args, **kwargs):
- self._conn = None
- self._host = None
- self._user = "root"
- self._password = "taosdata"
- self._database = None
- self._port = 0
- self._config = None
- self._chandle = None
-
- self.config(**kwargs)
-
- def config(self, **kwargs):
- # host
- if 'host' in kwargs:
- self._host = kwargs['host']
-
- # user
- if 'user' in kwargs:
- self._user = kwargs['user']
-
- # password
- if 'password' in kwargs:
- self._password = kwargs['password']
-
- # database
- if 'database' in kwargs:
- self._database = kwargs['database']
-
- # port
- if 'port' in kwargs:
- self._port = kwargs['port']
-
- # config
- if 'config' in kwargs:
- self._config = kwargs['config']
-
- self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
-
- def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
-
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
- if self._conn is None:
- return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
-
- def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
-
- def commit(self):
- """Commit any pending transaction to the database.
-
- Since TDengine do not support transactions, the implement is void functionality.
- """
- pass
-
- def rollback(self):
- """Void functionality
- """
- pass
-
- def clear_result_set(self):
- """Clear unused result set on this connection.
- """
- pass
-
-
-if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
- conn.close()
- print("Hello world")
diff --git a/src/connector/python/osx/python3/taos/constants.py b/src/connector/python/osx/python3/taos/constants.py
deleted file mode 100644
index 93466f5184..0000000000
--- a/src/connector/python/osx/python3/taos/constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Constants in TDengine python
-"""
-
-from .dbapi import *
-
-
-class FieldType(object):
- """TDengine Field Types
- """
- # type_code
- C_NULL = 0
- C_BOOL = 1
- C_TINYINT = 2
- C_SMALLINT = 3
- C_INT = 4
- C_BIGINT = 5
- C_FLOAT = 6
- C_DOUBLE = 7
- C_BINARY = 8
- C_TIMESTAMP = 9
- C_NCHAR = 10
- C_TINYINT_UNSIGNED = 11
- C_SMALLINT_UNSIGNED = 12
- C_INT_UNSIGNED = 13
- C_BIGINT_UNSIGNED = 14
- # NULL value definition
- # NOTE: These values should change according to C definition in tsdb.h
- C_BOOL_NULL = 0x02
- C_TINYINT_NULL = -128
- C_TINYINT_UNSIGNED_NULL = 255
- C_SMALLINT_NULL = -32768
- C_SMALLINT_UNSIGNED_NULL = 65535
- C_INT_NULL = -2147483648
- C_INT_UNSIGNED_NULL = 4294967295
- C_BIGINT_NULL = -9223372036854775808
- C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
- # Timestamp precision definition
- C_TIMESTAMP_MILLI = 0
- C_TIMESTAMP_MICRO = 1
diff --git a/src/connector/python/osx/python3/taos/cursor.py b/src/connector/python/osx/python3/taos/cursor.py
deleted file mode 100644
index 32dc0ea3c3..0000000000
--- a/src/connector/python/osx/python3/taos/cursor.py
+++ /dev/null
@@ -1,280 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-from .constants import FieldType
-
-# querySeqNum = 0
-
-
-class TDengineCursor(object):
- """Database cursor which is used to manage the context of a fetch operation.
-
- Attributes:
- .description: Read-only attribute consists of 7-item sequences:
-
- > name (mondatory)
- > type_code (mondatory)
- > display_size
- > internal_size
- > precision
- > scale
- > null_ok
-
- This attribute will be None for operations that do not return rows or
- if the cursor has not had an operation invoked via the .execute*() method yet.
-
- .rowcount:This read-only attribute specifies the number of rows that the last
- .execute*() produced (for DQL statements like SELECT) or affected
- """
-
- def __init__(self, connection=None):
- self._description = []
- self._rowcount = -1
- self._connection = None
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
- self._logfile = ""
-
- if connection is not None:
- self._connection = connection
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetch iterator")
-
- if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
- if self._block_rows == 0:
- raise StopIteration
- self._block = list(map(tuple, zip(*block)))
- self._block_iter = 0
-
- data = self._block[self._block_iter]
- self._block_iter += 1
-
- return data
-
- @property
- def description(self):
- """Return the description of the object.
- """
- return self._description
-
- @property
- def rowcount(self):
- """Return the rowcount of the object
- """
- return self._rowcount
-
- @property
- def affected_rows(self):
- """Return the rowcount of insertion
- """
- return self._affected_rows
-
- def callproc(self, procname, *args):
- """Call a stored database procedure with the given name.
-
- Void functionality since no stored procedures.
- """
- pass
-
- def log(self, logfile):
- self._logfile = logfile
-
- def close(self):
- """Close the cursor.
- """
- if self._connection is None:
- return False
-
- self._reset_result()
- self._connection = None
-
- return True
-
- def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
- if not operation:
- return None
-
- if not self._connection:
- # TODO : change the exception raised here
- raise ProgrammingError("Cursor is not connected")
-
- self._reset_result()
-
- stmt = operation
- if params is not None:
- pass
-
- # global querySeqNum
- # querySeqNum += 1
- # localSeqNum = querySeqNum # avoid raice condition
- # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
- self._result = CTaosInterface.query(self._connection._conn, stmt)
- # print(" << Query ({}) Exec Done".format(localSeqNum))
- if (self._logfile):
- with open(self._logfile, "a") as logfile:
- logfile.write("%s;\n" % operation)
-
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(
- self._result)
- return self._handle_result()
- else:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
-
- def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
- pass
-
- def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
- pass
-
- def fetchmany(self):
- pass
-
- def istype(self, col, dataType):
- if (dataType.upper() == "BOOL"):
- if (self._description[col][1] == FieldType.C_BOOL):
- return True
- if (dataType.upper() == "TINYINT"):
- if (self._description[col][1] == FieldType.C_TINYINT):
- return True
- if (dataType.upper() == "TINYINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
- return True
- if (dataType.upper() == "SMALLINT"):
- if (self._description[col][1] == FieldType.C_SMALLINT):
- return True
- if (dataType.upper() == "SMALLINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
- return True
- if (dataType.upper() == "INT"):
- if (self._description[col][1] == FieldType.C_INT):
- return True
- if (dataType.upper() == "INT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
- return True
- if (dataType.upper() == "BIGINT"):
- if (self._description[col][1] == FieldType.C_BIGINT):
- return True
- if (dataType.upper() == "BIGINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
- return True
- if (dataType.upper() == "FLOAT"):
- if (self._description[col][1] == FieldType.C_FLOAT):
- return True
- if (dataType.upper() == "DOUBLE"):
- if (self._description[col][1] == FieldType.C_DOUBLE):
- return True
- if (dataType.upper() == "BINARY"):
- if (self._description[col][1] == FieldType.C_BINARY):
- return True
- if (dataType.upper() == "TIMESTAMP"):
- if (self._description[col][1] == FieldType.C_TIMESTAMP):
- return True
- if (dataType.upper() == "NCHAR"):
- if (self._description[col][1] == FieldType.C_NCHAR):
- return True
-
- return False
-
- def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def fetchall(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def nextset(self):
- """
- """
- pass
-
- def setinputsize(self, sizes):
- pass
-
- def setutputsize(self, size, column=None):
- pass
-
- def _reset_result(self):
- """Reset the result to unused version.
- """
- self._description = []
- self._rowcount = -1
- if self._result is not None:
- CTaosInterface.freeResult(self._result)
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
-
- def _handle_result(self):
- """Handle the return result from query.
- """
- self._description = []
- for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
-
- return self._result
diff --git a/src/connector/python/osx/python3/taos/dbapi.py b/src/connector/python/osx/python3/taos/dbapi.py
deleted file mode 100644
index 594681ada9..0000000000
--- a/src/connector/python/osx/python3/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
- def __com__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DataFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
diff --git a/src/connector/python/osx/python3/taos/error.py b/src/connector/python/osx/python3/taos/error.py
deleted file mode 100644
index c584badce8..0000000000
--- a/src/connector/python/osx/python3/taos/error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Python exceptions
-"""
-
-
-class Error(Exception):
- def __init__(self, msg=None, errno=None):
- self.msg = msg
- self._full_msg = self.msg
- self.errno = errno
-
- def __str__(self):
- return self._full_msg
-
-
-class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
- pass
-
-
-class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
- pass
-
-
-class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
- pass
-
-
-class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
- pass
-
-
-class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
- pass
-
-
-class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
- pass
-
-
-class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
- pass
-
-
-class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
- pass
-
-
-class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
- pass
diff --git a/src/connector/python/osx/python3/taos/subscription.py b/src/connector/python/osx/python3/taos/subscription.py
deleted file mode 100644
index 270d9de092..0000000000
--- a/src/connector/python/osx/python3/taos/subscription.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-
-
-class TDengineSubscription(object):
- """TDengine subscription object
- """
-
- def __init__(self, sub):
- self._sub = sub
-
- def consume(self):
- """Consume rows of a subscription
- """
- if self._sub is None:
- raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
-
- def close(self, keepProgress=True):
- """Close the Subscription.
- """
- if self._sub is None:
- return False
-
- CTaosInterface.unsubscribe(self._sub, keepProgress)
- return True
-
-
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(True, "test", "select * from meters;", 1000)
-
- for i in range(0, 10):
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
diff --git a/src/connector/python/setup.py b/src/connector/python/setup.py
new file mode 100644
index 0000000000..4d083d7ddb
--- /dev/null
+++ b/src/connector/python/setup.py
@@ -0,0 +1,35 @@
+import setuptools
+
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+
+setuptools.setup(
+ name="taos",
+ version="2.0.9",
+ author="Taosdata Inc.",
+ author_email="support@taosdata.com",
+ description="TDengine python client package",
+ long_description=long_description,
+ long_description_content_type="text/markdown",
+ url="https://github.com/taosdata/TDengine",
+ packages=setuptools.find_packages(),
+ classifiers=[
+
+ "Environment :: Console",
+ "Environment :: MacOS X",
+ "Environment :: Win32 (MS Windows)",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
+ "Operating System :: MacOS",
+ "Programming Language :: Python :: 2.7",
+ "Operating System :: Linux",
+ "Operating System :: POSIX :: Linux",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: Microsoft :: Windows :: Windows 10",
+ ],
+)
diff --git a/src/connector/python/linux/python2/taos/__init__.py b/src/connector/python/taos/__init__.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/__init__.py
rename to src/connector/python/taos/__init__.py
diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/taos/cinterface.py
similarity index 70%
rename from src/connector/python/linux/python3/taos/cinterface.py
rename to src/connector/python/taos/cinterface.py
index 4367947341..b8824327b0 100644
--- a/src/connector/python/linux/python3/taos/cinterface.py
+++ b/src/connector/python/taos/cinterface.py
@@ -3,6 +3,7 @@ from .constants import FieldType
from .error import *
import math
import datetime
+import platform
def _convert_millisecond_to_datetime(milli):
@@ -20,40 +21,28 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
if micro:
_timestamp_converter = _convert_microsecond_to_datetime
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C bool row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_byte))[
+ :abs(num_of_rows)]]
def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C tinyint row to python row
"""
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
+ return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
def _crow_tinyint_unsigned_to_python(
@@ -63,92 +52,56 @@ def _crow_tinyint_unsigned_to_python(
micro=False):
"""Function to convert C tinyint row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_ubyte))[
+ :abs(num_of_rows)]]
def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C smallint row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_short))[
+ :abs(num_of_rows)]]
def _crow_smallint_unsigned_to_python(
data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C smallint row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_ushort))[
+ :abs(num_of_rows)]]
def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C int row to python row
"""
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
+ return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C int row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_uint))[
+ :abs(num_of_rows)]]
def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C bigint row to python row
"""
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
+ return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bigint_unsigned_to_python(
@@ -158,52 +111,33 @@ def _crow_bigint_unsigned_to_python(
micro=False):
"""Function to convert C bigint row to python row
"""
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
+ return [
+ None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_uint64))[
+ :abs(num_of_rows)]]
def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C float row to python row
"""
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
+ return [None if math.isnan(ele) else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C double row to python row
"""
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
+ return [None if math.isnan(ele) else ele for ele in ctypes.cast(
+ data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C binary row to python row
"""
assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
+ return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
+ 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
@@ -230,30 +164,17 @@ def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
"""
assert(nbytes is not None)
res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
+ for i in range(abs(num_of_rows)):
+ try:
+ rbyte = ctypes.cast(
+ data + nbytes * i,
+ ctypes.POINTER(
+ ctypes.c_short))[
+ :1].pop()
+ tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
+ res.append(tmpstr.value.decode()[0:rbyte])
+ except ValueError:
+ res.append(None)
return res
@@ -262,20 +183,12 @@ def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
"""
assert(nbytes is not None)
res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
+ for i in range(abs(num_of_rows)):
+ try:
+ tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
+ res.append(tmpstr.value.decode())
+ except ValueError:
+ res.append(None)
return res
@@ -324,14 +237,38 @@ class TaosField(ctypes.Structure):
# C interface class
+def _load_taos_linux():
+ return ctypes.CDLL('libtaos.so')
+
+
+def _load_taos_darwin():
+ return ctypes.CDLL('libtaos.dylib')
+
+
+def _load_taos_windows():
+ return ctypes.windll.LoadLibrary('taos')
+
+
+def _load_taos():
+ load_func = {
+ 'Linux': _load_taos_linux,
+ 'Darwin': _load_taos_darwin,
+ 'Windows': _load_taos_windows,
+ }
+ try:
+ return load_func[platform.system()]()
+ except:
+ import sys; sys.exit('unsupported platform to TDengine connector')
+
+
class CTaosInterface(object):
- libtaos = ctypes.CDLL('libtaos.so')
+ libtaos = _load_taos()
libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
libtaos.taos_init.restype = None
libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
+ # libtaos.taos_use_result.restype = ctypes.c_void_p
libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
libtaos.taos_errstr.restype = ctypes.c_char_p
libtaos.taos_subscribe.restype = ctypes.c_void_p
@@ -432,7 +369,7 @@ class CTaosInterface(object):
'''Close the TDengine handle
'''
CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
+ # print('connection is closed')
@staticmethod
def query(connection, sql):
diff --git a/src/connector/python/linux/python2/taos/connection.py b/src/connector/python/taos/connection.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/connection.py
rename to src/connector/python/taos/connection.py
diff --git a/src/connector/python/linux/python2/taos/constants.py b/src/connector/python/taos/constants.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/constants.py
rename to src/connector/python/taos/constants.py
diff --git a/src/connector/python/linux/python3/taos/cursor.py b/src/connector/python/taos/cursor.py
similarity index 98%
rename from src/connector/python/linux/python3/taos/cursor.py
rename to src/connector/python/taos/cursor.py
index 32dc0ea3c3..d443ec95d0 100644
--- a/src/connector/python/linux/python3/taos/cursor.py
+++ b/src/connector/python/taos/cursor.py
@@ -45,6 +45,12 @@ class TDengineCursor(object):
return self
def __next__(self):
+ return self._taos_next()
+
+ def next(self):
+ return self._taos_next()
+
+ def _taos_next(self):
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetch iterator")
diff --git a/src/connector/python/linux/python2/taos/dbapi.py b/src/connector/python/taos/dbapi.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/dbapi.py
rename to src/connector/python/taos/dbapi.py
diff --git a/src/connector/python/linux/python2/taos/error.py b/src/connector/python/taos/error.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/error.py
rename to src/connector/python/taos/error.py
diff --git a/src/connector/python/linux/python2/taos/subscription.py b/src/connector/python/taos/subscription.py
similarity index 100%
rename from src/connector/python/linux/python2/taos/subscription.py
rename to src/connector/python/taos/subscription.py
diff --git a/src/connector/python/windows/python2 b/src/connector/python/windows/python2
new file mode 120000
index 0000000000..b870225aa0
--- /dev/null
+++ b/src/connector/python/windows/python2
@@ -0,0 +1 @@
+../
\ No newline at end of file
diff --git a/src/connector/python/windows/python2/LICENSE b/src/connector/python/windows/python2/LICENSE
deleted file mode 100644
index 79a9d73086..0000000000
--- a/src/connector/python/windows/python2/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
- Copyright (c) 2019 TAOS Data, Inc.
-
-This program is free software: you can use, redistribute, and/or modify
-it under the terms of the GNU Affero General Public License, version 3
-or later ("AGPL"), as published by the Free Software Foundation.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
diff --git a/src/connector/python/windows/python2/README.md b/src/connector/python/windows/python2/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/windows/python2/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/windows/python2/setup.py b/src/connector/python/windows/python2/setup.py
deleted file mode 100644
index 47d374fe67..0000000000
--- a/src/connector/python/windows/python2/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.7",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 2",
- "Operating System :: Windows",
- ],
-)
diff --git a/src/connector/python/windows/python2/taos/__init__.py b/src/connector/python/windows/python2/taos/__init__.py
deleted file mode 100644
index 9732635738..0000000000
--- a/src/connector/python/windows/python2/taos/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
-
-# Globals
-threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
-
-
-def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
-
- Current supporting keyword parameters:
- @dsn: Data source name as string
- @user: Username as string(optional)
- @password: Password as string(optional)
- @host: Hostname(optional)
- @database: Database name(optional)
-
- @rtype: TDengineConnector
- """
- return TDengineConnection(*args, **kwargs)
diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py
deleted file mode 100644
index ec72474df9..0000000000
--- a/src/connector/python/windows/python2/taos/cinterface.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
-
-
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
-
-
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
-
-
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
-
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
-
-
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
-
-
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
-
-
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
-
-
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
-
-
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
-
-
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
-
-
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
-
-
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
-
-
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
-
-
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
-
-
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
-
- return res
-
-
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
-
-# C interface class
-
-
-class CTaosInterface(object):
-
- libtaos = ctypes.windll.LoadLibrary('taos')
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
-
- def __init__(self, config=None):
- '''
- Function to initialize the class
- @host : str, hostname to connect
- @user : str, username to connect to server
- @password : str, password to connect to server
- @db : str, default db to use when log in
- @config : str, config directory
-
- @rtype : None
- '''
- if config is None:
- self._config = ctypes.c_char_p(None)
- else:
- try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
- except AttributeError:
- raise AttributeError("config is expected as a str")
-
- if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
-
- CTaosInterface.libtaos.taos_init()
-
- @property
- def config(self):
- """ Get current config
- """
- return self._config
-
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
- Function to connect to server
-
- @rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
- @rtype: 0 on success and -1 on failure
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
- @restart boolean,
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
- """
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
-
-
-if __name__ == '__main__':
- cinter = CTaosInterface()
- conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
-
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
-
- fields = CTaosInterface.useResult(result)
-
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
-
- print(data)
-
- cinter.freeResult(result)
- cinter.close(conn)
diff --git a/src/connector/python/windows/python2/taos/connection.py b/src/connector/python/windows/python2/taos/connection.py
deleted file mode 100644
index 5729d01c6d..0000000000
--- a/src/connector/python/windows/python2/taos/connection.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
-
-
-class TDengineConnection(object):
- """ TDengine connection object
- """
-
- def __init__(self, *args, **kwargs):
- self._conn = None
- self._host = None
- self._user = "root"
- self._password = "taosdata"
- self._database = None
- self._port = 0
- self._config = None
- self._chandle = None
-
- if len(kwargs) > 0:
- self.config(**kwargs)
-
- def config(self, **kwargs):
- # host
- if 'host' in kwargs:
- self._host = kwargs['host']
-
- # user
- if 'user' in kwargs:
- self._user = kwargs['user']
-
- # password
- if 'password' in kwargs:
- self._password = kwargs['password']
-
- # database
- if 'database' in kwargs:
- self._database = kwargs['database']
-
- # port
- if 'port' in kwargs:
- self._port = kwargs['port']
-
- # config
- if 'config' in kwargs:
- self._config = kwargs['config']
-
- self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
-
- def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
-
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
- if self._conn is None:
- return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
-
- def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
-
- def commit(self):
- """Commit any pending transaction to the database.
-
- Since TDengine do not support transactions, the implement is void functionality.
- """
- pass
-
- def rollback(self):
- """Void functionality
- """
- pass
-
- def clear_result_set(self):
- """Clear unused result set on this connection.
- """
- pass
-
-
-if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
- conn.close()
- print("Hello world")
diff --git a/src/connector/python/windows/python2/taos/constants.py b/src/connector/python/windows/python2/taos/constants.py
deleted file mode 100644
index 8a8011c3e3..0000000000
--- a/src/connector/python/windows/python2/taos/constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Constants in TDengine python
-"""
-
-from .dbapi import *
-
-
-class FieldType(object):
- """TDengine Field Types
- """
- # type_code
- C_NULL = 0
- C_BOOL = 1
- C_TINYINT = 2
- C_SMALLINT = 3
- C_INT = 4
- C_BIGINT = 5
- C_FLOAT = 6
- C_DOUBLE = 7
- C_BINARY = 8
- C_TIMESTAMP = 9
- C_NCHAR = 10
- C_TINYINT_UNSIGNED = 11
- C_SMALLINT_UNSIGNED = 12
- C_INT_UNSIGNED = 13
- C_BIGINT_UNSIGNED = 14
- # NULL value definition
- # NOTE: These values should change according to C definition in tsdb.h
- C_BOOL_NULL = 0x02
- C_TINYINT_NULL = -128
- C_TINYINT_UNSIGNED_NULL = 255
- C_SMALLINT_NULL = -32768
- C_SMALLINT_UNSIGNED_NULL = 65535
- C_INT_NULL = -2147483648
- C_INT_UNSIGNED_NULL = 4294967295
- C_BIGINT_NULL = -9223372036854775808
- C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
- # Time precision definition
- C_TIMESTAMP_MILLI = 0
- C_TIMESTAMP_MICRO = 1
diff --git a/src/connector/python/windows/python2/taos/cursor.py b/src/connector/python/windows/python2/taos/cursor.py
deleted file mode 100644
index 5f4666b593..0000000000
--- a/src/connector/python/windows/python2/taos/cursor.py
+++ /dev/null
@@ -1,220 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-from .constants import FieldType
-
-# querySeqNum = 0
-
-
-class TDengineCursor(object):
- """Database cursor which is used to manage the context of a fetch operation.
-
- Attributes:
- .description: Read-only attribute consists of 7-item sequences:
-
- > name (mondatory)
- > type_code (mondatory)
- > display_size
- > internal_size
- > precision
- > scale
- > null_ok
-
- This attribute will be None for operations that do not return rows or
- if the cursor has not had an operation invoked via the .execute*() method yet.
-
- .rowcount:This read-only attribute specifies the number of rows that the last
- .execute*() produced (for DQL statements like SELECT) or affected
- """
-
- def __init__(self, connection=None):
- self._description = []
- self._rowcount = -1
- self._connection = None
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
- self._logfile = ""
-
- if connection is not None:
- self._connection = connection
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetch iterator")
-
- if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
- if self._block_rows == 0:
- raise StopIteration
- self._block = list(map(tuple, zip(*block)))
- self._block_iter = 0
-
- data = self._block[self._block_iter]
- self._block_iter += 1
-
- return data
-
- @property
- def description(self):
- """Return the description of the object.
- """
- return self._description
-
- @property
- def rowcount(self):
- """Return the rowcount of the object
- """
- return self._rowcount
-
- @property
- def affected_rows(self):
- """Return the affected_rows of the object
- """
- return self._affected_rows
-
- def callproc(self, procname, *args):
- """Call a stored database procedure with the given name.
-
- Void functionality since no stored procedures.
- """
- pass
-
- def close(self):
- """Close the cursor.
- """
- if self._connection is None:
- return False
-
- self._reset_result()
- self._connection = None
-
- return True
-
- def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
- if not operation:
- return None
-
- if not self._connection:
- # TODO : change the exception raised here
- raise ProgrammingError("Cursor is not connected")
-
- self._reset_result()
-
- stmt = operation
- if params is not None:
- pass
-
- self._result = CTaosInterface.query(self._connection._conn, stmt)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(self._result)
- return self._handle_result()
- else:
- raise ProgrammingError(CTaosInterface.errStr(self._result), errno)
-
- def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
- pass
-
- def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
- pass
-
- def fetchmany(self):
- pass
-
- def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def fetchall(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
-
- return list(map(tuple, zip(*buffer)))
-
- def nextset(self):
- """
- """
- pass
-
- def setinputsize(self, sizes):
- pass
-
- def setutputsize(self, size, column=None):
- pass
-
- def _reset_result(self):
- """Reset the result to unused version.
- """
- self._description = []
- self._rowcount = -1
- if self._result is not None:
- CTaosInterface.freeResult(self._result)
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
-
- def _handle_result(self):
- """Handle the return result from query.
- """
- self._description = []
- for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
-
- return self._result
diff --git a/src/connector/python/windows/python2/taos/dbapi.py b/src/connector/python/windows/python2/taos/dbapi.py
deleted file mode 100644
index 594681ada9..0000000000
--- a/src/connector/python/windows/python2/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
- def __com__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DataFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
diff --git a/src/connector/python/windows/python2/taos/error.py b/src/connector/python/windows/python2/taos/error.py
deleted file mode 100644
index c584badce8..0000000000
--- a/src/connector/python/windows/python2/taos/error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Python exceptions
-"""
-
-
-class Error(Exception):
- def __init__(self, msg=None, errno=None):
- self.msg = msg
- self._full_msg = self.msg
- self.errno = errno
-
- def __str__(self):
- return self._full_msg
-
-
-class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
- pass
-
-
-class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
- pass
-
-
-class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
- pass
-
-
-class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
- pass
-
-
-class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
- pass
-
-
-class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
- pass
-
-
-class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
- pass
-
-
-class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
- pass
-
-
-class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
- pass
diff --git a/src/connector/python/windows/python2/taos/subscription.py b/src/connector/python/windows/python2/taos/subscription.py
deleted file mode 100644
index 270d9de092..0000000000
--- a/src/connector/python/windows/python2/taos/subscription.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-
-
-class TDengineSubscription(object):
- """TDengine subscription object
- """
-
- def __init__(self, sub):
- self._sub = sub
-
- def consume(self):
- """Consume rows of a subscription
- """
- if self._sub is None:
- raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
-
- def close(self, keepProgress=True):
- """Close the Subscription.
- """
- if self._sub is None:
- return False
-
- CTaosInterface.unsubscribe(self._sub, keepProgress)
- return True
-
-
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(True, "test", "select * from meters;", 1000)
-
- for i in range(0, 10):
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
diff --git a/src/connector/python/windows/python3 b/src/connector/python/windows/python3
new file mode 120000
index 0000000000..b870225aa0
--- /dev/null
+++ b/src/connector/python/windows/python3
@@ -0,0 +1 @@
+../
\ No newline at end of file
diff --git a/src/connector/python/windows/python3/LICENSE b/src/connector/python/windows/python3/LICENSE
deleted file mode 100644
index 2d032e65d8..0000000000
--- a/src/connector/python/windows/python3/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
- Copyright (c) 2019 TAOS Data, Inc.
-
-This program is free software: you can use, redistribute, and/or modify
-it under the terms of the GNU Affero General Public License, version 3
-or later ("AGPL"), as published by the Free Software Foundation.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
diff --git a/src/connector/python/windows/python3/README.md b/src/connector/python/windows/python3/README.md
deleted file mode 100644
index 70db6bba13..0000000000
--- a/src/connector/python/windows/python3/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# TDengine python client interface
\ No newline at end of file
diff --git a/src/connector/python/windows/python3/setup.py b/src/connector/python/windows/python3/setup.py
deleted file mode 100644
index cdcec62a21..0000000000
--- a/src/connector/python/windows/python3/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="taos",
- version="2.0.7",
- author="Taosdata Inc.",
- author_email="support@taosdata.com",
- description="TDengine python client package",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pypa/sampleproject",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 3",
- "Operating System :: Windows",
- ],
-)
diff --git a/src/connector/python/windows/python3/taos/__init__.py b/src/connector/python/windows/python3/taos/__init__.py
deleted file mode 100644
index b57e25fd2c..0000000000
--- a/src/connector/python/windows/python3/taos/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
-
-# Globals
-threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
-
-
-def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
-
- Current supporting keyword parameters:
- @dsn: Data source name as string
- @user: Username as string(optional)
- @password: Password as string(optional)
- @host: Hostname(optional)
- @database: Database name(optional)
-
- @rtype: TDengineConnector
- """
- return TDengineConnection(*args, **kwargs)
diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py
deleted file mode 100644
index ec72474df9..0000000000
--- a/src/connector/python/windows/python3/taos/cinterface.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
-
-
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
-
-
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
-
-
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
-
- if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
- else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
-
-
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_bool))[
- :abs(num_of_rows)]]
-
-
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
-
-
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
-
-
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
-
-
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
-
-
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
-
-
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
- else:
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
-
-
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
- """
- if num_of_rows > 0:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
- else:
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
-
-
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
-
-
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
- """
- if num_of_rows > 0:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
- else:
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
-
-
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- if num_of_rows > 0:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
- else:
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
-
-
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
-
- return res
-
-
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows > 0:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
- """
- assert(nbytes is not None)
- res = []
- if num_of_rows >= 0:
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- else:
- for i in range(abs(num_of_rows)):
- try:
- res.append((ctypes.cast(data + nbytes * i + 2,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
-
-# C interface class
-
-
-class CTaosInterface(object):
-
- libtaos = ctypes.windll.LoadLibrary('taos')
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- #libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
-
- def __init__(self, config=None):
- '''
- Function to initialize the class
- @host : str, hostname to connect
- @user : str, username to connect to server
- @password : str, password to connect to server
- @db : str, default db to use when log in
- @config : str, config directory
-
- @rtype : None
- '''
- if config is None:
- self._config = ctypes.c_char_p(None)
- else:
- try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
- except AttributeError:
- raise AttributeError("config is expected as a str")
-
- if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
-
- CTaosInterface.libtaos.taos_init()
-
- @property
- def config(self):
- """ Get current config
- """
- return self._config
-
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
- Function to connect to server
-
- @rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- #print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
- @rtype: 0 on success and -1 on failure
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
- @restart boolean,
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
- """
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
-
-
-if __name__ == '__main__':
- cinter = CTaosInterface()
- conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
-
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
-
- fields = CTaosInterface.useResult(result)
-
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
-
- print(data)
-
- cinter.freeResult(result)
- cinter.close(conn)
diff --git a/src/connector/python/windows/python3/taos/connection.py b/src/connector/python/windows/python3/taos/connection.py
deleted file mode 100644
index 5729d01c6d..0000000000
--- a/src/connector/python/windows/python3/taos/connection.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
-
-
-class TDengineConnection(object):
- """ TDengine connection object
- """
-
- def __init__(self, *args, **kwargs):
- self._conn = None
- self._host = None
- self._user = "root"
- self._password = "taosdata"
- self._database = None
- self._port = 0
- self._config = None
- self._chandle = None
-
- if len(kwargs) > 0:
- self.config(**kwargs)
-
- def config(self, **kwargs):
- # host
- if 'host' in kwargs:
- self._host = kwargs['host']
-
- # user
- if 'user' in kwargs:
- self._user = kwargs['user']
-
- # password
- if 'password' in kwargs:
- self._password = kwargs['password']
-
- # database
- if 'database' in kwargs:
- self._database = kwargs['database']
-
- # port
- if 'port' in kwargs:
- self._port = kwargs['port']
-
- # config
- if 'config' in kwargs:
- self._config = kwargs['config']
-
- self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
-
- def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
-
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
- if self._conn is None:
- return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
-
- def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
-
- def commit(self):
- """Commit any pending transaction to the database.
-
- Since TDengine do not support transactions, the implement is void functionality.
- """
- pass
-
- def rollback(self):
- """Void functionality
- """
- pass
-
- def clear_result_set(self):
- """Clear unused result set on this connection.
- """
- pass
-
-
-if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
- conn.close()
- print("Hello world")
diff --git a/src/connector/python/windows/python3/taos/constants.py b/src/connector/python/windows/python3/taos/constants.py
deleted file mode 100644
index 49fc17b2fb..0000000000
--- a/src/connector/python/windows/python3/taos/constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Constants in TDengine python
-"""
-
-from .dbapi import *
-
-
-class FieldType(object):
- """TDengine Field Types
- """
- # type_code
- C_NULL = 0
- C_BOOL = 1
- C_TINYINT = 2
- C_SMALLINT = 3
- C_INT = 4
- C_BIGINT = 5
- C_FLOAT = 6
- C_DOUBLE = 7
- C_BINARY = 8
- C_TIMESTAMP = 9
- C_NCHAR = 10
- C_TINYINT_UNSIGNED = 11
- C_SMALLINT_UNSIGNED = 12
- C_INT_UNSIGNED = 13
- C_BIGINT_UNSIGNED = 14
- # NULL value definition
- # NOTE: These values should change according to C definition in tsdb.h
- C_BOOL_NULL = 0x02
- C_TINYINT_NULL = -128
- C_TINYINT_UNSIGNED_NULL = 255
- C_SMALLINT_NULL = -32768
- C_SMALLINT_UNSIGNED_NULL = 65535
- C_INT_NULL = -2147483648
- C_INT_UNSIGNED_NULL = 4294967295
- C_BIGINT_NULL = -9223372036854775808
- C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
- # Timestamp precision definition
- C_TIMESTAMP_MILLI = 0
- C_TIMESTAMP_MICRO = 1
diff --git a/src/connector/python/windows/python3/taos/cursor.py b/src/connector/python/windows/python3/taos/cursor.py
deleted file mode 100644
index 136cd42fe4..0000000000
--- a/src/connector/python/windows/python3/taos/cursor.py
+++ /dev/null
@@ -1,220 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-from .constants import FieldType
-
-# querySeqNum = 0
-
-
-class TDengineCursor(object):
- """Database cursor which is used to manage the context of a fetch operation.
-
- Attributes:
- .description: Read-only attribute consists of 7-item sequences:
-
- > name (mondatory)
- > type_code (mondatory)
- > display_size
- > internal_size
- > precision
- > scale
- > null_ok
-
- This attribute will be None for operations that do not return rows or
- if the cursor has not had an operation invoked via the .execute*() method yet.
-
- .rowcount:This read-only attribute specifies the number of rows that the last
- .execute*() produced (for DQL statements like SELECT) or affected
- """
-
- def __init__(self, connection=None):
- self._description = []
- self._rowcount = -1
- self._connection = None
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
- self._logfile = ""
-
- if connection is not None:
- self._connection = connection
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetch iterator")
-
- if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
- if self._block_rows == 0:
- raise StopIteration
- self._block = list(map(tuple, zip(*block)))
- self._block_iter = 0
-
- data = self._block[self._block_iter]
- self._block_iter += 1
-
- return data
-
- @property
- def description(self):
- """Return the description of the object.
- """
- return self._description
-
- @property
- def rowcount(self):
- """Return the rowcount of the object
- """
- return self._rowcount
-
- @property
- def affected_rows(self):
- """Return the affected_rows of the object
- """
- return self._affected_rows
-
- def callproc(self, procname, *args):
- """Call a stored database procedure with the given name.
-
- Void functionality since no stored procedures.
- """
- pass
-
- def close(self):
- """Close the cursor.
- """
- if self._connection is None:
- return False
-
- self._reset_result()
- self._connection = None
-
- return True
-
- def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
- if not operation:
- return None
-
- if not self._connection:
- # TODO : change the exception raised here
- raise ProgrammingError("Cursor is not connected")
-
- self._reset_result()
-
- stmt = operation
- if params is not None:
- pass
-
- self._result = CTaosInterface.query(self._connection._conn, stmt)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(self._result)
- return self._handle_result()
- else:
- raise ProgrammingError(CTaosInterface.errStr(self._result), errno)
-
- def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
- pass
-
- def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
- pass
-
- def fetchmany(self):
- pass
-
- def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
- return list(map(tuple, zip(*buffer)))
-
- def fetchall(self):
- if self._result is None or self._fields is None:
- raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
- self._rowcount = 0
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
- if num_of_fields == 0:
- break
- self._rowcount += num_of_fields
- for i in range(len(self._fields)):
- buffer[i].extend(block[i])
-
- return list(map(tuple, zip(*buffer)))
-
- def nextset(self):
- """
- """
- pass
-
- def setinputsize(self, sizes):
- pass
-
- def setutputsize(self, size, column=None):
- pass
-
- def _reset_result(self):
- """Reset the result to unused version.
- """
- self._description = []
- self._rowcount = -1
- if self._result is not None:
- CTaosInterface.freeResult(self._result)
- self._result = None
- self._fields = None
- self._block = None
- self._block_rows = -1
- self._block_iter = 0
- self._affected_rows = 0
-
- def _handle_result(self):
- """Handle the return result from query.
- """
- self._description = []
- for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
-
- return self._result
diff --git a/src/connector/python/windows/python3/taos/dbapi.py b/src/connector/python/windows/python3/taos/dbapi.py
deleted file mode 100644
index a29621f7a3..0000000000
--- a/src/connector/python/windows/python3/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
- def __com__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DataFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
diff --git a/src/connector/python/windows/python3/taos/error.py b/src/connector/python/windows/python3/taos/error.py
deleted file mode 100644
index 238b293a0b..0000000000
--- a/src/connector/python/windows/python3/taos/error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Python exceptions
-"""
-
-
-class Error(Exception):
- def __init__(self, msg=None, errno=None):
- self.msg = msg
- self._full_msg = self.msg
- self.errno = errno
-
- def __str__(self):
- return self._full_msg
-
-
-class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
- pass
-
-
-class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
- pass
-
-
-class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
- pass
-
-
-class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
- pass
-
-
-class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
- pass
-
-
-class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
- pass
-
-
-class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
- pass
-
-
-class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
- pass
-
-
-class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
- pass
diff --git a/src/connector/python/windows/python3/taos/subscription.py b/src/connector/python/windows/python3/taos/subscription.py
deleted file mode 100644
index 270d9de092..0000000000
--- a/src/connector/python/windows/python3/taos/subscription.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .cinterface import CTaosInterface
-from .error import *
-
-
-class TDengineSubscription(object):
- """TDengine subscription object
- """
-
- def __init__(self, sub):
- self._sub = sub
-
- def consume(self):
- """Consume rows of a subscription
- """
- if self._sub is None:
- raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
-
- def close(self, keepProgress=True):
- """Close the Subscription.
- """
- if self._sub is None:
- return False
-
- CTaosInterface.unsubscribe(self._sub, keepProgress)
- return True
-
-
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(True, "test", "select * from meters;", 1000)
-
- for i in range(0, 10):
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
diff --git a/src/dnode/src/dnodeCfg.c b/src/dnode/src/dnodeCfg.c
index c573d709f5..586adacc98 100644
--- a/src/dnode/src/dnodeCfg.c
+++ b/src/dnode/src/dnodeCfg.c
@@ -158,7 +158,7 @@ static int32_t dnodeWriteCfg() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
diff --git a/src/dnode/src/dnodeEps.c b/src/dnode/src/dnodeEps.c
index 9554651776..9b15353647 100644
--- a/src/dnode/src/dnodeEps.c
+++ b/src/dnode/src/dnodeEps.c
@@ -277,7 +277,7 @@ static int32_t dnodeWriteEps() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
diff --git a/src/dnode/src/dnodeMInfos.c b/src/dnode/src/dnodeMInfos.c
index 0dca116d84..611c30b843 100644
--- a/src/dnode/src/dnodeMInfos.c
+++ b/src/dnode/src/dnodeMInfos.c
@@ -286,7 +286,7 @@ static int32_t dnodeWriteMInfos() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
diff --git a/src/inc/taos.h b/src/inc/taos.h
index cd8e116053..6dd695b320 100644
--- a/src/inc/taos.h
+++ b/src/inc/taos.h
@@ -82,6 +82,7 @@ typedef struct TAOS_BIND {
uintptr_t buffer_length; // unused
uintptr_t *length;
int * is_null;
+
int is_unsigned; // unused
int * error; // unused
union {
@@ -99,12 +100,25 @@ typedef struct TAOS_BIND {
unsigned int allocated;
} TAOS_BIND;
+typedef struct TAOS_MULTI_BIND {
+ int buffer_type;
+ void *buffer;
+ uintptr_t buffer_length;
+ int32_t *length;
+ char *is_null;
+ int num;
+} TAOS_MULTI_BIND;
+
+
TAOS_STMT *taos_stmt_init(TAOS *taos);
int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
+int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name);
int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes);
int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind);
+int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind);
+int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx);
int taos_stmt_add_batch(TAOS_STMT *stmt);
int taos_stmt_execute(TAOS_STMT *stmt);
TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt);
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index e9170860a6..e596ee67ec 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -22,7 +22,6 @@ extern "C" {
#include
#include
-#include "osDef.h"
#include "taos.h"
#define TSDB__packed
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index eff4eecbc1..ce6f7c4f22 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -218,6 +218,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended")
#define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied")
#define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing")
+#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state")
// tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID")
diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h
index e9f95660f7..ef3f8ed1fb 100644
--- a/src/inc/ttokendef.h
+++ b/src/inc/ttokendef.h
@@ -79,12 +79,12 @@
#define TK_DOT 60
#define TK_CREATE 61
#define TK_TABLE 62
-#define TK_DATABASE 63
-#define TK_TABLES 64
-#define TK_STABLES 65
-#define TK_VGROUPS 66
-#define TK_DROP 67
-#define TK_STABLE 68
+#define TK_STABLE 63
+#define TK_DATABASE 64
+#define TK_TABLES 65
+#define TK_STABLES 66
+#define TK_VGROUPS 67
+#define TK_DROP 68
#define TK_TOPIC 69
#define TK_DNODE 70
#define TK_USER 71
diff --git a/src/inc/ttype.h b/src/inc/ttype.h
index 662a23bfdb..9949f31c59 100644
--- a/src/inc/ttype.h
+++ b/src/inc/ttype.h
@@ -5,6 +5,8 @@
extern "C" {
#endif
+#include
+#include
#include "taosdef.h"
// ----------------- For variable data types such as TSDB_DATA_TYPE_BINARY and TSDB_DATA_TYPE_NCHAR
diff --git a/src/kit/shell/src/shellCheck.c b/src/kit/shell/src/shellCheck.c
index b88244ea01..8f7f475536 100644
--- a/src/kit/shell/src/shellCheck.c
+++ b/src/kit/shell/src/shellCheck.c
@@ -142,7 +142,7 @@ static void *shellCheckThreadFp(void *arg) {
taos_free_result(pSql);
}
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
return NULL;
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 42992b782f..0e468347ee 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -68,20 +68,17 @@ enum TEST_MODE {
INVAID_TEST
};
-enum QUERY_MODE {
- SYNC_QUERY_MODE, // 0
- ASYNC_QUERY_MODE, // 1
- INVALID_MODE
-};
+#define MAX_RECORDS_PER_REQ 32766
#define MAX_SQL_SIZE 65536
#define BUFFER_SIZE (65536*2)
+#define COND_BUF_LEN BUFFER_SIZE - 30
#define MAX_USERNAME_SIZE 64
#define MAX_PASSWORD_SIZE 64
#define MAX_DB_NAME_SIZE 64
#define MAX_HOSTNAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
-#define MAX_DATA_SIZE (16*1024)
+#define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space
#define MAX_NUM_DATATYPE 10
#define OPT_ABORT 1 /* –abort */
#define STRING_LEN 60000
@@ -118,8 +115,8 @@ typedef enum TALBE_EXISTS_EN {
} TALBE_EXISTS_EN;
enum MODE {
- SYNC,
- ASYNC,
+ SYNC_MODE,
+ ASYNC_MODE,
MODE_BUT
};
@@ -188,7 +185,7 @@ typedef struct {
/* Used by main to communicate with parse_opt. */
typedef struct SArguments_S {
char * metaFile;
- int test_mode;
+ uint32_t test_mode;
char * host;
uint16_t port;
char * user;
@@ -205,31 +202,31 @@ typedef struct SArguments_S {
bool verbose_print;
bool performance_print;
char * output_file;
- int query_mode;
+ bool async_mode;
char * datatype[MAX_NUM_DATATYPE + 1];
- int len_of_binary;
- int num_of_CPR;
- int num_of_threads;
- int64_t insert_interval;
+ uint32_t len_of_binary;
+ uint32_t num_of_CPR;
+ uint32_t num_of_threads;
+ uint64_t insert_interval;
int64_t query_times;
- int64_t interlace_rows;
- int64_t num_of_RPR; // num_of_records_per_req
- int64_t max_sql_len;
- int64_t num_of_tables;
- int64_t num_of_DPT;
+ uint64_t interlace_rows;
+ uint64_t num_of_RPR; // num_of_records_per_req
+ uint64_t max_sql_len;
+ uint64_t num_of_tables;
+ uint64_t num_of_DPT;
int abort;
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
- int method_of_delete;
+ uint32_t method_of_delete;
char ** arg_list;
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
} SArguments;
typedef struct SColumn_S {
- char field[TSDB_COL_NAME_LEN + 1];
- char dataType[MAX_TB_NAME_SIZE];
- int dataLen;
+ char field[TSDB_COL_NAME_LEN + 1];
+ char dataType[MAX_TB_NAME_SIZE];
+ uint32_t dataLen;
char note[128];
} StrColumn;
@@ -237,50 +234,50 @@ typedef struct SSuperTable_S {
char sTblName[MAX_TB_NAME_SIZE+1];
int64_t childTblCount;
bool childTblExists; // 0: no, 1: yes
- int64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
- int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
+ uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
+ uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
char childTblPrefix[MAX_TB_NAME_SIZE];
char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample
- char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful
+ char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest
int64_t childTblLimit;
- int64_t childTblOffset;
+ uint64_t childTblOffset;
// int multiThreadWriteOneTbl; // 0: no, 1: yes
- int64_t interlaceRows; //
+ uint64_t interlaceRows; //
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
- int64_t maxSqlLen; //
+ uint64_t maxSqlLen; //
- int64_t insertInterval; // insert interval, will override global insert interval
- int64_t insertRows;
+ uint64_t insertInterval; // insert interval, will override global insert interval
+ uint64_t insertRows;
int64_t timeStampStep;
char startTimestamp[MAX_TB_NAME_SIZE];
char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json
char sampleFile[MAX_FILE_NAME_LEN+1];
char tagsFile[MAX_FILE_NAME_LEN+1];
- int columnCount;
+ uint32_t columnCount;
StrColumn columns[MAX_COLUMN_COUNT];
- int tagCount;
+ uint32_t tagCount;
StrColumn tags[MAX_TAG_COUNT];
char* childTblName;
char* colsOfCreateChildTable;
- int64_t lenOfOneRow;
- int64_t lenOfTagOfOneRow;
+ uint64_t lenOfOneRow;
+ uint64_t lenOfTagOfOneRow;
char* sampleDataBuf;
//int sampleRowCount;
//int sampleUsePos;
- int tagSource; // 0: rand, 1: tag sample
+ uint32_t tagSource; // 0: rand, 1: tag sample
char* tagDataBuf;
- int tagSampleCount;
- int tagUsePos;
+ uint32_t tagSampleCount;
+ uint32_t tagUsePos;
// statistics
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
} SSuperTable;
typedef struct {
@@ -307,8 +304,8 @@ typedef struct {
typedef struct SDbCfg_S {
// int maxtablesPerVnode;
- int minRows;
- int maxRows;
+ uint32_t minRows; // 0 means default
+ uint32_t maxRows; // 0 means default
int comp;
int walLevel;
int cacheLast;
@@ -327,13 +324,15 @@ typedef struct SDataBase_S {
char dbName[MAX_DB_NAME_SIZE];
bool drop; // 0: use exists, 1: if exists, drop then new create
SDbCfg dbCfg;
- int64_t superTblCount;
+ uint64_t superTblCount;
SSuperTable superTbls[MAX_SUPER_TABLE_COUNT];
} SDataBase;
typedef struct SDbs_S {
char cfgDir[MAX_FILE_NAME_LEN+1];
char host[MAX_HOSTNAME_SIZE];
+ struct sockaddr_in serv_addr;
+
uint16_t port;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
@@ -341,106 +340,107 @@ typedef struct SDbs_S {
bool use_metric;
bool insert_only;
bool do_aggreFunc;
- bool queryMode;
+ bool asyncMode;
- int threadCount;
- int threadCountByCreateTbl;
- int dbCount;
+ uint32_t threadCount;
+ uint32_t threadCountByCreateTbl;
+ uint32_t dbCount;
SDataBase db[MAX_DB_COUNT];
// statistics
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
} SDbs;
typedef struct SpecifiedQueryInfo_S {
- int64_t queryInterval; // 0: unlimit > 0 loop/s
- int64_t concurrent;
- int64_t sqlCount;
- int mode; // 0: sync, 1: async
- int64_t subscribeInterval; // ms
- int64_t queryTimes;
+ uint64_t queryInterval; // 0: unlimit > 0 loop/s
+ uint64_t concurrent;
+ uint64_t sqlCount;
+ uint32_t asyncMode; // 0: sync, 1: async
+ uint64_t subscribeInterval; // ms
+ uint64_t queryTimes;
int subscribeRestart;
int subscribeKeepProgress;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
- int64_t totalQueried;
+ uint64_t totalQueried;
} SpecifiedQueryInfo;
typedef struct SuperQueryInfo_S {
char sTblName[MAX_TB_NAME_SIZE+1];
- int64_t queryInterval; // 0: unlimit > 0 loop/s
- int threadCnt;
- int mode; // 0: sync, 1: async
- int64_t subscribeInterval; // ms
+ uint64_t queryInterval; // 0: unlimit > 0 loop/s
+ uint32_t threadCnt;
+ uint32_t asyncMode; // 0: sync, 1: async
+ uint64_t subscribeInterval; // ms
int subscribeRestart;
int subscribeKeepProgress;
- int64_t queryTimes;
- int64_t childTblCount;
+ uint64_t queryTimes;
+ uint64_t childTblCount;
char childTblPrefix[MAX_TB_NAME_SIZE];
- int64_t sqlCount;
+ uint64_t sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
char* childTblName;
- int64_t totalQueried;
+ uint64_t totalQueried;
} SuperQueryInfo;
typedef struct SQueryMetaInfo_S {
char cfgDir[MAX_FILE_NAME_LEN+1];
char host[MAX_HOSTNAME_SIZE];
uint16_t port;
+ struct sockaddr_in serv_addr;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
char dbName[MAX_DB_NAME_SIZE+1];
- char queryMode[MAX_TB_NAME_SIZE]; // taosc, restful
+ char queryMode[MAX_TB_NAME_SIZE]; // taosc, rest
SpecifiedQueryInfo specifiedQueryInfo;
SuperQueryInfo superQueryInfo;
- int64_t totalQueried;
+ uint64_t totalQueried;
} SQueryMetaInfo;
typedef struct SThreadInfo_S {
- TAOS *taos;
- int threadID;
- char db_name[MAX_DB_NAME_SIZE+1];
- uint32_t time_precision;
- char fp[4096];
- char tb_prefix[MAX_TB_NAME_SIZE];
- int64_t start_table_from;
- int64_t end_table_to;
- int64_t ntables;
- int64_t data_of_rate;
- int64_t start_time;
- char* cols;
- bool use_metric;
+ TAOS * taos;
+ int threadID;
+ char db_name[MAX_DB_NAME_SIZE+1];
+ uint32_t time_precision;
+ char fp[4096];
+ char tb_prefix[MAX_TB_NAME_SIZE];
+ uint64_t start_table_from;
+ uint64_t end_table_to;
+ uint64_t ntables;
+ uint64_t data_of_rate;
+ int64_t start_time;
+ char* cols;
+ bool use_metric;
SSuperTable* superTblInfo;
// for async insert
- tsem_t lock_sem;
- int64_t counter;
+ tsem_t lock_sem;
+ int64_t counter;
uint64_t st;
uint64_t et;
- int64_t lastTs;
+ uint64_t lastTs;
// sample data
- int64_t samplePos;
+ int64_t samplePos;
// statistics
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
// insert delay statistics
- int64_t cntDelay;
- int64_t totalDelay;
- int64_t avgDelay;
- int64_t maxDelay;
- int64_t minDelay;
+ uint64_t cntDelay;
+ uint64_t totalDelay;
+ uint64_t avgDelay;
+ uint64_t maxDelay;
+ uint64_t minDelay;
// query
- int64_t querySeq; // sequence number of sql command
+ uint64_t querySeq; // sequence number of sql command
} threadInfo;
#ifdef WINDOWS
@@ -519,6 +519,8 @@ static int taosRandom()
static int createDatabasesAndStables();
static void createChildTables();
static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet);
+static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
+ char* sqlstr, char *resultFile);
/* ************ Global variables ************ */
@@ -566,7 +568,7 @@ SArguments g_args = {
1, // query_times
0, // interlace_rows;
30000, // num_of_RPR
- 1024000, // max_sql_len
+ (1024*1024), // max_sql_len
10000, // num_of_tables
10000, // num_of_DPT
0, // abort
@@ -663,11 +665,11 @@ static void printHelp() {
printf("%s%s%s%s\n", indent, "-q", indent,
"Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC.");
printf("%s%s%s%s\n", indent, "-b", indent,
- "The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP.");
+ "The data_type of columns, default: INT,INT,INT,INT.");
printf("%s%s%s%s\n", indent, "-w", indent,
"The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
printf("%s%s%s%s\n", indent, "-l", indent,
- "The number of columns per record. Default is 10.");
+ "The number of columns per record. Default is 4.");
printf("%s%s%s%s\n", indent, "-T", indent,
"The number of threads. Default is 10.");
printf("%s%s%s%s\n", indent, "-i", indent,
@@ -722,7 +724,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
errorPrint("%s", "\n\t-c need a valid path following!\n");
exit(EXIT_FAILURE);
}
- tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
+ tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
} else if (strcmp(argv[i], "-h") == 0) {
if (argc == i+1) {
@@ -768,48 +770,49 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
arguments->sqlFile = argv[++i];
} else if (strcmp(argv[i], "-q") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, 1: ASYNC. Default is SYNC.\n");
exit(EXIT_FAILURE);
}
- arguments->query_mode = atoi(argv[++i]);
+ arguments->async_mode = atoi(argv[++i]);
} else if (strcmp(argv[i], "-T") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-T need a number following!\n");
exit(EXIT_FAILURE);
}
arguments->num_of_threads = atoi(argv[++i]);
} else if (strcmp(argv[i], "-i") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-i need a number following!\n");
exit(EXIT_FAILURE);
}
arguments->insert_interval = atoi(argv[++i]);
} else if (strcmp(argv[i], "-qt") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))
+ || (atoi(argv[i+1]) <= 0)) {
printHelp();
- errorPrint("%s", "\n\t-qt need a number following!\n");
+ errorPrint("%s", "\n\t-qt need a valid (>0) number following!\n");
exit(EXIT_FAILURE);
}
arguments->query_times = atoi(argv[++i]);
} else if (strcmp(argv[i], "-B") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-B need a number following!\n");
exit(EXIT_FAILURE);
}
arguments->interlace_rows = atoi(argv[++i]);
} else if (strcmp(argv[i], "-r") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-r need a number following!\n");
exit(EXIT_FAILURE);
@@ -964,9 +967,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
} else if (strcmp(argv[i], "-D") == 0) {
arguments->method_of_delete = atoi(argv[++i]);
- if (arguments->method_of_delete < 0
- || arguments->method_of_delete > 3) {
- arguments->method_of_delete = 0;
+ if (arguments->method_of_delete > 3) {
+ errorPrint("%s", "\n\t-D need a valid (0~3) number following!\n");
+ exit(EXIT_FAILURE);
}
} else if ((strcmp(argv[i], "--version") == 0) ||
(strcmp(argv[i], "-V") == 0)){
@@ -1001,17 +1004,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
break;
printf("\n");
}
- printf("# Insertion interval: %"PRId64"\n",
+ printf("# Insertion interval: %"PRIu64"\n",
arguments->insert_interval);
- printf("# Number of records per req: %"PRId64"\n",
+ printf("# Number of records per req: %"PRIu64"\n",
arguments->num_of_RPR);
- printf("# Max SQL length: %"PRId64"\n",
+ printf("# Max SQL length: %"PRIu64"\n",
arguments->max_sql_len);
printf("# Length of Binary: %d\n", arguments->len_of_binary);
printf("# Number of Threads: %d\n", arguments->num_of_threads);
- printf("# Number of Tables: %"PRId64"\n",
+ printf("# Number of Tables: %"PRIu64"\n",
arguments->num_of_tables);
- printf("# Number of Data per Table: %"PRId64"\n",
+ printf("# Number of Data per Table: %"PRIu64"\n",
arguments->num_of_DPT);
printf("# Database name: %s\n", arguments->database);
printf("# Table prefix: %s\n", arguments->tb_prefix);
@@ -1069,7 +1072,7 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
if (code != 0) {
if (!quiet) {
debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
- errorPrint("Failed to run %s, reason: %s\n", command, taos_errstr(res));
+ errorPrint("Failed to execute %s, reason: %s\n", command, taos_errstr(res));
}
taos_free_result(res);
//taos_close(taos);
@@ -1086,27 +1089,33 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
return 0;
}
-static void getResult(TAOS_RES *res, char* resultFileName) {
+static void appendResultBufToFile(char *resultBuf, char *resultFile)
+{
+ FILE *fp = NULL;
+ if (resultFile[0] != 0) {
+ fp = fopen(resultFile, "at");
+ if (fp == NULL) {
+ errorPrint(
+ "%s() LN%d, failed to open result file: %s, result will not save to file\n",
+ __func__, __LINE__, resultFile);
+ return;
+ }
+ }
+
+ fprintf(fp, "%s", resultBuf);
+ tmfclose(fp);
+}
+
+static void appendResultToFile(TAOS_RES *res, char* resultFile) {
TAOS_ROW row = NULL;
int num_rows = 0;
int num_fields = taos_field_count(res);
TAOS_FIELD *fields = taos_fetch_fields(res);
- FILE *fp = NULL;
- if (resultFileName[0] != 0) {
- fp = fopen(resultFileName, "at");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open result file: %s, result will not save to file\n",
- __func__, __LINE__, resultFileName);
- }
- }
-
char* databuf = (char*) calloc(1, 100*1024*1024);
if (databuf == NULL) {
errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
__func__, __LINE__);
- if (fp)
- fclose(fp);
return ;
}
@@ -1116,7 +1125,7 @@ static void getResult(TAOS_RES *res, char* resultFileName) {
// fetch the records row by row
while((row = taos_fetch_row(res))) {
if (totalLen >= 100*1024*1024 - 32000) {
- if (fp) fprintf(fp, "%s", databuf);
+ appendResultBufToFile(databuf, resultFile);
totalLen = 0;
memset(databuf, 0, 100*1024*1024);
}
@@ -1128,22 +1137,36 @@ static void getResult(TAOS_RES *res, char* resultFileName) {
totalLen += len;
}
- if (fp) fprintf(fp, "%s", databuf);
- tmfclose(fp);
+ appendResultBufToFile(databuf, resultFile);
free(databuf);
}
-static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) {
- TAOS_RES *res = taos_query(taos, command);
- if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- return;
- }
+static void selectAndGetResult(threadInfo *pThreadInfo, char *command, char* resultFileName) {
+ if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) {
+ TAOS_RES *res = taos_query(pThreadInfo->taos, command);
+ if (res == NULL || taos_errno(res) != 0) {
+ errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n",
+ __func__, __LINE__, command, taos_errstr(res));
+ taos_free_result(res);
+ return;
+ }
- getResult(res, resultFileName);
- taos_free_result(res);
+ appendResultToFile(res, resultFileName);
+ taos_free_result(res);
+
+ } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
+ int retCode = postProceSql(
+ g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
+ resultFileName);
+ if (0 != retCode) {
+ printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
+ }
+
+ } else {
+ errorPrint("%s() LN%d, unknown query mode: %s\n",
+ __func__, __LINE__, g_queryInfo.queryMode);
+ }
}
static int32_t rand_bool(){
@@ -1188,13 +1211,31 @@ static float rand_float(){
return randfloat[cursor];
}
+#if 0
+static const char charNum[] = "0123456789";
+
+static void nonrand_string(char *, int) __attribute__ ((unused)); // reserve for debugging purpose
+static void nonrand_string(char *str, int size)
+{
+ str[0] = 0;
+ if (size > 0) {
+ int n;
+ for (n = 0; n < size; n++) {
+ str[n] = charNum[n % 10];
+ }
+ str[n] = 0;
+ }
+}
+#endif
+
static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";
+
static void rand_string(char *str, int size) {
str[0] = 0;
if (size > 0) {
//--size;
int n;
- for (n = 0; n < size - 1; n++) {
+ for (n = 0; n < size; n++) {
int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1);
str[n] = charset[key];
}
@@ -1249,11 +1290,11 @@ static int printfInsertMeta() {
printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl);
- printf("top insert interval: \033[33m%"PRId64"\033[0m\n",
+ printf("top insert interval: \033[33m%"PRIu64"\033[0m\n",
g_args.insert_interval);
- printf("number of records per req: \033[33m%"PRId64"\033[0m\n",
+ printf("number of records per req: \033[33m%"PRIu64"\033[0m\n",
g_args.num_of_RPR);
- printf("max sql length: \033[33m%"PRId64"\033[0m\n",
+ printf("max sql length: \033[33m%"PRIu64"\033[0m\n",
g_args.max_sql_len);
printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
@@ -1315,10 +1356,10 @@ static int printfInsertMeta() {
}
}
- printf(" super table count: \033[33m%"PRId64"\033[0m\n",
+ printf(" super table count: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTblCount);
- for (int64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- printf(" super table[\033[33m%"PRId64"\033[0m]:\n", j);
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j);
printf(" stbName: \033[33m%s\033[0m\n",
g_Dbs.db[i].superTbls[j].sTblName);
@@ -1339,7 +1380,7 @@ static int printfInsertMeta() {
printf(" childTblExists: \033[33m%s\033[0m\n", "error");
}
- printf(" childTblCount: \033[33m%"PRId64"\033[0m\n",
+ printf(" childTblCount: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblCount);
printf(" childTblPrefix: \033[33m%s\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblPrefix);
@@ -1351,11 +1392,11 @@ static int printfInsertMeta() {
printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblLimit);
}
- if (g_Dbs.db[i].superTbls[j].childTblOffset >= 0) {
- printf(" childTblOffset: \033[33m%"PRId64"\033[0m\n",
+ if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) {
+ printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblOffset);
}
- printf(" insertRows: \033[33m%"PRId64"\033[0m\n",
+ printf(" insertRows: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].insertRows);
/*
if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
@@ -1364,11 +1405,11 @@ static int printfInsertMeta() {
printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
}
*/
- printf(" interlaceRows: \033[33m%"PRId64"\033[0m\n",
+ printf(" interlaceRows: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
- printf(" stable insert interval: \033[33m%"PRId64"\033[0m\n",
+ printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].insertInterval);
}
@@ -1376,7 +1417,7 @@ static int printfInsertMeta() {
g_Dbs.db[i].superTbls[j].disorderRange);
printf(" disorderRatio: \033[33m%d\033[0m\n",
g_Dbs.db[i].superTbls[j].disorderRatio);
- printf(" maxSqlLen: \033[33m%"PRId64"\033[0m\n",
+ printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].maxSqlLen);
printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n",
g_Dbs.db[i].superTbls[j].timeStampStep);
@@ -1442,8 +1483,8 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
- fprintf(fp, "number of records per req: %"PRId64"\n", g_args.num_of_RPR);
- fprintf(fp, "max sql length: %"PRId64"\n", g_args.max_sql_len);
+ fprintf(fp, "number of records per req: %"PRIu64"\n", g_args.num_of_RPR);
+ fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len);
fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
for (int i = 0; i < g_Dbs.dbCount; i++) {
@@ -1500,7 +1541,7 @@ static void printfInsertMetaToFile(FILE* fp) {
}
}
- fprintf(fp, " super table count: %"PRId64"\n", g_Dbs.db[i].superTblCount);
+ fprintf(fp, " super table count: %"PRIu64"\n", g_Dbs.db[i].superTblCount);
for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
fprintf(fp, " super table[%d]:\n", j);
@@ -1522,7 +1563,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " childTblExists: %s\n", "error");
}
- fprintf(fp, " childTblCount: %"PRId64"\n",
+ fprintf(fp, " childTblCount: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].childTblCount);
fprintf(fp, " childTblPrefix: %s\n",
g_Dbs.db[i].superTbls[j].childTblPrefix);
@@ -1530,12 +1571,12 @@ static void printfInsertMetaToFile(FILE* fp) {
g_Dbs.db[i].superTbls[j].dataSource);
fprintf(fp, " insertMode: %s\n",
g_Dbs.db[i].superTbls[j].insertMode);
- fprintf(fp, " insertRows: %"PRId64"\n",
+ fprintf(fp, " insertRows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].insertRows);
- fprintf(fp, " interlace rows: %"PRId64"\n",
+ fprintf(fp, " interlace rows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
- fprintf(fp, " stable insert interval: %"PRId64"\n",
+ fprintf(fp, " stable insert interval: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].insertInterval);
}
/*
@@ -1545,11 +1586,11 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " multiThreadWriteOneTbl: yes\n");
}
*/
- fprintf(fp, " interlaceRows: %"PRId64"\n",
+ fprintf(fp, " interlaceRows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange);
fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio);
- fprintf(fp, " maxSqlLen: %"PRId64"\n",
+ fprintf(fp, " maxSqlLen: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].maxSqlLen);
fprintf(fp, " timeStampStep: %"PRId64"\n",
@@ -1609,64 +1650,68 @@ static void printfQueryMeta() {
printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName);
printf("\n");
- printf("specified table query info: \n");
- printf("query interval: \033[33m%"PRId64" ms\033[0m\n",
- g_queryInfo.specifiedQueryInfo.queryInterval);
- printf("top query times:\033[33m%"PRId64"\033[0m\n", g_args.query_times);
- printf("concurrent: \033[33m%"PRId64"\033[0m\n",
- g_queryInfo.specifiedQueryInfo.concurrent);
- printf("sqlCount: \033[33m%"PRId64"\033[0m\n",
+
+ if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) {
+ printf("specified table query info: \n");
+ printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.sqlCount);
- printf("specified tbl query times:\n");
- printf(" \033[33m%"PRId64"\033[0m\n",
+ if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) {
+ printf("specified tbl query times:\n");
+ printf(" \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.queryTimes);
-
- if (SUBSCRIBE_TEST == g_args.test_mode) {
- printf("mod: \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.mode);
- printf("interval: \033[33m%"PRId64"\033[0m\n",
+ printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.queryInterval);
+ printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
+ printf("concurrent: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.concurrent);
+ printf("mod: \033[33m%s\033[0m\n",
+ (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync");
+ printf("interval: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeInterval);
- printf("restart: \033[33m%d\033[0m\n",
+ printf("restart: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeRestart);
- printf("keepProgress: \033[33m%d\033[0m\n",
+ printf("keepProgress: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
- }
- for (int64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- printf(" sql[%"PRId64"]: \033[33m%s\033[0m\n",
- i, g_queryInfo.specifiedQueryInfo.sql[i]);
- }
- printf("\n");
- printf("super table query info:\n");
- printf("query interval: \033[33m%"PRId64"\033[0m\n",
- g_queryInfo.superQueryInfo.queryInterval);
- printf("threadCnt: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.threadCnt);
- printf("childTblCount: \033[33m%"PRId64"\033[0m\n",
- g_queryInfo.superQueryInfo.childTblCount);
- printf("stable name: \033[33m%s\033[0m\n",
- g_queryInfo.superQueryInfo.sTblName);
- printf("stb query times:\033[33m%"PRId64"\033[0m\n",
- g_queryInfo.superQueryInfo.queryTimes);
+ for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n",
+ i, g_queryInfo.specifiedQueryInfo.sql[i]);
+ }
+ printf("\n");
+ }
- if (SUBSCRIBE_TEST == g_args.test_mode) {
- printf("mod: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.mode);
- printf("interval: \033[33m%"PRId64"\033[0m\n",
- g_queryInfo.superQueryInfo.subscribeInterval);
- printf("restart: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.subscribeRestart);
- printf("keepProgress: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.subscribeKeepProgress);
- }
-
- printf("sqlCount: \033[33m%"PRId64"\033[0m\n",
+ printf("super table query info:\n");
+ printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.sqlCount);
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- printf(" sql[%d]: \033[33m%s\033[0m\n",
- i, g_queryInfo.superQueryInfo.sql[i]);
+
+ if (g_queryInfo.superQueryInfo.sqlCount > 0) {
+ printf("query interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.queryInterval);
+ printf("threadCnt: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.threadCnt);
+ printf("childTblCount: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.childTblCount);
+ printf("stable name: \033[33m%s\033[0m\n",
+ g_queryInfo.superQueryInfo.sTblName);
+ printf("stb query times:\033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.queryTimes);
+
+ printf("mod: \033[33m%s\033[0m\n",
+ (g_queryInfo.superQueryInfo.asyncMode)?"async":"sync");
+ printf("interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeInterval);
+ printf("restart: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeRestart);
+ printf("keepProgress: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeKeepProgress);
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ printf(" sql[%d]: \033[33m%s\033[0m\n",
+ i, g_queryInfo.superQueryInfo.sql[i]);
+ }
+ printf("\n");
+ }
}
- printf("\n");
SHOW_PARSE_RESULT_END();
}
@@ -1914,13 +1959,13 @@ static void printfQuerySystemInfo(TAOS * taos) {
// show variables
res = taos_query(taos, "show variables;");
- //getResult(res, filename);
+ //appendResultToFile(res, filename);
xDumpResultToFile(filename, res);
// show dnodes
res = taos_query(taos, "show dnodes;");
xDumpResultToFile(filename, res);
- //getResult(res, filename);
+ //appendResultToFile(res, filename);
// show databases
res = taos_query(taos, "show databases;");
@@ -1955,14 +2000,13 @@ static void printfQuerySystemInfo(TAOS * taos) {
free(dbInfos);
}
-static int postProceSql(char* host, uint16_t port, char* sqlstr)
+static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
+ char* sqlstr, char *resultFile)
{
char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s";
char *url = "/rest/sql";
- struct hostent *server;
- struct sockaddr_in serv_addr;
int bytes, sent, received, req_str_len, resp_len;
char *request_buf;
char response_buf[RESP_BUF_LEN];
@@ -2011,27 +2055,7 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr)
ERROR_EXIT("ERROR opening socket");
}
- server = gethostbyname(host);
- if (server == NULL) {
- free(request_buf);
- ERROR_EXIT("ERROR, no such host");
- }
-
- debugPrint("h_name: %s\nh_addretype: %s\nh_length: %d\n",
- server->h_name,
- (server->h_addrtype == AF_INET)?"ipv4":"ipv6",
- server->h_length);
-
- memset(&serv_addr, 0, sizeof(serv_addr));
- serv_addr.sin_family = AF_INET;
- serv_addr.sin_port = htons(rest_port);
-#ifdef WINDOWS
- serv_addr.sin_addr.s_addr = inet_addr(host);
-#else
- memcpy(&serv_addr.sin_addr.s_addr,server->h_addr,server->h_length);
-#endif
-
- int retConn = connect(sockfd,(struct sockaddr *)&serv_addr,sizeof(serv_addr));
+ int retConn = connect(sockfd, (struct sockaddr *)pServAddr, sizeof(struct sockaddr));
debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn);
if (retConn < 0) {
free(request_buf);
@@ -2113,6 +2137,10 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr)
response_buf[RESP_BUF_LEN - 1] = '\0';
printf("Response:\n%s\n", response_buf);
+ if (resultFile) {
+ appendResultBufToFile(response_buf, resultFile);
+ }
+
free(request_buf);
#ifdef WINDOWS
closesocket(sockfd);
@@ -2291,7 +2319,7 @@ static int calcRowLen(SSuperTable* superTbls) {
static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* dbName, char* sTblName, char** childTblNameOfSuperTbl,
- int64_t* childTblCountOfSuperTbl, int64_t limit, int64_t offset) {
+ uint64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) {
char command[BUFFER_SIZE] = "\0";
char limitBuf[100] = "\0";
@@ -2302,7 +2330,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* childTblName = *childTblNameOfSuperTbl;
if (offset >= 0) {
- snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRId64"",
+ snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"",
limit, offset);
}
@@ -2368,11 +2396,11 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
char* sTblName, char** childTblNameOfSuperTbl,
- int64_t* childTblCountOfSuperTbl) {
+ uint64_t* childTblCountOfSuperTbl) {
return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName,
childTblNameOfSuperTbl, childTblCountOfSuperTbl,
- -1, -1);
+ -1, 0);
}
static int getSuperTableFromServer(TAOS * taos, char* dbName,
@@ -2708,7 +2736,7 @@ static int createDatabasesAndStables() {
printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName);
}
- debugPrint("%s() LN%d supertbl count:%"PRId64"\n",
+ debugPrint("%s() LN%d supertbl count:%"PRIu64"\n",
__func__, __LINE__, g_Dbs.db[i].superTblCount);
int validStbCount = 0;
@@ -2767,15 +2795,15 @@ static void* createTable(void *sarg)
int len = 0;
int batchNum = 0;
- verbosePrint("%s() LN%d: Creating table from %"PRId64" to %"PRId64"\n",
+ verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n",
__func__, __LINE__,
pThreadInfo->start_table_from, pThreadInfo->end_table_to);
- for (int64_t i = pThreadInfo->start_table_from;
+ for (uint64_t i = pThreadInfo->start_table_from;
i <= pThreadInfo->end_table_to; i++) {
if (0 == g_Dbs.use_metric) {
snprintf(buffer, buff_len,
- "create table if not exists %s.%s%"PRId64" %s;",
+ "create table if not exists %s.%s%"PRIu64" %s;",
pThreadInfo->db_name,
g_args.tb_prefix, i,
pThreadInfo->cols);
@@ -2806,7 +2834,7 @@ static void* createTable(void *sarg)
}
len += snprintf(buffer + len,
buff_len - len,
- "if not exists %s.%s%"PRId64" using %s.%s tags %s ",
+ "if not exists %s.%s%"PRIu64" using %s.%s tags %s ",
pThreadInfo->db_name, superTblInfo->childTblPrefix,
i, pThreadInfo->db_name,
superTblInfo->sTblName, tagsValBuf);
@@ -2830,7 +2858,7 @@ static void* createTable(void *sarg)
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] already create %"PRId64" - %"PRId64" tables\n",
+ printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n",
pThreadInfo->threadID, pThreadInfo->start_table_from, i);
lastPrintTime = currentPrintTime;
}
@@ -2848,7 +2876,7 @@ static void* createTable(void *sarg)
}
static int startMultiThreadCreateChildTable(
- char* cols, int threads, int64_t startFrom, int64_t ntables,
+ char* cols, int threads, uint64_t startFrom, uint64_t ntables,
char* db_name, SSuperTable* superTblInfo) {
pthread_t *pids = malloc(threads * sizeof(pthread_t));
@@ -2863,13 +2891,13 @@ static int startMultiThreadCreateChildTable(
threads = 1;
}
- int64_t a = ntables / threads;
+ uint64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
- int64_t b = 0;
+ uint64_t b = 0;
b = ntables % threads;
for (int64_t i = 0; i < threads; i++) {
@@ -2898,7 +2926,7 @@ static int startMultiThreadCreateChildTable(
startFrom = t_info->end_table_to + 1;
t_info->use_metric = true;
t_info->cols = cols;
- t_info->minDelay = INT64_MAX;
+ t_info->minDelay = UINT64_MAX;
pthread_create(pids + i, NULL, createTable, t_info);
}
@@ -2964,7 +2992,7 @@ static void createChildTables() {
snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")");
- verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n",
+ verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRIu64" schema: %s\n",
__func__, __LINE__,
g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf);
startMultiThreadCreateChildTable(
@@ -3092,7 +3120,7 @@ static int readSampleFromCsvFileToMem(
}
if (readLen > superTblInfo->lenOfOneRow) {
- printf("sample row len[%d] overflow define schema len[%"PRId64"], so discard this row\n",
+ printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n",
(int32_t)readLen, superTblInfo->lenOfOneRow);
continue;
}
@@ -3335,7 +3363,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (threads2 && threads2->type == cJSON_Number) {
g_Dbs.threadCountByCreateTbl = threads2->valueint;
} else if (!threads2) {
- g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
+ g_Dbs.threadCountByCreateTbl = 1;
} else {
errorPrint("%s() LN%d, failed to read json, threads2 not found\n",
__func__, __LINE__);
@@ -3344,6 +3372,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval");
if (gInsertInterval && gInsertInterval->type == cJSON_Number) {
+ if (gInsertInterval->valueint <0) {
+ errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_args.insert_interval = gInsertInterval->valueint;
} else if (!gInsertInterval) {
g_args.insert_interval = 0;
@@ -3355,13 +3388,19 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
+ if (interlaceRows->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+
+ }
g_args.interlace_rows = interlaceRows->valueint;
// rows per table need be less than insert batch
if (g_args.interlace_rows > g_args.num_of_RPR) {
- printf("NOTICE: interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n",
+ printf("NOTICE: interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
g_args.interlace_rows, g_args.num_of_RPR);
- printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n",
+ printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
g_args.num_of_RPR);
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
@@ -3377,9 +3416,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len");
if (maxSqlLen && maxSqlLen->type == cJSON_Number) {
+ if (maxSqlLen->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_args.max_sql_len = maxSqlLen->valueint;
} else if (!maxSqlLen) {
- g_args.max_sql_len = 1024000;
+ g_args.max_sql_len = (1024*1024);
} else {
errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
__func__, __LINE__);
@@ -3388,9 +3432,16 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req");
if (numRecPerReq && numRecPerReq->type == cJSON_Number) {
+ if (numRecPerReq->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) {
+ numRecPerReq->valueint = MAX_RECORDS_PER_REQ;
+ }
g_args.num_of_RPR = numRecPerReq->valueint;
} else if (!numRecPerReq) {
- g_args.num_of_RPR = INT64_MAX;
+ g_args.num_of_RPR = MAX_RECORDS_PER_REQ;
} else {
errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n",
__func__, __LINE__);
@@ -3550,7 +3601,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (minRows && minRows->type == cJSON_Number) {
g_Dbs.db[i].dbCfg.minRows = minRows->valueint;
} else if (!minRows) {
- g_Dbs.db[i].dbCfg.minRows = -1;
+ g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default
} else {
printf("ERROR: failed to read json, minRows not found\n");
goto PARSE_OVER;
@@ -3560,7 +3611,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (maxRows && maxRows->type == cJSON_Number) {
g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint;
} else if (!maxRows) {
- g_Dbs.db[i].dbCfg.maxRows = -1;
+ g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default
} else {
printf("ERROR: failed to read json, maxRows not found\n");
goto PARSE_OVER;
@@ -3705,7 +3756,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
- errorPrint("%s() LN%d, failed to read json, childtable_count not found\n",
+ errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
@@ -3724,7 +3775,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
- cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , restful
+ cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest
if (insertMode && insertMode->type == cJSON_String
&& insertMode->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].insertMode,
@@ -3859,12 +3910,17 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
*/
cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
+ if (interlaceRows->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint;
// rows per table need be less than insert batch
if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) {
- printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n",
+ printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR);
- printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n",
+ printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
g_args.num_of_RPR);
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
@@ -3907,6 +3963,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
if (insertRows && insertRows->type == cJSON_Number) {
+ if (insertRows->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
} else if (!insertRows) {
g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
@@ -3919,8 +3980,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* insertInterval = cJSON_GetObjectItem(stbInfo, "insert_interval");
if (insertInterval && insertInterval->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint;
+ if (insertInterval->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
} else if (!insertInterval) {
- verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRId64".\n",
+ verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRIu64".\n",
__func__, __LINE__, g_args.insert_interval);
g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval;
} else {
@@ -4001,6 +4067,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times");
if (gQueryTimes && gQueryTimes->type == cJSON_Number) {
+ if (gQueryTimes->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ __func__, __LINE__, gQueryTimes->valueint);
+ goto PARSE_OVER;
+ }
g_args.query_times = gQueryTimes->valueint;
} else if (!gQueryTimes) {
g_args.query_times = 1;
@@ -4028,10 +4099,10 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
- // super_table_query
+ // specified_table_query
cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query");
if (!specifiedQuery) {
- g_queryInfo.specifiedQueryInfo.concurrent = 0;
+ g_queryInfo.specifiedQueryInfo.concurrent = 1;
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (specifiedQuery->type != cJSON_Object) {
printf("ERROR: failed to read json, super_table_query not found\n");
@@ -4047,6 +4118,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery,
"query_times");
if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
+ if (specifiedQueryTimes->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ __func__, __LINE__, specifiedQueryTimes->valueint);
+ goto PARSE_OVER;
+
+ }
g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint;
} else if (!specifiedQueryTimes) {
g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times;
@@ -4058,31 +4135,32 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent");
if (concurrent && concurrent->type == cJSON_Number) {
- g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint;
- if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
- errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n",
- __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount,
+ if (concurrent->valueint <= 0) {
+ errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount,
g_queryInfo.specifiedQueryInfo.concurrent);
goto PARSE_OVER;
}
+ g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint;
} else if (!concurrent) {
g_queryInfo.specifiedQueryInfo.concurrent = 1;
}
- cJSON* mode = cJSON_GetObjectItem(specifiedQuery, "mode");
- if (mode && mode->type == cJSON_String
- && mode->valuestring != NULL) {
- if (0 == strcmp("sync", mode->valuestring)) {
- g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE;
- } else if (0 == strcmp("async", mode->valuestring)) {
- g_queryInfo.specifiedQueryInfo.mode = ASYNC_QUERY_MODE;
+ cJSON* specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode");
+ if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String
+ && specifiedAsyncMode->valuestring != NULL) {
+ if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE;
+ } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE;
} else {
- errorPrint("%s() LN%d, failed to read json, query mode input error\n",
+ errorPrint("%s() LN%d, failed to read json, async mode input error\n",
__func__, __LINE__);
goto PARSE_OVER;
}
} else {
- g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE;
+ g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE;
}
cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval");
@@ -4165,10 +4243,10 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
}
}
- // sub_table_query
+ // super_table_query
cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query");
if (!superQuery) {
- g_queryInfo.superQueryInfo.threadCnt = 0;
+ g_queryInfo.superQueryInfo.threadCnt = 1;
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (superQuery->type != cJSON_Object) {
printf("ERROR: failed to read json, sub_table_query not found\n");
@@ -4184,6 +4262,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times");
if (superQueryTimes && superQueryTimes->type == cJSON_Number) {
+ if (superQueryTimes->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ __func__, __LINE__, superQueryTimes->valueint);
+ goto PARSE_OVER;
+ }
g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint;
} else if (!superQueryTimes) {
g_queryInfo.superQueryInfo.queryTimes = g_args.query_times;
@@ -4195,6 +4278,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* threads = cJSON_GetObjectItem(superQuery, "threads");
if (threads && threads->type == cJSON_Number) {
+ if (threads->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, threads input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+
+ }
g_queryInfo.superQueryInfo.threadCnt = threads->valueint;
} else if (!threads) {
g_queryInfo.superQueryInfo.threadCnt = 1;
@@ -4218,26 +4307,31 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
- cJSON* submode = cJSON_GetObjectItem(superQuery, "mode");
- if (submode && submode->type == cJSON_String
- && submode->valuestring != NULL) {
- if (0 == strcmp("sync", submode->valuestring)) {
- g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE;
- } else if (0 == strcmp("async", submode->valuestring)) {
- g_queryInfo.superQueryInfo.mode = ASYNC_QUERY_MODE;
+ cJSON* superAsyncMode = cJSON_GetObjectItem(superQuery, "mode");
+ if (superAsyncMode && superAsyncMode->type == cJSON_String
+ && superAsyncMode->valuestring != NULL) {
+ if (0 == strcmp("sync", superAsyncMode->valuestring)) {
+ g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE;
+ } else if (0 == strcmp("async", superAsyncMode->valuestring)) {
+ g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE;
} else {
- errorPrint("%s() LN%d, failed to read json, query mode input error\n",
+ errorPrint("%s() LN%d, failed to read json, async mode input error\n",
__func__, __LINE__);
goto PARSE_OVER;
}
} else {
- g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE;
+ g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE;
}
- cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval");
- if (subinterval && subinterval->type == cJSON_Number) {
- g_queryInfo.superQueryInfo.subscribeInterval = subinterval->valueint;
- } else if (!subinterval) {
+ cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval");
+ if (superInterval && superInterval->type == cJSON_Number) {
+ if (superInterval->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, interval input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+ g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint;
+ } else if (!superInterval) {
//printf("failed to read json, subscribe interval no found\n");
//goto PARSE_OVER;
g_queryInfo.superQueryInfo.subscribeInterval = 10000;
@@ -4457,11 +4551,11 @@ static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stb
char *pstr = recBuf;
int64_t maxLen = MAX_DATA_SIZE;
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp);
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ",", timestamp);
for (int i = 0; i < stbInfo->columnCount; i++) {
- if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6))
- || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) {
+ if ((0 == strncasecmp(stbInfo->columns[i].dataType, "BINARY", strlen("BINARY")))
+ || (0 == strncasecmp(stbInfo->columns[i].dataType, "NCHAR", strlen("NCHAR")))) {
if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
errorPrint( "binary or nchar length overflow, max size:%u\n",
(uint32_t)TSDB_MAX_BINARY_LEN);
@@ -4474,47 +4568,47 @@ static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stb
return -1;
}
rand_string(buf, stbInfo->columns[i].dataLen);
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\', ", buf);
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf);
tmfree(buf);
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "int", 3)) {
+ "INT", 3)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d, ", rand_int());
+ "%d,", rand_int());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "bigint", 6)) {
+ "BIGINT", 6)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%"PRId64", ", rand_bigint());
+ "%"PRId64",", rand_bigint());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "float", 5)) {
+ "FLOAT", 5)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%f, ", rand_float());
+ "%f,", rand_float());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "double", 6)) {
+ "DOUBLE", 6)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%f, ", rand_double());
+ "%f,", rand_double());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "smallint", 8)) {
+ "SMALLINT", 8)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d, ", rand_smallint());
+ "%d,", rand_smallint());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "tinyint", strlen("tinyint"))) {
+ "TINYINT", strlen("TINYINT"))) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d, ", rand_tinyint());
+ "%d,", rand_tinyint());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "bool", strlen("bool"))) {
+ "BOOL", strlen("BOOL"))) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d, ", rand_bool());
+ "%d,", rand_bool());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "timestamp", strlen("timestamp"))) {
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%"PRId64", ", rand_bigint());
+ "%"PRId64",", rand_bigint());
} else {
errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType);
return -1;
}
}
- dataLen -= 2;
+ dataLen -= 1;
dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")");
verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf);
@@ -4541,31 +4635,31 @@ static int64_t generateData(char *recBuf, char **data_type,
}
for (int i = 0; i < c; i++) {
- if (strcasecmp(data_type[i % c], "tinyint") == 0) {
- pstr += sprintf(pstr, ", %d", rand_tinyint() );
- } else if (strcasecmp(data_type[i % c], "smallint") == 0) {
- pstr += sprintf(pstr, ", %d", rand_smallint());
- } else if (strcasecmp(data_type[i % c], "int") == 0) {
- pstr += sprintf(pstr, ", %d", rand_int());
- } else if (strcasecmp(data_type[i % c], "bigint") == 0) {
- pstr += sprintf(pstr, ", %" PRId64, rand_bigint());
- } else if (strcasecmp(data_type[i % c], "float") == 0) {
- pstr += sprintf(pstr, ", %10.4f", rand_float());
- } else if (strcasecmp(data_type[i % c], "double") == 0) {
+ if (strcasecmp(data_type[i % c], "TINYINT") == 0) {
+ pstr += sprintf(pstr, ",%d", rand_tinyint() );
+ } else if (strcasecmp(data_type[i % c], "SMALLINT") == 0) {
+ pstr += sprintf(pstr, ",%d", rand_smallint());
+ } else if (strcasecmp(data_type[i % c], "INT") == 0) {
+ pstr += sprintf(pstr, ",%d", rand_int());
+ } else if (strcasecmp(data_type[i % c], "BIGINT") == 0) {
+ pstr += sprintf(pstr, ",%" PRId64, rand_bigint());
+ } else if (strcasecmp(data_type[i % c], "FLOAT") == 0) {
+ pstr += sprintf(pstr, ",%10.4f", rand_float());
+ } else if (strcasecmp(data_type[i % c], "DOUBLE") == 0) {
double t = rand_double();
- pstr += sprintf(pstr, ", %20.8f", t);
- } else if (strcasecmp(data_type[i % c], "bool") == 0) {
+ pstr += sprintf(pstr, ",%20.8f", t);
+ } else if (strcasecmp(data_type[i % c], "BOOL") == 0) {
bool b = taosRandom() & 1;
- pstr += sprintf(pstr, ", %s", b ? "true" : "false");
- } else if (strcasecmp(data_type[i % c], "binary") == 0) {
+ pstr += sprintf(pstr, ",%s", b ? "true" : "false");
+ } else if (strcasecmp(data_type[i % c], "BINARY") == 0) {
char *s = malloc(lenOfBinary);
rand_string(s, lenOfBinary);
- pstr += sprintf(pstr, ", \"%s\"", s);
+ pstr += sprintf(pstr, ",\"%s\"", s);
free(s);
- } else if (strcasecmp(data_type[i % c], "nchar") == 0) {
+ } else if (strcasecmp(data_type[i % c], "NCHAR") == 0) {
char *s = malloc(lenOfBinary);
rand_string(s, lenOfBinary);
- pstr += sprintf(pstr, ", \"%s\"", s);
+ pstr += sprintf(pstr, ",\"%s\"", s);
free(s);
}
@@ -4588,7 +4682,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) {
sampleDataBuf = calloc(
superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1);
if (sampleDataBuf == NULL) {
- errorPrint("%s() LN%d, Failed to calloc %"PRId64" Bytes, reason:%s\n",
+ errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__,
superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE,
strerror(errno));
@@ -4609,7 +4703,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) {
return 0;
}
-static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, int k)
+static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, uint64_t k)
{
int affectedRows;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
@@ -4619,14 +4713,19 @@ static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, int k)
if (superTblInfo) {
if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) {
affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false);
- } else {
- if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) {
+ } else if (0 == strncasecmp(superTblInfo->insertMode, "rest", strlen("rest"))) {
+ if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port,
+ buffer, NULL /* not set result file */)) {
affectedRows = -1;
printf("========restful return fail, threadID[%d]\n",
pThreadInfo->threadID);
} else {
affectedRows = k;
}
+ } else {
+ errorPrint("%s() LN%d: unknown insert mode: %s\n",
+ __func__, __LINE__, superTblInfo->insertMode);
+ affectedRows = 0;
}
} else {
affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false);
@@ -4635,18 +4734,17 @@ static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, int k)
return affectedRows;
}
-static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableSeq)
+static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq)
{
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
if (superTblInfo) {
- if ((superTblInfo->childTblOffset >= 0)
- && (superTblInfo->childTblLimit > 0)) {
+ if (superTblInfo->childTblLimit > 0) {
snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
superTblInfo->childTblName +
(tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN);
} else {
- verbosePrint("[%d] %s() LN%d: from=%"PRId64" count=%"PRId64" seq=%"PRId64"\n",
+ verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRIu64" seq=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
pThreadInfo->start_table_from,
pThreadInfo->ntables, tableSeq);
@@ -4654,31 +4752,31 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableS
superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN);
}
} else {
- snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRId64"",
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"",
g_args.tb_prefix, tableSeq);
}
}
static int64_t generateDataTail(
SSuperTable* superTblInfo,
- int64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows,
+ uint64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows,
int64_t startFrom, int64_t startTime, int64_t *pSamplePos, int64_t *dataLen) {
- int64_t len = 0;
- int ncols_per_record = 1; // count first col ts
+ uint64_t len = 0;
+ uint32_t ncols_per_record = 1; // count first col ts
char *pstr = buffer;
if (superTblInfo == NULL) {
- int datatypeSeq = 0;
+ uint32_t datatypeSeq = 0;
while(g_args.datatype[datatypeSeq]) {
datatypeSeq ++;
ncols_per_record ++;
}
}
- verbosePrint("%s() LN%d batch=%"PRId64"\n", __func__, __LINE__, batch);
+ verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch);
- int64_t k = 0;
+ uint64_t k = 0;
for (k = 0; k < batch;) {
char data[MAX_DATA_SIZE];
memset(data, 0, MAX_DATA_SIZE);
@@ -4753,7 +4851,7 @@ static int64_t generateDataTail(
remainderBufLen -= retLen;
}
- verbosePrint("%s() LN%d len=%"PRId64" k=%"PRId64" \nbuffer=%s\n",
+ verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n",
__func__, __LINE__, len, k, buffer);
startFrom ++;
@@ -4835,12 +4933,12 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
}
static int64_t generateInterlaceDataBuffer(
- char *tableName, int64_t batchPerTbl, int64_t i, int64_t batchPerTblTimes,
- int64_t tableSeq,
+ char *tableName, uint64_t batchPerTbl, uint64_t i, uint64_t batchPerTblTimes,
+ uint64_t tableSeq,
threadInfo *pThreadInfo, char *buffer,
- int64_t insertRows,
+ uint64_t insertRows,
int64_t startTime,
- int64_t *pRemainderBufLen)
+ uint64_t *pRemainderBufLen)
{
assert(buffer);
char *pstr = buffer;
@@ -4853,7 +4951,7 @@ static int64_t generateInterlaceDataBuffer(
return 0;
}
// generate data buffer
- verbosePrint("[%d] %s() LN%d i=%"PRId64" buffer:\n%s\n",
+ verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n",
pThreadInfo->threadID, __func__, __LINE__, i, buffer);
pstr += headLen;
@@ -4861,7 +4959,7 @@ static int64_t generateInterlaceDataBuffer(
int64_t dataLen = 0;
- verbosePrint("[%d] %s() LN%d i=%"PRId64" batchPerTblTimes=%"PRId64" batchPerTbl = %"PRId64"\n",
+ verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%"PRIu64" batchPerTbl = %"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
i, batchPerTblTimes, batchPerTbl);
@@ -4883,7 +4981,7 @@ static int64_t generateInterlaceDataBuffer(
pstr += dataLen;
*pRemainderBufLen -= dataLen;
} else {
- debugPrint("%s() LN%d, generated data tail: %"PRId64", not equal batch per table: %"PRId64"\n",
+ debugPrint("%s() LN%d, generated data tail: %"PRIu64", not equal batch per table: %"PRIu64"\n",
__func__, __LINE__, k, batchPerTbl);
pstr -= headLen;
pstr[0] = '\0';
@@ -4893,7 +4991,7 @@ static int64_t generateInterlaceDataBuffer(
return k;
}
-static int generateProgressiveDataBuffer(
+static int64_t generateProgressiveDataBuffer(
char *tableName,
int64_t tableSeq,
threadInfo *pThreadInfo, char *buffer,
@@ -4938,12 +5036,21 @@ static int generateProgressiveDataBuffer(
return k;
}
+static void printStatPerThread(threadInfo *pThreadInfo)
+{
+ fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows,
+ (double)(pThreadInfo->totalAffectedRows / (pThreadInfo->totalDelay/1000.0)));
+}
+
static void* syncWriteInterlace(threadInfo *pThreadInfo) {
debugPrint("[%d] %s() LN%d: ### interlace write\n",
pThreadInfo->threadID, __func__, __LINE__);
- int64_t insertRows;
- int64_t interlaceRows;
+ uint64_t insertRows;
+ uint64_t interlaceRows;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
@@ -4978,10 +5085,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
// TODO: prompt tbl count multple interlace rows and batch
//
- int64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+ uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
char* buffer = calloc(maxSqlLen, 1);
if (NULL == buffer) {
- errorPrint( "%s() LN%d, Failed to alloc %"PRId64" Bytes, reason:%s\n",
+ errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__, maxSqlLen, strerror(errno));
return NULL;
}
@@ -4993,18 +5100,18 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
int64_t nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
- int insert_interval =
+ uint64_t insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
- int64_t st = 0;
- int64_t et = 0xffffffff;
+ uint64_t st = 0;
+ uint64_t et = UINT64_MAX;
- int64_t lastPrintTime = taosGetTimestampMs();
- int64_t startTs = taosGetTimestampMs();
- int64_t endTs;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
- int64_t tableSeq = pThreadInfo->start_table_from;
+ uint64_t tableSeq = pThreadInfo->start_table_from;
- debugPrint("[%d] %s() LN%d: start_table_from=%"PRId64" ntables=%"PRId64" insertRows=%"PRId64"\n",
+ debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRIu64" insertRows=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from,
pThreadInfo->ntables, insertRows);
@@ -5012,9 +5119,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
assert(pThreadInfo->ntables > 0);
- int64_t batchPerTbl = interlaceRows;
+ uint64_t batchPerTbl = interlaceRows;
+ uint64_t batchPerTblTimes;
- int64_t batchPerTblTimes;
if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
batchPerTblTimes =
g_args.num_of_RPR / interlaceRows;
@@ -5022,9 +5129,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
batchPerTblTimes = 1;
}
- int64_t generatedRecPerTbl = 0;
+ uint64_t generatedRecPerTbl = 0;
bool flagSleep = true;
- int64_t sleepTimeTotal = 0;
+ uint64_t sleepTimeTotal = 0;
char *strInsertInto = "insert into ";
int nInsertBufLen = strlen(strInsertInto);
@@ -5036,7 +5143,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
// generate data
memset(buffer, 0, maxSqlLen);
- int64_t remainderBufLen = maxSqlLen;
+ uint64_t remainderBufLen = maxSqlLen;
char *pstr = buffer;
@@ -5044,9 +5151,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pstr += len;
remainderBufLen -= len;
- int64_t recOfBatch = 0;
+ uint64_t recOfBatch = 0;
- for (int64_t i = 0; i < batchPerTblTimes; i ++) {
+ for (uint64_t i = 0; i < batchPerTblTimes; i ++) {
getTableName(tableName, pThreadInfo, tableSeq);
if (0 == strlen(tableName)) {
errorPrint("[%d] %s() LN%d, getTableName return null\n",
@@ -5055,7 +5162,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
return NULL;
}
- int64_t oldRemainderLen = remainderBufLen;
+ uint64_t oldRemainderLen = remainderBufLen;
int64_t generated = generateInterlaceDataBuffer(
tableName, batchPerTbl, i, batchPerTblTimes,
tableSeq,
@@ -5064,10 +5171,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
startTime,
&remainderBufLen);
- if (generated < 0) {
- debugPrint("[%d] %s() LN%d, generated data is %"PRId64"\n",
+ debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n",
pThreadInfo->threadID, __func__, __LINE__, generated);
- goto free_and_statistics_interlace;
+ if (generated < 0) {
+ errorPrint("[%d] %s() LN%d, generated records is %"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ goto free_of_interlace;
} else if (generated == 0) {
break;
}
@@ -5111,7 +5220,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
break;
}
- verbosePrint("[%d] %s() LN%d recOfBatch=%"PRId64" totalInsertRows=%"PRId64"\n",
+ verbosePrint("[%d] %s() LN%d recOfBatch=%"PRIu64" totalInsertRows=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__, recOfBatch,
pThreadInfo->totalInsertRows);
verbosePrint("[%d] %s() LN%d, buffer=%s\n",
@@ -5119,33 +5228,40 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
startTs = taosGetTimestampMs();
+ if (recOfBatch == 0) {
+ errorPrint("[%d] %s() LN%d try inserting records of batch is %"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ recOfBatch);
+ errorPrint("%s\n", "\tPlease check if the batch or the buffer length is proper value!\n");
+ goto free_of_interlace;
+ }
int64_t affectedRows = execInsert(pThreadInfo, buffer, recOfBatch);
endTs = taosGetTimestampMs();
- int64_t delay = endTs - startTs;
- performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n",
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %"PRIu64"ms\n",
__func__, __LINE__, delay);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
pThreadInfo->cntDelay++;
pThreadInfo->totalDelay += delay;
- verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
- pThreadInfo->threadID,
- __func__, __LINE__, affectedRows);
- if ((affectedRows < 0) || (recOfBatch != affectedRows)) {
- errorPrint("[%d] %s() LN%d execInsert insert %"PRId64", affected rows: %"PRId64"\n%s\n",
+ if (recOfBatch != affectedRows) {
+ errorPrint("[%d] %s() LN%d execInsert insert %"PRIu64", affected rows: %"PRId64"\n%s\n",
pThreadInfo->threadID, __func__, __LINE__,
recOfBatch, affectedRows, buffer);
- goto free_and_statistics_interlace;
+ goto free_of_interlace;
}
pThreadInfo->totalAffectedRows += affectedRows;
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n",
+ printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
pThreadInfo->threadID,
pThreadInfo->totalInsertRows,
pThreadInfo->totalAffectedRows);
@@ -5165,13 +5281,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
}
-free_and_statistics_interlace:
+free_of_interlace:
tmfree(buffer);
-
- printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n",
- pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows);
+ printStatPerThread(pThreadInfo);
return NULL;
}
@@ -5187,19 +5299,19 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__);
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+ uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
char* buffer = calloc(maxSqlLen, 1);
if (NULL == buffer) {
- errorPrint( "Failed to alloc %d Bytes, reason:%s\n",
+ errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n",
maxSqlLen,
strerror(errno));
return NULL;
}
- int64_t lastPrintTime = taosGetTimestampMs();
- int64_t startTs = taosGetTimestampMs();
- int64_t endTs;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
int64_t timeStampStep =
superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
@@ -5214,15 +5326,15 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->samplePos = 0;
- for (int64_t tableSeq =
+ for (uint64_t tableSeq =
pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to;
tableSeq ++) {
int64_t start_time = pThreadInfo->start_time;
- int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
+ uint64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows);
- for (int64_t i = 0; i < insertRows;) {
+ for (uint64_t i = 0; i < insertRows;) {
/*
if (insert_interval) {
st = taosGetTimestampMs();
@@ -5244,7 +5356,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pstr += len;
remainderBufLen -= len;
- int generated = generateProgressiveDataBuffer(
+ int64_t generated = generateProgressiveDataBuffer(
tableName, tableSeq, pThreadInfo, pstr, insertRows,
i, start_time,
&(pThreadInfo->samplePos),
@@ -5252,7 +5364,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
if (generated > 0)
i += generated;
else
- goto free_and_statistics_2;
+ goto free_of_progressive;
start_time += generated * timeStampStep;
pThreadInfo->totalInsertRows += generated;
@@ -5262,17 +5374,23 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
int64_t affectedRows = execInsert(pThreadInfo, buffer, generated);
endTs = taosGetTimestampMs();
- int64_t delay = endTs - startTs;
+ uint64_t delay = endTs - startTs;
performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n",
__func__, __LINE__, delay);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
pThreadInfo->cntDelay++;
pThreadInfo->totalDelay += delay;
- if (affectedRows < 0)
- goto free_and_statistics_2;
+ if (affectedRows < 0) {
+ errorPrint("%s() LN%d, affected rows: %"PRId64"\n",
+ __func__, __LINE__, affectedRows);
+ goto free_of_progressive;
+ }
pThreadInfo->totalAffectedRows += affectedRows;
@@ -5311,13 +5429,9 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
}
} // tableSeq
-free_and_statistics_2:
+free_of_progressive:
tmfree(buffer);
-
- printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n",
- pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows);
+ printStatPerThread(pThreadInfo);
return NULL;
}
@@ -5346,6 +5460,7 @@ static void* syncWrite(void *sarg) {
// progressive mode
return syncWriteProgressive(pThreadInfo);
}
+
}
static void callBack(void *param, TAOS_RES *res, int code) {
@@ -5425,6 +5540,32 @@ static void *asyncWrite(void *sarg) {
return NULL;
}
+static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *serv_addr)
+{
+ uint16_t rest_port = port + TSDB_PORT_HTTP;
+ struct hostent *server = gethostbyname(host);
+ if ((server == NULL) || (server->h_addr == NULL)) {
+ errorPrint("%s", "ERROR, no such host");
+ return -1;
+ }
+
+ debugPrint("h_name: %s\nh_addr=%p\nh_addretype: %s\nh_length: %d\n",
+ server->h_name,
+ server->h_addr,
+ (server->h_addrtype == AF_INET)?"ipv4":"ipv6",
+ server->h_length);
+
+ memset(serv_addr, 0, sizeof(struct sockaddr_in));
+ serv_addr->sin_family = AF_INET;
+ serv_addr->sin_port = htons(rest_port);
+#ifdef WINDOWS
+ serv_addr->sin_addr.s_addr = inet_addr(host);
+#else
+ memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length);
+#endif
+ return 0;
+}
+
static void startMultiThreadInsertData(int threads, char* db_name,
char* precision,SSuperTable* superTblInfo) {
@@ -5510,15 +5651,15 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int startFrom;
if (superTblInfo) {
- int limit, offset;
+ int64_t limit;
+ uint64_t offset;
if ((NULL != g_args.sqlFile) && (superTblInfo->childTblExists == TBL_NO_EXISTS) &&
((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) {
printf("WARNING: offset and limit will not be used since the child tables not exists!\n");
}
- if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS)
- && (superTblInfo->childTblOffset >= 0)) {
+ if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) {
if ((superTblInfo->childTblLimit < 0)
|| ((superTblInfo->childTblOffset + superTblInfo->childTblLimit)
> (superTblInfo->childTblCount))) {
@@ -5563,7 +5704,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
exit(-1);
}
- int64_t childTblCount;
+ uint64_t childTblCount;
getChildNameOfSuperTableWithLimitAndOffset(
taos,
db_name, superTblInfo->sTblName,
@@ -5577,17 +5718,23 @@ static void startMultiThreadInsertData(int threads, char* db_name,
taos_close(taos);
- int a = ntables / threads;
+ uint64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
- int b = 0;
+ uint64_t b = 0;
if (threads != 0) {
b = ntables % threads;
}
+ if ((superTblInfo)
+ && (0 == strncasecmp(superTblInfo->insertMode, "rest", strlen("rest")))) {
+ if (convertHostToServAddr(g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0)
+ exit(-1);
+ }
+
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infos + i;
t_info->threadID = i;
@@ -5596,7 +5743,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
t_info->superTblInfo = superTblInfo;
t_info->start_time = start_time;
- t_info->minDelay = INT64_MAX;
+ t_info->minDelay = UINT64_MAX;
if ((NULL == superTblInfo) ||
(0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) {
@@ -5628,10 +5775,10 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
*/
tsem_init(&(t_info->lock_sem), 0, 0);
- if (SYNC == g_Dbs.queryMode) {
- pthread_create(pids + i, NULL, syncWrite, t_info);
- } else {
+ if (ASYNC_MODE == g_Dbs.asyncMode) {
pthread_create(pids + i, NULL, asyncWrite, t_info);
+ } else {
+ pthread_create(pids + i, NULL, syncWrite, t_info);
}
}
@@ -5639,10 +5786,10 @@ static void startMultiThreadInsertData(int threads, char* db_name,
pthread_join(pids[i], NULL);
}
- int64_t totalDelay = 0;
- int64_t maxDelay = 0;
- int64_t minDelay = INT64_MAX;
- int64_t cntDelay = 1;
+ uint64_t totalDelay = 0;
+ uint64_t maxDelay = 0;
+ uint64_t minDelay = UINT64_MAX;
+ uint64_t cntDelay = 1;
double avgDelay = 0;
for (int i = 0; i < threads; i++) {
@@ -5651,7 +5798,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
tsem_destroy(&(t_info->lock_sem));
taos_close(t_info->taos);
- debugPrint("%s() LN%d, [%d] totalInsert=%"PRId64" totalAffected=%"PRId64"\n",
+ debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n",
__func__, __LINE__,
t_info->threadID, t_info->totalInsertRows,
t_info->totalAffectedRows);
@@ -5677,35 +5824,42 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int64_t t = end - start;
if (superTblInfo) {
- printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n",
+ fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
t / 1000.0, superTblInfo->totalInsertRows,
superTblInfo->totalAffectedRows,
threads, db_name, superTblInfo->sTblName,
(double)superTblInfo->totalInsertRows / (t / 1000.0));
- fprintf(g_fpOfInsertResult,
- "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n",
+
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
t / 1000.0, superTblInfo->totalInsertRows,
superTblInfo->totalAffectedRows,
threads, db_name, superTblInfo->sTblName,
(double)superTblInfo->totalInsertRows / (t / 1000.0));
+ }
} else {
- printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n",
+ fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
t / 1000.0, g_args.totalInsertRows,
g_args.totalAffectedRows,
threads, db_name,
(double)g_args.totalInsertRows / (t / 1000.0));
- fprintf(g_fpOfInsertResult,
- "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n",
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
t * 1000.0, g_args.totalInsertRows,
g_args.totalAffectedRows,
threads, db_name,
(double)g_args.totalInsertRows / (t / 1000.0));
+ }
}
- printf("insert delay, avg: %10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n",
+ fprintf(stderr, "insert delay, avg: %10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n",
avgDelay, maxDelay, minDelay);
- fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n",
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n",
avgDelay, maxDelay, minDelay);
+ }
//taos_close(taos);
@@ -5745,11 +5899,11 @@ static void *readTable(void *sarg) {
printf("%d records:\n", totalData);
fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n");
- for (int j = 0; j < n; j++) {
+ for (uint64_t j = 0; j < n; j++) {
double totalT = 0;
- int count = 0;
- for (int i = 0; i < num_of_tables; i++) {
- sprintf(command, "select %s from %s%d where ts>= %" PRId64,
+ uint64_t count = 0;
+ for (uint64_t i = 0; i < num_of_tables; i++) {
+ sprintf(command, "select %s from %s%"PRIu64" where ts>= %" PRIu64,
aggreFunc[j], tb_prefix, i, sTime);
double t = taosGetTimestampMs();
@@ -5809,7 +5963,7 @@ static void *readMetric(void *sarg) {
fprintf(fp, "Querying On %d records:\n", totalData);
for (int j = 0; j < n; j++) {
- char condition[BUFFER_SIZE - 30] = "\0";
+ char condition[COND_BUF_LEN] = "\0";
char tempS[64] = "\0";
int m = 10 < num_of_tables ? 10 : num_of_tables;
@@ -5820,7 +5974,7 @@ static void *readMetric(void *sarg) {
} else {
sprintf(tempS, " or t1 = %d ", i);
}
- strcat(condition, tempS);
+ strncat(condition, tempS, COND_BUF_LEN - 1);
sprintf(command, "select %s from meters where %s", aggreFunc[j], condition);
@@ -5875,7 +6029,8 @@ static int insertTestProcess() {
return -1;
}
- printfInsertMetaToFile(g_fpOfInsertResult);
+ if (g_fpOfInsertResult)
+ printfInsertMetaToFile(g_fpOfInsertResult);
if (!g_args.answer_yes) {
printf("Press enter key to continue\n\n");
@@ -5886,7 +6041,8 @@ static int insertTestProcess() {
// create database and super tables
if(createDatabasesAndStables() != 0) {
- fclose(g_fpOfInsertResult);
+ if (g_fpOfInsertResult)
+ fclose(g_fpOfInsertResult);
return -1;
}
@@ -5902,11 +6058,13 @@ static int insertTestProcess() {
end = taosGetTimestampMs();
if (g_totalChildTables > 0) {
- printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
+ fprintf(stderr, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
(end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
- fprintf(g_fpOfInsertResult,
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
"Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
(end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
+ }
}
taosMsleep(1000);
@@ -5979,14 +6137,14 @@ static void *specifiedTableQuery(void *sarg) {
return NULL;
}
- int64_t st = 0;
- int64_t et = 0;
+ uint64_t st = 0;
+ uint64_t et = 0;
- int queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
+ uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
- int totalQueried = 0;
- int64_t lastPrintTime = taosGetTimestampMs();
- int64_t startTs = taosGetTimestampMs();
+ uint64_t totalQueried = 0;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
while(queryTimes --) {
if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) <
@@ -5994,46 +6152,31 @@ static void *specifiedTableQuery(void *sarg) {
taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms
}
- st = taosGetTimestampMs();
-
- if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
- int64_t t1 = taosGetTimestampMs();
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
+ if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(tmpFile, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
- }
- selectAndGetResult(pThreadInfo->taos,
- g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile);
- int64_t t2 = taosGetTimestampMs();
- printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %10.3f s\n",
- taosGetSelfPthreadId(), (t2 - t1)/1000.0);
- } else {
- int64_t t1 = taosGetTimestampMs();
- int retCode = postProceSql(g_queryInfo.host,
- g_queryInfo.port,
- g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]);
- if (0 != retCode) {
- printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
- return NULL;
- }
- int64_t t2 = taosGetTimestampMs();
- printf("=[restful] thread[%"PRId64"] complete one sql, Spent %10.3f s\n",
- taosGetSelfPthreadId(), (t2 - t1)/1000.0);
-
}
+
+ st = taosGetTimestampMs();
+
+ selectAndGetResult(pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile);
+
+ et = taosGetTimestampMs();
+ printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n",
+ taosGetSelfPthreadId(), g_queryInfo.queryMode, (et - st)/1000.0);
+
totalQueried ++;
g_queryInfo.specifiedQueryInfo.totalQueried ++;
- et = taosGetTimestampMs();
-
- int64_t currentPrintTime = taosGetTimestampMs();
- int64_t endTs = taosGetTimestampMs();
+ uint64_t currentPrintTime = taosGetTimestampMs();
+ uint64_t endTs = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- debugPrint("%s() LN%d, endTs=%"PRId64"ms, startTs=%"PRId64"ms\n",
+ debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n",
__func__, __LINE__, endTs, startTs);
- printf("thread[%d] has currently completed queries: %d, QPS: %10.6f\n",
+ printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n",
pThreadInfo->threadID,
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
@@ -6059,14 +6202,14 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {
tstrncpy(outSql, inSql, pos - inSql + 1);
//printf("1: %s\n", outSql);
- strcat(outSql, subTblName);
+ strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1);
//printf("2: %s\n", outSql);
- strcat(outSql, pos+strlen(sourceString));
+ strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1);
//printf("3: %s\n", outSql);
}
static void *superTableQuery(void *sarg) {
- char sqlstr[1024];
+ char sqlstr[MAX_QUERY_SQL_LENGTH];
threadInfo *pThreadInfo = (threadInfo *)sarg;
if (pThreadInfo->taos == NULL) {
@@ -6085,14 +6228,14 @@ static void *superTableQuery(void *sarg) {
}
}
- int64_t st = 0;
- int64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval;
+ uint64_t st = 0;
+ uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval;
- int queryTimes = g_queryInfo.superQueryInfo.queryTimes;
- int totalQueried = 0;
- int64_t startTs = taosGetTimestampMs();
+ uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes;
+ uint64_t totalQueried = 0;
+ uint64_t startTs = taosGetTimestampMs();
- int64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t lastPrintTime = taosGetTimestampMs();
while(queryTimes --) {
if (g_queryInfo.superQueryInfo.queryInterval
&& (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) {
@@ -6111,7 +6254,7 @@ static void *superTableQuery(void *sarg) {
g_queryInfo.superQueryInfo.result[j],
pThreadInfo->threadID);
}
- selectAndGetResult(pThreadInfo->taos, sqlstr, tmpFile);
+ selectAndGetResult(pThreadInfo, sqlstr, tmpFile);
totalQueried++;
g_queryInfo.superQueryInfo.totalQueried ++;
@@ -6119,7 +6262,7 @@ static void *superTableQuery(void *sarg) {
int64_t currentPrintTime = taosGetTimestampMs();
int64_t endTs = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] has currently completed queries: %d, QPS: %10.3f\n",
+ printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.3f\n",
pThreadInfo->threadID,
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
@@ -6128,7 +6271,7 @@ static void *superTableQuery(void *sarg) {
}
}
et = taosGetTimestampMs();
- printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRId64" - %"PRId64"] once queries duration:%.4fs\n\n",
+ printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRIu64" - %"PRIu64"] once queries duration:%.4fs\n\n",
taosGetSelfPthreadId(),
pThreadInfo->start_table_from,
pThreadInfo->end_table_to,
@@ -6171,13 +6314,19 @@ static int queryTestProcess() {
printfQuerySystemInfo(taos);
+ if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
+ if (convertHostToServAddr(
+ g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0)
+ exit(-1);
+ }
+
pthread_t *pids = NULL;
threadInfo *infos = NULL;
//==== create sub threads for query from specify table
int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
int nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;
- int64_t startTs = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
if ((nSqlCount > 0) && (nConcurrent > 0)) {
@@ -6237,21 +6386,21 @@ static int queryTestProcess() {
ERROR_EXIT("memory allocation failed for create threads\n");
}
- int ntables = g_queryInfo.superQueryInfo.childTblCount;
+ uint64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
int threads = g_queryInfo.superQueryInfo.threadCnt;
- int a = ntables / threads;
+ uint64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
- int b = 0;
+ uint64_t b = 0;
if (threads != 0) {
b = ntables % threads;
}
- int startFrom = 0;
+ uint64_t startFrom = 0;
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infosOfSub + i;
t_info->threadID = i;
@@ -6288,12 +6437,12 @@ static int queryTestProcess() {
tmfree((char*)infosOfSub);
// taos_close(taos);// TODO: workaround to use separate taos connection;
- int64_t endTs = taosGetTimestampMs();
+ uint64_t endTs = taosGetTimestampMs();
- int totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
+ uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
g_queryInfo.superQueryInfo.totalQueried;
- printf("==== completed total queries: %d, the QPS of all threads: %10.3f====\n",
+ fprintf(stderr, "==== completed total queries: %"PRIu64", the QPS of all threads: %10.3f====\n",
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
return 0;
@@ -6306,14 +6455,15 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c
return;
}
- getResult(res, (char*)param);
- taos_free_result(res);
+ appendResultToFile(res, (char*)param);
+ // tao_unscribe() will free result.
}
-static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) {
+static TAOS_SUB* subscribeImpl(
+ TAOS *taos, char *sql, char* topic, char* resultFileName) {
TAOS_SUB* tsub = NULL;
- if (g_queryInfo.specifiedQueryInfo.mode) {
+ if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
tsub = taos_subscribe(taos,
g_queryInfo.specifiedQueryInfo.subscribeRestart,
topic, sql, subscribe_callback, (void*)resultFileName,
@@ -6334,9 +6484,12 @@ static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultF
static void *superSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
- char subSqlstr[1024];
+ char subSqlstr[MAX_QUERY_SQL_LENGTH];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
+ if (g_queryInfo.superQueryInfo.sqlCount == 0)
+ return NULL;
+
if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
@@ -6388,14 +6541,14 @@ static void *superSubscribe(void *sarg) {
}
}
//et = taosGetTimestampMs();
- //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
+ //printf("========thread[%"PRIu64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
} while(0);
// start loop to consume result
TAOS_RES* res = NULL;
while(1) {
for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- if (1 == g_queryInfo.superQueryInfo.mode) {
+ if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) {
continue;
}
@@ -6407,7 +6560,7 @@ static void *superSubscribe(void *sarg) {
g_queryInfo.superQueryInfo.result[i],
pThreadInfo->threadID);
}
- getResult(res, tmpFile);
+ appendResultToFile(res, tmpFile);
}
}
}
@@ -6425,6 +6578,9 @@ static void *specifiedSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
+ if (g_queryInfo.specifiedQueryInfo.sqlCount == 0)
+ return NULL;
+
if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
@@ -6454,7 +6610,7 @@ static void *specifiedSubscribe(void *sarg) {
do {
//if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < g_queryInfo.specifiedQueryInfo.queryInterval) {
// taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval- (et - st)); // ms
- // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
+ // //printf("========sleep duration:%"PRIu64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
//}
//st = taosGetTimestampMs();
@@ -6462,7 +6618,7 @@ static void *specifiedSubscribe(void *sarg) {
for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
sprintf(topic, "taosdemo-subscribe-%d", i);
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
+ if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID);
}
@@ -6474,14 +6630,14 @@ static void *specifiedSubscribe(void *sarg) {
}
}
//et = taosGetTimestampMs();
- //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
+ //printf("========thread[%"PRIu64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
} while(0);
// start loop to consume result
TAOS_RES* res = NULL;
while(1) {
for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- if (SYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) {
+ if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
continue;
}
@@ -6492,7 +6648,7 @@ static void *specifiedSubscribe(void *sarg) {
sprintf(tmpFile, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID);
}
- getResult(res, tmpFile);
+ appendResultToFile(res, tmpFile);
}
}
}
@@ -6544,8 +6700,9 @@ static int subscribeTestProcess() {
//==== create sub threads for query from super table
if ((g_queryInfo.specifiedQueryInfo.sqlCount <= 0) ||
(g_queryInfo.specifiedQueryInfo.concurrent <= 0)) {
- errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n",
- __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount,
+ errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount,
g_queryInfo.specifiedQueryInfo.concurrent);
exit(-1);
}
@@ -6580,21 +6737,21 @@ static int subscribeTestProcess() {
exit(-1);
}
- int ntables = g_queryInfo.superQueryInfo.childTblCount;
+ uint64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
int threads = g_queryInfo.superQueryInfo.threadCnt;
- int a = ntables / threads;
+ uint64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
- int b = 0;
+ uint64_t b = 0;
if (threads != 0) {
b = ntables % threads;
}
- int startFrom = 0;
+ uint64_t startFrom = 0;
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infosOfSub + i;
t_info->threadID = i;
@@ -6703,7 +6860,7 @@ static void setParaFromArg(){
g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
g_Dbs.threadCount = g_args.num_of_threads;
g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
- g_Dbs.queryMode = g_args.query_mode;
+ g_Dbs.asyncMode = g_args.async_mode;
g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL;
g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS;
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 96a1cd16f8..f80ac069a0 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -72,7 +72,8 @@ enum _show_db_index {
TSDB_SHOW_DB_WALLEVEL_INDEX,
TSDB_SHOW_DB_FSYNC_INDEX,
TSDB_SHOW_DB_COMP_INDEX,
- TSDB_SHOW_DB_PRECISION_INDEX,
+ TSDB_SHOW_DB_CACHELAST_INDEX,
+ TSDB_SHOW_DB_PRECISION_INDEX,
TSDB_SHOW_DB_UPDATE_INDEX,
TSDB_SHOW_DB_STATUS_INDEX,
TSDB_MAX_SHOW_DB
@@ -83,10 +84,10 @@ enum _show_tables_index {
TSDB_SHOW_TABLES_NAME_INDEX,
TSDB_SHOW_TABLES_CREATED_TIME_INDEX,
TSDB_SHOW_TABLES_COLUMNS_INDEX,
- TSDB_SHOW_TABLES_METRIC_INDEX,
- TSDB_SHOW_TABLES_UID_INDEX,
+ TSDB_SHOW_TABLES_METRIC_INDEX,
+ TSDB_SHOW_TABLES_UID_INDEX,
TSDB_SHOW_TABLES_TID_INDEX,
- TSDB_SHOW_TABLES_VGID_INDEX,
+ TSDB_SHOW_TABLES_VGID_INDEX,
TSDB_MAX_SHOW_TABLES
};
@@ -99,22 +100,24 @@ enum _describe_table_index {
TSDB_MAX_DESCRIBE_METRIC
};
+#define COL_NOTE_LEN 128
+
typedef struct {
char field[TSDB_COL_NAME_LEN + 1];
char type[16];
int length;
- char note[128];
+ char note[COL_NOTE_LEN];
} SColDes;
typedef struct {
- char name[TSDB_COL_NAME_LEN + 1];
+ char name[TSDB_TABLE_NAME_LEN];
SColDes cols[];
} STableDef;
extern char version[];
typedef struct {
- char name[TSDB_DB_NAME_LEN + 1];
+ char name[TSDB_DB_NAME_LEN];
char create_time[32];
int32_t ntables;
int32_t vgroups;
@@ -132,14 +135,15 @@ typedef struct {
int8_t wallevel;
int32_t fsync;
int8_t comp;
+ int8_t cachelast;
char precision[8]; // time resolution
int8_t update;
char status[16];
} SDbInfo;
typedef struct {
- char name[TSDB_TABLE_NAME_LEN + 1];
- char metric[TSDB_TABLE_NAME_LEN + 1];
+ char name[TSDB_TABLE_NAME_LEN];
+ char metric[TSDB_TABLE_NAME_LEN];
} STableRecord;
typedef struct {
@@ -151,7 +155,7 @@ typedef struct {
pthread_t threadID;
int32_t threadIndex;
int32_t totalThreads;
- char dbName[TSDB_TABLE_NAME_LEN + 1];
+ char dbName[TSDB_DB_NAME_LEN];
void *taosCon;
int64_t rowsOfDumpOut;
int64_t tablesOfDumpOut;
@@ -210,13 +214,13 @@ static struct argp_option options[] = {
{"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
// dump unit options
{"all-databases", 'A', 0, 0, "Dump all databases.", 2},
- {"databases", 'B', 0, 0, "Dump assigned databases", 2},
+ {"databases", 'D', 0, 0, "Dump assigned databases", 2},
// dump format options
{"schemaonly", 's', 0, 0, "Only dump schema.", 3},
- {"with-property", 'M', 0, 0, "Dump schema with properties.", 3},
+ {"without-property", 'N', 0, 0, "Dump schema without properties.", 3},
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
- {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
+ {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
@@ -337,15 +341,15 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'A':
arguments->all_databases = true;
break;
- case 'B':
+ case 'D':
arguments->databases = true;
break;
// dump format option
case 's':
arguments->schemaonly = true;
break;
- case 'M':
- arguments->with_property = true;
+ case 'N':
+ arguments->with_property = false;
break;
case 'S':
// parse time here.
@@ -354,23 +358,23 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'E':
arguments->end_time = atol(arg);
break;
- case 'N':
+ case 'B':
arguments->data_batch = atoi(arg);
if (arguments->data_batch >= INT16_MAX) {
arguments->data_batch = INT16_MAX - 1;
- }
+ }
break;
- case 'L':
+ case 'L':
{
int32_t len = atoi(arg);
if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
len = TSDB_MAX_ALLOWED_SQL_LEN;
} else if (len < TSDB_MAX_SQL_LEN) {
len = TSDB_MAX_SQL_LEN;
- }
+ }
arguments->max_sql_len = len;
break;
- }
+ }
case 't':
arguments->table_batch = atoi(arg);
break;
@@ -398,27 +402,27 @@ static resultStatistics g_resultStatistics = {0};
static FILE *g_fpOfResult = NULL;
static int g_numOfCores = 1;
-int taosDumpOut(struct arguments *arguments);
-int taosDumpIn(struct arguments *arguments);
-void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp);
-int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon);
-int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName);
-void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName);
-void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName);
-int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName);
-int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName);
-int taosCheckParam(struct arguments *arguments);
-void taosFreeDbInfos();
+static int taosDumpOut(struct arguments *arguments);
+static int taosDumpIn(struct arguments *arguments);
+static void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp);
+static int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon);
+static int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName);
+static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName);
+static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName);
+static int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName);
+static int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName);
+static int taosCheckParam(struct arguments *arguments);
+static void taosFreeDbInfos();
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName);
struct arguments g_args = {
// connection option
- NULL,
- "root",
+ NULL,
+ "root",
#ifdef _TD_POWER_
- "powerdb",
+ "powerdb",
#else
- "taosdata",
+ "taosdata",
#endif
0,
"",
@@ -432,8 +436,8 @@ struct arguments g_args = {
false,
false,
// dump format option
- false,
- false,
+ false, // schemeonly
+ true, // with_property
0,
INT64_MAX,
1,
@@ -523,7 +527,7 @@ int main(int argc, char *argv[]) {
/* Parse our arguments; every option seen by parse_opt will be
reflected in arguments. */
- if (argc > 1)
+ if (argc > 2)
parse_args(argc, argv, &g_args);
argp_parse(&argp, argc, argv, 0, 0, &g_args);
@@ -675,10 +679,10 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS
}
sprintf(tempCommand, "show tables like %s", table);
-
- result = taos_query(taosCon, tempCommand);
+
+ result = taos_query(taosCon, tempCommand);
int32_t code = taos_errno(result);
-
+
if (code != 0) {
fprintf(stderr, "failed to run command %s\n", tempCommand);
free(tempCommand);
@@ -705,12 +709,12 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS
free(tempCommand);
return 0;
}
-
+
sprintf(tempCommand, "show stables like %s", table);
-
- result = taos_query(taosCon, tempCommand);
+
+ result = taos_query(taosCon, tempCommand);
code = taos_errno(result);
-
+
if (code != 0) {
fprintf(stderr, "failed to run command %s\n", tempCommand);
free(tempCommand);
@@ -748,7 +752,7 @@ int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric
return -1;
}
}
-
+
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN);
tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
@@ -770,7 +774,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
}
sprintf(tmpCommand, "select tbname from %s", metric);
-
+
TAOS_RES *res = taos_query(taosCon, tmpCommand);
int32_t code = taos_errno(res);
if (code != 0) {
@@ -792,20 +796,20 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
}
TAOS_FIELD *fields = taos_fetch_fields(res);
-
+
int32_t numOfTable = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
+ while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes);
tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
-
- taosWrite(fd, &tableRecord, sizeof(STableRecord));
+
+ taosWrite(fd, &tableRecord, sizeof(STableRecord));
numOfTable++;
}
taos_free_result(res);
lseek(fd, 0, SEEK_SET);
-
+
int maxThreads = arguments->thread_num;
int tableOfPerFile ;
if (numOfTable <= arguments->thread_num) {
@@ -815,16 +819,16 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
tableOfPerFile = numOfTable / arguments->thread_num;
if (0 != numOfTable % arguments->thread_num) {
tableOfPerFile += 1;
- }
+ }
}
char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
if (NULL == tblBuf){
- fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
+ fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
close(fd);
return -1;
}
-
+
int32_t numOfThread = *totalNumOfThread;
int subFd = -1;
for (; numOfThread < maxThreads; numOfThread++) {
@@ -838,7 +842,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
(void)remove(tmpBuf);
}
sprintf(tmpBuf, ".select-tbname.tmp");
- (void)remove(tmpBuf);
+ (void)remove(tmpBuf);
free(tblBuf);
close(fd);
return -1;
@@ -856,11 +860,11 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
sprintf(tmpBuf, ".select-tbname.tmp");
(void)remove(tmpBuf);
-
+
if (fd >= 0) {
close(fd);
fd = -1;
- }
+ }
*totalNumOfThread = numOfThread;
@@ -884,7 +888,7 @@ int taosDumpOut(struct arguments *arguments) {
} else {
sprintf(tmpBuf, "dbs.sql");
}
-
+
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@@ -916,9 +920,9 @@ int taosDumpOut(struct arguments *arguments) {
taosDumpCharset(fp);
sprintf(command, "show databases");
- result = taos_query(taos, command);
+ result = taos_query(taos, command);
int32_t code = taos_errno(result);
-
+
if (code != 0) {
fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result));
goto _exit_failure;
@@ -955,15 +959,17 @@ int taosDumpOut(struct arguments *arguments) {
goto _exit_failure;
}
- strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
+ strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
if (arguments->with_property) {
dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
- dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
+ dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
- dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
+ dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
- strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
+ strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
+ fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
//dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
//dbInfos[count]->daysToKeep1;
//dbInfos[count]->daysToKeep2;
@@ -974,8 +980,10 @@ int taosDumpOut(struct arguments *arguments) {
dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+ dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
- strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+ strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+ fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
//dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
@@ -1007,8 +1015,8 @@ int taosDumpOut(struct arguments *arguments) {
g_resultStatistics.totalDatabasesOfDumpOut++;
sprintf(command, "use %s", dbInfos[0]->name);
-
- result = taos_query(taos, command);
+
+ result = taos_query(taos, command);
int32_t code = taos_errno(result);
if (code != 0) {
fprintf(stderr, "invalid database %s\n", dbInfos[0]->name);
@@ -1038,7 +1046,7 @@ int taosDumpOut(struct arguments *arguments) {
int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name);
if (0 == ret) {
superTblCnt++;
- }
+ }
}
retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd);
}
@@ -1050,7 +1058,7 @@ int taosDumpOut(struct arguments *arguments) {
goto _clean_tmp_file;
}
}
-
+
// TODO: save dump super table into result_output.txt
fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt);
g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
@@ -1076,7 +1084,7 @@ int taosDumpOut(struct arguments *arguments) {
taos_close(taos);
taos_free_result(result);
tfree(command);
- taosFreeDbInfos();
+ taosFreeDbInfos();
fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows);
return 0;
@@ -1090,15 +1098,17 @@ _exit_failure:
return -1;
}
-int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) {
+int taosGetTableDes(
+ char* dbName, char *table,
+ STableDef *tableDes, TAOS* taosCon, bool isSuperTable) {
TAOS_ROW row = NULL;
TAOS_RES* res = NULL;
int count = 0;
char sqlstr[COMMAND_SIZE];
sprintf(sqlstr, "describe %s.%s;", dbName, table);
-
- res = taos_query(taosCon, sqlstr);
+
+ res = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
@@ -1108,7 +1118,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
TAOS_FIELD *fields = taos_fetch_fields(res);
- tstrncpy(tableDes->name, table, TSDB_COL_NAME_LEN);
+ tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
while ((row = taos_fetch_row(res)) != NULL) {
strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
@@ -1128,23 +1138,23 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
if (isSuperTable) {
return count;
}
-
+
// if chidl-table have tag, using select tagName from table to get tagValue
for (int i = 0 ; i < count; i++) {
if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
sprintf(sqlstr, "select %s from %s.%s", tableDes->cols[i].field, dbName, table);
-
- res = taos_query(taosCon, sqlstr);
+
+ res = taos_query(taosCon, sqlstr);
code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
taos_free_result(res);
return -1;
}
-
- fields = taos_fetch_fields(res);
+
+ fields = taos_fetch_fields(res);
row = taos_fetch_row(res);
if (NULL == row) {
@@ -1159,7 +1169,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
res = NULL;
continue;
}
-
+
int32_t* length = taos_fetch_lengths(res);
//int32_t* length = taos_fetch_lengths(tmpResult);
@@ -1188,16 +1198,16 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
case TSDB_DATA_TYPE_BINARY: {
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
tableDes->cols[i].note[0] = '\'';
- char tbuf[COMMAND_SIZE];
- converStringToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE);
+ char tbuf[COL_NOTE_LEN];
+ converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf);
*(pstr++) = '\'';
break;
}
case TSDB_DATA_TYPE_NCHAR: {
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
- char tbuf[COMMAND_SIZE];
- convertNCharToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE);
+ char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' '
+ convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
sprintf(tableDes->cols[i].note, "\'%s\'", tbuf);
break;
}
@@ -1219,15 +1229,17 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
default:
break;
}
-
+
taos_free_result(res);
- res = NULL;
+ res = NULL;
}
return count;
}
-int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName) {
+int32_t taosDumpTable(
+ char *table, char *metric, struct arguments *arguments,
+ FILE *fp, TAOS* taosCon, char* dbName) {
int count = 0;
STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
@@ -1280,9 +1292,10 @@ void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
if (isDumpProperty) {
pstr += sprintf(pstr,
- "TABLES %d VGROUPS %d REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION '%s' UPDATE %d",
- dbInfo->ntables, dbInfo->vgroups, dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache,
- dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->wallevel, dbInfo->fsync, dbInfo->comp, dbInfo->precision, dbInfo->update);
+ "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
+ dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache,
+ dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->fsync, dbInfo->cachelast,
+ dbInfo->comp, dbInfo->precision, dbInfo->update);
}
pstr += sprintf(pstr, ";");
@@ -1293,8 +1306,8 @@ void* taosDumpOutWorkThreadFp(void *arg)
{
SThreadParaObj *pThread = (SThreadParaObj*)arg;
STableRecord tableRecord;
- int fd;
-
+ int fd;
+
char tmpBuf[TSDB_FILENAME_LEN*4] = {0};
sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex);
fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
@@ -1305,13 +1318,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
FILE *fp = NULL;
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
-
+
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
}
-
+
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@@ -1321,13 +1334,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
memset(tmpBuf, 0, TSDB_FILENAME_LEN);
sprintf(tmpBuf, "use %s", pThread->dbName);
-
- TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
+
+ TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
int32_t code = taos_errno(tmpResult);
if (code != 0) {
fprintf(stderr, "invalid database %s\n", pThread->dbName);
taos_free_result(tmpResult);
- fclose(fp);
+ fclose(fp);
close(fd);
return NULL;
}
@@ -1340,14 +1353,17 @@ void* taosDumpOutWorkThreadFp(void *arg)
ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
if (readLen <= 0) break;
- int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &g_args, fp, pThread->taosCon, pThread->dbName);
+ int ret = taosDumpTable(
+ tableRecord.name, tableRecord.metric, &g_args,
+ fp, pThread->taosCon, pThread->dbName);
if (ret >= 0) {
// TODO: sum table count and table rows by self
pThread->tablesOfDumpOut++;
pThread->rowsOfDumpOut += ret;
-
+
if (pThread->rowsOfDumpOut >= lastRowsPrint) {
- printf(" %"PRId64 " rows already be dumpout from database %s\n", pThread->rowsOfDumpOut, pThread->dbName);
+ printf(" %"PRId64 " rows already be dumpout from database %s\n",
+ pThread->rowsOfDumpOut, pThread->dbName);
lastRowsPrint += 5000000;
}
@@ -1355,15 +1371,18 @@ void* taosDumpOutWorkThreadFp(void *arg)
if (tablesInOneFile >= g_args.table_batch) {
fclose(fp);
tablesInOneFile = 0;
-
- memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
+
+ memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
+ sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql",
+ g_args.outpath, pThread->dbName,
+ pThread->threadIndex, fileNameIndex);
} else {
- sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex);
+ sprintf(tmpBuf, "%s.tables.%d-%d.sql",
+ pThread->dbName, pThread->threadIndex, fileNameIndex);
}
fileNameIndex++;
-
+
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@@ -1377,7 +1396,7 @@ void* taosDumpOutWorkThreadFp(void *arg)
taos_free_result(tmpResult);
close(fd);
- fclose(fp);
+ fclose(fp);
return NULL;
}
@@ -1385,15 +1404,16 @@ void* taosDumpOutWorkThreadFp(void *arg)
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName)
{
pthread_attr_t thattr;
- SThreadParaObj *threadObj = (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
+ SThreadParaObj *threadObj =
+ (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
for (int t = 0; t < numOfThread; ++t) {
SThreadParaObj *pThread = threadObj + t;
pThread->rowsOfDumpOut = 0;
pThread->tablesOfDumpOut = 0;
pThread->threadIndex = t;
pThread->totalThreads = numOfThread;
- tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN);
- pThread->taosCon = taosCon;
+ tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN);
+ pThread->taosCon = taosCon;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
@@ -1408,7 +1428,7 @@ static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, i
pthread_join(threadObj[t].threadID, NULL);
}
- // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt
+ // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt
int64_t totalRowsOfDumpOut = 0;
int64_t totalChildTblsOfDumpOut = 0;
for (int32_t t = 0; t < numOfThread; ++t) {
@@ -1449,7 +1469,7 @@ int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName) {
}
-int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
+int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
{
TAOS_ROW row;
int fd = -1;
@@ -1457,8 +1477,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
char sqlstr[TSDB_MAX_SQL_LEN] = {0};
sprintf(sqlstr, "show %s.stables", dbName);
-
- TAOS_RES* res = taos_query(taosCon, sqlstr);
+
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason: %s\n", sqlstr, taos_errstr(res));
@@ -1478,13 +1498,14 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
(void)remove(".stables.tmp");
exit(-1);
}
-
- while ((row = taos_fetch_row(res)) != NULL) {
+
+ while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
- strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
taosWrite(fd, &tableRecord, sizeof(STableRecord));
- }
-
+ }
+
taos_free_result(res);
(void)lseek(fd, 0, SEEK_SET);
@@ -1492,7 +1513,7 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
while (1) {
ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
if (readLen <= 0) break;
-
+
int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName);
if (0 == ret) {
superTblCnt++;
@@ -1505,8 +1526,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
close(fd);
(void)remove(".stables.tmp");
-
- return 0;
+
+ return 0;
}
@@ -1516,19 +1537,19 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
STableRecord tableRecord;
taosDumpCreateDbClause(dbInfo, arguments->with_property, fp);
-
+
fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfo->name);
g_resultStatistics.totalDatabasesOfDumpOut++;
char sqlstr[TSDB_MAX_SQL_LEN] = {0};
fprintf(fp, "USE %s;\n\n", dbInfo->name);
-
+
(void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp);
sprintf(sqlstr, "show %s.tables", dbInfo->name);
-
- TAOS_RES* res = taos_query(taosCon, sqlstr);
+
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
int code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
@@ -1547,15 +1568,17 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
}
TAOS_FIELD *fields = taos_fetch_fields(res);
-
+
int32_t numOfTable = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
+ while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
- tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
- tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
-
+ tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
+ fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
+
taosWrite(fd, &tableRecord, sizeof(STableRecord));
-
+
numOfTable++;
}
taos_free_result(res);
@@ -1570,7 +1593,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
tableOfPerFile = numOfTable / g_args.thread_num;
if (0 != numOfTable % g_args.thread_num) {
tableOfPerFile += 1;
- }
+ }
}
char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
@@ -1579,7 +1602,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
close(fd);
return -1;
}
-
+
int32_t numOfThread = 0;
int subFd = -1;
for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) {
@@ -1616,7 +1639,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
close(fd);
fd = -1;
}
-
+
taos_free_result(res);
// start multi threads to dumpout
@@ -1624,7 +1647,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
(void)remove(tmpBuf);
- }
+ }
free(tblBuf);
return 0;
@@ -1637,15 +1660,18 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha
char* pstr = sqlstr;
- pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", dbName, tableDes->name);
+ pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
+ dbName, tableDes->name);
for (; counter < numOfCols; counter++) {
if (tableDes->cols[counter].note[0] != '\0') break;
if (counter == 0) {
- pstr += sprintf(pstr, " (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
+ pstr += sprintf(pstr, " (%s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
- pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
+ pstr += sprintf(pstr, ", %s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
}
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
@@ -1658,9 +1684,11 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha
for (; counter < numOfCols; counter++) {
if (counter == count_temp) {
- pstr += sprintf(pstr, ") TAGS (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
+ pstr += sprintf(pstr, ") TAGS (%s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
- pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
+ pstr += sprintf(pstr, ", %s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
}
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
@@ -1687,7 +1715,8 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols
char *pstr = NULL;
pstr = tmpBuf;
- pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", dbName, tableDes->name, dbName, metric);
+ pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (",
+ dbName, tableDes->name, dbName, metric);
for (; counter < numOfCols; counter++) {
if (tableDes->cols[counter].note[0] != '\0') break;
@@ -1735,7 +1764,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
char *pstr = NULL;
TAOS_ROW row = NULL;
int numFields = 0;
-
+
if (arguments->schemaonly) {
return 0;
}
@@ -1750,11 +1779,11 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
pstr = tmpBuffer;
char sqlstr[1024] = {0};
- sprintf(sqlstr,
- "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
+ sprintf(sqlstr,
+ "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
dbName, tbname, arguments->start_time, arguments->end_time);
-
- TAOS_RES* tmpResult = taos_query(taosCon, sqlstr);
+
+ TAOS_RES* tmpResult = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(tmpResult);
if (code != 0) {
fprintf(stderr, "failed to run command %s, reason: %s\n", sqlstr, taos_errstr(tmpResult));
@@ -1774,7 +1803,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
while ((row = taos_fetch_row(tmpResult)) != NULL) {
pstr = tmpBuffer;
curr_sqlstr_len = 0;
-
+
int32_t* length = taos_fetch_lengths(tmpResult); // act len
if (count == 0) {
@@ -1829,7 +1858,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
//pstr = stpcpy(pstr, tbuf);
//*(pstr++) = '\'';
- pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
+ pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
break;
}
case TSDB_DATA_TYPE_NCHAR: {
@@ -1857,10 +1886,10 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") ");
- totalRows++;
+ totalRows++;
count++;
fprintf(fp, "%s", tmpBuffer);
-
+
if (totalRows >= lastRowsPrint) {
printf(" %"PRId64 " rows already be dumpout from %s.%s\n", totalRows, dbName, tbname);
lastRowsPrint += 5000000;
@@ -2206,7 +2235,7 @@ static FILE* taosOpenDumpInFile(char *fptr) {
}
char *fname = full_path.we_wordv[0];
-
+
FILE *f = fopen(fname, "r");
if (f == NULL) {
fprintf(stderr, "ERROR: failed to open file %s\n", fname);
@@ -2240,7 +2269,7 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c
line[--read_len] = '\0';
//if (read_len == 0 || isCommentLine(line)) { // line starts with #
- if (read_len == 0 ) {
+ if (read_len == 0 ) {
continue;
}
@@ -2259,8 +2288,8 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c
}
memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
- cmd_len = 0;
-
+ cmd_len = 0;
+
if (lineNo >= lastRowsPrint) {
printf(" %d lines already be executed from file %s\n", lineNo, fileName);
lastRowsPrint += 5000000;
@@ -2300,7 +2329,7 @@ static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
if (totalThreads > tsSqlFileNum) {
totalThreads = tsSqlFileNum;
}
-
+
SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj));
for (int32_t t = 0; t < totalThreads; ++t) {
pThread = threadObj + t;
@@ -2330,7 +2359,7 @@ static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
int taosDumpIn(struct arguments *arguments) {
assert(arguments->isDumpIn);
-
+
TAOS *taos = NULL;
FILE *fp = NULL;
@@ -2345,22 +2374,22 @@ int taosDumpIn(struct arguments *arguments) {
int32_t tsSqlFileNumOfTbls = tsSqlFileNum;
if (tsDbSqlFile[0] != 0) {
tsSqlFileNumOfTbls--;
-
+
fp = taosOpenDumpInFile(tsDbSqlFile);
if (NULL == fp) {
fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile);
return -1;
}
fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile);
-
+
taosLoadFileCharset(fp, tsfCharset);
-
+
taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile);
}
if (0 != tsSqlFileNumOfTbls) {
taosStartDumpInWorkThreads(taos, arguments);
- }
+ }
taos_close(taos);
taosFreeSQLFiles();
diff --git a/src/mnode/src/mnodeUser.c b/src/mnode/src/mnodeUser.c
index 55ee39b6bc..e77c1b3e59 100644
--- a/src/mnode/src/mnodeUser.c
+++ b/src/mnode/src/mnodeUser.c
@@ -123,7 +123,7 @@ static void mnodePrintUserAuth() {
mnodeDecUserRef(pUser);
}
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
}
diff --git a/src/os/CMakeLists.txt b/src/os/CMakeLists.txt
index ab8b0f7678..4472c683c7 100644
--- a/src/os/CMakeLists.txt
+++ b/src/os/CMakeLists.txt
@@ -2,7 +2,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
IF (TD_LINUX)
- ADD_SUBDIRECTORY(src/linux)
+ ADD_SUBDIRECTORY(src/linux)
ELSEIF (TD_DARWIN)
ADD_SUBDIRECTORY(src/darwin)
ELSEIF (TD_WINDOWS)
diff --git a/src/os/inc/os.h b/src/os/inc/os.h
index c3e02b14db..6731ca6d7d 100644
--- a/src/os/inc/os.h
+++ b/src/os/inc/os.h
@@ -20,45 +20,9 @@
extern "C" {
#endif
-#ifdef _TD_DARWIN_64
-#include "osDarwin.h"
-#endif
-
-#ifdef _TD_ARM_64
-#include "osArm64.h"
-#endif
-
-#ifdef _TD_ARM_32
-#include "osArm32.h"
-#endif
-
-#ifdef _TD_MIPS_64
-#include "osMips64.h"
-#endif
-
-#ifdef _TD_LINUX_64
-#include "osLinux64.h"
-#endif
-
-#ifdef _TD_LINUX_32
-#include "osLinux32.h"
-#endif
-
-#ifdef _ALPINE
-#include "osAlpine.h"
-#endif
-
-#ifdef _TD_NINGSI_60
-#include "osNingsi.h"
-#endif
-
-#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
-#include "osWindows.h"
-#endif
-
+#include "osInc.h"
#include "osDef.h"
#include "osAtomic.h"
-#include "osCommon.h"
#include "osDir.h"
#include "osFile.h"
#include "osLz4.h"
@@ -67,6 +31,7 @@ extern "C" {
#include "osRand.h"
#include "osSemphone.h"
#include "osSignal.h"
+#include "osSleep.h"
#include "osSocket.h"
#include "osString.h"
#include "osSysinfo.h"
diff --git a/src/os/inc/osAlpine.h b/src/os/inc/osAlpine.h
deleted file mode 100644
index eba9459395..0000000000
--- a/src/os/inc/osAlpine.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef TDENGINE_OS_ALPINE_H
-#define TDENGINE_OS_ALPINE_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-typedef int(*__compar_fn_t)(const void *, const void *);
-void error (int, int, const char *);
-#ifndef PTHREAD_MUTEX_RECURSIVE_NP
- #define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/src/os/inc/osArm32.h b/src/os/inc/osArm32.h
deleted file mode 100644
index 54835a1ca8..0000000000
--- a/src/os/inc/osArm32.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef TDENGINE_OS_ARM32_H
-#define TDENGINE_OS_ARM32_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define TAOS_OS_FUNC_LZ4
- #define BUILDIN_CLZL(val) __builtin_clzll(val)
- #define BUILDIN_CTZL(val) __builtin_ctzll(val)
- #define BUILDIN_CLZ(val) __builtin_clz(val)
- #define BUILDIN_CTZ(val) __builtin_ctz(val)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/src/os/inc/osArm64.h b/src/os/inc/osArm64.h
deleted file mode 100644
index 76098f6846..0000000000
--- a/src/os/inc/osArm64.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef TDENGINE_OS_ARM64_H
-#define TDENGINE_OS_ARM64_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/src/os/inc/osAtomic.h b/src/os/inc/osAtomic.h
index 803c351400..7affa444ee 100644
--- a/src/os/inc/osAtomic.h
+++ b/src/os/inc/osAtomic.h
@@ -20,7 +20,252 @@
extern "C" {
#endif
-#ifndef TAOS_OS_FUNC_ATOMIC
+#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
+ #define atomic_load_8(ptr) (*(char volatile*)(ptr))
+ #define atomic_load_16(ptr) (*(short volatile*)(ptr))
+ #define atomic_load_32(ptr) (*(long volatile*)(ptr))
+ #define atomic_load_64(ptr) (*(__int64 volatile*)(ptr))
+ #define atomic_load_ptr(ptr) (*(void* volatile*)(ptr))
+
+ #define atomic_store_8(ptr, val) ((*(char volatile*)(ptr)) = (char)(val))
+ #define atomic_store_16(ptr, val) ((*(short volatile*)(ptr)) = (short)(val))
+ #define atomic_store_32(ptr, val) ((*(long volatile*)(ptr)) = (long)(val))
+ #define atomic_store_64(ptr, val) ((*(__int64 volatile*)(ptr)) = (__int64)(val))
+ #define atomic_store_ptr(ptr, val) ((*(void* volatile*)(ptr)) = (void*)(val))
+
+ #define atomic_exchange_8(ptr, val) _InterlockedExchange8((char volatile*)(ptr), (char)(val))
+ #define atomic_exchange_16(ptr, val) _InterlockedExchange16((short volatile*)(ptr), (short)(val))
+ #define atomic_exchange_32(ptr, val) _InterlockedExchange((long volatile*)(ptr), (long)(val))
+ #define atomic_exchange_64(ptr, val) _InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val))
+ #ifdef _WIN64
+ #define atomic_exchange_ptr(ptr, val) _InterlockedExchangePointer((void* volatile*)(ptr), (void*)(val))
+ #else
+ #define atomic_exchange_ptr(ptr, val) _InlineInterlockedExchangePointer((void* volatile*)(ptr), (void*)(val))
+ #endif
+
+ #ifdef _TD_GO_DLL_
+ #define atomic_val_compare_exchange_8 __sync_val_compare_and_swap
+ #else
+ #define atomic_val_compare_exchange_8(ptr, oldval, newval) _InterlockedCompareExchange8((char volatile*)(ptr), (char)(newval), (char)(oldval))
+ #endif
+ #define atomic_val_compare_exchange_16(ptr, oldval, newval) _InterlockedCompareExchange16((short volatile*)(ptr), (short)(newval), (short)(oldval))
+ #define atomic_val_compare_exchange_32(ptr, oldval, newval) _InterlockedCompareExchange((long volatile*)(ptr), (long)(newval), (long)(oldval))
+ #define atomic_val_compare_exchange_64(ptr, oldval, newval) _InterlockedCompareExchange64((__int64 volatile*)(ptr), (__int64)(newval), (__int64)(oldval))
+ #define atomic_val_compare_exchange_ptr(ptr, oldval, newval) _InterlockedCompareExchangePointer((void* volatile*)(ptr), (void*)(newval), (void*)(oldval))
+
+ char interlocked_add_fetch_8(char volatile *ptr, char val);
+ short interlocked_add_fetch_16(short volatile *ptr, short val);
+ long interlocked_add_fetch_32(long volatile *ptr, long val);
+ __int64 interlocked_add_fetch_64(__int64 volatile *ptr, __int64 val);
+
+ char interlocked_and_fetch_8(char volatile* ptr, char val);
+ short interlocked_and_fetch_16(short volatile* ptr, short val);
+ long interlocked_and_fetch_32(long volatile* ptr, long val);
+ __int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val);
+
+ __int64 interlocked_fetch_and_64(__int64 volatile* ptr, __int64 val);
+
+ char interlocked_or_fetch_8(char volatile* ptr, char val);
+ short interlocked_or_fetch_16(short volatile* ptr, short val);
+ long interlocked_or_fetch_32(long volatile* ptr, long val);
+ __int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val);
+
+ char interlocked_xor_fetch_8(char volatile* ptr, char val);
+ short interlocked_xor_fetch_16(short volatile* ptr, short val);
+ long interlocked_xor_fetch_32(long volatile* ptr, long val);
+ __int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val);
+
+ __int64 interlocked_fetch_xor_64(__int64 volatile* ptr, __int64 val);
+
+ #define atomic_add_fetch_8(ptr, val) interlocked_add_fetch_8((char volatile*)(ptr), (char)(val))
+ #define atomic_add_fetch_16(ptr, val) interlocked_add_fetch_16((short volatile*)(ptr), (short)(val))
+ #define atomic_add_fetch_32(ptr, val) interlocked_add_fetch_32((long volatile*)(ptr), (long)(val))
+ #define atomic_add_fetch_64(ptr, val) interlocked_add_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
+ #ifdef _TD_GO_DLL_
+ #define atomic_fetch_add_8 __sync_fetch_and_ad
+ #define atomic_fetch_add_16 __sync_fetch_and_add
+ #else
+ #define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val))
+ #endif
+ #define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val))
+ #define atomic_fetch_add_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), (long)(val))
+ #define atomic_fetch_add_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_sub_fetch_8(ptr, val) interlocked_add_fetch_8((char volatile*)(ptr), -(char)(val))
+ #define atomic_sub_fetch_16(ptr, val) interlocked_add_fetch_16((short volatile*)(ptr), -(short)(val))
+ #define atomic_sub_fetch_32(ptr, val) interlocked_add_fetch_32((long volatile*)(ptr), -(long)(val))
+ #define atomic_sub_fetch_64(ptr, val) interlocked_add_fetch_64((__int64 volatile*)(ptr), -(__int64)(val))
+
+ #define atomic_fetch_sub_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), -(char)(val))
+ #define atomic_fetch_sub_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), -(short)(val))
+ #define atomic_fetch_sub_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), -(long)(val))
+ #define atomic_fetch_sub_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), -(__int64)(val))
+
+ #define atomic_and_fetch_8(ptr, val) interlocked_and_fetch_8((char volatile*)(ptr), (char)(val))
+ #define atomic_and_fetch_16(ptr, val) interlocked_and_fetch_16((short volatile*)(ptr), (short)(val))
+ #define atomic_and_fetch_32(ptr, val) interlocked_and_fetch_32((long volatile*)(ptr), (long)(val))
+ #define atomic_and_fetch_64(ptr, val) interlocked_and_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_fetch_and_8(ptr, val) _InterlockedAnd8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_and_16(ptr, val) _InterlockedAnd16((short volatile*)(ptr), (short)(val))
+ #define atomic_fetch_and_32(ptr, val) _InterlockedAnd((long volatile*)(ptr), (long)(val))
+ #define atomic_fetch_and_64(ptr, val) interlocked_fetch_and_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_or_fetch_8(ptr, val) interlocked_or_fetch_8((char volatile*)(ptr), (char)(val))
+ #define atomic_or_fetch_16(ptr, val) interlocked_or_fetch_16((short volatile*)(ptr), (short)(val))
+ #define atomic_or_fetch_32(ptr, val) interlocked_or_fetch_32((long volatile*)(ptr), (long)(val))
+ #define atomic_or_fetch_64(ptr, val) interlocked_or_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_fetch_or_8(ptr, val) _InterlockedOr8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_or_16(ptr, val) _InterlockedOr16((short volatile*)(ptr), (short)(val))
+ #define atomic_fetch_or_32(ptr, val) _InterlockedOr((long volatile*)(ptr), (long)(val))
+ #define atomic_fetch_or_64(ptr, val) interlocked_fetch_or_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_xor_fetch_8(ptr, val) interlocked_xor_fetch_8((char volatile*)(ptr), (char)(val))
+ #define atomic_xor_fetch_16(ptr, val) interlocked_xor_fetch_16((short volatile*)(ptr), (short)(val))
+ #define atomic_xor_fetch_32(ptr, val) interlocked_xor_fetch_32((long volatile*)(ptr), (long)(val))
+ #define atomic_xor_fetch_64(ptr, val) interlocked_xor_fetch_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #define atomic_fetch_xor_8(ptr, val) _InterlockedXor8((char volatile*)(ptr), (char)(val))
+ #define atomic_fetch_xor_16(ptr, val) _InterlockedXor16((short volatile*)(ptr), (short)(val))
+ #define atomic_fetch_xor_32(ptr, val) _InterlockedXor((long volatile*)(ptr), (long)(val))
+ #define atomic_fetch_xor_64(ptr, val) interlocked_fetch_xor_64((__int64 volatile*)(ptr), (__int64)(val))
+
+ #ifdef _WIN64
+ #define atomic_add_fetch_ptr atomic_add_fetch_64
+ #define atomic_fetch_add_ptr atomic_fetch_add_64
+ #define atomic_sub_fetch_ptr atomic_sub_fetch_64
+ #define atomic_fetch_sub_ptr atomic_fetch_sub_64
+ #define atomic_and_fetch_ptr atomic_and_fetch_64
+ #define atomic_fetch_and_ptr atomic_fetch_and_64
+ #define atomic_or_fetch_ptr atomic_or_fetch_64
+ #define atomic_fetch_or_ptr atomic_fetch_or_64
+ #define atomic_xor_fetch_ptr atomic_xor_fetch_64
+ #define atomic_fetch_xor_ptr atomic_fetch_xor_64
+ #else
+ #define atomic_add_fetch_ptr atomic_add_fetch_32
+ #define atomic_fetch_add_ptr atomic_fetch_add_32
+ #define atomic_sub_fetch_ptr atomic_sub_fetch_32
+ #define atomic_fetch_sub_ptr atomic_fetch_sub_32
+ #define atomic_and_fetch_ptr atomic_and_fetch_32
+ #define atomic_fetch_and_ptr atomic_fetch_and_32
+ #define atomic_or_fetch_ptr atomic_or_fetch_32
+ #define atomic_fetch_or_ptr atomic_fetch_or_32
+ #define atomic_xor_fetch_ptr atomic_xor_fetch_32
+ #define atomic_fetch_xor_ptr atomic_fetch_xor_32
+ #endif
+#elif defined(_TD_NINGSI_60)
+ /*
+ * type __sync_fetch_and_add (type *ptr, type value);
+ * type __sync_fetch_and_sub (type *ptr, type value);
+ * type __sync_fetch_and_or (type *ptr, type value);
+ * type __sync_fetch_and_and (type *ptr, type value);
+ * type __sync_fetch_and_xor (type *ptr, type value);
+ * type __sync_fetch_and_nand (type *ptr, type value);
+ * type __sync_add_and_fetch (type *ptr, type value);
+ * type __sync_sub_and_fetch (type *ptr, type value);
+ * type __sync_or_and_fetch (type *ptr, type value);
+ * type __sync_and_and_fetch (type *ptr, type value);
+ * type __sync_xor_and_fetch (type *ptr, type value);
+ * type __sync_nand_and_fetch (type *ptr, type value);
+ *
+ * bool __sync_bool_compare_and_swap (type*ptr, type oldval, type newval, ...)
+ * type __sync_val_compare_and_swap (type *ptr, type oldval, ?type newval, ...)
+ * */
+
+ #define atomic_load_8(ptr) __sync_fetch_and_add((ptr), 0)
+ #define atomic_load_16(ptr) __sync_fetch_and_add((ptr), 0)
+ #define atomic_load_32(ptr) __sync_fetch_and_add((ptr), 0)
+ #define atomic_load_64(ptr) __sync_fetch_and_add((ptr), 0)
+ #define atomic_load_ptr(ptr) __sync_fetch_and_add((ptr), 0)
+
+ #define atomic_store_8(ptr, val) (*(ptr)=(val))
+ #define atomic_store_16(ptr, val) (*(ptr)=(val))
+ #define atomic_store_32(ptr, val) (*(ptr)=(val))
+ #define atomic_store_64(ptr, val) (*(ptr)=(val))
+ #define atomic_store_ptr(ptr, val) (*(ptr)=(val))
+
+ int8_t atomic_exchange_8_impl(int8_t* ptr, int8_t val );
+ int16_t atomic_exchange_16_impl(int16_t* ptr, int16_t val );
+ int32_t atomic_exchange_32_impl(int32_t* ptr, int32_t val );
+ int64_t atomic_exchange_64_impl(int64_t* ptr, int64_t val );
+ void* atomic_exchange_ptr_impl( void **ptr, void *val );
+
+ #define atomic_exchange_8(ptr, val) atomic_exchange_8_impl((int8_t*)ptr, (int8_t)val)
+ #define atomic_exchange_16(ptr, val) atomic_exchange_16_impl((int16_t*)ptr, (int16_t)val)
+ #define atomic_exchange_32(ptr, val) atomic_exchange_32_impl((int32_t*)ptr, (int32_t)val)
+ #define atomic_exchange_64(ptr, val) atomic_exchange_64_impl((int64_t*)ptr, (int64_t)val)
+ #define atomic_exchange_ptr(ptr, val) atomic_exchange_ptr_impl((void **)ptr, (void*)val)
+
+ #define atomic_val_compare_exchange_8 __sync_val_compare_and_swap
+ #define atomic_val_compare_exchange_16 __sync_val_compare_and_swap
+ #define atomic_val_compare_exchange_32 __sync_val_compare_and_swap
+ #define atomic_val_compare_exchange_64 __sync_val_compare_and_swap
+ #define atomic_val_compare_exchange_ptr __sync_val_compare_and_swap
+
+ #define atomic_add_fetch_8(ptr, val) __sync_add_and_fetch((ptr), (val))
+ #define atomic_add_fetch_16(ptr, val) __sync_add_and_fetch((ptr), (val))
+ #define atomic_add_fetch_32(ptr, val) __sync_add_and_fetch((ptr), (val))
+ #define atomic_add_fetch_64(ptr, val) __sync_add_and_fetch((ptr), (val))
+ #define atomic_add_fetch_ptr(ptr, val) __sync_add_and_fetch((ptr), (val))
+
+ #define atomic_fetch_add_8(ptr, val) __sync_fetch_and_add((ptr), (val))
+ #define atomic_fetch_add_16(ptr, val) __sync_fetch_and_add((ptr), (val))
+ #define atomic_fetch_add_32(ptr, val) __sync_fetch_and_add((ptr), (val))
+ #define atomic_fetch_add_64(ptr, val) __sync_fetch_and_add((ptr), (val))
+ #define atomic_fetch_add_ptr(ptr, val) __sync_fetch_and_add((ptr), (val))
+
+ #define atomic_sub_fetch_8(ptr, val) __sync_sub_and_fetch((ptr), (val))
+ #define atomic_sub_fetch_16(ptr, val) __sync_sub_and_fetch((ptr), (val))
+ #define atomic_sub_fetch_32(ptr, val) __sync_sub_and_fetch((ptr), (val))
+ #define atomic_sub_fetch_64(ptr, val) __sync_sub_and_fetch((ptr), (val))
+ #define atomic_sub_fetch_ptr(ptr, val) __sync_sub_and_fetch((ptr), (val))
+
+ #define atomic_fetch_sub_8(ptr, val) __sync_fetch_and_sub((ptr), (val))
+ #define atomic_fetch_sub_16(ptr, val) __sync_fetch_and_sub((ptr), (val))
+ #define atomic_fetch_sub_32(ptr, val) __sync_fetch_and_sub((ptr), (val))
+ #define atomic_fetch_sub_64(ptr, val) __sync_fetch_and_sub((ptr), (val))
+ #define atomic_fetch_sub_ptr(ptr, val) __sync_fetch_and_sub((ptr), (val))
+
+ #define atomic_and_fetch_8(ptr, val) __sync_and_and_fetch((ptr), (val))
+ #define atomic_and_fetch_16(ptr, val) __sync_and_and_fetch((ptr), (val))
+ #define atomic_and_fetch_32(ptr, val) __sync_and_and_fetch((ptr), (val))
+ #define atomic_and_fetch_64(ptr, val) __sync_and_and_fetch((ptr), (val))
+ #define atomic_and_fetch_ptr(ptr, val) __sync_and_and_fetch((ptr), (val))
+
+ #define atomic_fetch_and_8(ptr, val) __sync_fetch_and_and((ptr), (val))
+ #define atomic_fetch_and_16(ptr, val) __sync_fetch_and_and((ptr), (val))
+ #define atomic_fetch_and_32(ptr, val) __sync_fetch_and_and((ptr), (val))
+ #define atomic_fetch_and_64(ptr, val) __sync_fetch_and_and((ptr), (val))
+ #define atomic_fetch_and_ptr(ptr, val) __sync_fetch_and_and((ptr), (val))
+
+ #define atomic_or_fetch_8(ptr, val) __sync_or_and_fetch((ptr), (val))
+ #define atomic_or_fetch_16(ptr, val) __sync_or_and_fetch((ptr), (val))
+ #define atomic_or_fetch_32(ptr, val) __sync_or_and_fetch((ptr), (val))
+ #define atomic_or_fetch_64(ptr, val) __sync_or_and_fetch((ptr), (val))
+ #define atomic_or_fetch_ptr(ptr, val) __sync_or_and_fetch((ptr), (val))
+
+ #define atomic_fetch_or_8(ptr, val) __sync_fetch_and_or((ptr), (val))
+ #define atomic_fetch_or_16(ptr, val) __sync_fetch_and_or((ptr), (val))
+ #define atomic_fetch_or_32(ptr, val) __sync_fetch_and_or((ptr), (val))
+ #define atomic_fetch_or_64(ptr, val) __sync_fetch_and_or((ptr), (val))
+ #define atomic_fetch_or_ptr(ptr, val) __sync_fetch_and_or((ptr), (val))
+
+ #define atomic_xor_fetch_8(ptr, val) __sync_xor_and_fetch((ptr), (val))
+ #define atomic_xor_fetch_16(ptr, val) __sync_xor_and_fetch((ptr), (val))
+ #define atomic_xor_fetch_32(ptr, val) __sync_xor_and_fetch((ptr), (val))
+ #define atomic_xor_fetch_64(ptr, val) __sync_xor_and_fetch((ptr), (val))
+ #define atomic_xor_fetch_ptr(ptr, val) __sync_xor_and_fetch((ptr), (val))
+
+ #define atomic_fetch_xor_8(ptr, val) __sync_fetch_and_xor((ptr), (val))
+ #define atomic_fetch_xor_16(ptr, val) __sync_fetch_and_xor((ptr), (val))
+ #define atomic_fetch_xor_32(ptr, val) __sync_fetch_and_xor((ptr), (val))
+ #define atomic_fetch_xor_64(ptr, val) __sync_fetch_and_xor((ptr), (val))
+ #define atomic_fetch_xor_ptr(ptr, val) __sync_fetch_and_xor((ptr), (val))
+
+#else
#define atomic_load_8(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_16(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_32(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
diff --git a/src/os/inc/osDarwin.h b/src/os/inc/osDarwin.h
deleted file mode 100644
index 7c206afe7a..0000000000
--- a/src/os/inc/osDarwin.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef TDENGINE_OS_DARWIN_H
-#define TDENGINE_OS_DARWIN_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include