[td-225] merge develop
This commit is contained in:
commit
4b3301be83
|
|
@ -1,30 +1,49 @@
|
||||||
version: 1.0.{build}
|
version: 1.0.{build}
|
||||||
os: Visual Studio 2015
|
image:
|
||||||
|
- Visual Studio 2015
|
||||||
|
- macos
|
||||||
environment:
|
environment:
|
||||||
matrix:
|
matrix:
|
||||||
- ARCH: amd64
|
- ARCH: amd64
|
||||||
- ARCH: x86
|
- ARCH: x86
|
||||||
|
matrix:
|
||||||
|
exclude:
|
||||||
|
- image: macos
|
||||||
|
ARCH: x86
|
||||||
|
for:
|
||||||
|
-
|
||||||
|
matrix:
|
||||||
|
only:
|
||||||
|
- image: Visual Studio 2015
|
||||||
|
clone_folder: c:\dev\TDengine
|
||||||
|
clone_depth: 1
|
||||||
|
|
||||||
clone_folder: c:\dev\TDengine
|
init:
|
||||||
clone_depth: 1
|
|
||||||
|
|
||||||
init:
|
|
||||||
- call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%
|
- call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%
|
||||||
|
|
||||||
before_build:
|
before_build:
|
||||||
- cd c:\dev\TDengine
|
- cd c:\dev\TDengine
|
||||||
- md build
|
- md build
|
||||||
|
|
||||||
build_script:
|
build_script:
|
||||||
- cd build
|
- cd build
|
||||||
- cmake -G "NMake Makefiles" ..
|
- cmake -G "NMake Makefiles" ..
|
||||||
- nmake install
|
- nmake install
|
||||||
|
-
|
||||||
|
matrix:
|
||||||
|
only:
|
||||||
|
- image: macos
|
||||||
|
clone_depth: 1
|
||||||
|
|
||||||
|
build_script:
|
||||||
|
- mkdir debug
|
||||||
|
- cd debug
|
||||||
|
- cmake .. > /dev/null
|
||||||
|
- make > /dev/null
|
||||||
notifications:
|
notifications:
|
||||||
- provider: Email
|
- provider: Email
|
||||||
to:
|
to:
|
||||||
- sangshuduo@gmail.com
|
- sangshuduo@gmail.com
|
||||||
|
|
||||||
on_build_success: true
|
on_build_success: true
|
||||||
on_build_failure: true
|
on_build_failure: true
|
||||||
on_build_status_changed: true
|
on_build_status_changed: true
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,180 @@
|
||||||
|
---
|
||||||
|
kind: pipeline
|
||||||
|
name: test_amd64
|
||||||
|
|
||||||
|
platform:
|
||||||
|
os: linux
|
||||||
|
arch: amd64
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: smoke_test
|
||||||
|
image: python:3.8
|
||||||
|
commands:
|
||||||
|
- apt-get update
|
||||||
|
- apt-get install -y cmake build-essential gcc
|
||||||
|
- pip3 install psutil
|
||||||
|
- pip3 install guppy3
|
||||||
|
- pip3 install src/connector/python/linux/python3/
|
||||||
|
- mkdir debug
|
||||||
|
- cd debug
|
||||||
|
- cmake ..
|
||||||
|
- make
|
||||||
|
- cd ../tests
|
||||||
|
- ./test-all.sh smoke
|
||||||
|
when:
|
||||||
|
branch:
|
||||||
|
- develop
|
||||||
|
- master
|
||||||
|
|
||||||
|
|
||||||
|
- name: crash_gen
|
||||||
|
image: python:3.8
|
||||||
|
commands:
|
||||||
|
- pip3 install requests
|
||||||
|
- pip3 install src/connector/python/linux/python3/
|
||||||
|
- pip3 install psutil
|
||||||
|
- pip3 install guppy3
|
||||||
|
- cd tests/pytest
|
||||||
|
- ./crash_gen.sh -a -p -t 4 -s 2000
|
||||||
|
when:
|
||||||
|
branch:
|
||||||
|
- develop
|
||||||
|
- master
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
kind: pipeline
|
||||||
|
name: test_arm64
|
||||||
|
|
||||||
|
platform:
|
||||||
|
os: linux
|
||||||
|
arch: arm64
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: build
|
||||||
|
image: gcc
|
||||||
|
commands:
|
||||||
|
- apt-get update
|
||||||
|
- apt-get install -y cmake build-essential
|
||||||
|
- mkdir debug
|
||||||
|
- cd debug
|
||||||
|
- cmake .. -DCPUTYPE=aarch64 > /dev/null
|
||||||
|
- make
|
||||||
|
when:
|
||||||
|
branch:
|
||||||
|
- develop
|
||||||
|
- master
|
||||||
|
---
|
||||||
|
kind: pipeline
|
||||||
|
name: test_arm
|
||||||
|
|
||||||
|
platform:
|
||||||
|
os: linux
|
||||||
|
arch: arm
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: build
|
||||||
|
image: arm32v7/ubuntu:bionic
|
||||||
|
commands:
|
||||||
|
- apt-get update
|
||||||
|
- apt-get install -y cmake build-essential
|
||||||
|
- mkdir debug
|
||||||
|
- cd debug
|
||||||
|
- cmake .. -DCPUTYPE=aarch32 > /dev/null
|
||||||
|
- make
|
||||||
|
when:
|
||||||
|
branch:
|
||||||
|
- develop
|
||||||
|
- master
|
||||||
|
|
||||||
|
---
|
||||||
|
kind: pipeline
|
||||||
|
name: build_trusty
|
||||||
|
|
||||||
|
platform:
|
||||||
|
os: linux
|
||||||
|
arch: amd64
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: build
|
||||||
|
image: ubuntu:trusty
|
||||||
|
commands:
|
||||||
|
- apt-get update
|
||||||
|
- apt-get install -y gcc cmake3 build-essential git binutils-2.26
|
||||||
|
|
||||||
|
- mkdir debug
|
||||||
|
- cd debug
|
||||||
|
- cmake ..
|
||||||
|
- make
|
||||||
|
when:
|
||||||
|
branch:
|
||||||
|
- develop
|
||||||
|
- master
|
||||||
|
|
||||||
|
---
|
||||||
|
kind: pipeline
|
||||||
|
name: build_xenial
|
||||||
|
|
||||||
|
platform:
|
||||||
|
os: linux
|
||||||
|
arch: amd64
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: build
|
||||||
|
image: ubuntu:xenial
|
||||||
|
commands:
|
||||||
|
- apt-get update
|
||||||
|
- apt-get install -y gcc cmake build-essential
|
||||||
|
- mkdir debug
|
||||||
|
- cd debug
|
||||||
|
- cmake ..
|
||||||
|
- make
|
||||||
|
when:
|
||||||
|
branch:
|
||||||
|
- develop
|
||||||
|
- master
|
||||||
|
|
||||||
|
---
|
||||||
|
kind: pipeline
|
||||||
|
name: build_bionic
|
||||||
|
platform:
|
||||||
|
os: linux
|
||||||
|
arch: amd64
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: build
|
||||||
|
image: ubuntu:bionic
|
||||||
|
commands:
|
||||||
|
- apt-get update
|
||||||
|
- apt-get install -y gcc cmake build-essential
|
||||||
|
- mkdir debug
|
||||||
|
- cd debug
|
||||||
|
- cmake ..
|
||||||
|
- make
|
||||||
|
when:
|
||||||
|
branch:
|
||||||
|
- develop
|
||||||
|
- master
|
||||||
|
|
||||||
|
---
|
||||||
|
kind: pipeline
|
||||||
|
name: goodbye
|
||||||
|
|
||||||
|
platform:
|
||||||
|
os: linux
|
||||||
|
arch: amd64
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: 64-bit
|
||||||
|
image: alpine
|
||||||
|
commands:
|
||||||
|
- echo 64-bit is good.
|
||||||
|
when:
|
||||||
|
branch:
|
||||||
|
- develop
|
||||||
|
- master
|
||||||
|
|
||||||
|
|
||||||
|
depends_on:
|
||||||
|
- test_arm64
|
||||||
|
- test_amd64
|
||||||
296
.travis.yml
296
.travis.yml
|
|
@ -1,296 +0,0 @@
|
||||||
#
|
|
||||||
# Configuration
|
|
||||||
#
|
|
||||||
#
|
|
||||||
# Build Matrix
|
|
||||||
#
|
|
||||||
branches:
|
|
||||||
only:
|
|
||||||
- master
|
|
||||||
- develop
|
|
||||||
- coverity_scan
|
|
||||||
- /^.*ci-.*$/
|
|
||||||
|
|
||||||
matrix:
|
|
||||||
- os: linux
|
|
||||||
dist: focal
|
|
||||||
language: c
|
|
||||||
|
|
||||||
git:
|
|
||||||
- depth: 1
|
|
||||||
|
|
||||||
compiler: gcc
|
|
||||||
env: DESC="linux/gcc build and test"
|
|
||||||
|
|
||||||
addons:
|
|
||||||
apt:
|
|
||||||
packages:
|
|
||||||
- build-essential
|
|
||||||
- cmake
|
|
||||||
- net-tools
|
|
||||||
- python3-pip
|
|
||||||
- python3-setuptools
|
|
||||||
- valgrind
|
|
||||||
- psmisc
|
|
||||||
- unixodbc
|
|
||||||
- unixodbc-dev
|
|
||||||
- mono-complete
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- export TZ=Asia/Harbin
|
|
||||||
- date
|
|
||||||
- cd ${TRAVIS_BUILD_DIR}
|
|
||||||
- mkdir debug
|
|
||||||
- cd debug
|
|
||||||
|
|
||||||
script:
|
|
||||||
- cmake .. > /dev/null
|
|
||||||
- make > /dev/null
|
|
||||||
|
|
||||||
after_success:
|
|
||||||
- travis_wait 20
|
|
||||||
- |-
|
|
||||||
case $TRAVIS_OS_NAME in
|
|
||||||
linux)
|
|
||||||
cd ${TRAVIS_BUILD_DIR}/debug
|
|
||||||
make install > /dev/null || travis_terminate $?
|
|
||||||
|
|
||||||
py3ver=`python3 --version|awk '{print $2}'|cut -d "." -f 1,2` && apt install python$py3ver-dev
|
|
||||||
pip3 install psutil
|
|
||||||
pip3 install guppy3
|
|
||||||
pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
|
|
||||||
|
|
||||||
cd ${TRAVIS_BUILD_DIR}/tests/examples/C#/taosdemo
|
|
||||||
mcs -out:taosdemo *.cs || travis_terminate $?
|
|
||||||
pkill -TERM -x taosd
|
|
||||||
fuser -k -n tcp 6030
|
|
||||||
sleep 1
|
|
||||||
${TRAVIS_BUILD_DIR}/debug/build/bin/taosd -c ${TRAVIS_BUILD_DIR}/debug/test/cfg > /dev/null &
|
|
||||||
sleep 5
|
|
||||||
mono taosdemo -Q DEFAULT -y || travis_terminate $?
|
|
||||||
pkill -KILL -x taosd
|
|
||||||
fuser -k -n tcp 6030
|
|
||||||
sleep 1
|
|
||||||
|
|
||||||
cd ${TRAVIS_BUILD_DIR}/tests
|
|
||||||
./test-all.sh smoke || travis_terminate $?
|
|
||||||
sleep 1
|
|
||||||
|
|
||||||
cd ${TRAVIS_BUILD_DIR}/tests/pytest
|
|
||||||
pkill -TERM -x taosd
|
|
||||||
fuser -k -n tcp 6030
|
|
||||||
sleep 1
|
|
||||||
./crash_gen.sh -a -p -t 4 -s 2000|| travis_terminate $?
|
|
||||||
sleep 1
|
|
||||||
|
|
||||||
cd ${TRAVIS_BUILD_DIR}/tests/pytest
|
|
||||||
./valgrind-test.sh 2>&1 > mem-error-out.log
|
|
||||||
sleep 1
|
|
||||||
|
|
||||||
|
|
||||||
# Color setting
|
|
||||||
RED='\033[0;31m'
|
|
||||||
GREEN='\033[1;32m'
|
|
||||||
GREEN_DARK='\033[0;32m'
|
|
||||||
GREEN_UNDERLINE='\033[4;32m'
|
|
||||||
NC='\033[0m'
|
|
||||||
|
|
||||||
grep 'start to execute\|ERROR SUMMARY' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-mem-error-out.log
|
|
||||||
|
|
||||||
for memError in `grep 'ERROR SUMMARY' uniq-mem-error-out.log | awk '{print $4}'`
|
|
||||||
do
|
|
||||||
if [ -n "$memError" ]; then
|
|
||||||
if [ "$memError" -gt 12 ]; then
|
|
||||||
echo -e "${RED} ## Memory errors number valgrind reports is $memError.\
|
|
||||||
More than our threshold! ## ${NC}"
|
|
||||||
travis_terminate $memError
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
grep 'start to execute\|definitely lost:' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-definitely-lost-out.log
|
|
||||||
for defiMemError in `grep 'definitely lost:' uniq-definitely-lost-out.log | awk '{print $7}'`
|
|
||||||
do
|
|
||||||
if [ -n "$defiMemError" ]; then
|
|
||||||
if [ "$defiMemError" -gt 13 ]; then
|
|
||||||
echo -e "${RED} ## Memory errors number valgrind reports \
|
|
||||||
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
|
|
||||||
travis_terminate $defiMemError
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
- os: linux
|
|
||||||
dist: bionic
|
|
||||||
language: c
|
|
||||||
compiler: gcc
|
|
||||||
env: COVERITY_SCAN=true
|
|
||||||
git:
|
|
||||||
- depth: 1
|
|
||||||
|
|
||||||
script:
|
|
||||||
- echo "this job is for coverity scan"
|
|
||||||
|
|
||||||
addons:
|
|
||||||
coverity_scan:
|
|
||||||
# GitHub project metadata
|
|
||||||
# ** specific to your project **
|
|
||||||
project:
|
|
||||||
name: TDengine
|
|
||||||
version: 2.x
|
|
||||||
description: TDengine
|
|
||||||
|
|
||||||
# Where email notification of build analysis results will be sent
|
|
||||||
notification_email: sdsang@taosdata.com, slguan@taosdata.com
|
|
||||||
|
|
||||||
# Commands to prepare for build_command
|
|
||||||
# ** likely specific to your build **
|
|
||||||
build_command_prepend: cmake . > /dev/null
|
|
||||||
|
|
||||||
# The command that will be added as an argument to "cov-build" to compile your project for analysis,
|
|
||||||
# ** likely specific to your build **
|
|
||||||
build_command: make
|
|
||||||
|
|
||||||
# Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'.
|
|
||||||
# Take care in resource usage, and consider the build frequency allowances per
|
|
||||||
# https://scan.coverity.com/faq#frequency
|
|
||||||
branch_pattern: coverity_scan
|
|
||||||
|
|
||||||
- os: linux
|
|
||||||
dist: trusty
|
|
||||||
language: c
|
|
||||||
git:
|
|
||||||
- depth: 1
|
|
||||||
|
|
||||||
addons:
|
|
||||||
apt:
|
|
||||||
packages:
|
|
||||||
- build-essential
|
|
||||||
- cmake
|
|
||||||
- binutils-2.26
|
|
||||||
- unixodbc
|
|
||||||
- unixodbc-dev
|
|
||||||
env:
|
|
||||||
- DESC="trusty/gcc-4.8/bintuils-2.26 build"
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- export TZ=Asia/Harbin
|
|
||||||
- date
|
|
||||||
- cd ${TRAVIS_BUILD_DIR}
|
|
||||||
- mkdir debug
|
|
||||||
- cd debug
|
|
||||||
|
|
||||||
script:
|
|
||||||
- cmake .. > /dev/null
|
|
||||||
- export PATH=/usr/lib/binutils-2.26/bin:$PATH && make
|
|
||||||
|
|
||||||
- os: linux
|
|
||||||
dist: bionic
|
|
||||||
language: c
|
|
||||||
compiler: clang
|
|
||||||
env: DESC="linux/clang build"
|
|
||||||
git:
|
|
||||||
- depth: 1
|
|
||||||
|
|
||||||
addons:
|
|
||||||
apt:
|
|
||||||
packages:
|
|
||||||
- build-essential
|
|
||||||
- cmake
|
|
||||||
- unixodbc
|
|
||||||
- unixodbc-dev
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- export TZ=Asia/Harbin
|
|
||||||
- date
|
|
||||||
- cd ${TRAVIS_BUILD_DIR}
|
|
||||||
- mkdir debug
|
|
||||||
- cd debug
|
|
||||||
|
|
||||||
script:
|
|
||||||
- cmake .. > /dev/null
|
|
||||||
- make > /dev/null
|
|
||||||
|
|
||||||
- os: linux
|
|
||||||
arch: arm64
|
|
||||||
dist: bionic
|
|
||||||
language: c
|
|
||||||
compiler: clang
|
|
||||||
env: DESC="arm64 linux/clang build"
|
|
||||||
git:
|
|
||||||
- depth: 1
|
|
||||||
|
|
||||||
addons:
|
|
||||||
apt:
|
|
||||||
packages:
|
|
||||||
- build-essential
|
|
||||||
- cmake
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- export TZ=Asia/Harbin
|
|
||||||
- date
|
|
||||||
- cd ${TRAVIS_BUILD_DIR}
|
|
||||||
- mkdir debug
|
|
||||||
- cd debug
|
|
||||||
|
|
||||||
script:
|
|
||||||
- if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
|
|
||||||
cmake .. -DCPUTYPE=aarch64 > /dev/null;
|
|
||||||
else
|
|
||||||
cmake .. > /dev/null;
|
|
||||||
fi
|
|
||||||
- make > /dev/null
|
|
||||||
|
|
||||||
- os: linux
|
|
||||||
arch: arm64
|
|
||||||
dist: xenial
|
|
||||||
language: c
|
|
||||||
git:
|
|
||||||
- depth: 1
|
|
||||||
|
|
||||||
addons:
|
|
||||||
apt:
|
|
||||||
packages:
|
|
||||||
- build-essential
|
|
||||||
- cmake
|
|
||||||
- unixodbc
|
|
||||||
- unixodbc-dev
|
|
||||||
env:
|
|
||||||
- DESC="arm64 xenial build"
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- export TZ=Asia/Harbin
|
|
||||||
- date
|
|
||||||
- cd ${TRAVIS_BUILD_DIR}
|
|
||||||
- mkdir debug
|
|
||||||
- cd debug
|
|
||||||
|
|
||||||
script:
|
|
||||||
- if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
|
|
||||||
cmake .. -DCPUTYPE=aarch64 > /dev/null;
|
|
||||||
else
|
|
||||||
cmake .. > /dev/null;
|
|
||||||
fi
|
|
||||||
- make > /dev/null
|
|
||||||
|
|
||||||
- os: osx
|
|
||||||
osx_image: xcode11.4
|
|
||||||
language: c
|
|
||||||
compiler: clang
|
|
||||||
env: DESC="mac/clang build"
|
|
||||||
git:
|
|
||||||
- depth: 1
|
|
||||||
addons:
|
|
||||||
homebrew:
|
|
||||||
- cmake
|
|
||||||
- unixodbc
|
|
||||||
|
|
||||||
script:
|
|
||||||
- cd ${TRAVIS_BUILD_DIR}
|
|
||||||
- mkdir debug
|
|
||||||
- cd debug
|
|
||||||
- cmake .. > /dev/null
|
|
||||||
- make > /dev/null
|
|
||||||
|
|
@ -94,6 +94,7 @@ def pre_test(){
|
||||||
make > /dev/null
|
make > /dev/null
|
||||||
make install > /dev/null
|
make install > /dev/null
|
||||||
cd ${WKC}/tests
|
cd ${WKC}/tests
|
||||||
|
pip3 install ${WKC}/src/connector/python/linux/python3/
|
||||||
'''
|
'''
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
[](https://travis-ci.org/taosdata/TDengine)
|
[](https://cloud.drone.io/taosdata/TDengine)
|
||||||
[](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
|
[](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
|
||||||
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
|
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
|
||||||
[](https://bestpractices.coreinfrastructure.org/projects/4201)
|
[](https://bestpractices.coreinfrastructure.org/projects/4201)
|
||||||
|
|
|
||||||
|
|
@ -57,7 +57,7 @@ IF (TD_LINUX_64)
|
||||||
ADD_DEFINITIONS(-D_M_X64)
|
ADD_DEFINITIONS(-D_M_X64)
|
||||||
ADD_DEFINITIONS(-D_TD_LINUX_64)
|
ADD_DEFINITIONS(-D_TD_LINUX_64)
|
||||||
MESSAGE(STATUS "linux64 is defined")
|
MESSAGE(STATUS "linux64 is defined")
|
||||||
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
|
SET(COMMON_FLAGS "-Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
|
||||||
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
|
|
@ -65,7 +65,7 @@ IF (TD_LINUX_32)
|
||||||
ADD_DEFINITIONS(-D_TD_LINUX_32)
|
ADD_DEFINITIONS(-D_TD_LINUX_32)
|
||||||
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
||||||
MESSAGE(STATUS "linux32 is defined")
|
MESSAGE(STATUS "linux32 is defined")
|
||||||
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
|
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (TD_ARM_64)
|
IF (TD_ARM_64)
|
||||||
|
|
@ -73,7 +73,7 @@ IF (TD_ARM_64)
|
||||||
ADD_DEFINITIONS(-D_TD_ARM_)
|
ADD_DEFINITIONS(-D_TD_ARM_)
|
||||||
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
||||||
MESSAGE(STATUS "arm64 is defined")
|
MESSAGE(STATUS "arm64 is defined")
|
||||||
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
|
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (TD_ARM_32)
|
IF (TD_ARM_32)
|
||||||
|
|
@ -81,7 +81,7 @@ IF (TD_ARM_32)
|
||||||
ADD_DEFINITIONS(-D_TD_ARM_)
|
ADD_DEFINITIONS(-D_TD_ARM_)
|
||||||
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
||||||
MESSAGE(STATUS "arm32 is defined")
|
MESSAGE(STATUS "arm32 is defined")
|
||||||
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
|
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (TD_MIPS_64)
|
IF (TD_MIPS_64)
|
||||||
|
|
@ -89,7 +89,7 @@ IF (TD_MIPS_64)
|
||||||
ADD_DEFINITIONS(-D_TD_MIPS_64)
|
ADD_DEFINITIONS(-D_TD_MIPS_64)
|
||||||
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
||||||
MESSAGE(STATUS "mips64 is defined")
|
MESSAGE(STATUS "mips64 is defined")
|
||||||
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
|
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (TD_MIPS_32)
|
IF (TD_MIPS_32)
|
||||||
|
|
@ -97,7 +97,7 @@ IF (TD_MIPS_32)
|
||||||
ADD_DEFINITIONS(-D_TD_MIPS_32)
|
ADD_DEFINITIONS(-D_TD_MIPS_32)
|
||||||
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
||||||
MESSAGE(STATUS "mips32 is defined")
|
MESSAGE(STATUS "mips32 is defined")
|
||||||
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
|
SET(COMMON_FLAGS "-Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (TD_APLHINE)
|
IF (TD_APLHINE)
|
||||||
|
|
@ -139,7 +139,7 @@ IF (TD_DARWIN_64)
|
||||||
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
|
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
|
||||||
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
ADD_DEFINITIONS(-DUSE_LIBICONV)
|
||||||
MESSAGE(STATUS "darwin64 is defined")
|
MESSAGE(STATUS "darwin64 is defined")
|
||||||
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
|
SET(COMMON_FLAGS "-Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
|
||||||
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
|
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
|
||||||
SET(RELEASE_FLAGS "-Og")
|
SET(RELEASE_FLAGS "-Og")
|
||||||
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
|
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
|
||||||
|
|
|
||||||
|
|
@ -32,6 +32,7 @@ ENDIF ()
|
||||||
#
|
#
|
||||||
|
|
||||||
# Set compiler options
|
# Set compiler options
|
||||||
|
SET(COMMON_C_FLAGS "${COMMON_FLAGS} -std=gnu99")
|
||||||
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_FLAGS} ${DEBUG_FLAGS}")
|
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_FLAGS} ${DEBUG_FLAGS}")
|
||||||
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_FLAGS} ${RELEASE_FLAGS}")
|
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_FLAGS} ${RELEASE_FLAGS}")
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
|
||||||
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
|
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
|
||||||
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
|
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
|
||||||
IF (TD_MVN_INSTALLED)
|
IF (TD_MVN_INSTALLED)
|
||||||
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.28-dist.jar DESTINATION connector/jdbc)
|
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.29.jar DESTINATION connector/jdbc)
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
ELSEIF (TD_DARWIN)
|
ELSEIF (TD_DARWIN)
|
||||||
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
|
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,7 @@ PROJECT(TDengine)
|
||||||
IF (DEFINED VERNUMBER)
|
IF (DEFINED VERNUMBER)
|
||||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||||
ELSE ()
|
ELSE ()
|
||||||
SET(TD_VER_NUMBER "2.0.20.0")
|
SET(TD_VER_NUMBER "2.1.0.0")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (DEFINED VERCOMPATIBLE)
|
IF (DEFINED VERCOMPATIBLE)
|
||||||
|
|
|
||||||
|
|
@ -36,6 +36,15 @@ static char monotonic_info_string[32];
|
||||||
|
|
||||||
static long mono_ticksPerMicrosecond = 0;
|
static long mono_ticksPerMicrosecond = 0;
|
||||||
|
|
||||||
|
#ifdef _TD_NINGSI_60
|
||||||
|
// implement __rdtsc in ningsi60
|
||||||
|
uint64_t __rdtsc(){
|
||||||
|
unsigned int lo,hi;
|
||||||
|
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
|
||||||
|
return ((uint64_t)hi << 32) | lo;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
static monotime getMonotonicUs_x86() {
|
static monotime getMonotonicUs_x86() {
|
||||||
return __rdtsc() / mono_ticksPerMicrosecond;
|
return __rdtsc() / mono_ticksPerMicrosecond;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -117,9 +117,9 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
|
||||||
## 常用工具
|
## 常用工具
|
||||||
|
|
||||||
* [TDengine样例导入工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
|
* [TDengine样例导入工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
|
||||||
* [TDengine性能对比测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
|
* [TDengine写入性能测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
|
||||||
* [IDEA数据库管理工具可视化使用TDengine](https://www.taosdata.com/blog/2020/08/27/1767.html)
|
* [IDEA数据库管理工具可视化使用TDengine](https://www.taosdata.com/blog/2020/08/27/1767.html)
|
||||||
* [基于eletron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI)
|
* [基于Electron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI)
|
||||||
* [DataX,支持TDengine的离线数据采集/同步工具](https://github.com/wgzhao/DataX)(文档:[读取插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/reader/tdenginereader.md)、[写入插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/writer/tdenginewriter.md))
|
* [DataX,支持TDengine的离线数据采集/同步工具](https://github.com/wgzhao/DataX)(文档:[读取插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/reader/tdenginereader.md)、[写入插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/writer/tdenginewriter.md))
|
||||||
|
|
||||||
## TDengine与其他数据库的对比测试
|
## TDengine与其他数据库的对比测试
|
||||||
|
|
|
||||||
|
|
@ -12,7 +12,7 @@ TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, G
|
||||||
- 时间戳对齐的连接查询(Join Query: 隐式连接)操作
|
- 时间戳对齐的连接查询(Join Query: 隐式连接)操作
|
||||||
- 多种聚合/计算函数: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff等
|
- 多种聚合/计算函数: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff等
|
||||||
|
|
||||||
例如:在TAOS Shell中,从表d1001中查询出vlotage > 215的记录,按时间降序排列,仅仅输出2条。
|
例如:在TAOS Shell中,从表d1001中查询出voltage > 215的记录,按时间降序排列,仅仅输出2条。
|
||||||
```mysql
|
```mysql
|
||||||
taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
|
taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
|
||||||
ts | current | voltage | phase |
|
ts | current | voltage | phase |
|
||||||
|
|
|
||||||
|
|
@ -16,7 +16,6 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
|
||||||
|
|
||||||
* TDengine 目前不支持针对单条数据记录的删除操作。
|
* TDengine 目前不支持针对单条数据记录的删除操作。
|
||||||
* 目前不支持事务操作。
|
* 目前不支持事务操作。
|
||||||
* 目前不支持表间的 union 操作。
|
|
||||||
* 目前不支持嵌套查询(nested query)。
|
* 目前不支持嵌套查询(nested query)。
|
||||||
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。
|
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。
|
||||||
|
|
||||||
|
|
@ -447,7 +446,7 @@ Query OK, 1 row(s) in set (0.000141s)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
|
## <a class="anchor" id="version"></a>TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
|
||||||
|
|
||||||
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
|
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
|
||||||
| -------------------- | ----------------- | -------- |
|
| -------------------- | ----------------- | -------- |
|
||||||
|
|
|
||||||
|
|
@ -32,7 +32,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
|
||||||
|
|
||||||
**Linux**
|
**Linux**
|
||||||
|
|
||||||
**1. 从涛思官网(https://www.taosdata.com/cn/all-downloads/)下载**
|
**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载**
|
||||||
|
|
||||||
* X64硬件环境:TDengine-client-2.x.x.x-Linux-x64.tar.gz
|
* X64硬件环境:TDengine-client-2.x.x.x-Linux-x64.tar.gz
|
||||||
|
|
||||||
|
|
@ -68,7 +68,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
|
||||||
|
|
||||||
**Windows x64/x86**
|
**Windows x64/x86**
|
||||||
|
|
||||||
**1. 从涛思官网(https://www.taosdata.com/cn/all-downloads/)下载 :**
|
**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载 :**
|
||||||
|
|
||||||
* X64硬件环境:TDengine-client-2.X.X.X-Windows-x64.exe
|
* X64硬件环境:TDengine-client-2.X.X.X-Windows-x64.exe
|
||||||
|
|
||||||
|
|
@ -213,7 +213,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
|
||||||
|
|
||||||
- `int taos_result_precision(TAOS_RES *res)`
|
- `int taos_result_precision(TAOS_RES *res)`
|
||||||
|
|
||||||
返回结果集时间戳字段的精度,`0` 代表毫秒,`1` 代表微秒,`2` 代表纳秒。
|
返回结果集时间戳字段的精度,`0` 代表毫秒,`1` 代表微秒。
|
||||||
|
|
||||||
- `TAOS_ROW taos_fetch_row(TAOS_RES *res)`
|
- `TAOS_ROW taos_fetch_row(TAOS_RES *res)`
|
||||||
|
|
||||||
|
|
@ -349,7 +349,7 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
|
||||||
* param:是应用提供的用于回调的一个参数,回调时,提供给应用
|
* param:是应用提供的用于回调的一个参数,回调时,提供给应用
|
||||||
* callback: 第二个回调函数,会在连续查询自动停止时被调用。
|
* callback: 第二个回调函数,会在连续查询自动停止时被调用。
|
||||||
|
|
||||||
返回值为NULL,表示创建成功,返回值不为空,表示成功。
|
返回值为NULL,表示创建失败;返回值不为空,表示成功。
|
||||||
|
|
||||||
- `void taos_close_stream (TAOS_STREAM *tstr)`
|
- `void taos_close_stream (TAOS_STREAM *tstr)`
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -144,7 +144,7 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数
|
||||||
- numOfMnodes:系统中管理节点个数。默认值:3。
|
- numOfMnodes:系统中管理节点个数。默认值:3。
|
||||||
- balance:是否启动负载均衡。0:否,1:是。默认值:1。
|
- balance:是否启动负载均衡。0:否,1:是。默认值:1。
|
||||||
- mnodeEqualVnodeNum: 一个mnode等同于vnode消耗的个数。默认值:4。
|
- mnodeEqualVnodeNum: 一个mnode等同于vnode消耗的个数。默认值:4。
|
||||||
- offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*100(即100天)。
|
- offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*10(即10天)。
|
||||||
- statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。
|
- statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。
|
||||||
- maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。
|
- maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。
|
||||||
- maxVgroupsPerDb: 每个数据库中能够使用的最大vgroup个数。
|
- maxVgroupsPerDb: 每个数据库中能够使用的最大vgroup个数。
|
||||||
|
|
@ -462,31 +462,31 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
|
||||||
|
|
||||||
| 关键字列表 | | | | |
|
| 关键字列表 | | | | |
|
||||||
| ---------- | ----------- | ------------ | ---------- | --------- |
|
| ---------- | ----------- | ------------ | ---------- | --------- |
|
||||||
| ABLOCKS | CONNECTIONS | HAVING | MODULES | SLIMIT |
|
| ABLOCKS | CONNECTIONS | HAVING | MODULES | SMALLINT |
|
||||||
| ABORT | COPY | ID | NCHAR | SMALLINT |
|
| ABORT | COPY | ID | NCHAR | SPREAD |
|
||||||
| ACCOUNT | COUNT | IF | NE | SPREAD |
|
| ACCOUNT | COUNT | IF | NE | STABLE |
|
||||||
| ACCOUNTS | CREATE | IGNORE | NONE | STABLE |
|
| ACCOUNTS | CREATE | IGNORE | NONE | STABLES |
|
||||||
| ADD | CTIME | IMMEDIATE | NOT | STABLES |
|
| ADD | CTIME | IMMEDIATE | NOT | STAR |
|
||||||
| AFTER | DATABASE | IMPORT | NOTNULL | STAR |
|
| AFTER | DATABASE | IMPORT | NOTNULL | STATEMENT |
|
||||||
| ALL | DATABASES | IN | NOW | STATEMENT |
|
| ALL | DATABASES | IN | NOW | STDDEV |
|
||||||
| ALTER | DAYS | INITIALLY | OF | STDDEV |
|
| ALTER | DAYS | INITIALLY | OF | STREAM |
|
||||||
| AND | DEFERRED | INSERT | OFFSET | STREAM |
|
| AND | DEFERRED | INSERT | OFFSET | STREAMS |
|
||||||
| AS | DELIMITERS | INSTEAD | OR | STREAMS |
|
| AS | DELIMITERS | INSTEAD | OR | STRING |
|
||||||
| ASC | DESC | INTEGER | ORDER | STRING |
|
| ASC | DESC | INTEGER | ORDER | SUM |
|
||||||
| ATTACH | DESCRIBE | INTERVAL | PASS | SUM |
|
| ATTACH | DESCRIBE | INTERVAL | PASS | TABLE |
|
||||||
| AVG | DETACH | INTO | PERCENTILE | TABLE |
|
| AVG | DETACH | INTO | PERCENTILE | TABLES |
|
||||||
| BEFORE | DIFF | IP | PLUS | TABLES |
|
| BEFORE | DIFF | IP | PLUS | TAG |
|
||||||
| BEGIN | DISTINCT | IS | PRAGMA | TAG |
|
| BEGIN | DISTINCT | IS | PRAGMA | TAGS |
|
||||||
| BETWEEN | DIVIDE | ISNULL | PREV | TAGS |
|
| BETWEEN | DIVIDE | ISNULL | PREV | TBLOCKS |
|
||||||
| BIGINT | DNODE | JOIN | PRIVILEGE | TBLOCKS |
|
| BIGINT | DNODE | JOIN | PRIVILEGE | TBNAME |
|
||||||
| BINARY | DNODES | KEEP | QUERIES | TBNAME |
|
| BINARY | DNODES | KEEP | QUERIES | TIMES |
|
||||||
| BITAND | DOT | KEY | QUERY | TIMES |
|
| BITAND | DOT | KEY | QUERY | TIMESTAMP |
|
||||||
| BITNOT | DOUBLE | KILL | RAISE | TIMESTAMP |
|
| BITNOT | DOUBLE | KILL | RAISE | TINYINT |
|
||||||
| BITOR | DROP | LAST | REM | TINYINT |
|
| BITOR | DROP | LAST | REM | TOP |
|
||||||
| BOOL | EACH | LE | REPLACE | TOP |
|
| BOOL | EACH | LE | REPLACE | TOPIC |
|
||||||
| BOTTOM | END | LEASTSQUARES | REPLICA | TOPIC |
|
| BOTTOM | END | LEASTSQUARES | REPLICA | TRIGGER |
|
||||||
| BY | EQ | LIKE | RESET | TRIGGER |
|
| BY | EQ | LIKE | RESET | UMINUS |
|
||||||
| CACHE | EXISTS | LIMIT | RESTRICT | UMINUS |
|
| CACHE | EXISTS | LIMIT | RESTRICT | UNION |
|
||||||
| CASCADE | EXPLAIN | LINEAR | ROW | UPLUS |
|
| CASCADE | EXPLAIN | LINEAR | ROW | UPLUS |
|
||||||
| CHANGE | FAIL | LOCAL | ROWS | USE |
|
| CHANGE | FAIL | LOCAL | ROWS | USE |
|
||||||
| CLOG | FILL | LP | RP | USER |
|
| CLOG | FILL | LP | RP | USER |
|
||||||
|
|
@ -498,5 +498,5 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
|
||||||
| CONCAT | GLOB | METRICS | SHOW | VIEW |
|
| CONCAT | GLOB | METRICS | SHOW | VIEW |
|
||||||
| CONFIGS | GRANTS | MIN | SLASH | WAVG |
|
| CONFIGS | GRANTS | MIN | SLASH | WAVG |
|
||||||
| CONFLICT | GROUP | MINUS | SLIDING | WHERE |
|
| CONFLICT | GROUP | MINUS | SLIDING | WHERE |
|
||||||
| CONNECTION | GT | MNODES | | |
|
| CONNECTION | GT | MNODES | SLIMIT | |
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -48,7 +48,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
|
||||||
| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL |
|
| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL |
|
||||||
| 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
|
| 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
|
||||||
| 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
|
| 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
|
||||||
| 6 | BINARY | 自定义 | 记录二进制字节型字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端使用单引号引用,否则英文全部自动转化为小写。使用时须指定大小,如 binary(20) 定义了最长为 20 个字节型字符的字符串,每个字节型字符占 1 byte 的存储空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 |
|
| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 |
|
||||||
| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用于 NULL |
|
| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用于 NULL |
|
||||||
| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL |
|
| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL |
|
||||||
| 9 | BOOL | 1 | 布尔型,{true, false} |
|
| 9 | BOOL | 1 | 布尔型,{true, false} |
|
||||||
|
|
@ -56,7 +56,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
|
||||||
|
|
||||||
**Tips**:
|
**Tips**:
|
||||||
1. TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
|
1. TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
|
||||||
2. **注意**,虽然 Binary 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 Binary 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 nchar 类型进行保存。如果强行使用 Binary 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏。
|
2. **注意**,虽然 Binary 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 Binary 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 nchar 类型进行保存。如果强行使用 Binary 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
|
||||||
|
|
||||||
## <a class="anchor" id="management"></a>数据库管理
|
## <a class="anchor" id="management"></a>数据库管理
|
||||||
|
|
||||||
|
|
@ -407,18 +407,14 @@ SELECT select_expr [, select_expr ...]
|
||||||
[INTERVAL (interval_val [, interval_offset])]
|
[INTERVAL (interval_val [, interval_offset])]
|
||||||
[SLIDING sliding_val]
|
[SLIDING sliding_val]
|
||||||
[FILL fill_val]
|
[FILL fill_val]
|
||||||
[GROUP BY col_list [HAVING having_condition]]
|
[GROUP BY col_list]
|
||||||
[ORDER BY col_list { DESC | ASC }]
|
[ORDER BY col_list { DESC | ASC }]
|
||||||
[SLIMIT limit_val [SOFFSET offset_val]]
|
[SLIMIT limit_val [SOFFSET offset_val]]
|
||||||
[LIMIT limit_val [OFFSET offset_val]]
|
[LIMIT limit_val [OFFSET offset_val]]
|
||||||
[>> export_file];
|
[>> export_file];
|
||||||
```
|
```
|
||||||
|
|
||||||
#### SELECT子句
|
#### 通配符
|
||||||
|
|
||||||
一个选择子句可以是联合查询(UNION)和另一个查询的子查询(SUBQUERY)。
|
|
||||||
|
|
||||||
##### 通配符
|
|
||||||
|
|
||||||
通配符 * 可以用于代指全部列。对于普通表,结果中只有普通列。
|
通配符 * 可以用于代指全部列。对于普通表,结果中只有普通列。
|
||||||
```mysql
|
```mysql
|
||||||
|
|
@ -470,7 +466,7 @@ Query OK, 1 row(s) in set (0.020443s)
|
||||||
```
|
```
|
||||||
|
|
||||||
在使用SQL函数来进行查询过程中,部分SQL函数支持通配符操作。其中的区别在于:
|
在使用SQL函数来进行查询过程中,部分SQL函数支持通配符操作。其中的区别在于:
|
||||||
```count(\*)```函数只返回一列。```first```、```last```、```last_row```函数则是返回全部列。
|
```count(*)```函数只返回一列。```first```、```last```、```last_row```函数则是返回全部列。
|
||||||
|
|
||||||
```mysql
|
```mysql
|
||||||
taos> SELECT COUNT(*) FROM d1001;
|
taos> SELECT COUNT(*) FROM d1001;
|
||||||
|
|
@ -488,7 +484,7 @@ taos> SELECT FIRST(*) FROM d1001;
|
||||||
Query OK, 1 row(s) in set (0.000849s)
|
Query OK, 1 row(s) in set (0.000849s)
|
||||||
```
|
```
|
||||||
|
|
||||||
##### 标签列
|
#### 标签列
|
||||||
|
|
||||||
从 2.0.14 版本开始,支持在普通表的查询中指定 _标签列_,且标签列的值会与普通列的数据一起返回。
|
从 2.0.14 版本开始,支持在普通表的查询中指定 _标签列_,且标签列的值会与普通列的数据一起返回。
|
||||||
```mysql
|
```mysql
|
||||||
|
|
@ -622,13 +618,15 @@ taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
|
||||||
Query OK, 1 row(s) in set (0.001091s)
|
Query OK, 1 row(s) in set (0.001091s)
|
||||||
```
|
```
|
||||||
|
|
||||||
- 可以使用 * 返回所有列,或指定列名。可以对数字列进行四则运算,可以给输出的列取列名
|
- 可以使用 * 返回所有列,或指定列名。可以对数字列进行四则运算,可以给输出的列取列名。
|
||||||
- WHERE 语句可以使用各种逻辑判断来过滤数字值,或使用通配符来过滤字符串
|
* 暂不支持含列名的四则运算表达式用于条件过滤算子(例如,不支持 `where a*2>6;`,但可以写 `where a>6/2;`)。
|
||||||
|
* 暂不支持含列名的四则运算表达式作为 SQL 函数的应用对象(例如,不支持 `select min(2*a) from t;`,但可以写 `select 2*min(a) from t;`)。
|
||||||
|
- WHERE 语句可以使用各种逻辑判断来过滤数字值,或使用通配符来过滤字符串。
|
||||||
- 输出结果缺省按首列时间戳升序排序,但可以指定按降序排序( _c0 指首列时间戳)。使用 ORDER BY 对其他字段进行排序为非法操作。
|
- 输出结果缺省按首列时间戳升序排序,但可以指定按降序排序( _c0 指首列时间戳)。使用 ORDER BY 对其他字段进行排序为非法操作。
|
||||||
- 参数 LIMIT 控制输出条数,OFFSET 指定从第几条开始输出。LIMIT/OFFSET 对结果集的执行顺序在 ORDER BY 之后。
|
- 参数 LIMIT 控制输出条数,OFFSET 指定从第几条开始输出。LIMIT/OFFSET 对结果集的执行顺序在 ORDER BY 之后。
|
||||||
* 在有 GROUP BY 子句的情况下,LIMIT 参数控制的是每个分组中至多允许输出的条数。
|
* 在有 GROUP BY 子句的情况下,LIMIT 参数控制的是每个分组中至多允许输出的条数。
|
||||||
- 参数 SLIMIT 控制由 GROUP BY 指令划分的分组中,至多允许输出几个分组的数据。
|
- 参数 SLIMIT 控制由 GROUP BY 指令划分的分组中,至多允许输出几个分组的数据。
|
||||||
- 通过”>>"输出结果可以导出到指定文件
|
- 通过 ">>" 输出结果可以导出到指定文件。
|
||||||
|
|
||||||
### 支持的条件过滤操作
|
### 支持的条件过滤操作
|
||||||
|
|
||||||
|
|
@ -648,7 +646,8 @@ Query OK, 1 row(s) in set (0.001091s)
|
||||||
2. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如:((value > 20 AND value < 30) OR (value < 12)) 。
|
2. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如:((value > 20 AND value < 30) OR (value < 12)) 。
|
||||||
3. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
|
3. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
|
||||||
|
|
||||||
### GROUP BY 之后的 HAVING 过滤
|
<!--
|
||||||
|
### <a class="anchor" id="having"></a>GROUP BY 之后的 HAVING 过滤
|
||||||
|
|
||||||
从 2.0.20 版本开始,GROUP BY 之后允许再跟一个 HAVING 子句,对成组后的各组数据再做筛选。HAVING 子句可以使用聚合函数和选择函数作为过滤条件(但暂时不支持 LEASTSQUARES、TOP、BOTTOM、LAST_ROW)。
|
从 2.0.20 版本开始,GROUP BY 之后允许再跟一个 HAVING 子句,对成组后的各组数据再做筛选。HAVING 子句可以使用聚合函数和选择函数作为过滤条件(但暂时不支持 LEASTSQUARES、TOP、BOTTOM、LAST_ROW)。
|
||||||
|
|
||||||
|
|
@ -656,6 +655,17 @@ Query OK, 1 row(s) in set (0.001091s)
|
||||||
```mysql
|
```mysql
|
||||||
SELECT AVG(f1), SPREAD(f1, f2, st2.f1) FROM st2 WHERE f1 > 0 GROUP BY f1 HAVING AVG(f1) > 0;
|
SELECT AVG(f1), SPREAD(f1, f2, st2.f1) FROM st2 WHERE f1 > 0 GROUP BY f1 HAVING AVG(f1) > 0;
|
||||||
```
|
```
|
||||||
|
-->
|
||||||
|
|
||||||
|
### <a class="anchor" id="union"></a>UNION ALL 操作符
|
||||||
|
|
||||||
|
```mysql
|
||||||
|
SELECT ...
|
||||||
|
UNION ALL SELECT ...
|
||||||
|
[UNION ALL SELECT ...]
|
||||||
|
```
|
||||||
|
|
||||||
|
TDengine 支持 UNION ALL 操作符。也就是说,如果多个 SELECT 子句返回结果集的结构完全相同(列名、列类型、列数、顺序),那么可以通过 UNION ALL 把这些结果集合并到一起。目前只支持 UNION ALL 模式,也即在结果集的合并过程中是不去重的。
|
||||||
|
|
||||||
### SQL 示例
|
### SQL 示例
|
||||||
|
|
||||||
|
|
@ -705,11 +715,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:应用全部字段。
|
应用字段:应用全部字段。
|
||||||
|
|
||||||
适用于:表、超级表。
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
说明:
|
说明:
|
||||||
|
|
||||||
1)可以使用星号*来替代具体的字段,使用星号(*)返回全部记录数量。
|
1)可以使用星号\*来替代具体的字段,使用星号(\*)返回全部记录数量。
|
||||||
|
|
||||||
2)针对同一表的(不包含NULL值)字段查询结果均相同。
|
2)针对同一表的(不包含NULL值)字段查询结果均相同。
|
||||||
|
|
||||||
|
|
@ -740,7 +750,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:不能应用在timestamp、binary、nchar、bool字段。
|
应用字段:不能应用在timestamp、binary、nchar、bool字段。
|
||||||
|
|
||||||
适用于:表、超级表。
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
```mysql
|
```mysql
|
||||||
|
|
@ -767,7 +777,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||||
|
|
||||||
适用于:表。
|
适用于:**表**。
|
||||||
|
|
||||||
- **SUM**
|
- **SUM**
|
||||||
```mysql
|
```mysql
|
||||||
|
|
@ -779,7 +789,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||||
|
|
||||||
适用于:表、超级表。
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
```mysql
|
```mysql
|
||||||
|
|
@ -806,7 +816,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||||
|
|
||||||
适用于:表。(从 2.0.15.1 版本开始,本函数也支持超级表)
|
适用于:**表**。(从 2.0.15.1 版本开始,本函数也支持**超级表**)
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
```mysql
|
```mysql
|
||||||
|
|
@ -829,7 +839,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
说明:自变量是时间戳,因变量是该列的值。
|
说明:自变量是时间戳,因变量是该列的值。
|
||||||
|
|
||||||
适用于:表。
|
适用于:**表**。
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
```mysql
|
```mysql
|
||||||
|
|
@ -852,6 +862,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||||
|
|
||||||
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
```mysql
|
```mysql
|
||||||
taos> SELECT MIN(current), MIN(voltage) FROM meters;
|
taos> SELECT MIN(current), MIN(voltage) FROM meters;
|
||||||
|
|
@ -877,6 +889,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||||
|
|
||||||
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
```mysql
|
```mysql
|
||||||
taos> SELECT MAX(current), MAX(voltage) FROM meters;
|
taos> SELECT MAX(current), MAX(voltage) FROM meters;
|
||||||
|
|
@ -902,6 +916,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:所有字段。
|
应用字段:所有字段。
|
||||||
|
|
||||||
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
说明:
|
说明:
|
||||||
|
|
||||||
1)如果要返回各个列的首个(时间戳最小)非NULL值,可以使用FIRST(\*);
|
1)如果要返回各个列的首个(时间戳最小)非NULL值,可以使用FIRST(\*);
|
||||||
|
|
@ -935,6 +951,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:所有字段。
|
应用字段:所有字段。
|
||||||
|
|
||||||
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
说明:
|
说明:
|
||||||
|
|
||||||
1)如果要返回各个列的最后(时间戳最大)一个非NULL值,可以使用LAST(\*);
|
1)如果要返回各个列的最后(时间戳最大)一个非NULL值,可以使用LAST(\*);
|
||||||
|
|
@ -966,6 +984,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||||
|
|
||||||
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
说明:
|
说明:
|
||||||
|
|
||||||
1)*k*值取值范围1≤*k*≤100;
|
1)*k*值取值范围1≤*k*≤100;
|
||||||
|
|
@ -1000,6 +1020,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||||
|
|
||||||
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
说明:
|
说明:
|
||||||
|
|
||||||
1)*k*值取值范围1≤*k*≤100;
|
1)*k*值取值范围1≤*k*≤100;
|
||||||
|
|
@ -1033,6 +1055,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||||
|
|
||||||
|
适用于:**表**。
|
||||||
|
|
||||||
说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。
|
说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
|
|
@ -1048,12 +1072,14 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
```mysql
|
```mysql
|
||||||
SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause];
|
SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
```
|
```
|
||||||
功能说明:统计表中某列的值百分比分位数,与PERCENTILE函数相似,但是返回近似结果。
|
功能说明:统计表/超级表中某列的值百分比分位数,与PERCENTILE函数相似,但是返回近似结果。
|
||||||
|
|
||||||
返回结果数据类型: 双精度浮点数Double。
|
返回结果数据类型: 双精度浮点数Double。
|
||||||
|
|
||||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||||
|
|
||||||
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数
|
说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数
|
||||||
|
|
||||||
```mysql
|
```mysql
|
||||||
|
|
@ -1068,12 +1094,14 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
```mysql
|
```mysql
|
||||||
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
|
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
|
||||||
```
|
```
|
||||||
功能说明:返回表(超级表)的最后一条记录。
|
功能说明:返回表/超级表的最后一条记录。
|
||||||
|
|
||||||
返回结果数据类型:同应用的字段。
|
返回结果数据类型:同应用的字段。
|
||||||
|
|
||||||
应用字段:所有字段。
|
应用字段:所有字段。
|
||||||
|
|
||||||
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
说明:与last函数不同,last_row不支持时间范围限制,强制返回最后一条记录。
|
说明:与last函数不同,last_row不支持时间范围限制,强制返回最后一条记录。
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
|
|
@ -1102,6 +1130,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||||
|
|
||||||
|
适用于:**表**。
|
||||||
|
|
||||||
说明:输出结果行数是范围内总行数减一,第一行没有结果输出。
|
说明:输出结果行数是范围内总行数减一,第一行没有结果输出。
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
|
|
@ -1124,6 +1154,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:不能应用在binary、nchar、bool类型字段。
|
应用字段:不能应用在binary、nchar、bool类型字段。
|
||||||
|
|
||||||
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
说明:可用于TIMESTAMP字段,此时表示记录的时间覆盖范围。
|
说明:可用于TIMESTAMP字段,此时表示记录的时间覆盖范围。
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
|
|
@ -1152,6 +1184,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
|
||||||
|
|
||||||
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
说明:
|
说明:
|
||||||
|
|
||||||
1)支持两列或多列之间进行计算,可使用括号控制计算优先级;
|
1)支持两列或多列之间进行计算,可使用括号控制计算优先级;
|
||||||
|
|
|
||||||
|
|
@ -97,7 +97,7 @@ go build -o bin/taosimport app/main.go
|
||||||
|
|
||||||
是否保存统计信息到 tdengine 的 statistic 表中,1 是,0 否, 默认 0。
|
是否保存统计信息到 tdengine 的 statistic 表中,1 是,0 否, 默认 0。
|
||||||
|
|
||||||
* -savetb int
|
* -savetb string
|
||||||
|
|
||||||
当 save 为 1 时保存统计信息的表名, 默认 statistic。
|
当 save 为 1 时保存统计信息的表名, 默认 statistic。
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -7,7 +7,6 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"hash/crc32"
|
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
|
|
@ -17,47 +16,55 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
dataimport "github.com/taosdata/TDengine/importSampleData/import"
|
dataImport "github.com/taosdata/TDengine/importSampleData/import"
|
||||||
|
|
||||||
_ "github.com/taosdata/driver-go/taosSql"
|
_ "github.com/taosdata/driver-go/taosSql"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
// 主键类型必须为 timestamp
|
||||||
TIMESTAMP = "timestamp"
|
TIMESTAMP = "timestamp"
|
||||||
|
|
||||||
|
// 样例数据中主键时间字段是 millisecond 还是 dateTime 格式
|
||||||
DATETIME = "datetime"
|
DATETIME = "datetime"
|
||||||
MILLISECOND = "millisecond"
|
MILLISECOND = "millisecond"
|
||||||
DEFAULT_STARTTIME int64 = -1
|
|
||||||
DEFAULT_INTERVAL int64 = 1 * 1000
|
|
||||||
DEFAULT_DELAY int64 = -1
|
|
||||||
DEFAULT_STATISTIC_TABLE = "statistic"
|
|
||||||
|
|
||||||
JSON_FORMAT = "json"
|
DefaultStartTime int64 = -1
|
||||||
CSV_FORMAT = "csv"
|
DefaultInterval int64 = 1 * 1000 // 导入的记录时间间隔,该设置只会在指定 auto=1 之后生效,否则会根据样例数据自动计算间隔时间。单位为毫秒,默认 1000。
|
||||||
SUPERTABLE_PREFIX = "s_"
|
DefaultDelay int64 = -1 //
|
||||||
SUBTABLE_PREFIX = "t_"
|
|
||||||
|
|
||||||
DRIVER_NAME = "taosSql"
|
// 当 save 为 1 时保存统计信息的表名, 默认 statistic。
|
||||||
STARTTIME_LAYOUT = "2006-01-02 15:04:05.000"
|
DefaultStatisticTable = "statistic"
|
||||||
INSERT_PREFIX = "insert into "
|
|
||||||
|
// 样例数据文件格式,可以是 json 或 csv
|
||||||
|
JsonFormat = "json"
|
||||||
|
CsvFormat = "csv"
|
||||||
|
|
||||||
|
SuperTablePrefix = "s_" // 超级表前缀
|
||||||
|
SubTablePrefix = "t_" // 子表前缀
|
||||||
|
|
||||||
|
DriverName = "taosSql"
|
||||||
|
StartTimeLayout = "2006-01-02 15:04:05.000"
|
||||||
|
InsertPrefix = "insert into "
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
cfg string
|
cfg string // 导入配置文件路径,包含样例数据文件相关描述及对应 TDengine 配置信息。默认使用 config/cfg.toml
|
||||||
cases string
|
cases string // 需要导入的场景名称,该名称可从 -cfg 指定的配置文件中 [usecase] 查看,可同时导入多个场景,中间使用逗号分隔,如:sensor_info,camera_detection,默认为 sensor_info
|
||||||
hnum int
|
hnum int // 需要将样例数据进行横向扩展的倍数,假设原有样例数据包含 1 张子表 t_0 数据,指定 hnum 为 2 时会根据原有表名创建 t、t_1 两张子表。默认为 100。
|
||||||
vnum int
|
vnum int // 需要将样例数据进行纵向扩展的次数,如果设置为 0 代表将历史数据导入至当前时间后持续按照指定间隔导入。默认为 1000,表示将样例数据在时间轴上纵向复制1000 次
|
||||||
thread int
|
thread int // 执行导入数据的线程数目,默认为 10
|
||||||
batch int
|
batch int // 执行导入数据时的批量大小,默认为 100。批量是指一次写操作时,包含多少条记录
|
||||||
auto int
|
auto int // 是否自动生成样例数据中的主键时间戳,1 是,0 否, 默认 0
|
||||||
starttimestr string
|
startTimeStr string // 导入的记录开始时间,格式为 "yyyy-MM-dd HH:mm:ss.SSS",不设置会使用样例数据中最小时间,设置后会忽略样例数据中的主键时间,会按照指定的 start 进行导入。如果 auto 为 1,则必须设置 start,默认为空
|
||||||
interval int64
|
interval int64 // 导入的记录时间间隔,该设置只会在指定 auto=1 之后生效,否则会根据样例数据自动计算间隔时间。单位为毫秒,默认 1000
|
||||||
host string
|
host string // 导入的 TDengine 服务器 IP,默认为 127.0.0.1
|
||||||
port int
|
port int // 导入的 TDengine 服务器端口,默认为 6030
|
||||||
user string
|
user string // 导入的 TDengine 用户名,默认为 root
|
||||||
password string
|
password string // 导入的 TDengine 用户密码,默认为 taosdata
|
||||||
dropdb int
|
dropdb int // 导入数据之前是否删除数据库,1 是,0 否, 默认 0
|
||||||
db string
|
db string // 导入的 TDengine 数据库名称,默认为 test_yyyyMMdd
|
||||||
dbparam string
|
dbparam string // 当指定的数据库不存在时,自动创建数据库时可选项配置参数,如 days 10 cache 16000 ablocks 4,默认为空
|
||||||
|
|
||||||
dataSourceName string
|
dataSourceName string
|
||||||
startTime int64
|
startTime int64
|
||||||
|
|
@ -72,10 +79,10 @@ var (
|
||||||
lastStaticTime time.Time
|
lastStaticTime time.Time
|
||||||
lastTotalRows int64
|
lastTotalRows int64
|
||||||
timeTicker *time.Ticker
|
timeTicker *time.Ticker
|
||||||
delay int64 // default 10 milliseconds
|
delay int64 // 当 vnum 设置为 0 时持续导入的时间间隔,默认为所有场景中最小记录间隔时间的一半,单位 ms。
|
||||||
tick int64
|
tick int64 // 打印统计信息的时间间隔,默认 2000 ms。
|
||||||
save int
|
save int // 是否保存统计信息到 tdengine 的 statistic 表中,1 是,0 否, 默认 0。
|
||||||
saveTable string
|
saveTable string // 当 save 为 1 时保存统计信息的表名, 默认 statistic。
|
||||||
)
|
)
|
||||||
|
|
||||||
type superTableConfig struct {
|
type superTableConfig struct {
|
||||||
|
|
@ -83,7 +90,7 @@ type superTableConfig struct {
|
||||||
endTime int64
|
endTime int64
|
||||||
cycleTime int64
|
cycleTime int64
|
||||||
avgInterval int64
|
avgInterval int64
|
||||||
config dataimport.CaseConfig
|
config dataImport.CaseConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
type scaleTableInfo struct {
|
type scaleTableInfo struct {
|
||||||
|
|
@ -92,14 +99,14 @@ type scaleTableInfo struct {
|
||||||
insertRows int64
|
insertRows int64
|
||||||
}
|
}
|
||||||
|
|
||||||
type tableRows struct {
|
//type tableRows struct {
|
||||||
tableName string // tableName
|
// tableName string // tableName
|
||||||
value string // values(...)
|
// value string // values(...)
|
||||||
}
|
//}
|
||||||
|
|
||||||
type dataRows struct {
|
type dataRows struct {
|
||||||
rows []map[string]interface{}
|
rows []map[string]interface{}
|
||||||
config dataimport.CaseConfig
|
config dataImport.CaseConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rows dataRows) Len() int {
|
func (rows dataRows) Len() int {
|
||||||
|
|
@ -107,9 +114,9 @@ func (rows dataRows) Len() int {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rows dataRows) Less(i, j int) bool {
|
func (rows dataRows) Less(i, j int) bool {
|
||||||
itime := getPrimaryKey(rows.rows[i][rows.config.Timestamp])
|
iTime := getPrimaryKey(rows.rows[i][rows.config.Timestamp])
|
||||||
jtime := getPrimaryKey(rows.rows[j][rows.config.Timestamp])
|
jTime := getPrimaryKey(rows.rows[j][rows.config.Timestamp])
|
||||||
return itime < jtime
|
return iTime < jTime
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rows dataRows) Swap(i, j int) {
|
func (rows dataRows) Swap(i, j int) {
|
||||||
|
|
@ -123,26 +130,26 @@ func getPrimaryKey(value interface{}) int64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
parseArg() //parse argument
|
parseArg() // parse argument
|
||||||
|
|
||||||
if db == "" {
|
if db == "" {
|
||||||
//db = "go"
|
// 导入的 TDengine 数据库名称,默认为 test_yyyyMMdd
|
||||||
db = fmt.Sprintf("test_%s", time.Now().Format("20060102"))
|
db = fmt.Sprintf("test_%s", time.Now().Format("20060102"))
|
||||||
}
|
}
|
||||||
|
|
||||||
if auto == 1 && len(starttimestr) == 0 {
|
if auto == 1 && len(startTimeStr) == 0 {
|
||||||
log.Fatalf("startTime must be set when auto is 1, the format is \"yyyy-MM-dd HH:mm:ss.SSS\" ")
|
log.Fatalf("startTime must be set when auto is 1, the format is \"yyyy-MM-dd HH:mm:ss.SSS\" ")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(starttimestr) != 0 {
|
if len(startTimeStr) != 0 {
|
||||||
t, err := time.ParseInLocation(STARTTIME_LAYOUT, strings.TrimSpace(starttimestr), time.Local)
|
t, err := time.ParseInLocation(StartTimeLayout, strings.TrimSpace(startTimeStr), time.Local)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("param startTime %s error, %s\n", starttimestr, err)
|
log.Fatalf("param startTime %s error, %s\n", startTimeStr, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
startTime = t.UnixNano() / 1e6 // as millisecond
|
startTime = t.UnixNano() / 1e6 // as millisecond
|
||||||
} else {
|
} else {
|
||||||
startTime = DEFAULT_STARTTIME
|
startTime = DefaultStartTime
|
||||||
}
|
}
|
||||||
|
|
||||||
dataSourceName = fmt.Sprintf("%s:%s@/tcp(%s:%d)/", user, password, host, port)
|
dataSourceName = fmt.Sprintf("%s:%s@/tcp(%s:%d)/", user, password, host, port)
|
||||||
|
|
@ -154,9 +161,9 @@ func init() {
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
|
||||||
importConfig := dataimport.LoadConfig(cfg)
|
importConfig := dataImport.LoadConfig(cfg)
|
||||||
|
|
||||||
var caseMinumInterval int64 = -1
|
var caseMinInterval int64 = -1
|
||||||
|
|
||||||
for _, userCase := range strings.Split(cases, ",") {
|
for _, userCase := range strings.Split(cases, ",") {
|
||||||
caseConfig, ok := importConfig.UserCases[userCase]
|
caseConfig, ok := importConfig.UserCases[userCase]
|
||||||
|
|
@ -168,7 +175,7 @@ func main() {
|
||||||
|
|
||||||
checkUserCaseConfig(userCase, &caseConfig)
|
checkUserCaseConfig(userCase, &caseConfig)
|
||||||
|
|
||||||
//read file as map array
|
// read file as map array
|
||||||
fileRows := readFile(caseConfig)
|
fileRows := readFile(caseConfig)
|
||||||
log.Printf("case [%s] sample data file contains %d rows.\n", userCase, len(fileRows.rows))
|
log.Printf("case [%s] sample data file contains %d rows.\n", userCase, len(fileRows.rows))
|
||||||
|
|
||||||
|
|
@ -177,31 +184,31 @@ func main() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
_, exists := superTableConfigMap[caseConfig.Stname]
|
_, exists := superTableConfigMap[caseConfig.StName]
|
||||||
if !exists {
|
if !exists {
|
||||||
superTableConfigMap[caseConfig.Stname] = &superTableConfig{config: caseConfig}
|
superTableConfigMap[caseConfig.StName] = &superTableConfig{config: caseConfig}
|
||||||
} else {
|
} else {
|
||||||
log.Fatalf("the stname of case %s already exist.\n", caseConfig.Stname)
|
log.Fatalf("the stname of case %s already exist.\n", caseConfig.StName)
|
||||||
}
|
}
|
||||||
|
|
||||||
var start, cycleTime, avgInterval int64 = getSuperTableTimeConfig(fileRows)
|
var start, cycleTime, avgInterval int64 = getSuperTableTimeConfig(fileRows)
|
||||||
|
|
||||||
// set super table's startTime, cycleTime and avgInterval
|
// set super table's startTime, cycleTime and avgInterval
|
||||||
superTableConfigMap[caseConfig.Stname].startTime = start
|
superTableConfigMap[caseConfig.StName].startTime = start
|
||||||
superTableConfigMap[caseConfig.Stname].avgInterval = avgInterval
|
superTableConfigMap[caseConfig.StName].cycleTime = cycleTime
|
||||||
superTableConfigMap[caseConfig.Stname].cycleTime = cycleTime
|
superTableConfigMap[caseConfig.StName].avgInterval = avgInterval
|
||||||
|
|
||||||
if caseMinumInterval == -1 || caseMinumInterval > avgInterval {
|
if caseMinInterval == -1 || caseMinInterval > avgInterval {
|
||||||
caseMinumInterval = avgInterval
|
caseMinInterval = avgInterval
|
||||||
}
|
}
|
||||||
|
|
||||||
startStr := time.Unix(0, start*int64(time.Millisecond)).Format(STARTTIME_LAYOUT)
|
startStr := time.Unix(0, start*int64(time.Millisecond)).Format(StartTimeLayout)
|
||||||
log.Printf("case [%s] startTime %s(%d), average dataInterval %d ms, cycleTime %d ms.\n", userCase, startStr, start, avgInterval, cycleTime)
|
log.Printf("case [%s] startTime %s(%d), average dataInterval %d ms, cycleTime %d ms.\n", userCase, startStr, start, avgInterval, cycleTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
if DEFAULT_DELAY == delay {
|
if DefaultDelay == delay {
|
||||||
// default delay
|
// default delay
|
||||||
delay = caseMinumInterval / 2
|
delay = caseMinInterval / 2
|
||||||
if delay < 1 {
|
if delay < 1 {
|
||||||
delay = 1
|
delay = 1
|
||||||
}
|
}
|
||||||
|
|
@ -218,7 +225,7 @@ func main() {
|
||||||
createSuperTable(superTableConfigMap)
|
createSuperTable(superTableConfigMap)
|
||||||
log.Printf("create %d superTable ,used %d ms.\n", superTableNum, time.Since(start)/1e6)
|
log.Printf("create %d superTable ,used %d ms.\n", superTableNum, time.Since(start)/1e6)
|
||||||
|
|
||||||
//create sub table
|
// create sub table
|
||||||
start = time.Now()
|
start = time.Now()
|
||||||
createSubTable(subTableMap)
|
createSubTable(subTableMap)
|
||||||
log.Printf("create %d times of %d subtable ,all %d tables, used %d ms.\n", hnum, len(subTableMap), len(scaleTableMap), time.Since(start)/1e6)
|
log.Printf("create %d times of %d subtable ,all %d tables, used %d ms.\n", hnum, len(subTableMap), len(scaleTableMap), time.Since(start)/1e6)
|
||||||
|
|
@ -278,7 +285,7 @@ func staticSpeed() {
|
||||||
defer connection.Close()
|
defer connection.Close()
|
||||||
|
|
||||||
if save == 1 {
|
if save == 1 {
|
||||||
connection.Exec("use " + db)
|
_, _ = connection.Exec("use " + db)
|
||||||
_, err := connection.Exec("create table if not exists " + saveTable + "(ts timestamp, speed int)")
|
_, err := connection.Exec("create table if not exists " + saveTable + "(ts timestamp, speed int)")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("create %s Table error: %s\n", saveTable, err)
|
log.Fatalf("create %s Table error: %s\n", saveTable, err)
|
||||||
|
|
@ -294,12 +301,12 @@ func staticSpeed() {
|
||||||
total := getTotalRows(successRows)
|
total := getTotalRows(successRows)
|
||||||
currentSuccessRows := total - lastTotalRows
|
currentSuccessRows := total - lastTotalRows
|
||||||
|
|
||||||
speed := currentSuccessRows * 1e9 / int64(usedTime)
|
speed := currentSuccessRows * 1e9 / usedTime
|
||||||
log.Printf("insert %d rows, used %d ms, speed %d rows/s", currentSuccessRows, usedTime/1e6, speed)
|
log.Printf("insert %d rows, used %d ms, speed %d rows/s", currentSuccessRows, usedTime/1e6, speed)
|
||||||
|
|
||||||
if save == 1 {
|
if save == 1 {
|
||||||
insertSql := fmt.Sprintf("insert into %s values(%d, %d)", saveTable, currentTime.UnixNano()/1e6, speed)
|
insertSql := fmt.Sprintf("insert into %s values(%d, %d)", saveTable, currentTime.UnixNano()/1e6, speed)
|
||||||
connection.Exec(insertSql)
|
_, _ = connection.Exec(insertSql)
|
||||||
}
|
}
|
||||||
|
|
||||||
lastStaticTime = currentTime
|
lastStaticTime = currentTime
|
||||||
|
|
@ -327,12 +334,13 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
// use the sample data primary timestamp
|
// use the sample data primary timestamp
|
||||||
sort.Sort(fileRows) // sort the file data by the primarykey
|
sort.Sort(fileRows) // sort the file data by the primaryKey
|
||||||
minTime := getPrimaryKey(fileRows.rows[0][fileRows.config.Timestamp])
|
minTime := getPrimaryKey(fileRows.rows[0][fileRows.config.Timestamp])
|
||||||
maxTime := getPrimaryKey(fileRows.rows[len(fileRows.rows)-1][fileRows.config.Timestamp])
|
maxTime := getPrimaryKey(fileRows.rows[len(fileRows.rows)-1][fileRows.config.Timestamp])
|
||||||
|
|
||||||
start = minTime // default startTime use the minTime
|
start = minTime // default startTime use the minTime
|
||||||
if DEFAULT_STARTTIME != startTime {
|
// 设置了start时间的话 按照start来
|
||||||
|
if DefaultStartTime != startTime {
|
||||||
start = startTime
|
start = startTime
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -350,31 +358,21 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func createStatisticTable() {
|
|
||||||
connection := getConnection()
|
|
||||||
defer connection.Close()
|
|
||||||
|
|
||||||
_, err := connection.Exec("create table if not exist " + db + "." + saveTable + "(ts timestamp, speed int)")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("createStatisticTable error: %s\n", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func createSubTable(subTableMaps map[string]*dataRows) {
|
func createSubTable(subTableMaps map[string]*dataRows) {
|
||||||
|
|
||||||
connection := getConnection()
|
connection := getConnection()
|
||||||
defer connection.Close()
|
defer connection.Close()
|
||||||
|
|
||||||
connection.Exec("use " + db)
|
_, _ = connection.Exec("use " + db)
|
||||||
|
|
||||||
createTablePrefix := "create table if not exists "
|
createTablePrefix := "create table if not exists "
|
||||||
|
var buffer bytes.Buffer
|
||||||
for subTableName := range subTableMaps {
|
for subTableName := range subTableMaps {
|
||||||
|
|
||||||
superTableName := getSuperTableName(subTableMaps[subTableName].config.Stname)
|
superTableName := getSuperTableName(subTableMaps[subTableName].config.StName)
|
||||||
tagValues := subTableMaps[subTableName].rows[0] // the first rows values as tags
|
firstRowValues := subTableMaps[subTableName].rows[0] // the first rows values as tags
|
||||||
|
|
||||||
buffers := bytes.Buffer{}
|
// create table t using superTable tags(...);
|
||||||
// create table t using supertTable tags(...);
|
|
||||||
for i := 0; i < hnum; i++ {
|
for i := 0; i < hnum; i++ {
|
||||||
tableName := getScaleSubTableName(subTableName, i)
|
tableName := getScaleSubTableName(subTableName, i)
|
||||||
|
|
||||||
|
|
@ -384,21 +382,21 @@ func createSubTable(subTableMaps map[string]*dataRows) {
|
||||||
}
|
}
|
||||||
scaleTableNames = append(scaleTableNames, tableName)
|
scaleTableNames = append(scaleTableNames, tableName)
|
||||||
|
|
||||||
buffers.WriteString(createTablePrefix)
|
buffer.WriteString(createTablePrefix)
|
||||||
buffers.WriteString(tableName)
|
buffer.WriteString(tableName)
|
||||||
buffers.WriteString(" using ")
|
buffer.WriteString(" using ")
|
||||||
buffers.WriteString(superTableName)
|
buffer.WriteString(superTableName)
|
||||||
buffers.WriteString(" tags(")
|
buffer.WriteString(" tags(")
|
||||||
for _, tag := range subTableMaps[subTableName].config.Tags {
|
for _, tag := range subTableMaps[subTableName].config.Tags {
|
||||||
tagValue := fmt.Sprintf("%v", tagValues[strings.ToLower(tag.Name)])
|
tagValue := fmt.Sprintf("%v", firstRowValues[strings.ToLower(tag.Name)])
|
||||||
buffers.WriteString("'" + tagValue + "'")
|
buffer.WriteString("'" + tagValue + "'")
|
||||||
buffers.WriteString(",")
|
buffer.WriteString(",")
|
||||||
}
|
}
|
||||||
buffers.Truncate(buffers.Len() - 1)
|
buffer.Truncate(buffer.Len() - 1)
|
||||||
buffers.WriteString(")")
|
buffer.WriteString(")")
|
||||||
|
|
||||||
createTableSql := buffers.String()
|
createTableSql := buffer.String()
|
||||||
buffers.Reset()
|
buffer.Reset()
|
||||||
|
|
||||||
//log.Printf("create table: %s\n", createTableSql)
|
//log.Printf("create table: %s\n", createTableSql)
|
||||||
_, err := connection.Exec(createTableSql)
|
_, err := connection.Exec(createTableSql)
|
||||||
|
|
@ -420,7 +418,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("drop database error: %s\n", err)
|
log.Fatalf("drop database error: %s\n", err)
|
||||||
}
|
}
|
||||||
log.Printf("dropDb: %s\n", dropDbSql)
|
log.Printf("dropdb: %s\n", dropDbSql)
|
||||||
}
|
}
|
||||||
|
|
||||||
createDbSql := "create database if not exists " + db + " " + dbparam
|
createDbSql := "create database if not exists " + db + " " + dbparam
|
||||||
|
|
@ -431,7 +429,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
|
||||||
}
|
}
|
||||||
log.Printf("createDb: %s\n", createDbSql)
|
log.Printf("createDb: %s\n", createDbSql)
|
||||||
|
|
||||||
connection.Exec("use " + db)
|
_, _ = connection.Exec("use " + db)
|
||||||
|
|
||||||
prefix := "create table if not exists "
|
prefix := "create table if not exists "
|
||||||
var buffer bytes.Buffer
|
var buffer bytes.Buffer
|
||||||
|
|
@ -464,7 +462,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
|
||||||
createSql := buffer.String()
|
createSql := buffer.String()
|
||||||
buffer.Reset()
|
buffer.Reset()
|
||||||
|
|
||||||
//log.Printf("supertable: %s\n", createSql)
|
//log.Printf("superTable: %s\n", createSql)
|
||||||
_, err = connection.Exec(createSql)
|
_, err = connection.Exec(createSql)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("create supertable error: %s\n", err)
|
log.Fatalf("create supertable error: %s\n", err)
|
||||||
|
|
@ -473,15 +471,15 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func getScaleSubTableName(subTableName string, hnum int) string {
|
func getScaleSubTableName(subTableName string, hNum int) string {
|
||||||
if hnum == 0 {
|
if hNum == 0 {
|
||||||
return subTableName
|
return subTableName
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("%s_%d", subTableName, hnum)
|
return fmt.Sprintf("%s_%d", subTableName, hNum)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getSuperTableName(stname string) string {
|
func getSuperTableName(stName string) string {
|
||||||
return SUPERTABLE_PREFIX + stname
|
return SuperTablePrefix + stName
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
@ -499,7 +497,7 @@ func normalizationData(fileRows dataRows, minTime int64) int64 {
|
||||||
|
|
||||||
row[fileRows.config.Timestamp] = getPrimaryKey(row[fileRows.config.Timestamp]) - minTime
|
row[fileRows.config.Timestamp] = getPrimaryKey(row[fileRows.config.Timestamp]) - minTime
|
||||||
|
|
||||||
subTableName := getSubTableName(tableValue, fileRows.config.Stname)
|
subTableName := getSubTableName(tableValue, fileRows.config.StName)
|
||||||
|
|
||||||
value, ok := subTableMap[subTableName]
|
value, ok := subTableMap[subTableName]
|
||||||
if !ok {
|
if !ok {
|
||||||
|
|
@ -527,7 +525,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
subTableName := getSubTableName(tableValue, fileRows.config.Stname)
|
subTableName := getSubTableName(tableValue, fileRows.config.StName)
|
||||||
|
|
||||||
value, ok := currSubTableMap[subTableName]
|
value, ok := currSubTableMap[subTableName]
|
||||||
if !ok {
|
if !ok {
|
||||||
|
|
@ -543,7 +541,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var maxRows, tableRows int = 0, 0
|
var maxRows, tableRows = 0, 0
|
||||||
for tableName := range currSubTableMap {
|
for tableName := range currSubTableMap {
|
||||||
tableRows = len(currSubTableMap[tableName].rows)
|
tableRows = len(currSubTableMap[tableName].rows)
|
||||||
subTableMap[tableName] = currSubTableMap[tableName] // add to global subTableMap
|
subTableMap[tableName] = currSubTableMap[tableName] // add to global subTableMap
|
||||||
|
|
@ -556,7 +554,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
|
||||||
}
|
}
|
||||||
|
|
||||||
func getSubTableName(subTableValue string, superTableName string) string {
|
func getSubTableName(subTableValue string, superTableName string) string {
|
||||||
return SUBTABLE_PREFIX + subTableValue + "_" + superTableName
|
return SubTablePrefix + subTableValue + "_" + superTableName
|
||||||
}
|
}
|
||||||
|
|
||||||
func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []int64) {
|
func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []int64) {
|
||||||
|
|
@ -564,25 +562,25 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
|
||||||
defer connection.Close()
|
defer connection.Close()
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
|
|
||||||
connection.Exec("use " + db) // use db
|
_, _ = connection.Exec("use " + db) // use db
|
||||||
|
|
||||||
log.Printf("thread-%d start insert into [%d, %d) subtables.\n", threadIndex, start, end)
|
log.Printf("thread-%d start insert into [%d, %d) subtables.\n", threadIndex, start, end)
|
||||||
|
|
||||||
num := 0
|
num := 0
|
||||||
subTables := scaleTableNames[start:end]
|
subTables := scaleTableNames[start:end]
|
||||||
|
var buffer bytes.Buffer
|
||||||
for {
|
for {
|
||||||
var currSuccessRows int64
|
var currSuccessRows int64
|
||||||
var appendRows int
|
var appendRows int
|
||||||
var lastTableName string
|
var lastTableName string
|
||||||
|
|
||||||
buffers := bytes.Buffer{}
|
buffer.WriteString(InsertPrefix)
|
||||||
buffers.WriteString(INSERT_PREFIX)
|
|
||||||
|
|
||||||
for _, tableName := range subTables {
|
for _, tableName := range subTables {
|
||||||
|
|
||||||
subTableInfo := subTableMap[scaleTableMap[tableName].subTableName]
|
subTableInfo := subTableMap[scaleTableMap[tableName].subTableName]
|
||||||
subTableRows := int64(len(subTableInfo.rows))
|
subTableRows := int64(len(subTableInfo.rows))
|
||||||
superTableConf := superTableConfigMap[subTableInfo.config.Stname]
|
superTableConf := superTableConfigMap[subTableInfo.config.StName]
|
||||||
|
|
||||||
tableStartTime := superTableConf.startTime
|
tableStartTime := superTableConf.startTime
|
||||||
var tableEndTime int64
|
var tableEndTime int64
|
||||||
|
|
@ -605,40 +603,35 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
|
||||||
// append
|
// append
|
||||||
|
|
||||||
if lastTableName != tableName {
|
if lastTableName != tableName {
|
||||||
buffers.WriteString(tableName)
|
buffer.WriteString(tableName)
|
||||||
buffers.WriteString(" values")
|
buffer.WriteString(" values")
|
||||||
}
|
}
|
||||||
lastTableName = tableName
|
lastTableName = tableName
|
||||||
|
|
||||||
buffers.WriteString("(")
|
buffer.WriteString("(")
|
||||||
buffers.WriteString(fmt.Sprintf("%v", currentTime))
|
buffer.WriteString(fmt.Sprintf("%v", currentTime))
|
||||||
buffers.WriteString(",")
|
buffer.WriteString(",")
|
||||||
|
|
||||||
// fieldNum := len(subTableInfo.config.Fields)
|
|
||||||
for _, field := range subTableInfo.config.Fields {
|
for _, field := range subTableInfo.config.Fields {
|
||||||
buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
|
buffer.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
|
||||||
buffers.WriteString(",")
|
buffer.WriteString(",")
|
||||||
// if( i != fieldNum -1){
|
|
||||||
|
|
||||||
// }
|
|
||||||
}
|
}
|
||||||
|
|
||||||
buffers.Truncate(buffers.Len() - 1)
|
buffer.Truncate(buffer.Len() - 1)
|
||||||
buffers.WriteString(") ")
|
buffer.WriteString(") ")
|
||||||
|
|
||||||
appendRows++
|
appendRows++
|
||||||
insertRows++
|
insertRows++
|
||||||
if appendRows == batch {
|
if appendRows == batch {
|
||||||
// executebatch
|
// executeBatch
|
||||||
insertSql := buffers.String()
|
insertSql := buffer.String()
|
||||||
connection.Exec("use " + db)
|
|
||||||
affectedRows := executeBatchInsert(insertSql, connection)
|
affectedRows := executeBatchInsert(insertSql, connection)
|
||||||
|
|
||||||
successRows[threadIndex] += affectedRows
|
successRows[threadIndex] += affectedRows
|
||||||
currSuccessRows += affectedRows
|
currSuccessRows += affectedRows
|
||||||
|
|
||||||
buffers.Reset()
|
buffer.Reset()
|
||||||
buffers.WriteString(INSERT_PREFIX)
|
buffer.WriteString(InsertPrefix)
|
||||||
lastTableName = ""
|
lastTableName = ""
|
||||||
appendRows = 0
|
appendRows = 0
|
||||||
}
|
}
|
||||||
|
|
@ -654,15 +647,14 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
|
||||||
|
|
||||||
// left := len(rows)
|
// left := len(rows)
|
||||||
if appendRows > 0 {
|
if appendRows > 0 {
|
||||||
// executebatch
|
// executeBatch
|
||||||
insertSql := buffers.String()
|
insertSql := buffer.String()
|
||||||
connection.Exec("use " + db)
|
|
||||||
affectedRows := executeBatchInsert(insertSql, connection)
|
affectedRows := executeBatchInsert(insertSql, connection)
|
||||||
|
|
||||||
successRows[threadIndex] += affectedRows
|
successRows[threadIndex] += affectedRows
|
||||||
currSuccessRows += affectedRows
|
currSuccessRows += affectedRows
|
||||||
|
|
||||||
buffers.Reset()
|
buffer.Reset()
|
||||||
}
|
}
|
||||||
|
|
||||||
// log.Printf("thread-%d finished insert %d rows, used %d ms.", threadIndex, currSuccessRows, time.Since(threadStartTime)/1e6)
|
// log.Printf("thread-%d finished insert %d rows, used %d ms.", threadIndex, currSuccessRows, time.Since(threadStartTime)/1e6)
|
||||||
|
|
@ -688,65 +680,10 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildSql(rows []tableRows) string {
|
|
||||||
|
|
||||||
var lastTableName string
|
|
||||||
|
|
||||||
buffers := bytes.Buffer{}
|
|
||||||
|
|
||||||
for i, row := range rows {
|
|
||||||
if i == 0 {
|
|
||||||
lastTableName = row.tableName
|
|
||||||
buffers.WriteString(INSERT_PREFIX)
|
|
||||||
buffers.WriteString(row.tableName)
|
|
||||||
buffers.WriteString(" values")
|
|
||||||
buffers.WriteString(row.value)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if lastTableName == row.tableName {
|
|
||||||
buffers.WriteString(row.value)
|
|
||||||
} else {
|
|
||||||
buffers.WriteString(" ")
|
|
||||||
buffers.WriteString(row.tableName)
|
|
||||||
buffers.WriteString(" values")
|
|
||||||
buffers.WriteString(row.value)
|
|
||||||
lastTableName = row.tableName
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
inserSql := buffers.String()
|
|
||||||
return inserSql
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildRow(tableName string, currentTime int64, subTableInfo *dataRows, currentRow map[string]interface{}) tableRows {
|
|
||||||
|
|
||||||
tableRows := tableRows{tableName: tableName}
|
|
||||||
|
|
||||||
buffers := bytes.Buffer{}
|
|
||||||
|
|
||||||
buffers.WriteString("(")
|
|
||||||
buffers.WriteString(fmt.Sprintf("%v", currentTime))
|
|
||||||
buffers.WriteString(",")
|
|
||||||
|
|
||||||
for _, field := range subTableInfo.config.Fields {
|
|
||||||
buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
|
|
||||||
buffers.WriteString(",")
|
|
||||||
}
|
|
||||||
|
|
||||||
buffers.Truncate(buffers.Len() - 1)
|
|
||||||
buffers.WriteString(")")
|
|
||||||
|
|
||||||
insertSql := buffers.String()
|
|
||||||
tableRows.value = insertSql
|
|
||||||
|
|
||||||
return tableRows
|
|
||||||
}
|
|
||||||
|
|
||||||
func executeBatchInsert(insertSql string, connection *sql.DB) int64 {
|
func executeBatchInsert(insertSql string, connection *sql.DB) int64 {
|
||||||
result, error := connection.Exec(insertSql)
|
result, err := connection.Exec(insertSql)
|
||||||
if error != nil {
|
if err != nil {
|
||||||
log.Printf("execute insertSql %s error, %s\n", insertSql, error)
|
log.Printf("execute insertSql %s error, %s\n", insertSql, err)
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
affected, _ := result.RowsAffected()
|
affected, _ := result.RowsAffected()
|
||||||
|
|
@ -754,7 +691,6 @@ func executeBatchInsert(insertSql string, connection *sql.DB) int64 {
|
||||||
affected = 0
|
affected = 0
|
||||||
}
|
}
|
||||||
return affected
|
return affected
|
||||||
// return 0
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func getFieldValue(fieldValue interface{}) string {
|
func getFieldValue(fieldValue interface{}) string {
|
||||||
|
|
@ -762,7 +698,7 @@ func getFieldValue(fieldValue interface{}) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func getConnection() *sql.DB {
|
func getConnection() *sql.DB {
|
||||||
db, err := sql.Open(DRIVER_NAME, dataSourceName)
|
db, err := sql.Open(DriverName, dataSourceName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
@ -773,19 +709,11 @@ func getSubTableNameValue(suffix interface{}) string {
|
||||||
return fmt.Sprintf("%v", suffix)
|
return fmt.Sprintf("%v", suffix)
|
||||||
}
|
}
|
||||||
|
|
||||||
func hash(s string) int {
|
func readFile(config dataImport.CaseConfig) dataRows {
|
||||||
v := int(crc32.ChecksumIEEE([]byte(s)))
|
|
||||||
if v < 0 {
|
|
||||||
return -v
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func readFile(config dataimport.CaseConfig) dataRows {
|
|
||||||
fileFormat := strings.ToLower(config.Format)
|
fileFormat := strings.ToLower(config.Format)
|
||||||
if fileFormat == JSON_FORMAT {
|
if fileFormat == JsonFormat {
|
||||||
return readJSONFile(config)
|
return readJSONFile(config)
|
||||||
} else if fileFormat == CSV_FORMAT {
|
} else if fileFormat == CsvFormat {
|
||||||
return readCSVFile(config)
|
return readCSVFile(config)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -793,7 +721,7 @@ func readFile(config dataimport.CaseConfig) dataRows {
|
||||||
return dataRows{}
|
return dataRows{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func readCSVFile(config dataimport.CaseConfig) dataRows {
|
func readCSVFile(config dataImport.CaseConfig) dataRows {
|
||||||
var rows dataRows
|
var rows dataRows
|
||||||
f, err := os.Open(config.FilePath)
|
f, err := os.Open(config.FilePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -813,7 +741,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows {
|
||||||
line := strings.ToLower(string(lineBytes))
|
line := strings.ToLower(string(lineBytes))
|
||||||
titles := strings.Split(line, config.Separator)
|
titles := strings.Split(line, config.Separator)
|
||||||
if len(titles) < 3 {
|
if len(titles) < 3 {
|
||||||
// need suffix、 primarykey and at least one other field
|
// need suffix、 primaryKey and at least one other field
|
||||||
log.Printf("the first line of file %s should be title row, and at least 3 field.\n", config.FilePath)
|
log.Printf("the first line of file %s should be title row, and at least 3 field.\n", config.FilePath)
|
||||||
return rows
|
return rows
|
||||||
}
|
}
|
||||||
|
|
@ -848,7 +776,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows {
|
||||||
}
|
}
|
||||||
|
|
||||||
// if the primary key valid
|
// if the primary key valid
|
||||||
primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap)
|
primaryKeyValue := getPrimaryKeyMilliSec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap)
|
||||||
if primaryKeyValue == -1 {
|
if primaryKeyValue == -1 {
|
||||||
log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum)
|
log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum)
|
||||||
continue
|
continue
|
||||||
|
|
@ -861,7 +789,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows {
|
||||||
return rows
|
return rows
|
||||||
}
|
}
|
||||||
|
|
||||||
func readJSONFile(config dataimport.CaseConfig) dataRows {
|
func readJSONFile(config dataImport.CaseConfig) dataRows {
|
||||||
|
|
||||||
var rows dataRows
|
var rows dataRows
|
||||||
f, err := os.Open(config.FilePath)
|
f, err := os.Open(config.FilePath)
|
||||||
|
|
@ -899,7 +827,7 @@ func readJSONFile(config dataimport.CaseConfig) dataRows {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line)
|
primaryKeyValue := getPrimaryKeyMilliSec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line)
|
||||||
if primaryKeyValue == -1 {
|
if primaryKeyValue == -1 {
|
||||||
log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum)
|
log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum)
|
||||||
continue
|
continue
|
||||||
|
|
@ -916,7 +844,7 @@ func readJSONFile(config dataimport.CaseConfig) dataRows {
|
||||||
/**
|
/**
|
||||||
* get primary key as millisecond , otherwise return -1
|
* get primary key as millisecond , otherwise return -1
|
||||||
*/
|
*/
|
||||||
func getPrimaryKeyMillisec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 {
|
func getPrimaryKeyMilliSec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 {
|
||||||
if !existMapKeyAndNotEmpty(key, line) {
|
if !existMapKeyAndNotEmpty(key, line) {
|
||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
|
|
@ -971,13 +899,13 @@ func existMapKeyAndNotEmpty(key string, maps map[string]interface{}) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) {
|
func checkUserCaseConfig(caseName string, caseConfig *dataImport.CaseConfig) {
|
||||||
|
|
||||||
if len(caseConfig.Stname) == 0 {
|
if len(caseConfig.StName) == 0 {
|
||||||
log.Fatalf("the stname of case %s can't be empty\n", caseName)
|
log.Fatalf("the stname of case %s can't be empty\n", caseName)
|
||||||
}
|
}
|
||||||
|
|
||||||
caseConfig.Stname = strings.ToLower(caseConfig.Stname)
|
caseConfig.StName = strings.ToLower(caseConfig.StName)
|
||||||
|
|
||||||
if len(caseConfig.Tags) == 0 {
|
if len(caseConfig.Tags) == 0 {
|
||||||
log.Fatalf("the tags of case %s can't be empty\n", caseName)
|
log.Fatalf("the tags of case %s can't be empty\n", caseName)
|
||||||
|
|
@ -1029,24 +957,24 @@ func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseArg() {
|
func parseArg() {
|
||||||
flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes usecase and data format.")
|
flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes useCase and data format.")
|
||||||
flag.StringVar(&cases, "cases", "sensor_info", "usecase for dataset to be imported. Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.")
|
flag.StringVar(&cases, "cases", "sensor_info", "useCase for dataset to be imported. Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.")
|
||||||
flag.IntVar(&hnum, "hnum", 100, "magnification factor of the sample tables. For example, if hnum is 100 and in the sample data there are 10 tables, then 10x100=1000 tables will be created in the database.")
|
flag.IntVar(&hnum, "hnum", 100, "magnification factor of the sample tables. For example, if hnum is 100 and in the sample data there are 10 tables, then 10x100=1000 tables will be created in the database.")
|
||||||
flag.IntVar(&vnum, "vnum", 1000, "copies of the sample records in each table. If set to 0,this program will never stop simulating and importing data even if the timestamp has passed current time.")
|
flag.IntVar(&vnum, "vnum", 1000, "copies of the sample records in each table. If set to 0,this program will never stop simulating and importing data even if the timestamp has passed current time.")
|
||||||
flag.Int64Var(&delay, "delay", DEFAULT_DELAY, "the delay time interval(millisecond) to continue generating data when vnum set 0.")
|
flag.Int64Var(&delay, "delay", DefaultDelay, "the delay time interval(millisecond) to continue generating data when vnum set 0.")
|
||||||
flag.Int64Var(&tick, "tick", 2000, "the tick time interval(millisecond) to print statistic info.")
|
flag.Int64Var(&tick, "tick", 2000, "the tick time interval(millisecond) to print statistic info.")
|
||||||
flag.IntVar(&save, "save", 0, "whether to save the statistical info into 'statistic' table. 0 is disabled and 1 is enabled.")
|
flag.IntVar(&save, "save", 0, "whether to save the statistical info into 'statistic' table. 0 is disabled and 1 is enabled.")
|
||||||
flag.StringVar(&saveTable, "savetb", DEFAULT_STATISTIC_TABLE, "the table to save 'statistic' info when save set 1.")
|
flag.StringVar(&saveTable, "savetb", DefaultStatisticTable, "the table to save 'statistic' info when save set 1.")
|
||||||
flag.IntVar(&thread, "thread", 10, "number of threads to import data.")
|
flag.IntVar(&thread, "thread", 10, "number of threads to import data.")
|
||||||
flag.IntVar(&batch, "batch", 100, "rows of records in one import batch.")
|
flag.IntVar(&batch, "batch", 100, "rows of records in one import batch.")
|
||||||
flag.IntVar(&auto, "auto", 0, "whether to use the starttime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.")
|
flag.IntVar(&auto, "auto", 0, "whether to use the startTime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.")
|
||||||
flag.StringVar(&starttimestr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the ealiest timestamp in the sample data will be set as the starttime.")
|
flag.StringVar(&startTimeStr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the earliest timestamp in the sample data will be set as the startTime.")
|
||||||
flag.Int64Var(&interval, "interval", DEFAULT_INTERVAL, "time inteval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.")
|
flag.Int64Var(&interval, "interval", DefaultInterval, "time interval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.")
|
||||||
flag.StringVar(&host, "host", "127.0.0.1", "tdengine server ip.")
|
flag.StringVar(&host, "host", "127.0.0.1", "tdengine server ip.")
|
||||||
flag.IntVar(&port, "port", 6030, "tdengine server port.")
|
flag.IntVar(&port, "port", 6030, "tdengine server port.")
|
||||||
flag.StringVar(&user, "user", "root", "user name to login into the database.")
|
flag.StringVar(&user, "user", "root", "user name to login into the database.")
|
||||||
flag.StringVar(&password, "password", "taosdata", "the import tdengine user password")
|
flag.StringVar(&password, "password", "taosdata", "the import tdengine user password")
|
||||||
flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing datbase. 1 is yes and 0 otherwise.")
|
flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing database. 1 is yes and 0 otherwise.")
|
||||||
flag.StringVar(&db, "db", "", "name of the database to store data.")
|
flag.StringVar(&db, "db", "", "name of the database to store data.")
|
||||||
flag.StringVar(&dbparam, "dbparam", "", "database configurations when it is created.")
|
flag.StringVar(&dbparam, "dbparam", "", "database configurations when it is created.")
|
||||||
|
|
||||||
|
|
@ -1066,7 +994,7 @@ func printArg() {
|
||||||
fmt.Println("-thread:", thread)
|
fmt.Println("-thread:", thread)
|
||||||
fmt.Println("-batch:", batch)
|
fmt.Println("-batch:", batch)
|
||||||
fmt.Println("-auto:", auto)
|
fmt.Println("-auto:", auto)
|
||||||
fmt.Println("-start:", starttimestr)
|
fmt.Println("-start:", startTimeStr)
|
||||||
fmt.Println("-interval:", interval)
|
fmt.Println("-interval:", interval)
|
||||||
fmt.Println("-host:", host)
|
fmt.Println("-host:", host)
|
||||||
fmt.Println("-port", port)
|
fmt.Println("-port", port)
|
||||||
|
|
|
||||||
|
|
@ -899,103 +899,103 @@ devid,location,color,devgroup,ts,temperature,humidity
|
||||||
8, haerbing, yellow, 2, 1575129697000, 31, 16.321497
|
8, haerbing, yellow, 2, 1575129697000, 31, 16.321497
|
||||||
8, haerbing, yellow, 2, 1575129698000, 25, 15.864515
|
8, haerbing, yellow, 2, 1575129698000, 25, 15.864515
|
||||||
8, haerbing, yellow, 2, 1575129699000, 25, 16.492443
|
8, haerbing, yellow, 2, 1575129699000, 25, 16.492443
|
||||||
9, sijiazhuang, blue, 0, 1575129600000, 23, 16.002889
|
9, shijiazhuang, blue, 0, 1575129600000, 23, 16.002889
|
||||||
9, sijiazhuang, blue, 0, 1575129601000, 26, 17.034610
|
9, shijiazhuang, blue, 0, 1575129601000, 26, 17.034610
|
||||||
9, sijiazhuang, blue, 0, 1575129602000, 29, 12.892319
|
9, shijiazhuang, blue, 0, 1575129602000, 29, 12.892319
|
||||||
9, sijiazhuang, blue, 0, 1575129603000, 34, 15.321807
|
9, shijiazhuang, blue, 0, 1575129603000, 34, 15.321807
|
||||||
9, sijiazhuang, blue, 0, 1575129604000, 29, 12.562642
|
9, shijiazhuang, blue, 0, 1575129604000, 29, 12.562642
|
||||||
9, sijiazhuang, blue, 0, 1575129605000, 32, 17.190246
|
9, shijiazhuang, blue, 0, 1575129605000, 32, 17.190246
|
||||||
9, sijiazhuang, blue, 0, 1575129606000, 19, 15.361774
|
9, shijiazhuang, blue, 0, 1575129606000, 19, 15.361774
|
||||||
9, sijiazhuang, blue, 0, 1575129607000, 26, 15.022364
|
9, shijiazhuang, blue, 0, 1575129607000, 26, 15.022364
|
||||||
9, sijiazhuang, blue, 0, 1575129608000, 31, 14.837084
|
9, shijiazhuang, blue, 0, 1575129608000, 31, 14.837084
|
||||||
9, sijiazhuang, blue, 0, 1575129609000, 25, 11.554289
|
9, shijiazhuang, blue, 0, 1575129609000, 25, 11.554289
|
||||||
9, sijiazhuang, blue, 0, 1575129610000, 21, 15.313973
|
9, shijiazhuang, blue, 0, 1575129610000, 21, 15.313973
|
||||||
9, sijiazhuang, blue, 0, 1575129611000, 27, 18.621783
|
9, shijiazhuang, blue, 0, 1575129611000, 27, 18.621783
|
||||||
9, sijiazhuang, blue, 0, 1575129612000, 31, 18.018101
|
9, shijiazhuang, blue, 0, 1575129612000, 31, 18.018101
|
||||||
9, sijiazhuang, blue, 0, 1575129613000, 23, 14.421450
|
9, shijiazhuang, blue, 0, 1575129613000, 23, 14.421450
|
||||||
9, sijiazhuang, blue, 0, 1575129614000, 28, 10.833142
|
9, shijiazhuang, blue, 0, 1575129614000, 28, 10.833142
|
||||||
9, sijiazhuang, blue, 0, 1575129615000, 33, 18.169837
|
9, shijiazhuang, blue, 0, 1575129615000, 33, 18.169837
|
||||||
9, sijiazhuang, blue, 0, 1575129616000, 21, 18.772730
|
9, shijiazhuang, blue, 0, 1575129616000, 21, 18.772730
|
||||||
9, sijiazhuang, blue, 0, 1575129617000, 24, 18.893146
|
9, shijiazhuang, blue, 0, 1575129617000, 24, 18.893146
|
||||||
9, sijiazhuang, blue, 0, 1575129618000, 24, 10.290187
|
9, shijiazhuang, blue, 0, 1575129618000, 24, 10.290187
|
||||||
9, sijiazhuang, blue, 0, 1575129619000, 23, 17.393345
|
9, shijiazhuang, blue, 0, 1575129619000, 23, 17.393345
|
||||||
9, sijiazhuang, blue, 0, 1575129620000, 30, 12.949215
|
9, shijiazhuang, blue, 0, 1575129620000, 30, 12.949215
|
||||||
9, sijiazhuang, blue, 0, 1575129621000, 19, 19.267621
|
9, shijiazhuang, blue, 0, 1575129621000, 19, 19.267621
|
||||||
9, sijiazhuang, blue, 0, 1575129622000, 33, 14.831735
|
9, shijiazhuang, blue, 0, 1575129622000, 33, 14.831735
|
||||||
9, sijiazhuang, blue, 0, 1575129623000, 21, 14.711125
|
9, shijiazhuang, blue, 0, 1575129623000, 21, 14.711125
|
||||||
9, sijiazhuang, blue, 0, 1575129624000, 16, 17.168485
|
9, shijiazhuang, blue, 0, 1575129624000, 16, 17.168485
|
||||||
9, sijiazhuang, blue, 0, 1575129625000, 17, 16.426433
|
9, shijiazhuang, blue, 0, 1575129625000, 17, 16.426433
|
||||||
9, sijiazhuang, blue, 0, 1575129626000, 19, 13.879050
|
9, shijiazhuang, blue, 0, 1575129626000, 19, 13.879050
|
||||||
9, sijiazhuang, blue, 0, 1575129627000, 21, 18.308168
|
9, shijiazhuang, blue, 0, 1575129627000, 21, 18.308168
|
||||||
9, sijiazhuang, blue, 0, 1575129628000, 17, 10.845681
|
9, shijiazhuang, blue, 0, 1575129628000, 17, 10.845681
|
||||||
9, sijiazhuang, blue, 0, 1575129629000, 20, 10.238272
|
9, shijiazhuang, blue, 0, 1575129629000, 20, 10.238272
|
||||||
9, sijiazhuang, blue, 0, 1575129630000, 19, 19.424976
|
9, shijiazhuang, blue, 0, 1575129630000, 19, 19.424976
|
||||||
9, sijiazhuang, blue, 0, 1575129631000, 31, 13.885909
|
9, shijiazhuang, blue, 0, 1575129631000, 31, 13.885909
|
||||||
9, sijiazhuang, blue, 0, 1575129632000, 15, 19.264740
|
9, shijiazhuang, blue, 0, 1575129632000, 15, 19.264740
|
||||||
9, sijiazhuang, blue, 0, 1575129633000, 30, 12.460645
|
9, shijiazhuang, blue, 0, 1575129633000, 30, 12.460645
|
||||||
9, sijiazhuang, blue, 0, 1575129634000, 27, 17.608036
|
9, shijiazhuang, blue, 0, 1575129634000, 27, 17.608036
|
||||||
9, sijiazhuang, blue, 0, 1575129635000, 25, 13.493812
|
9, shijiazhuang, blue, 0, 1575129635000, 25, 13.493812
|
||||||
9, sijiazhuang, blue, 0, 1575129636000, 19, 10.955939
|
9, shijiazhuang, blue, 0, 1575129636000, 19, 10.955939
|
||||||
9, sijiazhuang, blue, 0, 1575129637000, 24, 11.956587
|
9, shijiazhuang, blue, 0, 1575129637000, 24, 11.956587
|
||||||
9, sijiazhuang, blue, 0, 1575129638000, 15, 19.141381
|
9, shijiazhuang, blue, 0, 1575129638000, 15, 19.141381
|
||||||
9, sijiazhuang, blue, 0, 1575129639000, 24, 14.801530
|
9, shijiazhuang, blue, 0, 1575129639000, 24, 14.801530
|
||||||
9, sijiazhuang, blue, 0, 1575129640000, 17, 14.347318
|
9, shijiazhuang, blue, 0, 1575129640000, 17, 14.347318
|
||||||
9, sijiazhuang, blue, 0, 1575129641000, 29, 14.803237
|
9, shijiazhuang, blue, 0, 1575129641000, 29, 14.803237
|
||||||
9, sijiazhuang, blue, 0, 1575129642000, 28, 10.342297
|
9, shijiazhuang, blue, 0, 1575129642000, 28, 10.342297
|
||||||
9, sijiazhuang, blue, 0, 1575129643000, 29, 19.368282
|
9, shijiazhuang, blue, 0, 1575129643000, 29, 19.368282
|
||||||
9, sijiazhuang, blue, 0, 1575129644000, 31, 17.491654
|
9, shijiazhuang, blue, 0, 1575129644000, 31, 17.491654
|
||||||
9, sijiazhuang, blue, 0, 1575129645000, 18, 13.161736
|
9, shijiazhuang, blue, 0, 1575129645000, 18, 13.161736
|
||||||
9, sijiazhuang, blue, 0, 1575129646000, 17, 16.067354
|
9, shijiazhuang, blue, 0, 1575129646000, 17, 16.067354
|
||||||
9, sijiazhuang, blue, 0, 1575129647000, 18, 13.736465
|
9, shijiazhuang, blue, 0, 1575129647000, 18, 13.736465
|
||||||
9, sijiazhuang, blue, 0, 1575129648000, 23, 19.103276
|
9, shijiazhuang, blue, 0, 1575129648000, 23, 19.103276
|
||||||
9, sijiazhuang, blue, 0, 1575129649000, 29, 16.075892
|
9, shijiazhuang, blue, 0, 1575129649000, 29, 16.075892
|
||||||
9, sijiazhuang, blue, 0, 1575129650000, 21, 10.728566
|
9, shijiazhuang, blue, 0, 1575129650000, 21, 10.728566
|
||||||
9, sijiazhuang, blue, 0, 1575129651000, 15, 18.921849
|
9, shijiazhuang, blue, 0, 1575129651000, 15, 18.921849
|
||||||
9, sijiazhuang, blue, 0, 1575129652000, 24, 16.914709
|
9, shijiazhuang, blue, 0, 1575129652000, 24, 16.914709
|
||||||
9, sijiazhuang, blue, 0, 1575129653000, 19, 13.501651
|
9, shijiazhuang, blue, 0, 1575129653000, 19, 13.501651
|
||||||
9, sijiazhuang, blue, 0, 1575129654000, 19, 13.538347
|
9, shijiazhuang, blue, 0, 1575129654000, 19, 13.538347
|
||||||
9, sijiazhuang, blue, 0, 1575129655000, 16, 13.261095
|
9, shijiazhuang, blue, 0, 1575129655000, 16, 13.261095
|
||||||
9, sijiazhuang, blue, 0, 1575129656000, 32, 16.315746
|
9, shijiazhuang, blue, 0, 1575129656000, 32, 16.315746
|
||||||
9, sijiazhuang, blue, 0, 1575129657000, 27, 16.400939
|
9, shijiazhuang, blue, 0, 1575129657000, 27, 16.400939
|
||||||
9, sijiazhuang, blue, 0, 1575129658000, 24, 13.321819
|
9, shijiazhuang, blue, 0, 1575129658000, 24, 13.321819
|
||||||
9, sijiazhuang, blue, 0, 1575129659000, 27, 19.070181
|
9, shijiazhuang, blue, 0, 1575129659000, 27, 19.070181
|
||||||
9, sijiazhuang, blue, 0, 1575129660000, 27, 13.040922
|
9, shijiazhuang, blue, 0, 1575129660000, 27, 13.040922
|
||||||
9, sijiazhuang, blue, 0, 1575129661000, 32, 10.872530
|
9, shijiazhuang, blue, 0, 1575129661000, 32, 10.872530
|
||||||
9, sijiazhuang, blue, 0, 1575129662000, 28, 16.428657
|
9, shijiazhuang, blue, 0, 1575129662000, 28, 16.428657
|
||||||
9, sijiazhuang, blue, 0, 1575129663000, 32, 13.883854
|
9, shijiazhuang, blue, 0, 1575129663000, 32, 13.883854
|
||||||
9, sijiazhuang, blue, 0, 1575129664000, 33, 14.299554
|
9, shijiazhuang, blue, 0, 1575129664000, 33, 14.299554
|
||||||
9, sijiazhuang, blue, 0, 1575129665000, 30, 16.445130
|
9, shijiazhuang, blue, 0, 1575129665000, 30, 16.445130
|
||||||
9, sijiazhuang, blue, 0, 1575129666000, 15, 18.059404
|
9, shijiazhuang, blue, 0, 1575129666000, 15, 18.059404
|
||||||
9, sijiazhuang, blue, 0, 1575129667000, 21, 12.348847
|
9, shijiazhuang, blue, 0, 1575129667000, 21, 12.348847
|
||||||
9, sijiazhuang, blue, 0, 1575129668000, 32, 13.315378
|
9, shijiazhuang, blue, 0, 1575129668000, 32, 13.315378
|
||||||
9, sijiazhuang, blue, 0, 1575129669000, 17, 15.689507
|
9, shijiazhuang, blue, 0, 1575129669000, 17, 15.689507
|
||||||
9, sijiazhuang, blue, 0, 1575129670000, 22, 15.591808
|
9, shijiazhuang, blue, 0, 1575129670000, 22, 15.591808
|
||||||
9, sijiazhuang, blue, 0, 1575129671000, 27, 16.386065
|
9, shijiazhuang, blue, 0, 1575129671000, 27, 16.386065
|
||||||
9, sijiazhuang, blue, 0, 1575129672000, 25, 10.564803
|
9, shijiazhuang, blue, 0, 1575129672000, 25, 10.564803
|
||||||
9, sijiazhuang, blue, 0, 1575129673000, 20, 12.276544
|
9, shijiazhuang, blue, 0, 1575129673000, 20, 12.276544
|
||||||
9, sijiazhuang, blue, 0, 1575129674000, 26, 15.828786
|
9, shijiazhuang, blue, 0, 1575129674000, 26, 15.828786
|
||||||
9, sijiazhuang, blue, 0, 1575129675000, 18, 12.236420
|
9, shijiazhuang, blue, 0, 1575129675000, 18, 12.236420
|
||||||
9, sijiazhuang, blue, 0, 1575129676000, 15, 19.439522
|
9, shijiazhuang, blue, 0, 1575129676000, 15, 19.439522
|
||||||
9, sijiazhuang, blue, 0, 1575129677000, 19, 19.831531
|
9, shijiazhuang, blue, 0, 1575129677000, 19, 19.831531
|
||||||
9, sijiazhuang, blue, 0, 1575129678000, 22, 17.115744
|
9, shijiazhuang, blue, 0, 1575129678000, 22, 17.115744
|
||||||
9, sijiazhuang, blue, 0, 1575129679000, 29, 19.879456
|
9, shijiazhuang, blue, 0, 1575129679000, 29, 19.879456
|
||||||
9, sijiazhuang, blue, 0, 1575129680000, 34, 10.207136
|
9, shijiazhuang, blue, 0, 1575129680000, 34, 10.207136
|
||||||
9, sijiazhuang, blue, 0, 1575129681000, 16, 17.633523
|
9, shijiazhuang, blue, 0, 1575129681000, 16, 17.633523
|
||||||
9, sijiazhuang, blue, 0, 1575129682000, 15, 14.227873
|
9, shijiazhuang, blue, 0, 1575129682000, 15, 14.227873
|
||||||
9, sijiazhuang, blue, 0, 1575129683000, 34, 12.027768
|
9, shijiazhuang, blue, 0, 1575129683000, 34, 12.027768
|
||||||
9, sijiazhuang, blue, 0, 1575129684000, 22, 11.376610
|
9, shijiazhuang, blue, 0, 1575129684000, 22, 11.376610
|
||||||
9, sijiazhuang, blue, 0, 1575129685000, 21, 11.711299
|
9, shijiazhuang, blue, 0, 1575129685000, 21, 11.711299
|
||||||
9, sijiazhuang, blue, 0, 1575129686000, 33, 14.281126
|
9, shijiazhuang, blue, 0, 1575129686000, 33, 14.281126
|
||||||
9, sijiazhuang, blue, 0, 1575129687000, 31, 10.895302
|
9, shijiazhuang, blue, 0, 1575129687000, 31, 10.895302
|
||||||
9, sijiazhuang, blue, 0, 1575129688000, 31, 13.971350
|
9, shijiazhuang, blue, 0, 1575129688000, 31, 13.971350
|
||||||
9, sijiazhuang, blue, 0, 1575129689000, 15, 15.262790
|
9, shijiazhuang, blue, 0, 1575129689000, 15, 15.262790
|
||||||
9, sijiazhuang, blue, 0, 1575129690000, 23, 12.440568
|
9, shijiazhuang, blue, 0, 1575129690000, 23, 12.440568
|
||||||
9, sijiazhuang, blue, 0, 1575129691000, 32, 19.731267
|
9, shijiazhuang, blue, 0, 1575129691000, 32, 19.731267
|
||||||
9, sijiazhuang, blue, 0, 1575129692000, 22, 10.518092
|
9, shijiazhuang, blue, 0, 1575129692000, 22, 10.518092
|
||||||
9, sijiazhuang, blue, 0, 1575129693000, 34, 17.863021
|
9, shijiazhuang, blue, 0, 1575129693000, 34, 17.863021
|
||||||
9, sijiazhuang, blue, 0, 1575129694000, 28, 11.478909
|
9, shijiazhuang, blue, 0, 1575129694000, 28, 11.478909
|
||||||
9, sijiazhuang, blue, 0, 1575129695000, 16, 15.075524
|
9, shijiazhuang, blue, 0, 1575129695000, 16, 15.075524
|
||||||
9, sijiazhuang, blue, 0, 1575129696000, 16, 10.292127
|
9, shijiazhuang, blue, 0, 1575129696000, 16, 10.292127
|
||||||
9, sijiazhuang, blue, 0, 1575129697000, 22, 13.716012
|
9, shijiazhuang, blue, 0, 1575129697000, 22, 13.716012
|
||||||
9, sijiazhuang, blue, 0, 1575129698000, 32, 10.906551
|
9, shijiazhuang, blue, 0, 1575129698000, 32, 10.906551
|
||||||
9, sijiazhuang, blue, 0, 1575129699000, 19, 18.386868
|
9, shijiazhuang, blue, 0, 1575129699000, 19, 18.386868
|
||||||
|
|
|
@ -0,0 +1,8 @@
|
||||||
|
module github.com/taosdata/TDengine/importSampleData
|
||||||
|
|
||||||
|
go 1.13
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/pelletier/go-toml v1.9.0 // indirect
|
||||||
|
github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28 // indirect
|
||||||
|
)
|
||||||
|
|
@ -14,7 +14,7 @@ var (
|
||||||
once sync.Once
|
once sync.Once
|
||||||
)
|
)
|
||||||
|
|
||||||
// Config inclue all scene import config
|
// Config include all scene import config
|
||||||
type Config struct {
|
type Config struct {
|
||||||
UserCases map[string]CaseConfig
|
UserCases map[string]CaseConfig
|
||||||
}
|
}
|
||||||
|
|
@ -24,7 +24,7 @@ type CaseConfig struct {
|
||||||
Format string
|
Format string
|
||||||
FilePath string
|
FilePath string
|
||||||
Separator string
|
Separator string
|
||||||
Stname string
|
StName string
|
||||||
SubTableName string
|
SubTableName string
|
||||||
Timestamp string
|
Timestamp string
|
||||||
TimestampType string
|
TimestampType string
|
||||||
|
|
|
||||||
|
|
@ -64,7 +64,7 @@
|
||||||
# monitorInterval 30
|
# monitorInterval 30
|
||||||
|
|
||||||
# number of seconds allowed for a dnode to be offline, for cluster only
|
# number of seconds allowed for a dnode to be offline, for cluster only
|
||||||
# offlineThreshold 8640000
|
# offlineThreshold 864000
|
||||||
|
|
||||||
# RPC re-try timer, millisecond
|
# RPC re-try timer, millisecond
|
||||||
# rpcTimer 300
|
# rpcTimer 300
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,7 @@
|
||||||
name: tdengine
|
name: tdengine
|
||||||
base: core18
|
base: core18
|
||||||
version: '2.0.20.0'
|
|
||||||
|
version: '2.1.0.0'
|
||||||
icon: snap/gui/t-dengine.svg
|
icon: snap/gui/t-dengine.svg
|
||||||
summary: an open-source big data platform designed and optimized for IoT.
|
summary: an open-source big data platform designed and optimized for IoT.
|
||||||
description: |
|
description: |
|
||||||
|
|
@ -72,7 +73,7 @@ parts:
|
||||||
- usr/bin/taosd
|
- usr/bin/taosd
|
||||||
- usr/bin/taos
|
- usr/bin/taos
|
||||||
- usr/bin/taosdemo
|
- usr/bin/taosdemo
|
||||||
- usr/lib/libtaos.so.2.0.20.0
|
- usr/lib/libtaos.so.2.1.0.0
|
||||||
- usr/lib/libtaos.so.1
|
- usr/lib/libtaos.so.1
|
||||||
- usr/lib/libtaos.so
|
- usr/lib/libtaos.so
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -48,6 +48,8 @@ void tscLockByThread(int64_t *lockedBy);
|
||||||
|
|
||||||
void tscUnlockByThread(int64_t *lockedBy);
|
void tscUnlockByThread(int64_t *lockedBy);
|
||||||
|
|
||||||
|
int tsInsertInitialCheck(SSqlObj *pSql);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
||||||
|
|
@ -175,7 +175,8 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo);
|
||||||
|
|
||||||
static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
|
static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
|
||||||
|
|
||||||
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
|
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize);
|
||||||
|
int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
|
||||||
|
|
||||||
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
|
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
|
||||||
|
|
||||||
|
|
@ -308,7 +309,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
|
||||||
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
|
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
|
||||||
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
|
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
|
||||||
uint32_t tscGetTableMetaMaxSize();
|
uint32_t tscGetTableMetaMaxSize();
|
||||||
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
|
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf);
|
||||||
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
|
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
|
||||||
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
|
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -21,8 +21,8 @@ extern "C" {
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#include "taosmsg.h"
|
#include "taosmsg.h"
|
||||||
#include "tstoken.h"
|
|
||||||
#include "tsclient.h"
|
#include "tsclient.h"
|
||||||
|
#include "ttoken.h"
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* get the number of tags of this table
|
* get the number of tags of this table
|
||||||
|
|
|
||||||
|
|
@ -84,6 +84,7 @@ typedef struct STableMeta {
|
||||||
|
|
||||||
typedef struct STableMetaInfo {
|
typedef struct STableMetaInfo {
|
||||||
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
|
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
|
||||||
|
uint32_t tableMetaSize;
|
||||||
SVgroupsInfo *vgroupList;
|
SVgroupsInfo *vgroupList;
|
||||||
SArray *pVgroupTables; // SArray<SVgroupTableInfo>
|
SArray *pVgroupTables; // SArray<SVgroupTableInfo>
|
||||||
|
|
||||||
|
|
@ -154,13 +155,12 @@ typedef struct STagCond {
|
||||||
|
|
||||||
typedef struct SParamInfo {
|
typedef struct SParamInfo {
|
||||||
int32_t idx;
|
int32_t idx;
|
||||||
char type;
|
uint8_t type;
|
||||||
uint8_t timePrec;
|
uint8_t timePrec;
|
||||||
int16_t bytes;
|
int16_t bytes;
|
||||||
uint32_t offset;
|
uint32_t offset;
|
||||||
} SParamInfo;
|
} SParamInfo;
|
||||||
|
|
||||||
|
|
||||||
typedef struct SBoundColumn {
|
typedef struct SBoundColumn {
|
||||||
bool hasVal; // denote if current column has bound or not
|
bool hasVal; // denote if current column has bound or not
|
||||||
int32_t offset; // all column offset value
|
int32_t offset; // all column offset value
|
||||||
|
|
@ -376,6 +376,7 @@ typedef struct SSqlObj {
|
||||||
tsem_t rspSem;
|
tsem_t rspSem;
|
||||||
SSqlCmd cmd;
|
SSqlCmd cmd;
|
||||||
SSqlRes res;
|
SSqlRes res;
|
||||||
|
bool isBind;
|
||||||
|
|
||||||
SSubqueryState subState;
|
SSubqueryState subState;
|
||||||
struct SSqlObj **pSubs;
|
struct SSqlObj **pSubs;
|
||||||
|
|
|
||||||
|
|
@ -100,7 +100,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp
|
||||||
/*
|
/*
|
||||||
* Class: com_taosdata_jdbc_TSDBJNIConnector
|
* Class: com_taosdata_jdbc_TSDBJNIConnector
|
||||||
* Method: isUpdateQueryImp
|
* Method: isUpdateQueryImp
|
||||||
* Signature: (J)J
|
* Signature: (JJ)I
|
||||||
*/
|
*/
|
||||||
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp
|
||||||
(JNIEnv *env, jobject jobj, jlong con, jlong tres);
|
(JNIEnv *env, jobject jobj, jlong con, jlong tres);
|
||||||
|
|
@ -185,6 +185,44 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp
|
||||||
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp
|
||||||
(JNIEnv *, jobject, jlong, jbyteArray);
|
(JNIEnv *, jobject, jlong, jbyteArray);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Class: com_taosdata_jdbc_TSDBJNIConnector
|
||||||
|
* Method: prepareStmtImp
|
||||||
|
* Signature: ([BJ)I
|
||||||
|
*/
|
||||||
|
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp
|
||||||
|
(JNIEnv *, jobject, jbyteArray, jlong);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Class: com_taosdata_jdbc_TSDBJNIConnector
|
||||||
|
* Method: setBindTableNameImp
|
||||||
|
* Signature: (JLjava/lang/String;J)I
|
||||||
|
*/
|
||||||
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp
|
||||||
|
(JNIEnv *, jobject, jlong, jstring, jlong);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Class: com_taosdata_jdbc_TSDBJNIConnector
|
||||||
|
* Method: bindColDataImp
|
||||||
|
* Signature: (J[B[B[BIIIIJ)J
|
||||||
|
*/
|
||||||
|
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp
|
||||||
|
(JNIEnv *, jobject, jlong, jbyteArray, jbyteArray, jbyteArray, jint, jint, jint, jint, jlong);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Class: com_taosdata_jdbc_TSDBJNIConnector
|
||||||
|
* Method: executeBatchImp
|
||||||
|
* Signature: (JJ)I
|
||||||
|
*/
|
||||||
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Class: com_taosdata_jdbc_TSDBJNIConnector
|
||||||
|
* Method: executeBatchImp
|
||||||
|
* Signature: (JJ)I
|
||||||
|
*/
|
||||||
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
||||||
|
|
@ -688,3 +688,193 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrec
|
||||||
|
|
||||||
return taos_result_precision(result);
|
return taos_result_precision(result);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(JNIEnv *env, jobject jobj, jbyteArray jsql, jlong con) {
|
||||||
|
TAOS *tscon = (TAOS *)con;
|
||||||
|
if (tscon == NULL) {
|
||||||
|
jniError("jobj:%p, connection already closed", jobj);
|
||||||
|
return JNI_CONNECTION_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (jsql == NULL) {
|
||||||
|
jniError("jobj:%p, conn:%p, empty sql string", jobj, tscon);
|
||||||
|
return JNI_SQL_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
jsize len = (*env)->GetArrayLength(env, jsql);
|
||||||
|
|
||||||
|
char *str = (char *) calloc(1, sizeof(char) * (len + 1));
|
||||||
|
if (str == NULL) {
|
||||||
|
jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon);
|
||||||
|
return JNI_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
|
||||||
|
(*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str);
|
||||||
|
if ((*env)->ExceptionCheck(env)) {
|
||||||
|
// todo handle error
|
||||||
|
}
|
||||||
|
|
||||||
|
TAOS_STMT* pStmt = taos_stmt_init(tscon);
|
||||||
|
int32_t code = taos_stmt_prepare(pStmt, str, len);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
|
||||||
|
return JNI_TDENGINE_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
free(str);
|
||||||
|
return (jlong) pStmt;
|
||||||
|
}
|
||||||
|
|
||||||
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp(JNIEnv *env, jobject jobj, jlong stmt, jstring jname, jlong conn) {
|
||||||
|
TAOS *tsconn = (TAOS *)conn;
|
||||||
|
if (tsconn == NULL) {
|
||||||
|
jniError("jobj:%p, connection already closed", jobj);
|
||||||
|
return JNI_CONNECTION_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
TAOS_STMT* pStmt = (TAOS_STMT*) stmt;
|
||||||
|
if (pStmt == NULL) {
|
||||||
|
jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn);
|
||||||
|
return JNI_SQL_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
const char *name = (*env)->GetStringUTFChars(env, jname, NULL);
|
||||||
|
|
||||||
|
int32_t code = taos_stmt_set_tbname((void*)stmt, name);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
(*env)->ReleaseStringUTFChars(env, jname, name);
|
||||||
|
|
||||||
|
jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
|
||||||
|
return JNI_TDENGINE_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
jniDebug("jobj:%p, conn:%p, set stmt bind table name:%s", jobj, tsconn, name);
|
||||||
|
|
||||||
|
(*env)->ReleaseStringUTFChars(env, jname, name);
|
||||||
|
return JNI_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(JNIEnv *env, jobject jobj, jlong stmt,
|
||||||
|
jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList, jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) {
|
||||||
|
TAOS *tscon = (TAOS *)con;
|
||||||
|
if (tscon == NULL) {
|
||||||
|
jniError("jobj:%p, connection already closed", jobj);
|
||||||
|
return JNI_CONNECTION_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
TAOS_STMT* pStmt = (TAOS_STMT*) stmt;
|
||||||
|
if (pStmt == NULL) {
|
||||||
|
jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
|
||||||
|
return JNI_SQL_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// todo refactor
|
||||||
|
jsize len = (*env)->GetArrayLength(env, colDataList);
|
||||||
|
char *colBuf = (char *)calloc(1, len);
|
||||||
|
(*env)->GetByteArrayRegion(env, colDataList, 0, len, (jbyte *)colBuf);
|
||||||
|
if ((*env)->ExceptionCheck(env)) {
|
||||||
|
// todo handle error
|
||||||
|
}
|
||||||
|
|
||||||
|
len = (*env)->GetArrayLength(env, lengthList);
|
||||||
|
char *lengthArray = (char*) calloc(1, len);
|
||||||
|
(*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte*) lengthArray);
|
||||||
|
if ((*env)->ExceptionCheck(env)) {
|
||||||
|
}
|
||||||
|
|
||||||
|
len = (*env)->GetArrayLength(env, nullList);
|
||||||
|
char *nullArray = (char*) calloc(1, len);
|
||||||
|
(*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte*) nullArray);
|
||||||
|
if ((*env)->ExceptionCheck(env)) {
|
||||||
|
}
|
||||||
|
|
||||||
|
// bind multi-rows with only one invoke.
|
||||||
|
TAOS_MULTI_BIND* b = calloc(1, sizeof(TAOS_MULTI_BIND));
|
||||||
|
|
||||||
|
b->num = numOfRows;
|
||||||
|
b->buffer_type = dataType; // todo check data type
|
||||||
|
b->buffer_length = IS_VAR_DATA_TYPE(dataType)? dataBytes:tDataTypes[dataType].bytes;
|
||||||
|
b->is_null = nullArray;
|
||||||
|
b->buffer = colBuf;
|
||||||
|
b->length = (int32_t*)lengthArray;
|
||||||
|
|
||||||
|
// set the length and is_null array
|
||||||
|
switch(dataType) {
|
||||||
|
case TSDB_DATA_TYPE_INT:
|
||||||
|
case TSDB_DATA_TYPE_TINYINT:
|
||||||
|
case TSDB_DATA_TYPE_SMALLINT:
|
||||||
|
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||||
|
case TSDB_DATA_TYPE_BIGINT: {
|
||||||
|
int32_t bytes = tDataTypes[dataType].bytes;
|
||||||
|
for(int32_t i = 0; i < numOfRows; ++i) {
|
||||||
|
b->length[i] = bytes;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
|
case TSDB_DATA_TYPE_BINARY: {
|
||||||
|
// do nothing
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t code = taos_stmt_bind_single_param_batch(pStmt, b, colIndex);
|
||||||
|
tfree(b->length);
|
||||||
|
tfree(b->buffer);
|
||||||
|
tfree(b->is_null);
|
||||||
|
tfree(b);
|
||||||
|
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
|
||||||
|
return JNI_TDENGINE_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
return JNI_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con) {
|
||||||
|
TAOS *tscon = (TAOS *)con;
|
||||||
|
if (tscon == NULL) {
|
||||||
|
jniError("jobj:%p, connection already closed", jobj);
|
||||||
|
return JNI_CONNECTION_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
TAOS_STMT *pStmt = (TAOS_STMT*) stmt;
|
||||||
|
if (pStmt == NULL) {
|
||||||
|
jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
|
||||||
|
return JNI_SQL_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
taos_stmt_add_batch(pStmt);
|
||||||
|
int32_t code = taos_stmt_execute(pStmt);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
|
||||||
|
return JNI_TDENGINE_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
jniDebug("jobj:%p, conn:%p, batch execute", jobj, tscon);
|
||||||
|
return JNI_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con) {
|
||||||
|
TAOS *tscon = (TAOS *)con;
|
||||||
|
if (tscon == NULL) {
|
||||||
|
jniError("jobj:%p, connection already closed", jobj);
|
||||||
|
return JNI_CONNECTION_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
TAOS_STMT *pStmt = (TAOS_STMT*) stmt;
|
||||||
|
if (pStmt == NULL) {
|
||||||
|
jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
|
||||||
|
return JNI_SQL_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t code = taos_stmt_close(pStmt);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
|
||||||
|
return JNI_TDENGINE_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon);
|
||||||
|
return JNI_SUCCESS;
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@ -49,7 +49,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
|
||||||
|
|
||||||
pSql->sqlstr = calloc(1, sqlLen + 1);
|
pSql->sqlstr = calloc(1, sqlLen + 1);
|
||||||
if (pSql->sqlstr == NULL) {
|
if (pSql->sqlstr == NULL) {
|
||||||
tscError("%p failed to malloc sql string buffer", pSql);
|
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
|
||||||
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
tscAsyncResultOnError(pSql);
|
tscAsyncResultOnError(pSql);
|
||||||
return;
|
return;
|
||||||
|
|
@ -81,7 +81,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
|
||||||
TAOS_RES * taos_query_ra(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param) {
|
TAOS_RES * taos_query_ra(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param) {
|
||||||
STscObj *pObj = (STscObj *)taos;
|
STscObj *pObj = (STscObj *)taos;
|
||||||
if (pObj == NULL || pObj->signature != pObj) {
|
if (pObj == NULL || pObj->signature != pObj) {
|
||||||
tscError("bug!!! pObj:%p", pObj);
|
tscError("pObj:%p is NULL or freed", pObj);
|
||||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||||
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
|
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
@ -288,7 +288,7 @@ static void tscAsyncResultCallback(SSchedMsg *pMsg) {
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(pSql->res.code != TSDB_CODE_SUCCESS);
|
assert(pSql->res.code != TSDB_CODE_SUCCESS);
|
||||||
tscError("%p invoke user specified function due to error occurred, code:%s", pSql, tstrerror(pSql->res.code));
|
tscError("0x%"PRIx64" async result callback, code:%s", pSql->self, tstrerror(pSql->res.code));
|
||||||
|
|
||||||
SSqlRes *pRes = &pSql->res;
|
SSqlRes *pRes = &pSql->res;
|
||||||
if (pSql->fp == NULL || pSql->fetchFp == NULL){
|
if (pSql->fp == NULL || pSql->fetchFp == NULL){
|
||||||
|
|
@ -368,7 +368,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
|
||||||
SSqlObj *sub = (SSqlObj*) res;
|
SSqlObj *sub = (SSqlObj*) res;
|
||||||
const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
|
const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code));
|
tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code));
|
||||||
goto _error;
|
goto _error;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -326,6 +326,7 @@ TAOS_ROW tscFetchRow(void *param) {
|
||||||
pCmd->command == TSDB_SQL_FETCH ||
|
pCmd->command == TSDB_SQL_FETCH ||
|
||||||
pCmd->command == TSDB_SQL_SHOW ||
|
pCmd->command == TSDB_SQL_SHOW ||
|
||||||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
|
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
|
||||||
|
pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE ||
|
||||||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
|
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
|
||||||
pCmd->command == TSDB_SQL_SELECT ||
|
pCmd->command == TSDB_SQL_SELECT ||
|
||||||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
|
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
|
||||||
|
|
@ -679,6 +680,9 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
|
||||||
assert(pTableMetaInfo->pTableMeta != NULL);
|
assert(pTableMetaInfo->pTableMeta != NULL);
|
||||||
|
|
||||||
const char* tableName = tNameGetTableName(&pTableMetaInfo->name);
|
const char* tableName = tNameGetTableName(&pTableMetaInfo->name);
|
||||||
|
if (pSql->cmd.command == TSDB_SQL_SHOW_CREATE_STABLE && !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
|
||||||
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
|
char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
|
|
@ -907,7 +911,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
|
||||||
*/
|
*/
|
||||||
pRes->qId = 0x1;
|
pRes->qId = 0x1;
|
||||||
pRes->numOfRows = 0;
|
pRes->numOfRows = 0;
|
||||||
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE) {
|
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE || pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE) {
|
||||||
pRes->code = tscProcessShowCreateTable(pSql);
|
pRes->code = tscProcessShowCreateTable(pSql);
|
||||||
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) {
|
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) {
|
||||||
pRes->code = tscProcessShowCreateDatabase(pSql);
|
pRes->code = tscProcessShowCreateDatabase(pSql);
|
||||||
|
|
@ -926,7 +930,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
|
||||||
pRes->code = tscProcessServStatus(pSql);
|
pRes->code = tscProcessServStatus(pSql);
|
||||||
} else {
|
} else {
|
||||||
pRes->code = TSDB_CODE_TSC_INVALID_SQL;
|
pRes->code = TSDB_CODE_TSC_INVALID_SQL;
|
||||||
tscError("%p not support command:%d", pSql, pCmd->command);
|
tscError("0x%"PRIx64" not support command:%d", pSql->self, pCmd->command);
|
||||||
}
|
}
|
||||||
|
|
||||||
// keep the code in local variable in order to avoid invalid read in case of async query
|
// keep the code in local variable in order to avoid invalid read in case of async query
|
||||||
|
|
|
||||||
|
|
@ -113,14 +113,14 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
|
||||||
|
|
||||||
if (pMemBuffer == NULL) {
|
if (pMemBuffer == NULL) {
|
||||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
|
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
|
||||||
tscError("%p pMemBuffer is NULL", pMemBuffer);
|
tscError("pMemBuffer:%p is NULL", pMemBuffer);
|
||||||
pRes->code = TSDB_CODE_TSC_APP_ERROR;
|
pRes->code = TSDB_CODE_TSC_APP_ERROR;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pDesc->pColumnModel == NULL) {
|
if (pDesc->pColumnModel == NULL) {
|
||||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
|
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
|
||||||
tscError("%p no local buffer or intermediate result format model", pSql);
|
tscError("0x%"PRIx64" no local buffer or intermediate result format model", pSql->self);
|
||||||
pRes->code = TSDB_CODE_TSC_APP_ERROR;
|
pRes->code = TSDB_CODE_TSC_APP_ERROR;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
@ -144,7 +144,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pDesc->pColumnModel->capacity >= pMemBuffer[0]->pageSize) {
|
if (pDesc->pColumnModel->capacity >= pMemBuffer[0]->pageSize) {
|
||||||
tscError("%p Invalid value of buffer capacity %d and page size %d ", pSql, pDesc->pColumnModel->capacity,
|
tscError("0x%"PRIx64" Invalid value of buffer capacity %d and page size %d ", pSql->self, pDesc->pColumnModel->capacity,
|
||||||
pMemBuffer[0]->pageSize);
|
pMemBuffer[0]->pageSize);
|
||||||
|
|
||||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
|
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
|
||||||
|
|
@ -156,7 +156,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
|
||||||
|
|
||||||
SLocalMerger *pMerger = (SLocalMerger *) calloc(1, size);
|
SLocalMerger *pMerger = (SLocalMerger *) calloc(1, size);
|
||||||
if (pMerger == NULL) {
|
if (pMerger == NULL) {
|
||||||
tscError("%p failed to create local merge structure, out of memory", pSql);
|
tscError("0x%"PRIx64" failed to create local merge structure, out of memory", pSql->self);
|
||||||
|
|
||||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
|
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
|
||||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
|
|
@ -180,7 +180,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
|
||||||
for (int32_t j = 0; j < numOfFlushoutInFile; ++j) {
|
for (int32_t j = 0; j < numOfFlushoutInFile; ++j) {
|
||||||
SLocalDataSource *ds = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize);
|
SLocalDataSource *ds = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize);
|
||||||
if (ds == NULL) {
|
if (ds == NULL) {
|
||||||
tscError("%p failed to create merge structure", pSql);
|
tscError("0x%"PRIx64" failed to create merge structure", pSql->self);
|
||||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
tfree(pMerger);
|
tfree(pMerger);
|
||||||
return;
|
return;
|
||||||
|
|
@ -538,7 +538,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
|
||||||
|
|
||||||
(*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * pSql->subState.numOfSub);
|
(*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * pSql->subState.numOfSub);
|
||||||
if (*pMemBuffer == NULL) {
|
if (*pMemBuffer == NULL) {
|
||||||
tscError("%p failed to allocate memory", pSql);
|
tscError("0x%"PRIx64" failed to allocate memory", pSql->self);
|
||||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
return pRes->code;
|
return pRes->code;
|
||||||
}
|
}
|
||||||
|
|
@ -547,7 +547,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
|
||||||
|
|
||||||
pSchema = (SSchema *)calloc(1, sizeof(SSchema) * size);
|
pSchema = (SSchema *)calloc(1, sizeof(SSchema) * size);
|
||||||
if (pSchema == NULL) {
|
if (pSchema == NULL) {
|
||||||
tscError("%p failed to allocate memory", pSql);
|
tscError("0x%"PRIx64" failed to allocate memory", pSql->self);
|
||||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
return pRes->code;
|
return pRes->code;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -29,8 +29,7 @@
|
||||||
#include "taosdef.h"
|
#include "taosdef.h"
|
||||||
|
|
||||||
#include "tscLog.h"
|
#include "tscLog.h"
|
||||||
#include "tscSubquery.h"
|
#include "ttoken.h"
|
||||||
#include "tstoken.h"
|
|
||||||
|
|
||||||
#include "tdataformat.h"
|
#include "tdataformat.h"
|
||||||
|
|
||||||
|
|
@ -68,7 +67,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
|
||||||
} else if (strncmp(pToken->z, "0", 1) == 0 && pToken->n == 1) {
|
} else if (strncmp(pToken->z, "0", 1) == 0 && pToken->n == 1) {
|
||||||
// do nothing
|
// do nothing
|
||||||
} else if (pToken->type == TK_INTEGER) {
|
} else if (pToken->type == TK_INTEGER) {
|
||||||
useconds = tsosStr2int64(pToken->z);
|
useconds = taosStr2int64(pToken->z);
|
||||||
} else {
|
} else {
|
||||||
// strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm);
|
// strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm);
|
||||||
if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
|
if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
|
||||||
|
|
@ -386,7 +385,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
||||||
* The server time/client time should not be mixed up in one sql string
|
* The server time/client time should not be mixed up in one sql string
|
||||||
* Do not employ sort operation is not involved if server time is used.
|
* Do not employ sort operation is not involved if server time is used.
|
||||||
*/
|
*/
|
||||||
static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
|
int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
|
||||||
// once the data block is disordered, we do NOT keep previous timestamp any more
|
// once the data block is disordered, we do NOT keep previous timestamp any more
|
||||||
if (!pDataBlocks->ordered) {
|
if (!pDataBlocks->ordered) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
|
|
@ -411,6 +410,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
|
||||||
|
|
||||||
if (k <= pDataBlocks->prevTS && (pDataBlocks->tsSource == TSDB_USE_CLI_TS)) {
|
if (k <= pDataBlocks->prevTS && (pDataBlocks->tsSource == TSDB_USE_CLI_TS)) {
|
||||||
pDataBlocks->ordered = false;
|
pDataBlocks->ordered = false;
|
||||||
|
tscWarn("NOT ordered input timestamp");
|
||||||
}
|
}
|
||||||
|
|
||||||
pDataBlocks->prevTS = k;
|
pDataBlocks->prevTS = k;
|
||||||
|
|
@ -464,22 +464,23 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
|
||||||
if (TK_STRING == sToken.type) {
|
if (TK_STRING == sToken.type) {
|
||||||
// delete escape character: \\, \', \"
|
// delete escape character: \\, \', \"
|
||||||
char delim = sToken.z[0];
|
char delim = sToken.z[0];
|
||||||
|
|
||||||
int32_t cnt = 0;
|
int32_t cnt = 0;
|
||||||
int32_t j = 0;
|
int32_t j = 0;
|
||||||
for (uint32_t k = 1; k < sToken.n - 1; ++k) {
|
for (uint32_t k = 1; k < sToken.n - 1; ++k) {
|
||||||
if (sToken.z[k] == delim || sToken.z[k] == '\\') {
|
if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) {
|
||||||
if (sToken.z[k + 1] == delim) {
|
|
||||||
cnt++;
|
|
||||||
tmpTokenBuf[j] = sToken.z[k + 1];
|
tmpTokenBuf[j] = sToken.z[k + 1];
|
||||||
|
|
||||||
|
cnt++;
|
||||||
j++;
|
j++;
|
||||||
k++;
|
k++;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
tmpTokenBuf[j] = sToken.z[k];
|
tmpTokenBuf[j] = sToken.z[k];
|
||||||
j++;
|
j++;
|
||||||
}
|
}
|
||||||
|
|
||||||
tmpTokenBuf[j] = 0;
|
tmpTokenBuf[j] = 0;
|
||||||
sToken.z = tmpTokenBuf;
|
sToken.z = tmpTokenBuf;
|
||||||
sToken.n -= 2 + cnt;
|
sToken.n -= 2 + cnt;
|
||||||
|
|
@ -693,6 +694,8 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
|
||||||
pBlocks->numOfRows = i + 1;
|
pBlocks->numOfRows = i + 1;
|
||||||
dataBuf->size = sizeof(SSubmitBlk) + dataBuf->rowSize * pBlocks->numOfRows;
|
dataBuf->size = sizeof(SSubmitBlk) + dataBuf->rowSize * pBlocks->numOfRows;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dataBuf->prevTS = INT64_MIN;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
|
static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
|
||||||
|
|
@ -705,19 +708,11 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
|
||||||
}
|
}
|
||||||
|
|
||||||
code = TSDB_CODE_TSC_INVALID_SQL;
|
code = TSDB_CODE_TSC_INVALID_SQL;
|
||||||
char *tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \"
|
char tmpTokenBuf[16*1024] = {0}; // used for deleting Escape character: \\, \', \"
|
||||||
if (NULL == tmpTokenBuf) {
|
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t numOfRows = 0;
|
int32_t numOfRows = 0;
|
||||||
code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);
|
code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);
|
||||||
|
|
||||||
free(tmpTokenBuf);
|
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
|
||||||
return code;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
|
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
|
||||||
SParamInfo *param = dataBuf->params + i;
|
SParamInfo *param = dataBuf->params + i;
|
||||||
if (param->idx == -1) {
|
if (param->idx == -1) {
|
||||||
|
|
@ -934,6 +929,42 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
||||||
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
|
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* parse columns after super table tags values.
|
||||||
|
* insert into table_name using super_table(tag_name1, tag_name2) tags(tag_val1, tag_val2)
|
||||||
|
* (normal_col1, normal_col2) values(normal_col1_val, normal_col2_val);
|
||||||
|
* */
|
||||||
|
index = 0;
|
||||||
|
sToken = tStrGetToken(sql, &index, false);
|
||||||
|
sql += index;
|
||||||
|
int numOfColsAfterTags = 0;
|
||||||
|
if (sToken.type == TK_LP) {
|
||||||
|
if (*boundColumn != NULL) {
|
||||||
|
return tscSQLSyntaxErrMsg(pCmd->payload, "bind columns again", sToken.z);
|
||||||
|
} else {
|
||||||
|
*boundColumn = &sToken.z[0];
|
||||||
|
}
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
index = 0;
|
||||||
|
sToken = tStrGetToken(sql, &index, false);
|
||||||
|
|
||||||
|
if (sToken.type == TK_RP) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
sql += index;
|
||||||
|
++numOfColsAfterTags;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (numOfColsAfterTags == 0 && (*boundColumn) != NULL) {
|
||||||
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
|
}
|
||||||
|
|
||||||
|
sToken = tStrGetToken(sql, &index, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
sql = sToken.z;
|
||||||
|
|
||||||
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
|
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
|
||||||
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
|
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
|
||||||
}
|
}
|
||||||
|
|
@ -975,7 +1006,7 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken) {
|
||||||
|
|
||||||
psTblToken->n = len;
|
psTblToken->n = len;
|
||||||
psTblToken->type = TK_ID;
|
psTblToken->type = TK_ID;
|
||||||
tSQLGetToken(psTblToken->z, &psTblToken->type);
|
tGetToken(psTblToken->z, &psTblToken->type);
|
||||||
|
|
||||||
return tscValidateName(psTblToken);
|
return tscValidateName(psTblToken);
|
||||||
}
|
}
|
||||||
|
|
@ -1147,7 +1178,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
tscError("%p async insert parse error, code:%s", pSql, tstrerror(code));
|
tscError("0x%"PRIx64" async insert parse error, code:%s", pSql->self, tstrerror(code));
|
||||||
pCmd->curSql = NULL;
|
pCmd->curSql = NULL;
|
||||||
goto _clean;
|
goto _clean;
|
||||||
}
|
}
|
||||||
|
|
@ -1262,7 +1293,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
||||||
goto _clean;
|
goto _clean;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
|
if ((pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) && taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
|
||||||
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
|
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
|
||||||
goto _clean;
|
goto _clean;
|
||||||
}
|
}
|
||||||
|
|
@ -1415,7 +1446,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
|
||||||
assert(pSql->res.numOfRows == 0);
|
assert(pSql->res.numOfRows == 0);
|
||||||
int32_t ret = fseek(fp, 0, SEEK_SET);
|
int32_t ret = fseek(fp, 0, SEEK_SET);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
|
tscError("0x%"PRIx64" failed to seek SEEK_SET since:%s", pSql->self, tstrerror(errno));
|
||||||
code = TAOS_SYSTEM_ERROR(errno);
|
code = TAOS_SYSTEM_ERROR(errno);
|
||||||
goto _error;
|
goto _error;
|
||||||
}
|
}
|
||||||
|
|
@ -1536,7 +1567,7 @@ void tscImportDataFromFile(SSqlObj *pSql) {
|
||||||
FILE *fp = fopen(pCmd->payload, "rb");
|
FILE *fp = fopen(pCmd->payload, "rb");
|
||||||
if (fp == NULL) {
|
if (fp == NULL) {
|
||||||
pSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
pSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
||||||
tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
|
tscError("0x%"PRIx64" failed to open file %s to load data from file, code:%s", pSql->self, pCmd->payload, tstrerror(pSql->res.code));
|
||||||
|
|
||||||
tfree(pSupporter);
|
tfree(pSupporter);
|
||||||
taos_free_result(pNew);
|
taos_free_result(pNew);
|
||||||
|
|
|
||||||
|
|
@ -24,6 +24,7 @@
|
||||||
#include "tscSubquery.h"
|
#include "tscSubquery.h"
|
||||||
|
|
||||||
int tsParseInsertSql(SSqlObj *pSql);
|
int tsParseInsertSql(SSqlObj *pSql);
|
||||||
|
int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start);
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
// functions for normal statement preparation
|
// functions for normal statement preparation
|
||||||
|
|
@ -43,10 +44,32 @@ typedef struct SNormalStmt {
|
||||||
tVariant* params;
|
tVariant* params;
|
||||||
} SNormalStmt;
|
} SNormalStmt;
|
||||||
|
|
||||||
|
typedef struct SMultiTbStmt {
|
||||||
|
bool nameSet;
|
||||||
|
uint64_t currentUid;
|
||||||
|
uint32_t tbNum;
|
||||||
|
SStrToken tbname;
|
||||||
|
SHashObj *pTableHash;
|
||||||
|
SHashObj *pTableBlockHashList; // data block for each table
|
||||||
|
} SMultiTbStmt;
|
||||||
|
|
||||||
|
typedef enum {
|
||||||
|
STMT_INIT = 1,
|
||||||
|
STMT_PREPARE,
|
||||||
|
STMT_SETTBNAME,
|
||||||
|
STMT_BIND,
|
||||||
|
STMT_BIND_COL,
|
||||||
|
STMT_ADD_BATCH,
|
||||||
|
STMT_EXECUTE
|
||||||
|
} STMT_ST;
|
||||||
|
|
||||||
typedef struct STscStmt {
|
typedef struct STscStmt {
|
||||||
bool isInsert;
|
bool isInsert;
|
||||||
|
bool multiTbInsert;
|
||||||
|
int16_t last;
|
||||||
STscObj* taos;
|
STscObj* taos;
|
||||||
SSqlObj* pSql;
|
SSqlObj* pSql;
|
||||||
|
SMultiTbStmt mtb;
|
||||||
SNormalStmt normal;
|
SNormalStmt normal;
|
||||||
} STscStmt;
|
} STscStmt;
|
||||||
|
|
||||||
|
|
@ -135,7 +158,7 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
|
||||||
break;
|
break;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
tscDebug("param %d: type mismatch or invalid", i);
|
tscDebug("0x%"PRIx64" bind column%d: type mismatch or invalid", stmt->pSql->self, i);
|
||||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -151,7 +174,7 @@ static int normalStmtPrepare(STscStmt* stmt) {
|
||||||
|
|
||||||
while (sql[i] != 0) {
|
while (sql[i] != 0) {
|
||||||
SStrToken token = {0};
|
SStrToken token = {0};
|
||||||
token.n = tSQLGetToken(sql + i, &token.type);
|
token.n = tGetToken(sql + i, &token.type);
|
||||||
|
|
||||||
if (token.type == TK_QUESTION) {
|
if (token.type == TK_QUESTION) {
|
||||||
sql[i] = 0;
|
sql[i] = 0;
|
||||||
|
|
@ -255,12 +278,13 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
// functions for insertion statement preparation
|
// functions for insertion statement preparation
|
||||||
static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
|
static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) {
|
||||||
if (bind->is_null != NULL && *(bind->is_null)) {
|
if (bind->is_null != NULL && *(bind->is_null)) {
|
||||||
setNull(data + param->offset, param->type, param->bytes);
|
setNull(data + param->offset, param->type, param->bytes);
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if 0
|
||||||
if (0) {
|
if (0) {
|
||||||
// allow user bind param data with different type
|
// allow user bind param data with different type
|
||||||
union {
|
union {
|
||||||
|
|
@ -641,6 +665,7 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
if (bind->buffer_type != param->type) {
|
if (bind->buffer_type != param->type) {
|
||||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
|
@ -690,12 +715,90 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
|
||||||
}
|
}
|
||||||
|
|
||||||
memcpy(data + param->offset, bind->buffer, size);
|
memcpy(data + param->offset, bind->buffer, size);
|
||||||
|
if (param->offset == 0) {
|
||||||
|
if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
|
||||||
|
tscError("invalid timestamp");
|
||||||
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) {
|
||||||
|
if (bind->buffer_type != param->type || !isValidDataType(param->type)) {
|
||||||
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (IS_VAR_DATA_TYPE(param->type) && bind->length == NULL) {
|
||||||
|
tscError("BINARY/NCHAR no length");
|
||||||
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int i = 0; i < bind->num; ++i) {
|
||||||
|
char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (rowNum + i);
|
||||||
|
|
||||||
|
if (bind->is_null != NULL && bind->is_null[i]) {
|
||||||
|
setNull(data + param->offset, param->type, param->bytes);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!IS_VAR_DATA_TYPE(param->type)) {
|
||||||
|
memcpy(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, tDataTypes[param->type].bytes);
|
||||||
|
|
||||||
|
if (param->offset == 0) {
|
||||||
|
if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
|
||||||
|
tscError("invalid timestamp");
|
||||||
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if (param->type == TSDB_DATA_TYPE_BINARY) {
|
||||||
|
if (bind->length[i] > (uintptr_t)param->bytes) {
|
||||||
|
tscError("binary length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]);
|
||||||
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
}
|
||||||
|
int16_t bsize = (short)bind->length[i];
|
||||||
|
STR_WITH_SIZE_TO_VARSTR(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, bsize);
|
||||||
|
} else if (param->type == TSDB_DATA_TYPE_NCHAR) {
|
||||||
|
if (bind->length[i] > (uintptr_t)param->bytes) {
|
||||||
|
tscError("nchar string length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]);
|
||||||
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t output = 0;
|
||||||
|
if (!taosMbsToUcs4((char *)bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
|
||||||
|
tscError("convert nchar string to UCS4_LE failed:%s", (char*)((char *)bind->buffer + bind->buffer_length * i));
|
||||||
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
varDataSetLen(data + param->offset, output);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
|
static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
|
||||||
SSqlCmd* pCmd = &stmt->pSql->cmd;
|
SSqlCmd* pCmd = &stmt->pSql->cmd;
|
||||||
|
STscStmt* pStmt = (STscStmt*)stmt;
|
||||||
|
|
||||||
|
STableDataBlocks* pBlock = NULL;
|
||||||
|
|
||||||
|
if (pStmt->multiTbInsert) {
|
||||||
|
if (pCmd->pTableBlockHashList == NULL) {
|
||||||
|
tscError("0x%"PRIx64" Table block hash list is empty", pStmt->pSql->self);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
|
||||||
|
if (t1 == NULL) {
|
||||||
|
tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pStmt->pSql->self, pStmt->mtb.currentUid);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
pBlock = *t1;
|
||||||
|
} else {
|
||||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
|
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
|
||||||
|
|
||||||
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
||||||
|
|
@ -703,16 +806,15 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
|
||||||
pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
|
pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
STableDataBlocks* pBlock = NULL;
|
|
||||||
|
|
||||||
int32_t ret =
|
int32_t ret =
|
||||||
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
|
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
|
||||||
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
|
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
// todo handle error
|
return ret;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
uint32_t totalDataSize = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
|
uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + 1) * pBlock->rowSize;
|
||||||
if (totalDataSize > pBlock->nAllocSize) {
|
if (totalDataSize > pBlock->nAllocSize) {
|
||||||
const double factor = 1.5;
|
const double factor = 1.5;
|
||||||
|
|
||||||
|
|
@ -729,9 +831,9 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
|
||||||
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
|
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
|
||||||
SParamInfo* param = &pBlock->params[j];
|
SParamInfo* param = &pBlock->params[j];
|
||||||
|
|
||||||
int code = doBindParam(data, param, &bind[param->idx]);
|
int code = doBindParam(pBlock, data, param, &bind[param->idx], 1);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
tscDebug("param %d: type mismatch or invalid", param->idx);
|
tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -739,9 +841,135 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
|
||||||
|
SSqlCmd* pCmd = &stmt->pSql->cmd;
|
||||||
|
STscStmt* pStmt = (STscStmt*)stmt;
|
||||||
|
int rowNum = bind->num;
|
||||||
|
|
||||||
|
STableDataBlocks* pBlock = NULL;
|
||||||
|
|
||||||
|
if (pStmt->multiTbInsert) {
|
||||||
|
if (pCmd->pTableBlockHashList == NULL) {
|
||||||
|
tscError("0x%"PRIx64" Table block hash list is empty", pStmt->pSql->self);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
|
||||||
|
if (t1 == NULL) {
|
||||||
|
tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pStmt->pSql->self, pStmt->mtb.currentUid);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
pBlock = *t1;
|
||||||
|
} else {
|
||||||
|
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
|
||||||
|
|
||||||
|
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
||||||
|
if (pCmd->pTableBlockHashList == NULL) {
|
||||||
|
pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t ret =
|
||||||
|
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
|
||||||
|
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
|
||||||
|
if (ret != 0) {
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
assert(colIdx == -1 || (colIdx >= 0 && colIdx < pBlock->numOfParams));
|
||||||
|
|
||||||
|
uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + rowNum) * pBlock->rowSize;
|
||||||
|
if (totalDataSize > pBlock->nAllocSize) {
|
||||||
|
const double factor = 1.5;
|
||||||
|
|
||||||
|
void* tmp = realloc(pBlock->pData, (uint32_t)(totalDataSize * factor));
|
||||||
|
if (tmp == NULL) {
|
||||||
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
|
||||||
|
pBlock->pData = (char*)tmp;
|
||||||
|
pBlock->nAllocSize = (uint32_t)(totalDataSize * factor);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (colIdx == -1) {
|
||||||
|
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
|
||||||
|
SParamInfo* param = &pBlock->params[j];
|
||||||
|
if (bind[param->idx].num != rowNum) {
|
||||||
|
tscError("0x%"PRIx64" param %d: num[%d:%d] not match", pStmt->pSql->self, param->idx, rowNum, bind[param->idx].num);
|
||||||
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pCmd->batchSize += rowNum - 1;
|
||||||
|
} else {
|
||||||
|
SParamInfo* param = &pBlock->params[colIdx];
|
||||||
|
|
||||||
|
int code = doBindBatchParam(pBlock, param, bind, pCmd->batchSize);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (colIdx == (pBlock->numOfParams - 1)) {
|
||||||
|
pCmd->batchSize += rowNum - 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static int insertStmtUpdateBatch(STscStmt* stmt) {
|
||||||
|
SSqlObj* pSql = stmt->pSql;
|
||||||
|
SSqlCmd* pCmd = &pSql->cmd;
|
||||||
|
STableDataBlocks* pBlock = NULL;
|
||||||
|
|
||||||
|
if (pCmd->batchSize > INT16_MAX) {
|
||||||
|
tscError("too many record:%d", pCmd->batchSize);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
assert(pCmd->numOfClause == 1);
|
||||||
|
if (taosHashGetSize(pCmd->pTableBlockHashList) == 0) {
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&stmt->mtb.currentUid, sizeof(stmt->mtb.currentUid));
|
||||||
|
if (t1 == NULL) {
|
||||||
|
tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pSql->self, stmt->mtb.currentUid);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
pBlock = *t1;
|
||||||
|
|
||||||
|
STableMeta* pTableMeta = pBlock->pTableMeta;
|
||||||
|
|
||||||
|
pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
|
||||||
|
SSubmitBlk* pBlk = (SSubmitBlk*) pBlock->pData;
|
||||||
|
pBlk->numOfRows = pCmd->batchSize;
|
||||||
|
pBlk->dataLen = 0;
|
||||||
|
pBlk->uid = pTableMeta->id.uid;
|
||||||
|
pBlk->tid = pTableMeta->id.tid;
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
static int insertStmtAddBatch(STscStmt* stmt) {
|
static int insertStmtAddBatch(STscStmt* stmt) {
|
||||||
SSqlCmd* pCmd = &stmt->pSql->cmd;
|
SSqlCmd* pCmd = &stmt->pSql->cmd;
|
||||||
++pCmd->batchSize;
|
++pCmd->batchSize;
|
||||||
|
|
||||||
|
if (stmt->multiTbInsert) {
|
||||||
|
return insertStmtUpdateBatch(stmt);
|
||||||
|
}
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -835,6 +1063,83 @@ static int insertStmtExecute(STscStmt* stmt) {
|
||||||
return pSql->res.code;
|
return pSql->res.code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void insertBatchClean(STscStmt* pStmt) {
|
||||||
|
SSqlCmd *pCmd = &pStmt->pSql->cmd;
|
||||||
|
SSqlObj *pSql = pStmt->pSql;
|
||||||
|
int32_t size = taosHashGetSize(pCmd->pTableBlockHashList);
|
||||||
|
|
||||||
|
// data block reset
|
||||||
|
pCmd->batchSize = 0;
|
||||||
|
|
||||||
|
for(int32_t i = 0; i < size; ++i) {
|
||||||
|
if (pCmd->pTableNameList && pCmd->pTableNameList[i]) {
|
||||||
|
tfree(pCmd->pTableNameList[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tfree(pCmd->pTableNameList);
|
||||||
|
|
||||||
|
/*
|
||||||
|
STableDataBlocks** p = taosHashIterate(pCmd->pTableBlockHashList, NULL);
|
||||||
|
|
||||||
|
STableDataBlocks* pOneTableBlock = *p;
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
|
||||||
|
|
||||||
|
pOneTableBlock->size = sizeof(SSubmitBlk);
|
||||||
|
|
||||||
|
pBlocks->numOfRows = 0;
|
||||||
|
|
||||||
|
p = taosHashIterate(pCmd->pTableBlockHashList, p);
|
||||||
|
if (p == NULL) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
pOneTableBlock = *p;
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
|
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
|
||||||
|
pCmd->numOfTables = 0;
|
||||||
|
|
||||||
|
taosHashEmpty(pCmd->pTableBlockHashList);
|
||||||
|
tscFreeSqlResult(pSql);
|
||||||
|
tscFreeSubobj(pSql);
|
||||||
|
tfree(pSql->pSubs);
|
||||||
|
pSql->subState.numOfSub = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int insertBatchStmtExecute(STscStmt* pStmt) {
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
if(pStmt->mtb.nameSet == false) {
|
||||||
|
tscError("0x%"PRIx64" no table name set", pStmt->pSql->self);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
pStmt->pSql->retry = pStmt->pSql->maxRetry + 1; //no retry
|
||||||
|
|
||||||
|
if (taosHashGetSize(pStmt->pSql->cmd.pTableBlockHashList) > 0) { // merge according to vgId
|
||||||
|
if ((code = tscMergeTableDataBlocks(pStmt->pSql, false)) != TSDB_CODE_SUCCESS) {
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
code = tscHandleMultivnodeInsert(pStmt->pSql);
|
||||||
|
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for the callback function to post the semaphore
|
||||||
|
tsem_wait(&pStmt->pSql->rspSem);
|
||||||
|
|
||||||
|
insertBatchClean(pStmt);
|
||||||
|
|
||||||
|
return pStmt->pSql->res.code;
|
||||||
|
}
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
// interface functions
|
// interface functions
|
||||||
|
|
||||||
|
|
@ -866,7 +1171,9 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
|
||||||
pSql->signature = pSql;
|
pSql->signature = pSql;
|
||||||
pSql->pTscObj = pObj;
|
pSql->pTscObj = pObj;
|
||||||
pSql->maxRetry = TSDB_MAX_REPLICA;
|
pSql->maxRetry = TSDB_MAX_REPLICA;
|
||||||
|
pSql->isBind = true;
|
||||||
pStmt->pSql = pSql;
|
pStmt->pSql = pSql;
|
||||||
|
pStmt->last = STMT_INIT;
|
||||||
|
|
||||||
return pStmt;
|
return pStmt;
|
||||||
}
|
}
|
||||||
|
|
@ -879,6 +1186,13 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
|
||||||
return TSDB_CODE_TSC_DISCONNECTED;
|
return TSDB_CODE_TSC_DISCONNECTED;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (pStmt->last != STMT_INIT) {
|
||||||
|
tscError("prepare status error, last:%d", pStmt->last);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
pStmt->last = STMT_PREPARE;
|
||||||
|
|
||||||
SSqlObj* pSql = pStmt->pSql;
|
SSqlObj* pSql = pStmt->pSql;
|
||||||
size_t sqlLen = strlen(sql);
|
size_t sqlLen = strlen(sql);
|
||||||
|
|
||||||
|
|
@ -917,6 +1231,36 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
|
||||||
|
|
||||||
registerSqlObj(pSql);
|
registerSqlObj(pSql);
|
||||||
|
|
||||||
|
int32_t ret = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
|
if ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS) {
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t index = 0;
|
||||||
|
SStrToken sToken = tStrGetToken(pCmd->curSql, &index, false);
|
||||||
|
|
||||||
|
if (sToken.n == 0) {
|
||||||
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (sToken.n == 1 && sToken.type == TK_QUESTION) {
|
||||||
|
pStmt->multiTbInsert = true;
|
||||||
|
pStmt->mtb.tbname = sToken;
|
||||||
|
pStmt->mtb.nameSet = false;
|
||||||
|
if (pStmt->mtb.pTableHash == NULL) {
|
||||||
|
pStmt->mtb.pTableHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
|
||||||
|
}
|
||||||
|
if (pStmt->mtb.pTableBlockHashList == NULL) {
|
||||||
|
pStmt->mtb.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
pStmt->multiTbInsert = false;
|
||||||
|
memset(&pStmt->mtb, 0, sizeof(pStmt->mtb));
|
||||||
|
|
||||||
int32_t code = tsParseSql(pSql, true);
|
int32_t code = tsParseSql(pSql, true);
|
||||||
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
||||||
// wait for the callback function to post the semaphore
|
// wait for the callback function to post the semaphore
|
||||||
|
|
@ -931,6 +1275,105 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
|
||||||
return normalStmtPrepare(pStmt);
|
return normalStmtPrepare(pStmt);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) {
|
||||||
|
STscStmt* pStmt = (STscStmt*)stmt;
|
||||||
|
SSqlObj* pSql = pStmt->pSql;
|
||||||
|
SSqlCmd* pCmd = &pSql->cmd;
|
||||||
|
|
||||||
|
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
|
||||||
|
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||||
|
return TSDB_CODE_TSC_DISCONNECTED;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (name == NULL) {
|
||||||
|
terrno = TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
tscError("0x%"PRIx64" name is NULL", pSql->self);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pStmt->multiTbInsert == false || !tscIsInsertData(pSql->sqlstr)) {
|
||||||
|
terrno = TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
tscError("0x%"PRIx64" not multi table insert", pSql->self);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pStmt->last == STMT_INIT || pStmt->last == STMT_BIND || pStmt->last == STMT_BIND_COL) {
|
||||||
|
tscError("0x%"PRIx64" settbname status error, last:%d", pSql->self, pStmt->last);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
pStmt->last = STMT_SETTBNAME;
|
||||||
|
|
||||||
|
uint64_t* uid = (uint64_t*)taosHashGet(pStmt->mtb.pTableHash, name, strlen(name));
|
||||||
|
if (uid != NULL) {
|
||||||
|
pStmt->mtb.currentUid = *uid;
|
||||||
|
|
||||||
|
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pStmt->mtb.pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
|
||||||
|
if (t1 == NULL) {
|
||||||
|
tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pSql->self, pStmt->mtb.currentUid);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData;
|
||||||
|
pCmd->batchSize = pBlk->numOfRows;
|
||||||
|
|
||||||
|
taosHashPut(pCmd->pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)t1, POINTER_BYTES);
|
||||||
|
|
||||||
|
tscDebug("0x%"PRIx64" table:%s is already prepared, uid:%" PRIu64, pSql->self, name, pStmt->mtb.currentUid);
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
pStmt->mtb.tbname = tscReplaceStrToken(&pSql->sqlstr, &pStmt->mtb.tbname, name);
|
||||||
|
pStmt->mtb.nameSet = true;
|
||||||
|
|
||||||
|
tscDebug("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
|
||||||
|
|
||||||
|
pSql->cmd.parseFinished = 0;
|
||||||
|
pSql->cmd.numOfParams = 0;
|
||||||
|
pSql->cmd.batchSize = 0;
|
||||||
|
|
||||||
|
if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) {
|
||||||
|
SHashObj* hashList = pCmd->pTableBlockHashList;
|
||||||
|
pCmd->pTableBlockHashList = NULL;
|
||||||
|
tscResetSqlCmd(pCmd, true);
|
||||||
|
pCmd->pTableBlockHashList = hashList;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t code = tsParseSql(pStmt->pSql, true);
|
||||||
|
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
||||||
|
// wait for the callback function to post the semaphore
|
||||||
|
tsem_wait(&pStmt->pSql->rspSem);
|
||||||
|
|
||||||
|
code = pStmt->pSql->res.code;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (code == TSDB_CODE_SUCCESS) {
|
||||||
|
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
|
||||||
|
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
||||||
|
STableDataBlocks* pBlock = NULL;
|
||||||
|
code = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
|
||||||
|
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
SSubmitBlk* blk = (SSubmitBlk*)pBlock->pData;
|
||||||
|
blk->numOfRows = 0;
|
||||||
|
|
||||||
|
pStmt->mtb.currentUid = pTableMeta->id.uid;
|
||||||
|
pStmt->mtb.tbNum++;
|
||||||
|
|
||||||
|
taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
|
||||||
|
|
||||||
|
taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid));
|
||||||
|
|
||||||
|
tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid);
|
||||||
|
}
|
||||||
|
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
int taos_stmt_close(TAOS_STMT* stmt) {
|
int taos_stmt_close(TAOS_STMT* stmt) {
|
||||||
STscStmt* pStmt = (STscStmt*)stmt;
|
STscStmt* pStmt = (STscStmt*)stmt;
|
||||||
if (!pStmt->isInsert) {
|
if (!pStmt->isInsert) {
|
||||||
|
|
@ -943,6 +1386,13 @@ int taos_stmt_close(TAOS_STMT* stmt) {
|
||||||
}
|
}
|
||||||
free(normal->parts);
|
free(normal->parts);
|
||||||
free(normal->sql);
|
free(normal->sql);
|
||||||
|
} else {
|
||||||
|
if (pStmt->multiTbInsert) {
|
||||||
|
taosHashCleanup(pStmt->mtb.pTableHash);
|
||||||
|
pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, true);
|
||||||
|
taosHashCleanup(pStmt->pSql->cmd.pTableBlockHashList);
|
||||||
|
pStmt->pSql->cmd.pTableBlockHashList = NULL;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
taos_free_result(pStmt->pSql);
|
taos_free_result(pStmt->pSql);
|
||||||
|
|
@ -952,18 +1402,122 @@ int taos_stmt_close(TAOS_STMT* stmt) {
|
||||||
|
|
||||||
int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
|
int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
|
||||||
STscStmt* pStmt = (STscStmt*)stmt;
|
STscStmt* pStmt = (STscStmt*)stmt;
|
||||||
|
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
|
||||||
|
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||||
|
return TSDB_CODE_TSC_DISCONNECTED;
|
||||||
|
}
|
||||||
|
|
||||||
if (pStmt->isInsert) {
|
if (pStmt->isInsert) {
|
||||||
|
if (pStmt->multiTbInsert) {
|
||||||
|
if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) {
|
||||||
|
tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) {
|
||||||
|
tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pStmt->last = STMT_BIND;
|
||||||
|
|
||||||
return insertStmtBindParam(pStmt, bind);
|
return insertStmtBindParam(pStmt, bind);
|
||||||
} else {
|
} else {
|
||||||
return normalStmtBindParam(pStmt, bind);
|
return normalStmtBindParam(pStmt, bind);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) {
|
||||||
|
STscStmt* pStmt = (STscStmt*)stmt;
|
||||||
|
|
||||||
|
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
|
||||||
|
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||||
|
return TSDB_CODE_TSC_DISCONNECTED;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
|
||||||
|
tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!pStmt->isInsert) {
|
||||||
|
tscError("0x%"PRIx64" not or invalid batch insert", pStmt->pSql->self);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pStmt->multiTbInsert) {
|
||||||
|
if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) {
|
||||||
|
tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) {
|
||||||
|
tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pStmt->last = STMT_BIND;
|
||||||
|
|
||||||
|
return insertStmtBindParamBatch(pStmt, bind, -1);
|
||||||
|
}
|
||||||
|
|
||||||
|
int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
|
||||||
|
STscStmt* pStmt = (STscStmt*)stmt;
|
||||||
|
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
|
||||||
|
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||||
|
return TSDB_CODE_TSC_DISCONNECTED;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
|
||||||
|
tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!pStmt->isInsert) {
|
||||||
|
tscError("0x%"PRIx64" not or invalid batch insert", pStmt->pSql->self);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pStmt->multiTbInsert) {
|
||||||
|
if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL) {
|
||||||
|
tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL && pStmt->last != STMT_EXECUTE) {
|
||||||
|
tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pStmt->last = STMT_BIND_COL;
|
||||||
|
|
||||||
|
return insertStmtBindParamBatch(pStmt, bind, colIdx);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
int taos_stmt_add_batch(TAOS_STMT* stmt) {
|
int taos_stmt_add_batch(TAOS_STMT* stmt) {
|
||||||
STscStmt* pStmt = (STscStmt*)stmt;
|
STscStmt* pStmt = (STscStmt*)stmt;
|
||||||
|
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
|
||||||
|
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||||
|
return TSDB_CODE_TSC_DISCONNECTED;
|
||||||
|
}
|
||||||
|
|
||||||
if (pStmt->isInsert) {
|
if (pStmt->isInsert) {
|
||||||
|
if (pStmt->last != STMT_BIND && pStmt->last != STMT_BIND_COL) {
|
||||||
|
tscError("0x%"PRIx64" add batch status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
pStmt->last = STMT_ADD_BATCH;
|
||||||
|
|
||||||
return insertStmtAddBatch(pStmt);
|
return insertStmtAddBatch(pStmt);
|
||||||
}
|
}
|
||||||
|
|
||||||
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
|
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -978,8 +1532,24 @@ int taos_stmt_reset(TAOS_STMT* stmt) {
|
||||||
int taos_stmt_execute(TAOS_STMT* stmt) {
|
int taos_stmt_execute(TAOS_STMT* stmt) {
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
STscStmt* pStmt = (STscStmt*)stmt;
|
STscStmt* pStmt = (STscStmt*)stmt;
|
||||||
|
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
|
||||||
|
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||||
|
return TSDB_CODE_TSC_DISCONNECTED;
|
||||||
|
}
|
||||||
|
|
||||||
if (pStmt->isInsert) {
|
if (pStmt->isInsert) {
|
||||||
|
if (pStmt->last != STMT_ADD_BATCH) {
|
||||||
|
tscError("0x%"PRIx64" exec status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
pStmt->last = STMT_EXECUTE;
|
||||||
|
|
||||||
|
if (pStmt->multiTbInsert) {
|
||||||
|
ret = insertBatchStmtExecute(pStmt);
|
||||||
|
} else {
|
||||||
ret = insertStmtExecute(pStmt);
|
ret = insertStmtExecute(pStmt);
|
||||||
|
}
|
||||||
} else { // normal stmt query
|
} else { // normal stmt query
|
||||||
char* sql = normalStmtBuildSql(pStmt);
|
char* sql = normalStmtBuildSql(pStmt);
|
||||||
if (sql == NULL) {
|
if (sql == NULL) {
|
||||||
|
|
@ -1074,7 +1644,7 @@ int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (idx<0 || idx>=pBlock->numOfParams) {
|
if (idx<0 || idx>=pBlock->numOfParams) {
|
||||||
tscError("param %d: out of range", idx);
|
tscError("0x%"PRIx64" param %d: out of range", pStmt->pSql->self, idx);
|
||||||
abort();
|
abort();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -104,7 +104,7 @@ void tscSaveSlowQuery(SSqlObj *pSql) {
|
||||||
|
|
||||||
char *sql = malloc(sqlSize);
|
char *sql = malloc(sqlSize);
|
||||||
if (sql == NULL) {
|
if (sql == NULL) {
|
||||||
tscError("%p failed to allocate memory to sent slow query to dnode", pSql);
|
tscError("0x%"PRIx64" failed to allocate memory to sent slow query to dnode", pSql->self);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -21,20 +21,20 @@
|
||||||
#endif // __APPLE__
|
#endif // __APPLE__
|
||||||
|
|
||||||
#include "os.h"
|
#include "os.h"
|
||||||
#include "ttype.h"
|
|
||||||
#include "texpr.h"
|
|
||||||
#include "taos.h"
|
#include "taos.h"
|
||||||
#include "taosmsg.h"
|
#include "taosmsg.h"
|
||||||
#include "tcompare.h"
|
#include "tcompare.h"
|
||||||
|
#include "texpr.h"
|
||||||
#include "tname.h"
|
#include "tname.h"
|
||||||
#include "tscLog.h"
|
#include "tscLog.h"
|
||||||
#include "tscUtil.h"
|
#include "tscUtil.h"
|
||||||
#include "tschemautil.h"
|
#include "tschemautil.h"
|
||||||
#include "tsclient.h"
|
#include "tsclient.h"
|
||||||
#include "tstoken.h"
|
|
||||||
#include "tstrbuild.h"
|
#include "tstrbuild.h"
|
||||||
|
#include "ttoken.h"
|
||||||
#include "ttokendef.h"
|
#include "ttokendef.h"
|
||||||
#include "qScript.h"
|
#include "qScript.h"
|
||||||
|
#include "ttype.h"
|
||||||
#include "qUtil.h"
|
#include "qUtil.h"
|
||||||
|
|
||||||
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
|
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
|
||||||
|
|
@ -65,7 +65,7 @@ static char* getAccountId(SSqlObj* pSql);
|
||||||
|
|
||||||
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
|
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
|
||||||
static char* cloneCurrentDBName(SSqlObj* pSql);
|
static char* cloneCurrentDBName(SSqlObj* pSql);
|
||||||
static bool hasSpecifyDB(SStrToken* pTableName);
|
static int32_t getDelimiterIndex(SStrToken* pTableName);
|
||||||
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
|
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
|
||||||
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
|
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
|
||||||
|
|
||||||
|
|
@ -570,17 +570,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
||||||
|
|
||||||
case TSDB_SQL_DESCRIBE_TABLE: {
|
case TSDB_SQL_DESCRIBE_TABLE: {
|
||||||
const char* msg1 = "invalid table name";
|
const char* msg1 = "invalid table name";
|
||||||
const char* msg2 = "table name too long";
|
|
||||||
|
|
||||||
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
|
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
|
||||||
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
|
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!tscValidateTableNameLength(pToken->n)) {
|
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
|
||||||
}
|
|
||||||
|
|
||||||
// additional msg has been attached already
|
// additional msg has been attached already
|
||||||
code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
|
code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
|
@ -589,19 +583,15 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
||||||
|
|
||||||
return tscGetTableMeta(pSql, pTableMetaInfo);
|
return tscGetTableMeta(pSql, pTableMetaInfo);
|
||||||
}
|
}
|
||||||
|
case TSDB_SQL_SHOW_CREATE_STABLE:
|
||||||
case TSDB_SQL_SHOW_CREATE_TABLE: {
|
case TSDB_SQL_SHOW_CREATE_TABLE: {
|
||||||
const char* msg1 = "invalid table name";
|
const char* msg1 = "invalid table name";
|
||||||
const char* msg2 = "table name is too long";
|
|
||||||
|
|
||||||
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
|
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
|
||||||
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
|
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!tscValidateTableNameLength(pToken->n)) {
|
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
|
||||||
}
|
|
||||||
|
|
||||||
code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
|
code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
return code;
|
return code;
|
||||||
|
|
@ -788,18 +778,26 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
||||||
// set the command/global limit parameters from the first subclause to the sqlcmd object
|
// set the command/global limit parameters from the first subclause to the sqlcmd object
|
||||||
SQueryInfo* pQueryInfo1 = tscGetQueryInfo(pCmd, 0);
|
SQueryInfo* pQueryInfo1 = tscGetQueryInfo(pCmd, 0);
|
||||||
pCmd->command = pQueryInfo1->command;
|
pCmd->command = pQueryInfo1->command;
|
||||||
|
int32_t diffSize = 0;
|
||||||
|
|
||||||
// if there is only one element, the limit of clause is the limit of global result.
|
// if there is only one element, the limit of clause is the limit of global result.
|
||||||
// validate the select node for "UNION ALL" subclause
|
// validate the select node for "UNION ALL" subclause
|
||||||
for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
|
for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
|
||||||
SQueryInfo* pQueryInfo2 = tscGetQueryInfo(pCmd, i);
|
SQueryInfo* pQueryInfo2 = tscGetQueryInfo(pCmd, i);
|
||||||
|
|
||||||
int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo);
|
int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo, &diffSize);
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (diffSize) {
|
||||||
|
for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
|
||||||
|
SQueryInfo* pQueryInfo2 = tscGetQueryInfo(pCmd, i);
|
||||||
|
tscFieldInfoSetSize(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pCmd->parseFinished = 1;
|
pCmd->parseFinished = 1;
|
||||||
return TSDB_CODE_SUCCESS; // do not build query message here
|
return TSDB_CODE_SUCCESS; // do not build query message here
|
||||||
}
|
}
|
||||||
|
|
@ -1126,11 +1124,13 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam
|
||||||
const char* msg1 = "name too long";
|
const char* msg1 = "name too long";
|
||||||
const char* msg2 = "acctId too long";
|
const char* msg2 = "acctId too long";
|
||||||
const char* msg3 = "no acctId";
|
const char* msg3 = "no acctId";
|
||||||
|
const char* msg4 = "db name too long";
|
||||||
|
const char* msg5 = "table name too long";
|
||||||
|
|
||||||
SSqlCmd* pCmd = &pSql->cmd;
|
SSqlCmd* pCmd = &pSql->cmd;
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
|
int32_t idx = getDelimiterIndex(pTableName);
|
||||||
if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path
|
if (idx != -1) { // db has been specified in sql string so we ignore current db path
|
||||||
char* acctId = getAccountId(pSql);
|
char* acctId = getAccountId(pSql);
|
||||||
if (acctId == NULL || strlen(acctId) <= 0) {
|
if (acctId == NULL || strlen(acctId) <= 0) {
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||||
|
|
@ -1140,6 +1140,13 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam
|
||||||
if (code != 0) {
|
if (code != 0) {
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||||
}
|
}
|
||||||
|
if (idx >= TSDB_DB_NAME_LEN) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pTableName->n - 1 - idx >= TSDB_TABLE_NAME_LEN) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
|
||||||
|
}
|
||||||
|
|
||||||
char name[TSDB_TABLE_FNAME_LEN] = {0};
|
char name[TSDB_TABLE_FNAME_LEN] = {0};
|
||||||
strncpy(name, pTableName->z, pTableName->n);
|
strncpy(name, pTableName->z, pTableName->n);
|
||||||
|
|
@ -1484,14 +1491,13 @@ static char* cloneCurrentDBName(SSqlObj* pSql) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* length limitation, strstr cannot be applied */
|
/* length limitation, strstr cannot be applied */
|
||||||
static bool hasSpecifyDB(SStrToken* pTableName) {
|
static int32_t getDelimiterIndex(SStrToken* pTableName) {
|
||||||
for (uint32_t i = 0; i < pTableName->n; ++i) {
|
for (uint32_t i = 0; i < pTableName->n; ++i) {
|
||||||
if (pTableName->z[i] == TS_PATH_DELIMITER[0]) {
|
if (pTableName->z[i] == TS_PATH_DELIMITER[0]) {
|
||||||
return true;
|
return i;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return -1;
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* xlen) {
|
int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* xlen) {
|
||||||
|
|
@ -1750,6 +1756,21 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) {
|
||||||
|
size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
|
||||||
|
for (int32_t i = 0; i < numOfExprs; ++i) {
|
||||||
|
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
|
||||||
|
|
||||||
|
if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
void genUdfList(SArray* pUdfInfo, tSqlExpr *pNode) {
|
void genUdfList(SArray* pUdfInfo, tSqlExpr *pNode) {
|
||||||
if (pNode == NULL) {
|
if (pNode == NULL) {
|
||||||
return;
|
return;
|
||||||
|
|
@ -1816,6 +1837,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
|
||||||
assert(pSelNodeList != NULL && pCmd != NULL);
|
assert(pSelNodeList != NULL && pCmd != NULL);
|
||||||
|
|
||||||
const char* msg1 = "too many items in selection clause";
|
const char* msg1 = "too many items in selection clause";
|
||||||
|
|
||||||
const char* msg2 = "functions or others can not be mixed up";
|
const char* msg2 = "functions or others can not be mixed up";
|
||||||
const char* msg3 = "not support query expression";
|
const char* msg3 = "not support query expression";
|
||||||
const char* msg4 = "only support distinct one tag";
|
const char* msg4 = "only support distinct one tag";
|
||||||
|
|
@ -1885,7 +1907,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
|
||||||
|
|
||||||
// there is only one user-defined column in the final result field, add the timestamp column.
|
// there is only one user-defined column in the final result field, add the timestamp column.
|
||||||
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
|
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
|
||||||
if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
|
if ((numOfSrcCols <= 0 || !hasNoneUserDefineExpr(pQueryInfo)) && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
|
||||||
addPrimaryTsColIntoResult(pQueryInfo);
|
addPrimaryTsColIntoResult(pQueryInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -4914,7 +4936,7 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
SStrToken token = {.z = pRight->value.pz, .n = pRight->value.nLen, .type = TK_ID};
|
SStrToken token = {.z = pRight->value.pz, .n = pRight->value.nLen, .type = TK_ID};
|
||||||
int32_t len = tSQLGetToken(pRight->value.pz, &token.type);
|
int32_t len = tGetToken(pRight->value.pz, &token.type);
|
||||||
|
|
||||||
if ((token.type != TK_INTEGER && token.type != TK_FLOAT) || len != pRight->value.nLen) {
|
if ((token.type != TK_INTEGER && token.type != TK_FLOAT) || len != pRight->value.nLen) {
|
||||||
return TSDB_CODE_TSC_INVALID_SQL;
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
|
|
@ -5514,7 +5536,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
||||||
int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + schemaLen + TSDB_EXTRA_PAYLOAD_SIZE;
|
int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + schemaLen + TSDB_EXTRA_PAYLOAD_SIZE;
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
|
||||||
tscError("%p failed to malloc for alter table msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for alter table msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -5812,13 +5834,13 @@ int32_t validateLocalConfig(SMiscInfo* pOptions) {
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t validateColumnName(char* name) {
|
int32_t validateColumnName(char* name) {
|
||||||
bool ret = isKeyWord(name, (int32_t)strlen(name));
|
bool ret = taosIsKeyWordToken(name, (int32_t)strlen(name));
|
||||||
if (ret) {
|
if (ret) {
|
||||||
return TSDB_CODE_TSC_INVALID_SQL;
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
}
|
}
|
||||||
|
|
||||||
SStrToken token = {.z = name};
|
SStrToken token = {.z = name};
|
||||||
token.n = tSQLGetToken(name, &token.type);
|
token.n = tGetToken(name, &token.type);
|
||||||
|
|
||||||
if (token.type != TK_STRING && token.type != TK_ID) {
|
if (token.type != TK_STRING && token.type != TK_ID) {
|
||||||
return TSDB_CODE_TSC_INVALID_SQL;
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
|
|
@ -5829,7 +5851,7 @@ int32_t validateColumnName(char* name) {
|
||||||
strntolower(token.z, token.z, token.n);
|
strntolower(token.z, token.z, token.n);
|
||||||
token.n = (uint32_t)strtrim(token.z);
|
token.n = (uint32_t)strtrim(token.z);
|
||||||
|
|
||||||
int32_t k = tSQLGetToken(token.z, &token.type);
|
int32_t k = tGetToken(token.z, &token.type);
|
||||||
if (k != token.n) {
|
if (k != token.n) {
|
||||||
return TSDB_CODE_TSC_INVALID_SQL;
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
}
|
}
|
||||||
|
|
@ -7843,4 +7865,3 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -222,7 +222,7 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
|
||||||
assert(online <= total);
|
assert(online <= total);
|
||||||
|
|
||||||
if (online < total) {
|
if (online < total) {
|
||||||
tscError("HB:%p, total dnode:%d, online dnode:%d", pSql, total, online);
|
tscError("0x%"PRIx64", HB, total dnode:%d, online dnode:%d", pSql->self, total, online);
|
||||||
pSql->res.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
|
pSql->res.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -274,7 +274,7 @@ void tscProcessActivityTimer(void *handle, void *tmrId) {
|
||||||
taosReleaseRef(tscObjRef, pObj->hbrid);
|
taosReleaseRef(tscObjRef, pObj->hbrid);
|
||||||
|
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
tscError("%p failed to sent HB to server, reason:%s", pHB, tstrerror(code));
|
tscError("0x%"PRIx64" failed to sent HB to server, reason:%s", pHB->self, tstrerror(code));
|
||||||
}
|
}
|
||||||
|
|
||||||
taosReleaseRef(tscRefId, rid);
|
taosReleaseRef(tscRefId, rid);
|
||||||
|
|
@ -286,7 +286,7 @@ int tscSendMsgToServer(SSqlObj *pSql) {
|
||||||
|
|
||||||
char *pMsg = rpcMallocCont(pCmd->payloadLen);
|
char *pMsg = rpcMallocCont(pCmd->payloadLen);
|
||||||
if (NULL == pMsg) {
|
if (NULL == pMsg) {
|
||||||
tscError("%p msg:%s malloc failed", pSql, taosMsg[pSql->cmd.msgType]);
|
tscError("0x%"PRIx64" msg:%s malloc failed", pSql->self, taosMsg[pSql->cmd.msgType]);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -370,11 +370,11 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
|
||||||
rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
|
rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
|
||||||
|
|
||||||
pSql->retry++;
|
pSql->retry++;
|
||||||
tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), pSql->retry);
|
tscWarn("0x%"PRIx64" it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry);
|
||||||
|
|
||||||
pSql->res.code = rpcMsg->code; // keep the previous error code
|
pSql->res.code = rpcMsg->code; // keep the previous error code
|
||||||
if (pSql->retry > pSql->maxRetry) {
|
if (pSql->retry > pSql->maxRetry) {
|
||||||
tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
|
tscError("0x%"PRIx64" max retry %d reached, give up", pSql->self, pSql->maxRetry);
|
||||||
} else {
|
} else {
|
||||||
// wait for a little bit moment and then retry
|
// wait for a little bit moment and then retry
|
||||||
// todo do not sleep in rpc callback thread, add this process into queueu to process
|
// todo do not sleep in rpc callback thread, add this process into queueu to process
|
||||||
|
|
@ -667,7 +667,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
|
||||||
assert(index < pTableMetaInfo->vgroupList->numOfVgroups);
|
assert(index < pTableMetaInfo->vgroupList->numOfVgroups);
|
||||||
pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
|
pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
|
||||||
} else {
|
} else {
|
||||||
tscError("%p No vgroup info found", pSql);
|
tscError("0x%"PRIx64" No vgroup info found", pSql->self);
|
||||||
|
|
||||||
*succeed = 0;
|
*succeed = 0;
|
||||||
return pMsg;
|
return pMsg;
|
||||||
|
|
@ -762,21 +762,20 @@ static int32_t serializeColFilterInfo(SColumnFilterInfo* pColFilters, int16_t nu
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, char** pMsg, void* addr) {
|
static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, char** pMsg, int64_t id, bool validateColumn) {
|
||||||
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
||||||
|
|
||||||
// the queried table has been removed and a new table with the same name has already been created already
|
// the queried table has been removed and a new table with the same name has already been created already
|
||||||
// return error msg
|
// return error msg
|
||||||
if (pExpr->uid != pTableMeta->id.uid) {
|
if (pExpr->uid != pTableMeta->id.uid) {
|
||||||
tscError("%p table has already been destroyed", addr);
|
tscError("0x%"PRIx64" table has already been destroyed", id);
|
||||||
return TSDB_CODE_TSC_INVALID_TABLE_NAME;
|
return TSDB_CODE_TSC_INVALID_TABLE_NAME;
|
||||||
}
|
}
|
||||||
|
|
||||||
//TODO disable it temporarily
|
if (validateColumn && !tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) {
|
||||||
// if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) {
|
tscError("0x%"PRIx64" table schema is not matched with parsed sql", id);
|
||||||
// tscError("%p table schema is not matched with parsed sql", addr);
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
// return TSDB_CODE_TSC_INVALID_SQL;
|
}
|
||||||
// }
|
|
||||||
|
|
||||||
assert(pExpr->resColId < 0);
|
assert(pExpr->resColId < 0);
|
||||||
SSqlExpr* pSqlExpr = (SSqlExpr *)(*pMsg);
|
SSqlExpr* pSqlExpr = (SSqlExpr *)(*pMsg);
|
||||||
|
|
@ -909,14 +908,14 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int32_t i = 0; i < query.numOfOutput; ++i) {
|
for (int32_t i = 0; i < query.numOfOutput; ++i) {
|
||||||
code = serializeSqlExpr(&query.pExpr1[i].base, pTableMetaInfo, &pMsg, pSql);
|
code = serializeSqlExpr(&query.pExpr1[i].base, pTableMetaInfo, &pMsg, pSql->self, true);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
goto _end;
|
goto _end;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int32_t i = 0; i < query.numOfExpr2; ++i) {
|
for (int32_t i = 0; i < query.numOfExpr2; ++i) {
|
||||||
code = serializeSqlExpr(&query.pExpr2[i].base, pTableMetaInfo, &pMsg, pSql);
|
code = serializeSqlExpr(&query.pExpr2[i].base, pTableMetaInfo, &pMsg, pSql->self, false);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
goto _end;
|
goto _end;
|
||||||
}
|
}
|
||||||
|
|
@ -1104,7 +1103,7 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
SSqlCmd *pCmd = &pSql->cmd;
|
SSqlCmd *pCmd = &pSql->cmd;
|
||||||
pCmd->payloadLen = sizeof(SCreateDnodeMsg);
|
pCmd->payloadLen = sizeof(SCreateDnodeMsg);
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
||||||
tscError("%p failed to malloc for query msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1122,7 +1121,7 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
SSqlCmd *pCmd = &pSql->cmd;
|
SSqlCmd *pCmd = &pSql->cmd;
|
||||||
pCmd->payloadLen = sizeof(SCreateAcctMsg);
|
pCmd->payloadLen = sizeof(SCreateAcctMsg);
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
||||||
tscError("%p failed to malloc for query msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1168,7 +1167,7 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
pCmd->payloadLen = sizeof(SCreateUserMsg);
|
pCmd->payloadLen = sizeof(SCreateUserMsg);
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
||||||
tscError("%p failed to malloc for query msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1207,7 +1206,7 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
pCmd->payloadLen = sizeof(SDropDbMsg);
|
pCmd->payloadLen = sizeof(SDropDbMsg);
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
||||||
tscError("%p failed to malloc for query msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1240,7 +1239,7 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
pCmd->payloadLen = sizeof(SCMDropTableMsg);
|
pCmd->payloadLen = sizeof(SCMDropTableMsg);
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
||||||
tscError("%p failed to malloc for query msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1261,7 +1260,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
|
|
||||||
pCmd->payloadLen = sizeof(SDropDnodeMsg);
|
pCmd->payloadLen = sizeof(SDropDnodeMsg);
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
||||||
tscError("%p failed to malloc for query msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1282,7 +1281,7 @@ int32_t tscBuildDropUserAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
pCmd->msgType = (pInfo->type == TSDB_SQL_DROP_USER)? TSDB_MSG_TYPE_CM_DROP_USER:TSDB_MSG_TYPE_CM_DROP_ACCT;
|
pCmd->msgType = (pInfo->type == TSDB_SQL_DROP_USER)? TSDB_MSG_TYPE_CM_DROP_USER:TSDB_MSG_TYPE_CM_DROP_ACCT;
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
||||||
tscError("%p failed to malloc for query msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1297,7 +1296,7 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
pCmd->payloadLen = sizeof(SUseDbMsg);
|
pCmd->payloadLen = sizeof(SUseDbMsg);
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
||||||
tscError("%p failed to malloc for query msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1314,7 +1313,7 @@ int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
|
||||||
pCmd->payloadLen = sizeof(SSyncDbMsg);
|
pCmd->payloadLen = sizeof(SSyncDbMsg);
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
||||||
tscError("%p failed to malloc for query msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1333,7 +1332,7 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
pCmd->payloadLen = sizeof(SShowMsg) + 100;
|
pCmd->payloadLen = sizeof(SShowMsg) + 100;
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
||||||
tscError("%p failed to malloc for query msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1427,7 +1426,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
// Reallocate the payload size
|
// Reallocate the payload size
|
||||||
size = tscEstimateCreateTableMsgLength(pSql, pInfo);
|
size = tscEstimateCreateTableMsgLength(pSql, pInfo);
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
|
||||||
tscError("%p failed to malloc for create table msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for create table msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1526,7 +1525,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
SAlterTableInfo *pAlterInfo = pInfo->pAlterInfo;
|
SAlterTableInfo *pAlterInfo = pInfo->pAlterInfo;
|
||||||
int size = tscEstimateAlterTableMsgLength(pCmd);
|
int size = tscEstimateAlterTableMsgLength(pCmd);
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
|
||||||
tscError("%p failed to malloc for alter table msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for alter table msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1600,7 +1599,7 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
pCmd->payloadLen = sizeof(SRetrieveTableMsg);
|
pCmd->payloadLen = sizeof(SRetrieveTableMsg);
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
||||||
tscError("%p failed to malloc for query msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1722,7 +1721,7 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
pCmd->payloadLen = sizeof(SConnectMsg);
|
pCmd->payloadLen = sizeof(SConnectMsg);
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
|
||||||
tscError("%p failed to malloc for query msg", pSql);
|
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1885,7 +1884,7 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||||
int size = numOfQueries * sizeof(SQueryDesc) + numOfStreams * sizeof(SStreamDesc) + sizeof(SHeartBeatMsg) + 100;
|
int size = numOfQueries * sizeof(SQueryDesc) + numOfStreams * sizeof(SStreamDesc) + sizeof(SHeartBeatMsg) + 100;
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
|
||||||
pthread_mutex_unlock(&pObj->mutex);
|
pthread_mutex_unlock(&pObj->mutex);
|
||||||
tscError("%p failed to create heartbeat msg", pSql);
|
tscError("0x%"PRIx64" failed to create heartbeat msg", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1961,10 +1960,12 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
|
||||||
|
|
||||||
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
|
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
|
||||||
if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
|
if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
|
||||||
tscError("%p invalid table meta from mnode, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
|
tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name));
|
||||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
assert(pTableMeta->tableType == TSDB_SUPER_TABLE || pTableMeta->tableType == TSDB_CHILD_TABLE || pTableMeta->tableType == TSDB_NORMAL_TABLE || pTableMeta->tableType == TSDB_STREAM_TABLE);
|
||||||
|
|
||||||
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
|
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
|
||||||
// check if super table hashmap or not
|
// check if super table hashmap or not
|
||||||
int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
|
int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
|
||||||
|
|
@ -2205,8 +2206,7 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
|
||||||
|
|
||||||
pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
|
pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
|
||||||
if (pInfo->vgroupList->numOfVgroups <= 0) {
|
if (pInfo->vgroupList->numOfVgroups <= 0) {
|
||||||
//tfree(pInfo->vgroupList);
|
tscDebug("0x%"PRIx64" empty vgroup info, no corresponding tables for stable", pSql->self);
|
||||||
tscError("%p empty vgroup info", pSql);
|
|
||||||
} else {
|
} else {
|
||||||
for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
|
for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
|
||||||
// just init, no need to lock
|
// just init, no need to lock
|
||||||
|
|
@ -2522,7 +2522,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code);
|
||||||
static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
|
static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
|
||||||
SSqlObj *pNew = calloc(1, sizeof(SSqlObj));
|
SSqlObj *pNew = calloc(1, sizeof(SSqlObj));
|
||||||
if (NULL == pNew) {
|
if (NULL == pNew) {
|
||||||
tscError("%p malloc failed for new sqlobj to get table meta", pSql);
|
tscError("0x%"PRIx64" malloc failed for new sqlobj to get table meta", pSql->self);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -2536,7 +2536,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
|
||||||
|
|
||||||
pNew->cmd.autoCreated = pSql->cmd.autoCreated; // create table if not exists
|
pNew->cmd.autoCreated = pSql->cmd.autoCreated; // create table if not exists
|
||||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) {
|
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) {
|
||||||
tscError("%p malloc failed for payload to get table meta", pSql);
|
tscError("0x%"PRIx64" malloc failed for payload to get table meta", pSql->self);
|
||||||
tscFreeSqlObj(pNew);
|
tscFreeSqlObj(pNew);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
@ -2549,7 +2549,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
|
||||||
if (pSql->cmd.autoCreated) {
|
if (pSql->cmd.autoCreated) {
|
||||||
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
|
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
tscError("%p malloc failed for new tag data to get table meta", pSql);
|
tscError("0x%"PRIx64" malloc failed for new tag data to get table meta", pSql->self);
|
||||||
tscFreeSqlObj(pNew);
|
tscFreeSqlObj(pNew);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
@ -2577,10 +2577,23 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
|
||||||
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
|
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
|
||||||
assert(tIsValidName(&pTableMetaInfo->name));
|
assert(tIsValidName(&pTableMetaInfo->name));
|
||||||
|
|
||||||
tfree(pTableMetaInfo->pTableMeta);
|
|
||||||
|
|
||||||
uint32_t size = tscGetTableMetaMaxSize();
|
uint32_t size = tscGetTableMetaMaxSize();
|
||||||
|
if (pTableMetaInfo->pTableMeta == NULL) {
|
||||||
pTableMetaInfo->pTableMeta = calloc(1, size);
|
pTableMetaInfo->pTableMeta = calloc(1, size);
|
||||||
|
pTableMetaInfo->tableMetaSize = size;
|
||||||
|
} else if (pTableMetaInfo->tableMetaSize < size) {
|
||||||
|
char *tmp = realloc(pTableMetaInfo->pTableMeta, size);
|
||||||
|
if (tmp == NULL) {
|
||||||
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
pTableMetaInfo->pTableMeta = (STableMeta *)tmp;
|
||||||
|
memset(pTableMetaInfo->pTableMeta, 0, size);
|
||||||
|
pTableMetaInfo->tableMetaSize = size;
|
||||||
|
} else {
|
||||||
|
//uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta);
|
||||||
|
memset(pTableMetaInfo->pTableMeta, 0, size);
|
||||||
|
pTableMetaInfo->tableMetaSize = size;
|
||||||
|
}
|
||||||
|
|
||||||
pTableMetaInfo->pTableMeta->tableType = -1;
|
pTableMetaInfo->pTableMeta->tableType = -1;
|
||||||
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
|
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
|
||||||
|
|
@ -2592,10 +2605,13 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
|
||||||
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
|
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
|
||||||
|
|
||||||
// TODO resize the tableMeta
|
// TODO resize the tableMeta
|
||||||
|
char buf[80*1024] = {0};
|
||||||
|
assert(size < 80*1024);
|
||||||
|
|
||||||
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
|
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
|
||||||
if (pMeta->id.uid > 0) {
|
if (pMeta->id.uid > 0) {
|
||||||
if (pMeta->tableType == TSDB_CHILD_TABLE) {
|
if (pMeta->tableType == TSDB_CHILD_TABLE) {
|
||||||
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name);
|
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name, buf);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
return getTableMetaFromMnode(pSql, pTableMetaInfo);
|
return getTableMetaFromMnode(pSql, pTableMetaInfo);
|
||||||
}
|
}
|
||||||
|
|
@ -2681,7 +2697,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
|
||||||
char name[TSDB_TABLE_FNAME_LEN] = {0};
|
char name[TSDB_TABLE_FNAME_LEN] = {0};
|
||||||
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, name);
|
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, name);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
tscError("%p failed to generate the table full name", pSql);
|
tscError("0x%"PRIx64" failed to generate the table full name", pSql->self);
|
||||||
return TSDB_CODE_TSC_INVALID_SQL;
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -2835,6 +2851,7 @@ void tscInitMsgsFp() {
|
||||||
tscProcessMsgRsp[TSDB_SQL_ALTER_DB] = tscProcessAlterDbMsgRsp;
|
tscProcessMsgRsp[TSDB_SQL_ALTER_DB] = tscProcessAlterDbMsgRsp;
|
||||||
|
|
||||||
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_TABLE] = tscProcessShowCreateRsp;
|
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_TABLE] = tscProcessShowCreateRsp;
|
||||||
|
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_STABLE] = tscProcessShowCreateRsp;
|
||||||
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_DATABASE] = tscProcessShowCreateRsp;
|
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_DATABASE] = tscProcessShowCreateRsp;
|
||||||
|
|
||||||
tscKeepConn[TSDB_SQL_SHOW] = 1;
|
tscKeepConn[TSDB_SQL_SHOW] = 1;
|
||||||
|
|
|
||||||
|
|
@ -457,6 +457,7 @@ static bool needToFetchNewBlock(SSqlObj* pSql) {
|
||||||
pCmd->command == TSDB_SQL_FETCH ||
|
pCmd->command == TSDB_SQL_FETCH ||
|
||||||
pCmd->command == TSDB_SQL_SHOW ||
|
pCmd->command == TSDB_SQL_SHOW ||
|
||||||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
|
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
|
||||||
|
pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE ||
|
||||||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
|
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
|
||||||
pCmd->command == TSDB_SQL_SELECT ||
|
pCmd->command == TSDB_SQL_SELECT ||
|
||||||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
|
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
|
||||||
|
|
@ -588,7 +589,7 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
|
||||||
void taos_free_result(TAOS_RES *res) {
|
void taos_free_result(TAOS_RES *res) {
|
||||||
SSqlObj* pSql = (SSqlObj*) res;
|
SSqlObj* pSql = (SSqlObj*) res;
|
||||||
if (pSql == NULL || pSql->signature != pSql) {
|
if (pSql == NULL || pSql->signature != pSql) {
|
||||||
tscError("%p already released sqlObj", res);
|
tscError("0x%"PRIx64" already released sqlObj", pSql ? pSql->self : -1);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -881,15 +882,14 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
|
||||||
|
|
||||||
int32_t sqlLen = (int32_t)strlen(sql);
|
int32_t sqlLen = (int32_t)strlen(sql);
|
||||||
if (sqlLen > tsMaxSQLStringLen) {
|
if (sqlLen > tsMaxSQLStringLen) {
|
||||||
tscError("%p sql too long", pSql);
|
tscError("0x%"PRIx64" sql too long", pSql->self);
|
||||||
tfree(pSql);
|
tfree(pSql);
|
||||||
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
|
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
|
||||||
}
|
}
|
||||||
|
|
||||||
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
|
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
|
||||||
if (pSql->sqlstr == NULL) {
|
if (pSql->sqlstr == NULL) {
|
||||||
tscError("%p failed to malloc sql string buffer", pSql);
|
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
|
||||||
tscDebug("0x%"PRIx64" Valid SQL result:%d, %s pObj:%p", pSql->self, pRes->code, taos_errstr(pSql), pObj);
|
|
||||||
tfree(pSql);
|
tfree(pSql);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
@ -914,7 +914,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
tscDebug("0x%"PRIx64" Valid SQL result:%d, %s pObj:%p", pSql->self, code, taos_errstr(pSql), pObj);
|
tscError("0x%"PRIx64" invalid SQL result:%d, %s pObj:%p", pSql->self, code, taos_errstr(pSql), pObj);
|
||||||
}
|
}
|
||||||
|
|
||||||
taos_free_result(pSql);
|
taos_free_result(pSql);
|
||||||
|
|
@ -963,7 +963,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
|
||||||
len = (int32_t)strtrim(tblName);
|
len = (int32_t)strtrim(tblName);
|
||||||
|
|
||||||
SStrToken sToken = {.n = len, .type = TK_ID, .z = tblName};
|
SStrToken sToken = {.n = len, .type = TK_ID, .z = tblName};
|
||||||
tSQLGetToken(tblName, &sToken.type);
|
tGetToken(tblName, &sToken.type);
|
||||||
|
|
||||||
// Check if the table name available or not
|
// Check if the table name available or not
|
||||||
if (tscValidateName(&sToken) != TSDB_CODE_SUCCESS) {
|
if (tscValidateName(&sToken) != TSDB_CODE_SUCCESS) {
|
||||||
|
|
@ -1031,14 +1031,14 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
|
||||||
|
|
||||||
int32_t tblListLen = (int32_t)strlen(tableNameList);
|
int32_t tblListLen = (int32_t)strlen(tableNameList);
|
||||||
if (tblListLen > MAX_TABLE_NAME_LENGTH) {
|
if (tblListLen > MAX_TABLE_NAME_LENGTH) {
|
||||||
tscError("%p tableNameList too long, length:%d, maximum allowed:%d", pSql, tblListLen, MAX_TABLE_NAME_LENGTH);
|
tscError("0x%"PRIx64" tableNameList too long, length:%d, maximum allowed:%d", pSql->self, tblListLen, MAX_TABLE_NAME_LENGTH);
|
||||||
tscFreeSqlObj(pSql);
|
tscFreeSqlObj(pSql);
|
||||||
return TSDB_CODE_TSC_INVALID_SQL;
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
}
|
}
|
||||||
|
|
||||||
char *str = calloc(1, tblListLen + 1);
|
char *str = calloc(1, tblListLen + 1);
|
||||||
if (str == NULL) {
|
if (str == NULL) {
|
||||||
tscError("%p failed to malloc sql string buffer", pSql);
|
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
|
||||||
tscFreeSqlObj(pSql);
|
tscFreeSqlObj(pSql);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -102,7 +102,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) && (pTableMetaInfo->pVgroupTables == NULL) && (pTableMetaInfo->vgroupList == NULL || pTableMetaInfo->vgroupList->numOfVgroups <= 0)) {
|
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) && (pTableMetaInfo->pVgroupTables == NULL) && (pTableMetaInfo->vgroupList == NULL || pTableMetaInfo->vgroupList->numOfVgroups <= 0)) {
|
||||||
tscDebug("%p empty vgroup list", pSql);
|
tscDebug("0x%"PRIx64" empty vgroup list", pSql->self);
|
||||||
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
|
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
|
||||||
code = TSDB_CODE_TSC_APP_ERROR;
|
code = TSDB_CODE_TSC_APP_ERROR;
|
||||||
}
|
}
|
||||||
|
|
@ -110,10 +110,9 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
|
||||||
// failed to get table Meta or vgroup list, retry in 10sec.
|
// failed to get table Meta or vgroup list, retry in 10sec.
|
||||||
if (code == TSDB_CODE_SUCCESS) {
|
if (code == TSDB_CODE_SUCCESS) {
|
||||||
tscTansformFuncForSTableQuery(pQueryInfo);
|
tscTansformFuncForSTableQuery(pQueryInfo);
|
||||||
tscDebug("0x%"PRIx64" stream:%p, start stream query on:%s", pSql->self, pStream, tNameGetTableName(&pTableMetaInfo->name));
|
tscDebug("0x%"PRIx64" stream:%p started to query table:%s", pSql->self, pStream, tNameGetTableName(&pTableMetaInfo->name));
|
||||||
|
|
||||||
pQueryInfo->command = TSDB_SQL_SELECT;
|
pQueryInfo->command = TSDB_SQL_SELECT;
|
||||||
pSql->cmd.active = pQueryInfo;
|
|
||||||
|
|
||||||
pSql->fp = tscProcessStreamQueryCallback;
|
pSql->fp = tscProcessStreamQueryCallback;
|
||||||
pSql->fetchFp = tscProcessStreamQueryCallback;
|
pSql->fetchFp = tscProcessStreamQueryCallback;
|
||||||
|
|
@ -140,7 +139,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
|
||||||
pStream->numOfRes = 0; // reset the numOfRes.
|
pStream->numOfRes = 0; // reset the numOfRes.
|
||||||
SSqlObj *pSql = pStream->pSql;
|
SSqlObj *pSql = pStream->pSql;
|
||||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||||
tscDebug("0x%"PRIx64" add into timer", pSql->self);
|
tscDebug("0x%"PRIx64" timer launch query", pSql->self);
|
||||||
|
|
||||||
if (pStream->isProject) {
|
if (pStream->isProject) {
|
||||||
/*
|
/*
|
||||||
|
|
@ -195,8 +194,8 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
|
||||||
SSqlStream *pStream = (SSqlStream *)param;
|
SSqlStream *pStream = (SSqlStream *)param;
|
||||||
if (tres == NULL || numOfRows < 0) {
|
if (tres == NULL || numOfRows < 0) {
|
||||||
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
|
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
|
||||||
tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
|
tscError("0x%"PRIx64" stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql->self,
|
||||||
retryDelay);
|
pStream, numOfRows, retryDelay);
|
||||||
|
|
||||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
|
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
|
||||||
|
|
||||||
|
|
@ -204,6 +203,14 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
|
||||||
tNameExtractFullName(&pTableMetaInfo->name, name);
|
tNameExtractFullName(&pTableMetaInfo->name, name);
|
||||||
|
|
||||||
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
|
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
|
||||||
|
|
||||||
|
tfree(pTableMetaInfo->pTableMeta);
|
||||||
|
|
||||||
|
tscFreeSqlResult(pStream->pSql);
|
||||||
|
tscFreeSubobj(pStream->pSql);
|
||||||
|
tfree(pStream->pSql->pSubs);
|
||||||
|
pStream->pSql->subState.numOfSub = 0;
|
||||||
|
|
||||||
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
|
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
|
||||||
|
|
||||||
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
|
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
|
||||||
|
|
@ -260,13 +267,14 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
||||||
|
|
||||||
if (pSql == NULL || numOfRows < 0) {
|
if (pSql == NULL || numOfRows < 0) {
|
||||||
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
|
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
|
||||||
tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
|
tscError("0x%"PRIx64" stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 " ms", pSql->self, pStream, numOfRows, retryDelayTime);
|
||||||
|
|
||||||
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
|
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
|
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||||
|
STableMetaInfo *pTableMetaInfo = pQueryInfo->pTableMetaInfo[0];
|
||||||
|
|
||||||
if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful.
|
if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful.
|
||||||
for(int32_t i = 0; i < numOfRows; ++i) {
|
for(int32_t i = 0; i < numOfRows; ++i) {
|
||||||
|
|
@ -293,7 +301,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
||||||
/* no resuls in the query range, retry */
|
/* no resuls in the query range, retry */
|
||||||
// todo set retry dynamic time
|
// todo set retry dynamic time
|
||||||
int32_t retry = tsProjectExecInterval;
|
int32_t retry = tsProjectExecInterval;
|
||||||
tscError("%p stream:%p, retrieve no data, code:0x%08x, retry in %" PRId32 "ms", pSql, pStream, numOfRows, retry);
|
tscError("0x%"PRIx64" stream:%p, retrieve no data, code:0x%08x, retry in %" PRId32 "ms", pSql->self, pStream, numOfRows, retry);
|
||||||
|
|
||||||
tscSetRetryTimer(pStream, pStream->pSql, retry);
|
tscSetRetryTimer(pStream, pStream->pSql, retry);
|
||||||
return;
|
return;
|
||||||
|
|
@ -306,6 +314,10 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
||||||
pStream->numOfRes);
|
pStream->numOfRes);
|
||||||
|
|
||||||
tfree(pTableMetaInfo->pTableMeta);
|
tfree(pTableMetaInfo->pTableMeta);
|
||||||
|
if (pQueryInfo->pQInfo != NULL) {
|
||||||
|
qDestroyQueryInfo(pQueryInfo->pQInfo);
|
||||||
|
pQueryInfo->pQInfo = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
tscFreeSqlResult(pSql);
|
tscFreeSqlResult(pSql);
|
||||||
tscFreeSubobj(pSql);
|
tscFreeSubobj(pSql);
|
||||||
|
|
@ -338,10 +350,10 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
|
tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 "(ts window ekey), in %" PRId64 " ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
|
||||||
now + timer, timer, delay, pStream->stime, etime);
|
now + timer, timer, delay, pStream->stime, etime);
|
||||||
} else {
|
} else {
|
||||||
tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
|
tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 "(ts window ekey), in %" PRId64 " ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
|
||||||
pStream->stime, timer, delay, pStream->stime - pStream->interval.interval, pStream->stime - 1);
|
pStream->stime, timer, delay, pStream->stime - pStream->interval.interval, pStream->stime - 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -399,7 +411,6 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
int64_t stime = taosTimeTruncate(pStream->stime - 1, &pStream->interval, pStream->precision);
|
int64_t stime = taosTimeTruncate(pStream->stime - 1, &pStream->interval, pStream->precision);
|
||||||
//int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
|
|
||||||
if (stime >= pStream->etime) {
|
if (stime >= pStream->etime) {
|
||||||
tscDebug("0x%"PRIx64" stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql->self, pStream,
|
tscDebug("0x%"PRIx64" stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql->self, pStream,
|
||||||
pStream->stime, pStream->etime);
|
pStream->stime, pStream->etime);
|
||||||
|
|
@ -441,7 +452,7 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) {
|
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) {
|
||||||
tscWarn("%p stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
|
tscWarn("0x%"PRIx64" stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql->self, pStream,
|
||||||
(int64_t)pQueryInfo->interval.interval, minIntervalTime);
|
(int64_t)pQueryInfo->interval.interval, minIntervalTime);
|
||||||
pQueryInfo->interval.interval = minIntervalTime;
|
pQueryInfo->interval.interval = minIntervalTime;
|
||||||
}
|
}
|
||||||
|
|
@ -458,14 +469,14 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
|
||||||
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
|
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
|
||||||
|
|
||||||
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
|
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
|
||||||
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
|
tscWarn("0x%"PRIx64" stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql->self, pStream,
|
||||||
pQueryInfo->interval.sliding, minSlidingTime);
|
pQueryInfo->interval.sliding, minSlidingTime);
|
||||||
|
|
||||||
pQueryInfo->interval.sliding = minSlidingTime;
|
pQueryInfo->interval.sliding = minSlidingTime;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
|
if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
|
||||||
tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql, pStream,
|
tscWarn("0x%"PRIx64" stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql->self, pStream,
|
||||||
pQueryInfo->interval.sliding, pQueryInfo->interval.interval);
|
pQueryInfo->interval.sliding, pQueryInfo->interval.interval);
|
||||||
|
|
||||||
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
|
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
|
||||||
|
|
@ -508,7 +519,7 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
|
||||||
} else {
|
} else {
|
||||||
int64_t newStime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
|
int64_t newStime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
|
||||||
if (newStime != stime) {
|
if (newStime != stime) {
|
||||||
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
|
tscWarn("0x%"PRIx64" stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql->self, pStream, stime, newStime);
|
||||||
stime = newStime;
|
stime = newStime;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -539,7 +550,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
|
||||||
|
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
pSql->res.code = code;
|
pSql->res.code = code;
|
||||||
tscError("%p open stream failed, sql:%s, reason:%s, code:%s", pSql, pSql->sqlstr, pCmd->payload, tstrerror(code));
|
tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:%s", pSql->self, pSql->sqlstr, pCmd->payload, tstrerror(code));
|
||||||
|
|
||||||
pStream->fp(pStream->param, NULL, NULL);
|
pStream->fp(pStream->param, NULL, NULL);
|
||||||
return;
|
return;
|
||||||
|
|
@ -558,7 +569,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
|
||||||
if (tscSetSlidingWindowInfo(pSql, pStream) != TSDB_CODE_SUCCESS) {
|
if (tscSetSlidingWindowInfo(pSql, pStream) != TSDB_CODE_SUCCESS) {
|
||||||
pSql->res.code = code;
|
pSql->res.code = code;
|
||||||
|
|
||||||
tscError("%p stream %p open failed, since the interval value is incorrect", pSql, pStream);
|
tscError("0x%"PRIx64" stream %p open failed, since the interval value is incorrect", pSql->self, pStream);
|
||||||
pStream->fp(pStream->param, NULL, NULL);
|
pStream->fp(pStream->param, NULL, NULL);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
@ -598,7 +609,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
|
||||||
|
|
||||||
SSqlStream *pStream = (SSqlStream *)calloc(1, sizeof(SSqlStream));
|
SSqlStream *pStream = (SSqlStream *)calloc(1, sizeof(SSqlStream));
|
||||||
if (pStream == NULL) {
|
if (pStream == NULL) {
|
||||||
tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, sqlstr, pCmd->payload, pRes->code);
|
tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:0x%08x", pSql->self, sqlstr, pCmd->payload, pRes->code);
|
||||||
tscFreeSqlObj(pSql);
|
tscFreeSqlObj(pSql);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
@ -614,26 +625,26 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
|
||||||
|
|
||||||
pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
|
pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
|
||||||
if (pSql->sqlstr == NULL) {
|
if (pSql->sqlstr == NULL) {
|
||||||
tscError("%p failed to malloc sql string buffer", pSql);
|
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
|
||||||
tscFreeSqlObj(pSql);
|
tscFreeSqlObj(pSql);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
strtolower(pSql->sqlstr, sqlstr);
|
strtolower(pSql->sqlstr, sqlstr);
|
||||||
|
|
||||||
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
|
registerSqlObj(pSql);
|
||||||
|
|
||||||
|
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
|
||||||
tsem_init(&pSql->rspSem, 0, 0);
|
tsem_init(&pSql->rspSem, 0, 0);
|
||||||
|
|
||||||
pSql->fp = tscCreateStream;
|
pSql->fp = tscCreateStream;
|
||||||
pSql->fetchFp = tscCreateStream;
|
pSql->fetchFp = tscCreateStream;
|
||||||
|
|
||||||
registerSqlObj(pSql);
|
|
||||||
|
|
||||||
int32_t code = tsParseSql(pSql, true);
|
int32_t code = tsParseSql(pSql, true);
|
||||||
if (code == TSDB_CODE_SUCCESS) {
|
if (code == TSDB_CODE_SUCCESS) {
|
||||||
tscCreateStream(pStream, pSql, code);
|
tscCreateStream(pStream, pSql, code);
|
||||||
} else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
} else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
||||||
tscError("%p open stream failed, sql:%s, code:%s", pSql, sqlstr, tstrerror(code));
|
tscError("0x%"PRIx64" open stream failed, sql:%s, code:%s", pSql->self, sqlstr, tstrerror(code));
|
||||||
taosReleaseRef(tscObjRef, pSql->self);
|
taosReleaseRef(tscObjRef, pSql->self);
|
||||||
free(pStream);
|
free(pStream);
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
|
||||||
|
|
@ -224,11 +224,11 @@ static SArray* getTableList( SSqlObj* pSql ) {
|
||||||
|
|
||||||
SSqlObj* pNew = taos_query(pSql->pTscObj, sql);
|
SSqlObj* pNew = taos_query(pSql->pTscObj, sql);
|
||||||
if (pNew == NULL) {
|
if (pNew == NULL) {
|
||||||
tscError("failed to retrieve table id: cannot create new sql object.");
|
tscError("0x%"PRIx64"failed to retrieve table id: cannot create new sql object.", pSql->self);
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
} else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) {
|
} else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) {
|
||||||
tscError("failed to retrieve table id: %s", tstrerror(taos_errno(pNew)));
|
tscError("0x%"PRIx64"failed to retrieve table id,error: %s", pSql->self, tstrerror(taos_errno(pNew)));
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -71,7 +71,7 @@ static void subquerySetState(SSqlObj *pSql, SSubqueryState *subState, int idx, i
|
||||||
|
|
||||||
pthread_mutex_lock(&subState->mutex);
|
pthread_mutex_lock(&subState->mutex);
|
||||||
|
|
||||||
tscDebug("subquery:%p,%d state set to %d", pSql, idx, state);
|
tscDebug("subquery:0x%"PRIx64",%d state set to %d", pSql->self, idx, state);
|
||||||
|
|
||||||
subState->states[idx] = state;
|
subState->states[idx] = state;
|
||||||
|
|
||||||
|
|
@ -85,12 +85,18 @@ static bool allSubqueryDone(SSqlObj *pParentSql) {
|
||||||
//lock in caller
|
//lock in caller
|
||||||
tscDebug("0x%"PRIx64" total subqueries: %d", pParentSql->self, subState->numOfSub);
|
tscDebug("0x%"PRIx64" total subqueries: %d", pParentSql->self, subState->numOfSub);
|
||||||
for (int i = 0; i < subState->numOfSub; i++) {
|
for (int i = 0; i < subState->numOfSub; i++) {
|
||||||
|
SSqlObj* pSub = pParentSql->pSubs[i];
|
||||||
if (0 == subState->states[i]) {
|
if (0 == subState->states[i]) {
|
||||||
tscDebug("0x%"PRIx64" subquery:%p, index: %d NOT finished, abort query completion check", pParentSql->self, pParentSql->pSubs[i], i);
|
tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d NOT finished, abort query completion check", pParentSql->self,
|
||||||
|
pSub->self, i);
|
||||||
done = false;
|
done = false;
|
||||||
break;
|
break;
|
||||||
} else {
|
} else {
|
||||||
tscDebug("0x%"PRIx64" subquery:%p, index: %d finished", pParentSql->self, pParentSql->pSubs[i], i);
|
if (pSub != NULL) {
|
||||||
|
tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d finished", pParentSql->self, pSub->self, i);
|
||||||
|
} else {
|
||||||
|
tscDebug("0x%"PRIx64" subquery:%p, index: %d finished", pParentSql->self, pSub, i);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -107,14 +113,15 @@ static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
|
||||||
bool done = allSubqueryDone(pParentSql);
|
bool done = allSubqueryDone(pParentSql);
|
||||||
|
|
||||||
if (done) {
|
if (done) {
|
||||||
tscDebug("0x%"PRIx64" subquery:%p,%d all subs already done", pParentSql->self, pSql, idx);
|
tscDebug("0x%"PRIx64" subquery:0x%"PRIx64",%d all subs already done", pParentSql->self,
|
||||||
|
pSql->self, idx);
|
||||||
|
|
||||||
pthread_mutex_unlock(&subState->mutex);
|
pthread_mutex_unlock(&subState->mutex);
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
tscDebug("0x%"PRIx64" subquery:%p,%d state set to 1", pParentSql->self, pSql, idx);
|
tscDebug("0x%"PRIx64" subquery:0x%"PRIx64",%d state set to 1", pParentSql->self, pSql->self, idx);
|
||||||
|
|
||||||
subState->states[idx] = 1;
|
subState->states[idx] = 1;
|
||||||
|
|
||||||
|
|
@ -171,7 +178,8 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
tscDebug("0x%"PRIx64" sub:%p table idx:%d, input group number:%d", pSql->self, pSql->pSubs[i], i, pSupporter->pTSBuf->numOfGroups);
|
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" table idx:%d, input group number:%d", pSql->self,
|
||||||
|
pSql->pSubs[i]->self, i, pSupporter->pTSBuf->numOfGroups);
|
||||||
|
|
||||||
ctxlist[i].p = pSupporter;
|
ctxlist[i].p = pSupporter;
|
||||||
ctxlist[i].res = output;
|
ctxlist[i].res = output;
|
||||||
|
|
@ -377,9 +385,9 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
|
||||||
TSKEY et = taosGetTimestampUs();
|
TSKEY et = taosGetTimestampUs();
|
||||||
|
|
||||||
for (int32_t i = 0; i < joinNum; ++i) {
|
for (int32_t i = 0; i < joinNum; ++i) {
|
||||||
tscDebug("0x%"PRIx64" sub:%p tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
|
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
|
||||||
"intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us",
|
"intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us",
|
||||||
pSql->self, pSql->pSubs[i], i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey,
|
pSql->self, pSql->pSubs[i]->self, i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey,
|
||||||
tsBufGetNumOfGroup(ctxlist[i].res), et - st);
|
tsBufGetNumOfGroup(ctxlist[i].res), et - st);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -630,7 +638,13 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
|
||||||
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
|
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
|
||||||
|
|
||||||
// set the tag column id for executor to extract correct tag value
|
// set the tag column id for executor to extract correct tag value
|
||||||
|
#ifndef _TD_NINGSI_60
|
||||||
pExpr->base.param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)};
|
pExpr->base.param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)};
|
||||||
|
#else
|
||||||
|
pExpr->base.param[0].i64 = colId;
|
||||||
|
pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT;
|
||||||
|
pExpr->base.param[0].nLen = sizeof(int64_t);
|
||||||
|
#endif
|
||||||
pExpr->base.numOfParams = 1;
|
pExpr->base.numOfParams = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -656,7 +670,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
|
||||||
//prepare the subqueries object failed, abort
|
//prepare the subqueries object failed, abort
|
||||||
if (!success) {
|
if (!success) {
|
||||||
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
tscError("%p failed to prepare subqueries objs for secondary phase query, numOfSub:%d, code:%d", pSql,
|
tscError("0x%"PRIx64" failed to prepare subqueries objs for secondary phase query, numOfSub:%d, code:%d", pSql->self,
|
||||||
pSql->subState.numOfSub, pSql->res.code);
|
pSql->subState.numOfSub, pSql->res.code);
|
||||||
freeJoinSubqueryObj(pSql);
|
freeJoinSubqueryObj(pSql);
|
||||||
|
|
||||||
|
|
@ -701,7 +715,7 @@ void freeJoinSubqueryObj(SSqlObj* pSql) {
|
||||||
|
|
||||||
static int32_t quitAllSubquery(SSqlObj* pSqlSub, SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
|
static int32_t quitAllSubquery(SSqlObj* pSqlSub, SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
|
||||||
if (subAndCheckDone(pSqlSub, pSqlObj, pSupporter->subqueryIndex)) {
|
if (subAndCheckDone(pSqlSub, pSqlObj, pSupporter->subqueryIndex)) {
|
||||||
tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
|
tscError("0x%"PRIx64" all subquery return and query failed, global code:%s", pSqlObj->self, tstrerror(pSqlObj->res.code));
|
||||||
freeJoinSubqueryObj(pSqlObj);
|
freeJoinSubqueryObj(pSqlObj);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
@ -785,7 +799,7 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr
|
||||||
STableIdInfo item = {.uid = tt->uid, .tid = tt->tid, .key = INT64_MIN};
|
STableIdInfo item = {.uid = tt->uid, .tid = tt->tid, .key = INT64_MIN};
|
||||||
taosArrayPush(vgTables, &item);
|
taosArrayPush(vgTables, &item);
|
||||||
|
|
||||||
tscTrace("%p tid:%d, uid:%"PRIu64",vgId:%d added", pSql, tt->tid, tt->uid, tt->vgId);
|
tscTrace("0x%"PRIx64" tid:%d, uid:%"PRIu64",vgId:%d added", pSql->self, tt->tid, tt->uid, tt->vgId);
|
||||||
prev = tt;
|
prev = tt;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -851,9 +865,9 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
|
||||||
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
|
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
|
||||||
|
|
||||||
tscDebug(
|
tscDebug(
|
||||||
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
|
"0x%"PRIx64" subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
|
||||||
"numOfExpr:%" PRIzu ", colList:%" PRIzu ", numOfOutputFields:%d, name:%s",
|
"numOfExpr:%" PRIzu ", colList:%" PRIzu ", numOfOutputFields:%d, name:%s",
|
||||||
pParent, pSql, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
|
pParent->self, pSql->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
|
||||||
tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
|
tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
|
||||||
|
|
||||||
tscBuildAndSendRequest(pSql, NULL);
|
tscBuildAndSendRequest(pSql, NULL);
|
||||||
|
|
@ -866,7 +880,7 @@ static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSq
|
||||||
assert(prev->vgId >= 1 && p->vgId >= 1);
|
assert(prev->vgId >= 1 && p->vgId >= 1);
|
||||||
|
|
||||||
if (doCompare(prev->tag, p->tag, pColSchema->type, pColSchema->bytes) == 0) {
|
if (doCompare(prev->tag, p->tag, pColSchema->type, pColSchema->bytes) == 0) {
|
||||||
tscError("%p join tags have same value for different table, free all sub SqlObj and quit", pPSqlObj);
|
tscError("0x%"PRIx64" join tags have same value for different table, free all sub SqlObj and quit", pPSqlObj->self);
|
||||||
pPSqlObj->res.code = TSDB_CODE_QRY_DUP_JOIN_KEY;
|
pPSqlObj->res.code = TSDB_CODE_QRY_DUP_JOIN_KEY;
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
@ -1102,7 +1116,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
||||||
assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY));
|
assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY));
|
||||||
|
|
||||||
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||||
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
|
tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code);
|
||||||
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
|
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
@ -1117,7 +1131,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
||||||
// todo retry if other subqueries are not failed
|
// todo retry if other subqueries are not failed
|
||||||
|
|
||||||
assert(numOfRows < 0 && numOfRows == taos_errno(pSql));
|
assert(numOfRows < 0 && numOfRows == taos_errno(pSql));
|
||||||
tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
|
tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex);
|
||||||
|
|
||||||
pParentSql->res.code = numOfRows;
|
pParentSql->res.code = numOfRows;
|
||||||
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
|
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
|
||||||
|
|
@ -1136,7 +1150,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
||||||
// todo handle memory error
|
// todo handle memory error
|
||||||
char* tmp = realloc(pSupporter->pIdTagList, length);
|
char* tmp = realloc(pSupporter->pIdTagList, length);
|
||||||
if (tmp == NULL) {
|
if (tmp == NULL) {
|
||||||
tscError("%p failed to malloc memory", pSql);
|
tscError("0x%"PRIx64" failed to malloc memory", pSql->self);
|
||||||
|
|
||||||
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
||||||
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
|
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
|
||||||
|
|
@ -1256,7 +1270,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
||||||
assert(!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE));
|
assert(!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE));
|
||||||
|
|
||||||
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||||
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
|
tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code);
|
||||||
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
|
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
@ -1270,7 +1284,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
||||||
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
|
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
|
||||||
// todo retry if other subqueries are not failed yet
|
// todo retry if other subqueries are not failed yet
|
||||||
assert(numOfRows < 0 && numOfRows == taos_errno(pSql));
|
assert(numOfRows < 0 && numOfRows == taos_errno(pSql));
|
||||||
tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
|
tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex);
|
||||||
|
|
||||||
pParentSql->res.code = numOfRows;
|
pParentSql->res.code = numOfRows;
|
||||||
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
|
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
|
||||||
|
|
@ -1286,7 +1300,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
||||||
pSupporter->f = fopen(pSupporter->path, "wb");
|
pSupporter->f = fopen(pSupporter->path, "wb");
|
||||||
|
|
||||||
if (pSupporter->f == NULL) {
|
if (pSupporter->f == NULL) {
|
||||||
tscError("%p failed to create tmp file:%s, reason:%s", pSql, pSupporter->path, strerror(errno));
|
tscError("0x%"PRIx64" failed to create tmp file:%s, reason:%s", pSql->self, pSupporter->path, strerror(errno));
|
||||||
|
|
||||||
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
||||||
|
|
||||||
|
|
@ -1306,7 +1320,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
||||||
|
|
||||||
STSBuf* pBuf = tsBufCreateFromFile(pSupporter->path, true);
|
STSBuf* pBuf = tsBufCreateFromFile(pSupporter->path, true);
|
||||||
if (pBuf == NULL) { // in error process, close the fd
|
if (pBuf == NULL) { // in error process, close the fd
|
||||||
tscError("%p invalid ts comp file from vnode, abort subquery, file size:%d", pSql, numOfRows);
|
tscError("0x%"PRIx64" invalid ts comp file from vnode, abort subquery, file size:%d", pSql->self, numOfRows);
|
||||||
|
|
||||||
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
||||||
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
|
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
|
||||||
|
|
@ -1403,7 +1417,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
|
||||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex);
|
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex);
|
||||||
|
|
||||||
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||||
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
|
tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code);
|
||||||
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
|
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
@ -1418,7 +1432,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
|
||||||
assert(numOfRows == taos_errno(pSql));
|
assert(numOfRows == taos_errno(pSql));
|
||||||
|
|
||||||
pParentSql->res.code = numOfRows;
|
pParentSql->res.code = numOfRows;
|
||||||
tscError("%p retrieve failed, index:%d, code:%s", pSql, pSupporter->subqueryIndex, tstrerror(numOfRows));
|
tscError("0x%"PRIx64" retrieve failed, index:%d, code:%s", pSql->self, pSupporter->subqueryIndex, tstrerror(numOfRows));
|
||||||
|
|
||||||
tscAsyncResultOnError(pParentSql);
|
tscAsyncResultOnError(pParentSql);
|
||||||
return;
|
return;
|
||||||
|
|
@ -1454,7 +1468,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
|
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
|
||||||
tscDebug("0x%"PRIx64" sub:%p,%d completed, total:%d", pParentSql->self, tres, pSupporter->subqueryIndex, pState->numOfSub);
|
tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d completed, total:%d", pParentSql->self, pSql->self, pSupporter->subqueryIndex, pState->numOfSub);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1475,16 +1489,16 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
|
||||||
SSqlRes* pRes1 = &pParentSql->pSubs[i]->res;
|
SSqlRes* pRes1 = &pParentSql->pSubs[i]->res;
|
||||||
|
|
||||||
if (pRes1->row > 0 && pRes1->numOfRows > 0) {
|
if (pRes1->row > 0 && pRes1->numOfRows > 0) {
|
||||||
tscDebug("0x%"PRIx64" sub:%p index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self, pParentSql->pSubs[i], i,
|
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self,
|
||||||
pRes1->numOfRows, pRes1->numOfTotal);
|
pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
|
||||||
assert(pRes1->row < pRes1->numOfRows);
|
assert(pRes1->row < pRes1->numOfRows);
|
||||||
} else {
|
} else {
|
||||||
if (!pQueryInfo->globalMerge) {
|
if (!pQueryInfo->globalMerge) {
|
||||||
pRes1->numOfClauseTotal += pRes1->numOfRows;
|
pRes1->numOfClauseTotal += pRes1->numOfRows;
|
||||||
}
|
}
|
||||||
|
|
||||||
tscDebug("0x%"PRIx64" sub:%p index:%d numOfRows:%d total:%"PRId64, pParentSql->self, pParentSql->pSubs[i], i,
|
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64, pParentSql->self,
|
||||||
pRes1->numOfRows, pRes1->numOfTotal);
|
pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1665,7 +1679,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
|
||||||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||||
|
|
||||||
if (pRes1->row >= pRes1->numOfRows) {
|
if (pRes1->row >= pRes1->numOfRows) {
|
||||||
tscDebug("0x%"PRIx64" subquery:%p retrieve data from vnode, subquery:%d, vgroupIndex:%d", pSql->self, pSql1,
|
tscDebug("0x%"PRIx64" subquery:0x%"PRIx64" retrieve data from vnode, subquery:%d, vgroupIndex:%d", pSql->self, pSql1->self,
|
||||||
pSupporter->subqueryIndex, pTableMetaInfo->vgroupIndex);
|
pSupporter->subqueryIndex, pTableMetaInfo->vgroupIndex);
|
||||||
|
|
||||||
tscResetForNextRetrieve(pRes1);
|
tscResetForNextRetrieve(pRes1);
|
||||||
|
|
@ -1745,7 +1759,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
|
||||||
|
|
||||||
// retrieve actual query results from vnode during the second stage join subquery
|
// retrieve actual query results from vnode during the second stage join subquery
|
||||||
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||||
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, code, pParentSql->res.code);
|
tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, code, pParentSql->res.code);
|
||||||
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
|
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
@ -1759,7 +1773,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
|
||||||
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
|
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
|
||||||
assert(taos_errno(pSql) == code);
|
assert(taos_errno(pSql) == code);
|
||||||
|
|
||||||
tscError("%p abort query, code:%s, global code:%s", pSql, tstrerror(code), tstrerror(pParentSql->res.code));
|
tscError("0x%"PRIx64" abort query, code:%s, global code:%s", pSql->self, tstrerror(code), tstrerror(pParentSql->res.code));
|
||||||
pParentSql->res.code = code;
|
pParentSql->res.code = code;
|
||||||
|
|
||||||
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
|
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
|
||||||
|
|
@ -1990,7 +2004,7 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
|
||||||
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
|
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
|
||||||
SJoinSupporter *pSupporter = tscCreateJoinSupporter(pSql, i);
|
SJoinSupporter *pSupporter = tscCreateJoinSupporter(pSql, i);
|
||||||
if (pSupporter == NULL) { // failed to create support struct, abort current query
|
if (pSupporter == NULL) { // failed to create support struct, abort current query
|
||||||
tscError("%p tableIndex:%d, failed to allocate join support object, abort further query", pSql, i);
|
tscError("0x%"PRIx64" tableIndex:%d, failed to allocate join support object, abort further query", pSql->self, i);
|
||||||
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
goto _error;
|
goto _error;
|
||||||
}
|
}
|
||||||
|
|
@ -2388,9 +2402,9 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
|
||||||
tscTansformFuncForSTableQuery(pNewQueryInfo);
|
tscTansformFuncForSTableQuery(pNewQueryInfo);
|
||||||
|
|
||||||
tscDebug(
|
tscDebug(
|
||||||
"%p first round subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, "
|
"0x%"PRIx64" first round subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, "
|
||||||
"numOfExpr:%" PRIzu ", colList:%d, numOfOutputFields:%d, name:%s",
|
"numOfExpr:%" PRIzu ", colList:%d, numOfOutputFields:%d, name:%s",
|
||||||
pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type,
|
pSql->self, pNew->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type,
|
||||||
tscSqlExprNumOfExprs(pNewQueryInfo), index+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
|
tscSqlExprNumOfExprs(pNewQueryInfo), index+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
|
||||||
|
|
||||||
tscHandleMasterSTableQuery(pNew);
|
tscHandleMasterSTableQuery(pNew);
|
||||||
|
|
@ -2476,7 +2490,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
|
||||||
for (; i < pState->numOfSub; ++i) {
|
for (; i < pState->numOfSub; ++i) {
|
||||||
SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport));
|
SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport));
|
||||||
if (trs == NULL) {
|
if (trs == NULL) {
|
||||||
tscError("%p failed to malloc buffer for SRetrieveSupport, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
|
tscError("0x%"PRIx64" failed to malloc buffer for SRetrieveSupport, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno));
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -2485,7 +2499,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
|
||||||
|
|
||||||
trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
|
trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
|
||||||
if (trs->localBuffer == NULL) {
|
if (trs->localBuffer == NULL) {
|
||||||
tscError("%p failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
|
tscError("0x%"PRIx64" failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno));
|
||||||
tfree(trs);
|
tfree(trs);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
@ -2497,7 +2511,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
|
||||||
|
|
||||||
SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL);
|
SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL);
|
||||||
if (pNew == NULL) {
|
if (pNew == NULL) {
|
||||||
tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
|
tscError("0x%"PRIx64" failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno));
|
||||||
tfree(trs->localBuffer);
|
tfree(trs->localBuffer);
|
||||||
tfree(trs);
|
tfree(trs);
|
||||||
break;
|
break;
|
||||||
|
|
@ -2510,11 +2524,12 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
|
||||||
assert(pNewQueryInfo->tsBuf != NULL);
|
assert(pNewQueryInfo->tsBuf != NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
tscDebug("0x%"PRIx64" sub:%p create subquery success. orderOfSub:%d", pSql->self, pNew, trs->subqueryIndex);
|
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" create subquery success. orderOfSub:%d", pSql->self, pNew->self,
|
||||||
|
trs->subqueryIndex);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (i < pState->numOfSub) {
|
if (i < pState->numOfSub) {
|
||||||
tscError("%p failed to prepare subquery structure and launch subqueries", pSql);
|
tscError("0x%"PRIx64" failed to prepare subquery structure and launch subqueries", pSql->self);
|
||||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
|
|
||||||
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel, pState->numOfSub);
|
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel, pState->numOfSub);
|
||||||
|
|
@ -2558,7 +2573,7 @@ static void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, i
|
||||||
|
|
||||||
static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES *tres, int32_t code) {
|
static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES *tres, int32_t code) {
|
||||||
// set no disk space error info
|
// set no disk space error info
|
||||||
tscError("sub:%p failed to flush data to disk, reason:%s", tres, tstrerror(code));
|
tscError("sub:0x%"PRIx64" failed to flush data to disk, reason:%s", ((SSqlObj *)tres)->self, tstrerror(code));
|
||||||
SSqlObj* pParentSql = trsupport->pParentSql;
|
SSqlObj* pParentSql = trsupport->pParentSql;
|
||||||
|
|
||||||
pParentSql->res.code = code;
|
pParentSql->res.code = code;
|
||||||
|
|
@ -2583,7 +2598,7 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32
|
||||||
const uint32_t nBufferSize = (1u << 16u); // 64KB
|
const uint32_t nBufferSize = (1u << 16u); // 64KB
|
||||||
trsupport->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
|
trsupport->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
|
||||||
if (trsupport->localBuffer == NULL) {
|
if (trsupport->localBuffer == NULL) {
|
||||||
tscError("%p failed to malloc buffer for local buffer, reason:%s", pSql, strerror(errno));
|
tscError("0x%"PRIx64" failed to malloc buffer for local buffer, reason:%s", pSql->self, strerror(errno));
|
||||||
tfree(trsupport);
|
tfree(trsupport);
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
@ -2598,13 +2613,13 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32
|
||||||
|
|
||||||
// clear local saved number of results
|
// clear local saved number of results
|
||||||
trsupport->localBuffer->num = 0;
|
trsupport->localBuffer->num = 0;
|
||||||
tscError("%p sub:%p retrieve/query failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql, pSql,
|
tscError("0x%"PRIx64" sub:0x%"PRIx64" retrieve/query failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql->self, pSql->self,
|
||||||
tstrerror(code), subqueryIndex, trsupport->numOfRetry);
|
tstrerror(code), subqueryIndex, trsupport->numOfRetry);
|
||||||
|
|
||||||
SSqlObj *pNew = tscCreateSTableSubquery(trsupport->pParentSql, trsupport, pSql);
|
SSqlObj *pNew = tscCreateSTableSubquery(trsupport->pParentSql, trsupport, pSql);
|
||||||
if (pNew == NULL) {
|
if (pNew == NULL) {
|
||||||
tscError("%p sub:%p failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d",
|
tscError("0x%"PRIx64" sub:0x%"PRIx64" failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d",
|
||||||
oriTrs->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, oriTrs->subqueryIndex);
|
oriTrs->pParentSql->self, pSql->self, tstrerror(terrno), pVgroup->vgId, oriTrs->subqueryIndex);
|
||||||
|
|
||||||
pParentSql->res.code = terrno;
|
pParentSql->res.code = terrno;
|
||||||
oriTrs->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
|
oriTrs->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
|
||||||
|
|
@ -2658,7 +2673,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
|
||||||
|
|
||||||
if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query.
|
if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query.
|
||||||
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" retrieve numOfRows:%d,orderOfSub:%d", pParentSql->self, pSql->self, numOfRows, subqueryIndex);
|
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" retrieve numOfRows:%d,orderOfSub:%d", pParentSql->self, pSql->self, numOfRows, subqueryIndex);
|
||||||
tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%s", pParentSql, pSql,
|
tscError("0x%"PRIx64" sub:0x%"PRIx64" abort further retrieval due to other queries failure,orderOfSub:%d,code:%s", pParentSql->self, pSql->self,
|
||||||
subqueryIndex, tstrerror(pParentSql->res.code));
|
subqueryIndex, tstrerror(pParentSql->res.code));
|
||||||
} else {
|
} else {
|
||||||
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pParentSql->res.code == TSDB_CODE_SUCCESS) {
|
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pParentSql->res.code == TSDB_CODE_SUCCESS) {
|
||||||
|
|
@ -2670,20 +2685,21 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
|
||||||
}
|
}
|
||||||
} else { // reach the maximum retry count, abort
|
} else { // reach the maximum retry count, abort
|
||||||
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, numOfRows);
|
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, numOfRows);
|
||||||
tscError("%p sub:%p retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%s", pParentSql, pSql,
|
tscError("0x%"PRIx64" sub:0x%"PRIx64" retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%s", pParentSql->self, pSql->self,
|
||||||
tstrerror(numOfRows), subqueryIndex, tstrerror(pParentSql->res.code));
|
tstrerror(numOfRows), subqueryIndex, tstrerror(pParentSql->res.code));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!subAndCheckDone(pSql, pParentSql, subqueryIndex)) {
|
if (!subAndCheckDone(pSql, pParentSql, subqueryIndex)) {
|
||||||
tscDebug("0x%"PRIx64" sub:%p,%d freed, not finished, total:%d", pParentSql->self, pSql, trsupport->subqueryIndex, pState->numOfSub);
|
tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d freed, not finished, total:%d", pParentSql->self,
|
||||||
|
pSql->self, trsupport->subqueryIndex, pState->numOfSub);
|
||||||
|
|
||||||
tscFreeRetrieveSup(pSql);
|
tscFreeRetrieveSup(pSql);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// all subqueries are failed
|
// all subqueries are failed
|
||||||
tscError("%p retrieve from %d vnode(s) completed,code:%s.FAILED.", pParentSql, pState->numOfSub,
|
tscError("0x%"PRIx64" retrieve from %d vnode(s) completed,code:%s.FAILED.", pParentSql->self, pState->numOfSub,
|
||||||
tstrerror(pParentSql->res.code));
|
tstrerror(pParentSql->res.code));
|
||||||
|
|
||||||
// release allocated resource
|
// release allocated resource
|
||||||
|
|
@ -2717,8 +2733,8 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
|
||||||
// data in from current vnode is stored in cache and disk
|
// data in from current vnode is stored in cache and disk
|
||||||
uint32_t numOfRowsFromSubquery = (uint32_t)(trsupport->pExtMemBuffer[idx]->numOfTotalElems + trsupport->localBuffer->num);
|
uint32_t numOfRowsFromSubquery = (uint32_t)(trsupport->pExtMemBuffer[idx]->numOfTotalElems + trsupport->localBuffer->num);
|
||||||
SVgroupsInfo* vgroupsInfo = pTableMetaInfo->vgroupList;
|
SVgroupsInfo* vgroupsInfo = pTableMetaInfo->vgroupList;
|
||||||
tscDebug("0x%"PRIx64" sub:%p all data retrieved from ep:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql->self, pSql,
|
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" all data retrieved from ep:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql->self,
|
||||||
vgroupsInfo->vgroups[0].epAddr[0].fqdn, vgroupsInfo->vgroups[0].vgId, numOfRowsFromSubquery, idx);
|
pSql->self, vgroupsInfo->vgroups[0].epAddr[0].fqdn, vgroupsInfo->vgroups[0].vgId, numOfRowsFromSubquery, idx);
|
||||||
|
|
||||||
tColModelCompact(pDesc->pColumnModel, trsupport->localBuffer, pDesc->pColumnModel->capacity);
|
tColModelCompact(pDesc->pColumnModel, trsupport->localBuffer, pDesc->pColumnModel->capacity);
|
||||||
|
|
||||||
|
|
@ -2731,7 +2747,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) {
|
if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) {
|
||||||
tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql, pSql,
|
tscError("0x%"PRIx64" sub:0x%"PRIx64" client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql->self, pSql->self,
|
||||||
tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace);
|
tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace);
|
||||||
tscAbortFurtherRetryRetrieval(trsupport, pSql, TSDB_CODE_TSC_NO_DISKSPACE);
|
tscAbortFurtherRetryRetrieval(trsupport, pSql, TSDB_CODE_TSC_NO_DISKSPACE);
|
||||||
return;
|
return;
|
||||||
|
|
@ -2746,7 +2762,8 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!subAndCheckDone(pSql, pParentSql, idx)) {
|
if (!subAndCheckDone(pSql, pParentSql, idx)) {
|
||||||
tscDebug("0x%"PRIx64" sub:%p orderOfSub:%d freed, not finished", pParentSql->self, pSql, trsupport->subqueryIndex);
|
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d freed, not finished", pParentSql->self, pSql->self,
|
||||||
|
trsupport->subqueryIndex);
|
||||||
|
|
||||||
tscFreeRetrieveSup(pSql);
|
tscFreeRetrieveSup(pSql);
|
||||||
return;
|
return;
|
||||||
|
|
@ -2837,7 +2854,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
|
||||||
}
|
}
|
||||||
|
|
||||||
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
|
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
|
||||||
tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry);
|
tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(numOfRows), trsupport->numOfRetry);
|
||||||
|
|
||||||
int32_t sent = 0;
|
int32_t sent = 0;
|
||||||
|
|
||||||
|
|
@ -2865,8 +2882,8 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
|
||||||
pParentSql->self, pSql, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
|
pParentSql->self, pSql, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
|
||||||
|
|
||||||
if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
|
if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
|
||||||
tscError("%p sub:%p num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
|
tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
|
||||||
pParentSql, pSql, tsMaxNumOfOrderedResults, num);
|
pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
|
||||||
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
|
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
@ -2881,7 +2898,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
|
||||||
|
|
||||||
// no disk space for tmp directory
|
// no disk space for tmp directory
|
||||||
if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) {
|
if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) {
|
||||||
tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql, pSql,
|
tscError("0x%"PRIx64" sub:0x%"PRIx64" client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql->self, pSql->self,
|
||||||
tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace);
|
tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace);
|
||||||
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_NO_DISKSPACE);
|
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_NO_DISKSPACE);
|
||||||
return;
|
return;
|
||||||
|
|
@ -2951,8 +2968,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
|
||||||
// stable query killed or other subquery failed, all query stopped
|
// stable query killed or other subquery failed, all query stopped
|
||||||
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||||
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
|
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
|
||||||
tscError("%p query cancelled or failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s",
|
tscError("0x%"PRIx64" query cancelled or failed, sub:0x%"PRIx64", vgId:%d, orderOfSub:%d, code:%s, global code:%s",
|
||||||
pParentSql, pSql, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(code), tstrerror(pParentSql->res.code));
|
pParentSql->self, pSql->self, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(code), tstrerror(pParentSql->res.code));
|
||||||
|
|
||||||
tscHandleSubqueryError(param, tres, code);
|
tscHandleSubqueryError(param, tres, code);
|
||||||
return;
|
return;
|
||||||
|
|
@ -2969,7 +2986,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
|
||||||
assert(code == taos_errno(pSql));
|
assert(code == taos_errno(pSql));
|
||||||
|
|
||||||
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
|
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
|
||||||
tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry);
|
tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(code), trsupport->numOfRetry);
|
||||||
|
|
||||||
int32_t sent = 0;
|
int32_t sent = 0;
|
||||||
|
|
||||||
|
|
@ -2978,7 +2995,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
tscError("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(code));
|
tscError("0x%"PRIx64" sub:0x%"PRIx64" reach the max retry times, set global code:%s", pParentSql->self, pSql->self, tstrerror(code));
|
||||||
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort
|
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -2998,7 +3015,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
|
||||||
|
|
||||||
static bool needRetryInsert(SSqlObj* pParentObj, int32_t numOfSub) {
|
static bool needRetryInsert(SSqlObj* pParentObj, int32_t numOfSub) {
|
||||||
if (pParentObj->retry > pParentObj->maxRetry) {
|
if (pParentObj->retry > pParentObj->maxRetry) {
|
||||||
tscError("%p max retry reached, abort the retry effort", pParentObj);
|
tscError("0x%"PRIx64" max retry reached, abort the retry effort", pParentObj->self);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -3090,16 +3107,17 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
tscError("%p Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj,
|
tscError("0x%"PRIx64" Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj->self,
|
||||||
pParentObj->res.numOfRows, numOfFailed, numOfSub);
|
pParentObj->res.numOfRows, numOfFailed, numOfSub);
|
||||||
|
|
||||||
tscDebug("0x%"PRIx64" cleanup %d tableMeta in hashTable", pParentObj->self, pParentObj->cmd.numOfTables);
|
tscDebug("0x%"PRIx64" cleanup %d tableMeta in hashTable before reparse sql", pParentObj->self, pParentObj->cmd.numOfTables);
|
||||||
for(int32_t i = 0; i < pParentObj->cmd.numOfTables; ++i) {
|
for(int32_t i = 0; i < pParentObj->cmd.numOfTables; ++i) {
|
||||||
char name[TSDB_TABLE_FNAME_LEN] = {0};
|
char name[TSDB_TABLE_FNAME_LEN] = {0};
|
||||||
tNameExtractFullName(pParentObj->cmd.pTableNameList[i], name);
|
tNameExtractFullName(pParentObj->cmd.pTableNameList[i], name);
|
||||||
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
|
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pParentObj->res.code = TSDB_CODE_SUCCESS;
|
||||||
pParentObj->cmd.parseFinished = false;
|
pParentObj->cmd.parseFinished = false;
|
||||||
|
|
||||||
tscResetSqlCmd(&pParentObj->cmd, false);
|
tscResetSqlCmd(&pParentObj->cmd, false);
|
||||||
|
|
@ -3159,7 +3177,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
|
||||||
pSup->pSql = pSql;
|
pSup->pSql = pSql;
|
||||||
|
|
||||||
pSub->param = pSup;
|
pSub->param = pSup;
|
||||||
tscDebug("0x%"PRIx64" sub:%p launch sub insert, orderOfSub:%d", pSql->self, pSub, i);
|
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" launch sub insert, orderOfSub:%d", pSql->self, pSub->self, i);
|
||||||
if (pSub->res.code != TSDB_CODE_SUCCESS) {
|
if (pSub->res.code != TSDB_CODE_SUCCESS) {
|
||||||
tscHandleInsertRetry(pSql, pSub);
|
tscHandleInsertRetry(pSql, pSub);
|
||||||
}
|
}
|
||||||
|
|
@ -3207,7 +3225,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
|
||||||
|
|
||||||
SSqlObj *pNew = createSimpleSubObj(pSql, multiVnodeInsertFinalize, pSupporter, TSDB_SQL_INSERT);
|
SSqlObj *pNew = createSimpleSubObj(pSql, multiVnodeInsertFinalize, pSupporter, TSDB_SQL_INSERT);
|
||||||
if (pNew == NULL) {
|
if (pNew == NULL) {
|
||||||
tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, numOfSub, strerror(errno));
|
tscError("0x%"PRIx64" failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql->self, numOfSub, strerror(errno));
|
||||||
goto _error;
|
goto _error;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -3231,7 +3249,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (numOfSub < pSql->subState.numOfSub) {
|
if (numOfSub < pSql->subState.numOfSub) {
|
||||||
tscError("%p failed to prepare subObj structure and launch sub-insertion", pSql);
|
tscError("0x%"PRIx64" failed to prepare subObj structure and launch sub-insertion", pSql->self);
|
||||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
goto _error;
|
goto _error;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1113,6 +1113,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
|
||||||
}
|
}
|
||||||
|
|
||||||
pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
|
pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
|
||||||
|
pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pDataBlock->pTableMeta);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
@ -1329,6 +1330,8 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
|
||||||
|
|
||||||
STableDataBlocks* pOneTableBlock = *p;
|
STableDataBlocks* pOneTableBlock = *p;
|
||||||
while(pOneTableBlock) {
|
while(pOneTableBlock) {
|
||||||
|
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
|
||||||
|
if (pBlocks->numOfRows > 0) {
|
||||||
// the maximum expanded size in byte when a row-wise data is converted to SDataRow format
|
// the maximum expanded size in byte when a row-wise data is converted to SDataRow format
|
||||||
int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
|
int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
|
||||||
STableDataBlocks* dataBuf = NULL;
|
STableDataBlocks* dataBuf = NULL;
|
||||||
|
|
@ -1336,13 +1339,12 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
|
||||||
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
|
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
|
||||||
INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
|
INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
|
||||||
if (ret != TSDB_CODE_SUCCESS) {
|
if (ret != TSDB_CODE_SUCCESS) {
|
||||||
tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret);
|
tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret);
|
||||||
taosHashCleanup(pVnodeDataBlockHashList);
|
taosHashCleanup(pVnodeDataBlockHashList);
|
||||||
tscDestroyBlockArrayList(pVnodeDataBlockList);
|
tscDestroyBlockArrayList(pVnodeDataBlockList);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
|
|
||||||
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
|
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
|
||||||
|
|
||||||
if (dataBuf->nAllocSize < destSize) {
|
if (dataBuf->nAllocSize < destSize) {
|
||||||
|
|
@ -1355,7 +1357,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
|
||||||
dataBuf->pData = tmp;
|
dataBuf->pData = tmp;
|
||||||
memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
|
memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
|
||||||
} else { // failed to allocate memory, free already allocated memory and return error code
|
} else { // failed to allocate memory, free already allocated memory and return error code
|
||||||
tscError("%p failed to allocate memory for merging submit block, size:%d", pSql, dataBuf->nAllocSize);
|
tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize);
|
||||||
|
|
||||||
taosHashCleanup(pVnodeDataBlockHashList);
|
taosHashCleanup(pVnodeDataBlockHashList);
|
||||||
tscDestroyBlockArrayList(pVnodeDataBlockList);
|
tscDestroyBlockArrayList(pVnodeDataBlockList);
|
||||||
|
|
@ -1390,6 +1392,11 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
|
||||||
pBlocks->dataLen = htonl(finalLen);
|
pBlocks->dataLen = htonl(finalLen);
|
||||||
dataBuf->numOfTables += 1;
|
dataBuf->numOfTables += 1;
|
||||||
|
|
||||||
|
pBlocks->numOfRows = 0;
|
||||||
|
}else {
|
||||||
|
tscDebug("0x%"PRIx64" table %s data block is empty", pSql->self, pOneTableBlock->tableName.tname);
|
||||||
|
}
|
||||||
|
|
||||||
p = taosHashIterate(pCmd->pTableBlockHashList, p);
|
p = taosHashIterate(pCmd->pTableBlockHashList, p);
|
||||||
if (p == NULL) {
|
if (p == NULL) {
|
||||||
break;
|
break;
|
||||||
|
|
@ -1511,7 +1518,7 @@ int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) {
|
||||||
return pInfo->pExpr->base.offset;
|
return pInfo->pExpr->base.offset;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) {
|
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize) {
|
||||||
assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
|
assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
|
||||||
|
|
||||||
if (pFieldInfo1->numOfOutput != pFieldInfo2->numOfOutput) {
|
if (pFieldInfo1->numOfOutput != pFieldInfo2->numOfOutput) {
|
||||||
|
|
@ -1523,15 +1530,36 @@ int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFi
|
||||||
TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
|
TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
|
||||||
|
|
||||||
if (pField1->type != pField2->type ||
|
if (pField1->type != pField2->type ||
|
||||||
pField1->bytes != pField2->bytes ||
|
|
||||||
strcasecmp(pField1->name, pField2->name) != 0) {
|
strcasecmp(pField1->name, pField2->name) != 0) {
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (pField1->bytes != pField2->bytes) {
|
||||||
|
*diffSize = 1;
|
||||||
|
|
||||||
|
if (pField2->bytes > pField1->bytes) {
|
||||||
|
pField1->bytes = pField2->bytes;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) {
|
||||||
|
assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
|
||||||
|
|
||||||
|
for (int32_t i = 0; i < pFieldInfo1->numOfOutput; ++i) {
|
||||||
|
TAOS_FIELD* pField1 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo1, i);
|
||||||
|
TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
|
||||||
|
|
||||||
|
pField2->bytes = pField1->bytes;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
int32_t tscGetResRowLength(SArray* pExprList) {
|
int32_t tscGetResRowLength(SArray* pExprList) {
|
||||||
size_t num = taosArrayGetSize(pExprList);
|
size_t num = taosArrayGetSize(pExprList);
|
||||||
if (num == 0) {
|
if (num == 0) {
|
||||||
|
|
@ -1941,7 +1969,7 @@ void tscColumnListDestroy(SArray* pColumnList) {
|
||||||
static int32_t validateQuoteToken(SStrToken* pToken) {
|
static int32_t validateQuoteToken(SStrToken* pToken) {
|
||||||
tscDequoteAndTrimToken(pToken);
|
tscDequoteAndTrimToken(pToken);
|
||||||
|
|
||||||
int32_t k = tSQLGetToken(pToken->z, &pToken->type);
|
int32_t k = tGetToken(pToken->z, &pToken->type);
|
||||||
|
|
||||||
if (pToken->type == TK_STRING) {
|
if (pToken->type == TK_STRING) {
|
||||||
return tscValidateName(pToken);
|
return tscValidateName(pToken);
|
||||||
|
|
@ -2009,7 +2037,7 @@ int32_t tscValidateName(SStrToken* pToken) {
|
||||||
tscStrToLower(pToken->z, pToken->n);
|
tscStrToLower(pToken->z, pToken->n);
|
||||||
//pToken->n = (uint32_t)strtrim(pToken->z);
|
//pToken->n = (uint32_t)strtrim(pToken->z);
|
||||||
|
|
||||||
int len = tSQLGetToken(pToken->z, &pToken->type);
|
int len = tGetToken(pToken->z, &pToken->type);
|
||||||
|
|
||||||
// single token, validate it
|
// single token, validate it
|
||||||
if (len == pToken->n) {
|
if (len == pToken->n) {
|
||||||
|
|
@ -2035,7 +2063,7 @@ int32_t tscValidateName(SStrToken* pToken) {
|
||||||
pToken->n = (uint32_t)strtrim(pToken->z);
|
pToken->n = (uint32_t)strtrim(pToken->z);
|
||||||
}
|
}
|
||||||
|
|
||||||
pToken->n = tSQLGetToken(pToken->z, &pToken->type);
|
pToken->n = tGetToken(pToken->z, &pToken->type);
|
||||||
if (pToken->z[pToken->n] != TS_PATH_DELIMITER[0]) {
|
if (pToken->z[pToken->n] != TS_PATH_DELIMITER[0]) {
|
||||||
return TSDB_CODE_TSC_INVALID_SQL;
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
}
|
}
|
||||||
|
|
@ -2052,7 +2080,7 @@ int32_t tscValidateName(SStrToken* pToken) {
|
||||||
|
|
||||||
pToken->z = sep + 1;
|
pToken->z = sep + 1;
|
||||||
pToken->n = (uint32_t)(oldLen - (sep - pStr) - 1);
|
pToken->n = (uint32_t)(oldLen - (sep - pStr) - 1);
|
||||||
int32_t len = tSQLGetToken(pToken->z, &pToken->type);
|
int32_t len = tGetToken(pToken->z, &pToken->type);
|
||||||
if (len != pToken->n || (pToken->type != TK_STRING && pToken->type != TK_ID)) {
|
if (len != pToken->n || (pToken->type != TK_STRING && pToken->type != TK_ID)) {
|
||||||
return TSDB_CODE_TSC_INVALID_SQL;
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
}
|
}
|
||||||
|
|
@ -2507,6 +2535,11 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
|
||||||
}
|
}
|
||||||
|
|
||||||
pTableMetaInfo->pTableMeta = pTableMeta;
|
pTableMetaInfo->pTableMeta = pTableMeta;
|
||||||
|
if (pTableMetaInfo->pTableMeta == NULL) {
|
||||||
|
pTableMetaInfo->tableMetaSize = 0;
|
||||||
|
} else {
|
||||||
|
pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta);
|
||||||
|
}
|
||||||
|
|
||||||
if (vgroupList != NULL) {
|
if (vgroupList != NULL) {
|
||||||
pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList);
|
pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList);
|
||||||
|
|
@ -2565,7 +2598,7 @@ void registerSqlObj(SSqlObj* pSql) {
|
||||||
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd) {
|
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd) {
|
||||||
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
|
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
|
||||||
if (pNew == NULL) {
|
if (pNew == NULL) {
|
||||||
tscError("%p new subquery failed, tableIndex:%d", pSql, 0);
|
tscError("0x%"PRIx64" new subquery failed, tableIndex:%d", pSql->self, 0);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -2579,7 +2612,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
|
||||||
|
|
||||||
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
|
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
tscError("%p new subquery failed, unable to malloc tag data, tableIndex:%d", pSql, 0);
|
tscError("0x%"PRIx64" new subquery failed, unable to malloc tag data, tableIndex:%d", pSql->self, 0);
|
||||||
free(pNew);
|
free(pNew);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
@ -2655,7 +2688,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
|
||||||
|
|
||||||
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
|
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
|
||||||
if (pNew == NULL) {
|
if (pNew == NULL) {
|
||||||
tscError("%p new subquery failed, tableIndex:%d", pSql, tableIndex);
|
tscError("0x%"PRIx64" new subquery failed, tableIndex:%d", pSql->self, tableIndex);
|
||||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
@ -2755,7 +2788,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
|
||||||
}
|
}
|
||||||
|
|
||||||
if (tscAllocPayload(pnCmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) {
|
if (tscAllocPayload(pnCmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) {
|
||||||
tscError("%p new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql, tableIndex, pTableMetaInfo->vgroupIndex);
|
tscError("0x%"PRIx64" new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql->self, tableIndex, pTableMetaInfo->vgroupIndex);
|
||||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
goto _error;
|
goto _error;
|
||||||
}
|
}
|
||||||
|
|
@ -2791,6 +2824,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
|
||||||
|
|
||||||
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
|
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
|
||||||
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
|
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
|
||||||
|
|
||||||
} else { // transfer the ownership of pTableMeta to the newly create sql object.
|
} else { // transfer the ownership of pTableMeta to the newly create sql object.
|
||||||
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
|
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
|
||||||
if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) {
|
if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) {
|
||||||
|
|
@ -2806,7 +2840,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
|
||||||
|
|
||||||
// this case cannot be happened
|
// this case cannot be happened
|
||||||
if (pFinalInfo->pTableMeta == NULL) {
|
if (pFinalInfo->pTableMeta == NULL) {
|
||||||
tscError("%p new subquery failed since no tableMeta, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
|
tscError("0x%"PRIx64" new subquery failed since no tableMeta, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name));
|
||||||
|
|
||||||
if (pPrevSql != NULL) { // pass the previous error to client
|
if (pPrevSql != NULL) { // pass the previous error to client
|
||||||
assert(pPrevSql->res.code != TSDB_CODE_SUCCESS);
|
assert(pPrevSql->res.code != TSDB_CODE_SUCCESS);
|
||||||
|
|
@ -2824,13 +2858,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
|
||||||
assert(pFinalInfo->vgroupList != NULL);
|
assert(pFinalInfo->vgroupList != NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
registerSqlObj(pNew);
|
||||||
|
|
||||||
if (cmd == TSDB_SQL_SELECT) {
|
if (cmd == TSDB_SQL_SELECT) {
|
||||||
size_t size = taosArrayGetSize(pNewQueryInfo->colList);
|
size_t size = taosArrayGetSize(pNewQueryInfo->colList);
|
||||||
|
|
||||||
tscDebug(
|
tscDebug("0x%"PRIx64" new subquery:0x%"PRIx64", tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ","
|
||||||
"%p new subquery:%p, tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ","
|
|
||||||
"fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64,
|
"fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64,
|
||||||
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
|
pSql->self, pNew->self, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
|
||||||
size, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pFinalInfo->name), pNewQueryInfo->window.skey,
|
size, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pFinalInfo->name), pNewQueryInfo->window.skey,
|
||||||
pNewQueryInfo->window.ekey, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit);
|
pNewQueryInfo->window.ekey, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit);
|
||||||
|
|
||||||
|
|
@ -2839,7 +2874,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
|
||||||
tscDebug("0x%"PRIx64" new sub insertion: %p, vnodeIdx:%d", pSql->self, pNew, pTableMetaInfo->vgroupIndex);
|
tscDebug("0x%"PRIx64" new sub insertion: %p, vnodeIdx:%d", pSql->self, pNew, pTableMetaInfo->vgroupIndex);
|
||||||
}
|
}
|
||||||
|
|
||||||
registerSqlObj(pNew);
|
|
||||||
return pNew;
|
return pNew;
|
||||||
|
|
||||||
_error:
|
_error:
|
||||||
|
|
@ -3185,7 +3219,13 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) {
|
||||||
|
|
||||||
//backup the total number of result first
|
//backup the total number of result first
|
||||||
int64_t num = pRes->numOfTotal + pRes->numOfClauseTotal;
|
int64_t num = pRes->numOfTotal + pRes->numOfClauseTotal;
|
||||||
|
|
||||||
|
|
||||||
|
// DON't free final since it may be recoreded and used later in APP
|
||||||
|
TAOS_FIELD* finalBk = pRes->final;
|
||||||
|
pRes->final = NULL;
|
||||||
tscFreeSqlResult(pSql);
|
tscFreeSqlResult(pSql);
|
||||||
|
pRes->final = finalBk;
|
||||||
|
|
||||||
pRes->numOfTotal = num;
|
pRes->numOfTotal = num;
|
||||||
|
|
||||||
|
|
@ -3418,11 +3458,11 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
|
||||||
return cMeta;
|
return cMeta;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
|
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf) {
|
||||||
assert(pChild != NULL);
|
assert(pChild != NULL && buf != NULL);
|
||||||
|
|
||||||
uint32_t size = tscGetTableMetaMaxSize();
|
// uint32_t size = tscGetTableMetaMaxSize();
|
||||||
STableMeta* p = calloc(1, size);
|
STableMeta* p = buf;//calloc(1, size);
|
||||||
|
|
||||||
taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1);
|
taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1);
|
||||||
if (p->id.uid > 0) { // tableMeta exists, build child table meta and return
|
if (p->id.uid > 0) { // tableMeta exists, build child table meta and return
|
||||||
|
|
@ -3434,12 +3474,12 @@ int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
|
||||||
|
|
||||||
memcpy(pChild->schema, p->schema, sizeof(SSchema) *total);
|
memcpy(pChild->schema, p->schema, sizeof(SSchema) *total);
|
||||||
|
|
||||||
tfree(p);
|
// tfree(p);
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
} else { // super table has been removed, current tableMeta is also expired. remove it here
|
} else { // super table has been removed, current tableMeta is also expired. remove it here
|
||||||
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
|
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
|
||||||
|
|
||||||
tfree(p);
|
// tfree(p);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,7 @@
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
|
||||||
#include "taos.h"
|
#include "taos.h"
|
||||||
#include "tstoken.h"
|
#include "ttoken.h"
|
||||||
#include "tutil.h"
|
#include "tutil.h"
|
||||||
|
|
||||||
int main(int argc, char** argv) {
|
int main(int argc, char** argv) {
|
||||||
|
|
|
||||||
|
|
@ -83,6 +83,7 @@ enum {
|
||||||
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_TABLE_JOIN_RETRIEVE, "join-retrieve" )
|
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_TABLE_JOIN_RETRIEVE, "join-retrieve" )
|
||||||
|
|
||||||
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_TABLE, "show-create-table")
|
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_TABLE, "show-create-table")
|
||||||
|
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_STABLE, "show-create-stable")
|
||||||
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database")
|
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database")
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
||||||
|
|
@ -15,10 +15,7 @@
|
||||||
#ifndef _TD_DATA_FORMAT_H_
|
#ifndef _TD_DATA_FORMAT_H_
|
||||||
#define _TD_DATA_FORMAT_H_
|
#define _TD_DATA_FORMAT_H_
|
||||||
|
|
||||||
#include <stdint.h>
|
#include "os.h"
|
||||||
#include <stdlib.h>
|
|
||||||
#include <string.h>
|
|
||||||
|
|
||||||
#include "talgo.h"
|
#include "talgo.h"
|
||||||
#include "ttype.h"
|
#include "ttype.h"
|
||||||
#include "tutil.h"
|
#include "tutil.h"
|
||||||
|
|
|
||||||
|
|
@ -89,9 +89,6 @@ tExprNode* exprdup(tExprNode* pTree);
|
||||||
|
|
||||||
bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
|
bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
|
||||||
|
|
||||||
typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight,
|
|
||||||
int32_t rightType, void *output, int32_t order);
|
|
||||||
|
|
||||||
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
|
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
|
||||||
char *(*cb)(void *, const char*, int32_t));
|
char *(*cb)(void *, const char*, int32_t));
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -44,6 +44,7 @@ extern int32_t tsDnodeId;
|
||||||
// common
|
// common
|
||||||
extern int tsRpcTimer;
|
extern int tsRpcTimer;
|
||||||
extern int tsRpcMaxTime;
|
extern int tsRpcMaxTime;
|
||||||
|
extern int tsRpcForceTcp; // all commands go to tcp protocol if this is enabled
|
||||||
extern int32_t tsMaxConnections;
|
extern int32_t tsMaxConnections;
|
||||||
extern int32_t tsMaxShellConns;
|
extern int32_t tsMaxShellConns;
|
||||||
extern int32_t tsShellActivityTimer;
|
extern int32_t tsShellActivityTimer;
|
||||||
|
|
|
||||||
|
|
@ -18,7 +18,7 @@
|
||||||
|
|
||||||
#include "os.h"
|
#include "os.h"
|
||||||
#include "taosmsg.h"
|
#include "taosmsg.h"
|
||||||
#include "tstoken.h"
|
#include "ttoken.h"
|
||||||
#include "tvariant.h"
|
#include "tvariant.h"
|
||||||
|
|
||||||
typedef struct SDataStatis {
|
typedef struct SDataStatis {
|
||||||
|
|
|
||||||
|
|
@ -16,8 +16,8 @@
|
||||||
#ifndef TDENGINE_TVARIANT_H
|
#ifndef TDENGINE_TVARIANT_H
|
||||||
#define TDENGINE_TVARIANT_H
|
#define TDENGINE_TVARIANT_H
|
||||||
|
|
||||||
#include "tstoken.h"
|
|
||||||
#include "tarray.h"
|
#include "tarray.h"
|
||||||
|
#include "ttoken.h"
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
extern "C" {
|
extern "C" {
|
||||||
|
|
|
||||||
|
|
@ -48,6 +48,7 @@ int32_t tsDnodeId = 0;
|
||||||
// common
|
// common
|
||||||
int32_t tsRpcTimer = 1000;
|
int32_t tsRpcTimer = 1000;
|
||||||
int32_t tsRpcMaxTime = 600; // seconds;
|
int32_t tsRpcMaxTime = 600; // seconds;
|
||||||
|
int32_t tsRpcForceTcp = 0; //disable this, means query, show command use udp protocol as default
|
||||||
int32_t tsMaxShellConns = 50000;
|
int32_t tsMaxShellConns = 50000;
|
||||||
int32_t tsMaxConnections = 5000;
|
int32_t tsMaxConnections = 5000;
|
||||||
int32_t tsShellActivityTimer = 3; // second
|
int32_t tsShellActivityTimer = 3; // second
|
||||||
|
|
@ -139,7 +140,7 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
|
||||||
int8_t tsEnableBalance = 1;
|
int8_t tsEnableBalance = 1;
|
||||||
int8_t tsAlternativeRole = 0;
|
int8_t tsAlternativeRole = 0;
|
||||||
int32_t tsBalanceInterval = 300; // seconds
|
int32_t tsBalanceInterval = 300; // seconds
|
||||||
int32_t tsOfflineThreshold = 86400 * 100; // seconds 100 days
|
int32_t tsOfflineThreshold = 86400 * 10; // seconds of 10 days
|
||||||
int32_t tsMnodeEqualVnodeNum = 4;
|
int32_t tsMnodeEqualVnodeNum = 4;
|
||||||
int8_t tsEnableFlowCtrl = 1;
|
int8_t tsEnableFlowCtrl = 1;
|
||||||
int8_t tsEnableSlaveQuery = 1;
|
int8_t tsEnableSlaveQuery = 1;
|
||||||
|
|
@ -625,6 +626,16 @@ static void doInitGlobalConfig(void) {
|
||||||
cfg.unitType = TAOS_CFG_UTYPE_MS;
|
cfg.unitType = TAOS_CFG_UTYPE_MS;
|
||||||
taosInitConfigOption(cfg);
|
taosInitConfigOption(cfg);
|
||||||
|
|
||||||
|
cfg.option = "rpcForceTcp";
|
||||||
|
cfg.ptr = &tsRpcForceTcp;
|
||||||
|
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||||
|
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
|
||||||
|
cfg.minValue = 0;
|
||||||
|
cfg.maxValue = 1;
|
||||||
|
cfg.ptrLength = 0;
|
||||||
|
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||||
|
taosInitConfigOption(cfg);
|
||||||
|
|
||||||
cfg.option = "rpcMaxTime";
|
cfg.option = "rpcMaxTime";
|
||||||
cfg.ptr = &tsRpcMaxTime;
|
cfg.ptr = &tsRpcMaxTime;
|
||||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||||
|
|
@ -921,7 +932,7 @@ static void doInitGlobalConfig(void) {
|
||||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
|
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
|
||||||
cfg.minValue = -1;
|
cfg.minValue = -1;
|
||||||
cfg.maxValue = 10000000;
|
cfg.maxValue = 100000000.0f;
|
||||||
cfg.ptrLength = 0;
|
cfg.ptrLength = 0;
|
||||||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||||
taosInitConfigOption(cfg);
|
taosInitConfigOption(cfg);
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,7 @@
|
||||||
#include "tutil.h"
|
#include "tutil.h"
|
||||||
|
|
||||||
#include "tname.h"
|
#include "tname.h"
|
||||||
#include "tstoken.h"
|
#include "ttoken.h"
|
||||||
#include "tvariant.h"
|
#include "tvariant.h"
|
||||||
|
|
||||||
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
|
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
|
||||||
|
|
|
||||||
|
|
@ -14,14 +14,14 @@
|
||||||
*/
|
*/
|
||||||
#include "os.h"
|
#include "os.h"
|
||||||
|
|
||||||
#include "tvariant.h"
|
|
||||||
#include "hash.h"
|
#include "hash.h"
|
||||||
#include "taos.h"
|
#include "taos.h"
|
||||||
#include "taosdef.h"
|
#include "taosdef.h"
|
||||||
#include "tstoken.h"
|
#include "ttoken.h"
|
||||||
#include "ttokendef.h"
|
#include "ttokendef.h"
|
||||||
#include "tutil.h"
|
|
||||||
#include "ttype.h"
|
#include "ttype.h"
|
||||||
|
#include "tutil.h"
|
||||||
|
#include "tvariant.h"
|
||||||
|
|
||||||
void tVariantCreate(tVariant *pVar, SStrToken *token) {
|
void tVariantCreate(tVariant *pVar, SStrToken *token) {
|
||||||
int32_t ret = 0;
|
int32_t ret = 0;
|
||||||
|
|
@ -49,7 +49,7 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {
|
||||||
ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, true);
|
ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, true);
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
SStrToken t = {0};
|
SStrToken t = {0};
|
||||||
tSQLGetToken(token->z, &t.type);
|
tGetToken(token->z, &t.type);
|
||||||
if (t.type == TK_MINUS) { // it is a signed number which is greater than INT64_MAX or less than INT64_MIN
|
if (t.type == TK_MINUS) { // it is a signed number which is greater than INT64_MAX or less than INT64_MIN
|
||||||
pVar->nType = -1; // -1 means error type
|
pVar->nType = -1; // -1 means error type
|
||||||
return;
|
return;
|
||||||
|
|
@ -460,7 +460,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
|
||||||
*result = (int64_t) pVariant->dKey;
|
*result = (int64_t) pVariant->dKey;
|
||||||
} else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
|
} else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
|
||||||
SStrToken token = {.z = pVariant->pz, .n = pVariant->nLen};
|
SStrToken token = {.z = pVariant->pz, .n = pVariant->nLen};
|
||||||
/*int32_t n = */tSQLGetToken(pVariant->pz, &token.type);
|
/*int32_t n = */tGetToken(pVariant->pz, &token.type);
|
||||||
|
|
||||||
if (token.type == TK_NULL) {
|
if (token.type == TK_NULL) {
|
||||||
if (releaseVariantPtr) {
|
if (releaseVariantPtr) {
|
||||||
|
|
@ -495,10 +495,10 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
|
||||||
wchar_t *endPtr = NULL;
|
wchar_t *endPtr = NULL;
|
||||||
|
|
||||||
SStrToken token = {0};
|
SStrToken token = {0};
|
||||||
token.n = tSQLGetToken(pVariant->pz, &token.type);
|
token.n = tGetToken(pVariant->pz, &token.type);
|
||||||
|
|
||||||
if (token.type == TK_MINUS || token.type == TK_PLUS) {
|
if (token.type == TK_MINUS || token.type == TK_PLUS) {
|
||||||
token.n = tSQLGetToken(pVariant->pz + token.n, &token.type);
|
token.n = tGetToken(pVariant->pz + token.n, &token.type);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (token.type == TK_FLOAT) {
|
if (token.type == TK_FLOAT) {
|
||||||
|
|
|
||||||
|
|
@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
|
||||||
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
|
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
|
||||||
POST_BUILD
|
POST_BUILD
|
||||||
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
|
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
|
||||||
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.28-dist.jar ${LIBRARY_OUTPUT_PATH}
|
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.29.jar ${LIBRARY_OUTPUT_PATH}
|
||||||
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
|
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
|
||||||
COMMENT "build jdbc driver")
|
COMMENT "build jdbc driver")
|
||||||
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
|
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
|
||||||
|
|
|
||||||
|
|
@ -5,7 +5,7 @@
|
||||||
|
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>2.0.28</version>
|
<version>2.0.29</version>
|
||||||
<packaging>jar</packaging>
|
<packaging>jar</packaging>
|
||||||
|
|
||||||
<name>JDBCDriver</name>
|
<name>JDBCDriver</name>
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,7 @@
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>2.0.28</version>
|
<version>2.0.29</version>
|
||||||
<packaging>jar</packaging>
|
<packaging>jar</packaging>
|
||||||
<name>JDBCDriver</name>
|
<name>JDBCDriver</name>
|
||||||
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
|
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
|
||||||
|
|
|
||||||
|
|
@ -84,9 +84,11 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@Deprecated
|
||||||
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
|
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
|
||||||
if (isClosed())
|
if (isClosed()) {
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
|
||||||
|
}
|
||||||
|
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
|
||||||
}
|
}
|
||||||
|
|
@ -171,6 +173,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@Deprecated
|
||||||
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
|
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
|
||||||
return getUnicodeStream(findColumn(columnLabel));
|
return getUnicodeStream(findColumn(columnLabel));
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -49,7 +49,7 @@ public class TSDBConnection extends AbstractConnection {
|
||||||
this.databaseMetaData.setConnection(this);
|
this.databaseMetaData.setConnection(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
public TSDBJNIConnector getConnection() {
|
public TSDBJNIConnector getConnector() {
|
||||||
return this.connector;
|
return this.connector;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -58,7 +58,7 @@ public class TSDBConnection extends AbstractConnection {
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
|
||||||
}
|
}
|
||||||
|
|
||||||
return new TSDBStatement(this, this.connector);
|
return new TSDBStatement(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException {
|
public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException {
|
||||||
|
|
@ -74,14 +74,18 @@ public class TSDBConnection extends AbstractConnection {
|
||||||
}
|
}
|
||||||
|
|
||||||
public PreparedStatement prepareStatement(String sql) throws SQLException {
|
public PreparedStatement prepareStatement(String sql) throws SQLException {
|
||||||
if (isClosed())
|
if (isClosed()) {
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
|
||||||
return new TSDBPreparedStatement(this, this.connector, sql);
|
}
|
||||||
|
|
||||||
|
return new TSDBPreparedStatement(this, sql);
|
||||||
}
|
}
|
||||||
|
|
||||||
public void close() throws SQLException {
|
public void close() throws SQLException {
|
||||||
if (isClosed)
|
if (isClosed) {
|
||||||
return;
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
this.connector.closeConnection();
|
this.connector.closeConnection();
|
||||||
this.isClosed = true;
|
this.isClosed = true;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -104,7 +104,7 @@ public class TSDBDriver extends AbstractDriver {
|
||||||
|
|
||||||
static {
|
static {
|
||||||
try {
|
try {
|
||||||
java.sql.DriverManager.registerDriver(new TSDBDriver());
|
DriverManager.registerDriver(new TSDBDriver());
|
||||||
} catch (SQLException e) {
|
} catch (SQLException e) {
|
||||||
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_CANNOT_REGISTER_JNI_DRIVER, e);
|
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_CANNOT_REGISTER_JNI_DRIVER, e);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -18,6 +18,7 @@ package com.taosdata.jdbc;
|
||||||
|
|
||||||
import com.taosdata.jdbc.utils.TaosInfo;
|
import com.taosdata.jdbc.utils.TaosInfo;
|
||||||
|
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
import java.sql.SQLException;
|
import java.sql.SQLException;
|
||||||
import java.sql.SQLWarning;
|
import java.sql.SQLWarning;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
@ -29,10 +30,13 @@ public class TSDBJNIConnector {
|
||||||
private static volatile Boolean isInitialized = false;
|
private static volatile Boolean isInitialized = false;
|
||||||
|
|
||||||
private TaosInfo taosInfo = TaosInfo.getInstance();
|
private TaosInfo taosInfo = TaosInfo.getInstance();
|
||||||
|
|
||||||
// Connection pointer used in C
|
// Connection pointer used in C
|
||||||
private long taos = TSDBConstants.JNI_NULL_POINTER;
|
private long taos = TSDBConstants.JNI_NULL_POINTER;
|
||||||
|
|
||||||
// result set status in current connection
|
// result set status in current connection
|
||||||
private boolean isResultsetClosed = true;
|
private boolean isResultsetClosed;
|
||||||
|
|
||||||
private int affectedRows = -1;
|
private int affectedRows = -1;
|
||||||
|
|
||||||
static {
|
static {
|
||||||
|
|
@ -75,7 +79,6 @@ public class TSDBJNIConnector {
|
||||||
|
|
||||||
public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException {
|
public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException {
|
||||||
if (this.taos != TSDBConstants.JNI_NULL_POINTER) {
|
if (this.taos != TSDBConstants.JNI_NULL_POINTER) {
|
||||||
// this.closeConnectionImp(this.taos);
|
|
||||||
closeConnection();
|
closeConnection();
|
||||||
this.taos = TSDBConstants.JNI_NULL_POINTER;
|
this.taos = TSDBConstants.JNI_NULL_POINTER;
|
||||||
}
|
}
|
||||||
|
|
@ -97,12 +100,6 @@ public class TSDBJNIConnector {
|
||||||
* @throws SQLException
|
* @throws SQLException
|
||||||
*/
|
*/
|
||||||
public long executeQuery(String sql) throws SQLException {
|
public long executeQuery(String sql) throws SQLException {
|
||||||
// close previous result set if the user forgets to invoke the
|
|
||||||
// free method to close previous result set.
|
|
||||||
// if (!this.isResultsetClosed) {
|
|
||||||
// freeResultSet(taosResultSetPointer);
|
|
||||||
// }
|
|
||||||
|
|
||||||
Long pSql = 0l;
|
Long pSql = 0l;
|
||||||
try {
|
try {
|
||||||
pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos);
|
pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos);
|
||||||
|
|
@ -135,6 +132,7 @@ public class TSDBJNIConnector {
|
||||||
|
|
||||||
// Try retrieving result set for the executed SQL using the current connection pointer.
|
// Try retrieving result set for the executed SQL using the current connection pointer.
|
||||||
pSql = this.getResultSetImp(this.taos, pSql);
|
pSql = this.getResultSetImp(this.taos, pSql);
|
||||||
|
// if pSql == 0L that means resultset is closed
|
||||||
isResultsetClosed = (pSql == TSDBConstants.JNI_NULL_POINTER);
|
isResultsetClosed = (pSql == TSDBConstants.JNI_NULL_POINTER);
|
||||||
|
|
||||||
return pSql;
|
return pSql;
|
||||||
|
|
@ -169,37 +167,14 @@ public class TSDBJNIConnector {
|
||||||
private native long isUpdateQueryImp(long connection, long pSql);
|
private native long isUpdateQueryImp(long connection, long pSql);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Free resultset operation from C to release resultset pointer by JNI
|
* Free result set operation from C to release result set pointer by JNI
|
||||||
*/
|
*/
|
||||||
public int freeResultSet(long pSql) {
|
public int freeResultSet(long pSql) {
|
||||||
int res = TSDBConstants.JNI_SUCCESS;
|
int res = this.freeResultSetImp(this.taos, pSql);
|
||||||
// if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
|
|
||||||
// throw new RuntimeException("Invalid result set pointer");
|
|
||||||
// }
|
|
||||||
|
|
||||||
// if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
|
|
||||||
res = this.freeResultSetImp(this.taos, pSql);
|
|
||||||
// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER;
|
|
||||||
// }
|
|
||||||
|
|
||||||
isResultsetClosed = true;
|
isResultsetClosed = true;
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Close the open result set which is associated to the current connection. If the result set is already
|
|
||||||
* closed, return 0 for success.
|
|
||||||
*/
|
|
||||||
// public int freeResultSet() {
|
|
||||||
// int resCode = TSDBConstants.JNI_SUCCESS;
|
|
||||||
// if (!isResultsetClosed) {
|
|
||||||
// resCode = this.freeResultSetImp(this.taos, this.taosResultSetPointer);
|
|
||||||
// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER;
|
|
||||||
// isResultsetClosed = true;
|
|
||||||
// }
|
|
||||||
// return resCode;
|
|
||||||
// }
|
|
||||||
|
|
||||||
private native int freeResultSetImp(long connection, long result);
|
private native int freeResultSetImp(long connection, long result);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
@ -246,6 +221,7 @@ public class TSDBJNIConnector {
|
||||||
*/
|
*/
|
||||||
public void closeConnection() throws SQLException {
|
public void closeConnection() throws SQLException {
|
||||||
int code = this.closeConnectionImp(this.taos);
|
int code = this.closeConnectionImp(this.taos);
|
||||||
|
|
||||||
if (code < 0) {
|
if (code < 0) {
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
|
||||||
} else if (code == 0) {
|
} else if (code == 0) {
|
||||||
|
|
@ -253,6 +229,7 @@ public class TSDBJNIConnector {
|
||||||
} else {
|
} else {
|
||||||
throw new SQLException("Undefined error code returned by TDengine when closing a connection");
|
throw new SQLException("Undefined error code returned by TDengine when closing a connection");
|
||||||
}
|
}
|
||||||
|
|
||||||
// invoke closeConnectionImpl only here
|
// invoke closeConnectionImpl only here
|
||||||
taosInfo.connect_close_increment();
|
taosInfo.connect_close_increment();
|
||||||
}
|
}
|
||||||
|
|
@ -289,7 +266,7 @@ public class TSDBJNIConnector {
|
||||||
private native void unsubscribeImp(long subscription, boolean isKeep);
|
private native void unsubscribeImp(long subscription, boolean isKeep);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Validate if a <I>create table</I> sql statement is correct without actually creating that table
|
* Validate if a <I>create table</I> SQL statement is correct without actually creating that table
|
||||||
*/
|
*/
|
||||||
public boolean validateCreateTableSql(String sql) {
|
public boolean validateCreateTableSql(String sql) {
|
||||||
int res = validateCreateTableSqlImp(taos, sql.getBytes());
|
int res = validateCreateTableSqlImp(taos, sql.getBytes());
|
||||||
|
|
@ -297,4 +274,66 @@ public class TSDBJNIConnector {
|
||||||
}
|
}
|
||||||
|
|
||||||
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
|
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
|
||||||
|
|
||||||
|
public long prepareStmt(String sql) throws SQLException {
|
||||||
|
Long stmt = 0L;
|
||||||
|
try {
|
||||||
|
stmt = prepareStmtImp(sql.getBytes(), this.taos);
|
||||||
|
} catch (Exception e) {
|
||||||
|
e.printStackTrace();
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (stmt == TSDBConstants.JNI_SQL_NULL) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
|
||||||
|
}
|
||||||
|
|
||||||
|
return stmt;
|
||||||
|
}
|
||||||
|
|
||||||
|
private native long prepareStmtImp(byte[] sql, long con);
|
||||||
|
|
||||||
|
public void setBindTableName(long stmt, String tableName) throws SQLException {
|
||||||
|
int code = setBindTableNameImp(stmt, tableName, this.taos);
|
||||||
|
if (code != TSDBConstants.JNI_SUCCESS) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to set table name");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private native int setBindTableNameImp(long stmt, String name, long conn);
|
||||||
|
|
||||||
|
public void bindColumnDataArray(long stmt, ByteBuffer colDataList, ByteBuffer lengthList, ByteBuffer isNullList, int type, int bytes, int numOfRows,int columnIndex) throws SQLException {
|
||||||
|
int code = bindColDataImp(stmt, colDataList.array(), lengthList.array(), isNullList.array(), type, bytes, numOfRows, columnIndex, this.taos);
|
||||||
|
if (code != TSDBConstants.JNI_SUCCESS) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to bind column data");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private native int bindColDataImp(long stmt, byte[] colDataList, byte[] lengthList, byte[] isNullList, int type, int bytes, int numOfRows, int columnIndex, long conn);
|
||||||
|
|
||||||
|
public void executeBatch(long stmt) throws SQLException {
|
||||||
|
int code = executeBatchImp(stmt, this.taos);
|
||||||
|
if (code != TSDBConstants.JNI_SUCCESS) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to execute batch bind");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private native int executeBatchImp(long stmt, long con);
|
||||||
|
|
||||||
|
public void closeBatch(long stmt) throws SQLException {
|
||||||
|
int code = closeStmt(stmt, this.taos);
|
||||||
|
if (code != TSDBConstants.JNI_SUCCESS) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to close batch bind");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private native int closeStmt(long stmt, long con);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -14,36 +14,44 @@
|
||||||
*****************************************************************************/
|
*****************************************************************************/
|
||||||
package com.taosdata.jdbc;
|
package com.taosdata.jdbc;
|
||||||
|
|
||||||
|
import com.taosdata.jdbc.utils.Utils;
|
||||||
|
|
||||||
import java.io.InputStream;
|
import java.io.InputStream;
|
||||||
import java.io.Reader;
|
import java.io.Reader;
|
||||||
|
import java.io.UnsupportedEncodingException;
|
||||||
import java.math.BigDecimal;
|
import java.math.BigDecimal;
|
||||||
import java.net.URL;
|
import java.net.URL;
|
||||||
import java.nio.charset.Charset;
|
import java.nio.ByteBuffer;
|
||||||
|
import java.nio.ByteOrder;
|
||||||
import java.sql.*;
|
import java.sql.*;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.Calendar;
|
import java.util.Calendar;
|
||||||
|
import java.util.Collections;
|
||||||
import java.util.regex.Matcher;
|
import java.util.regex.Matcher;
|
||||||
import java.util.regex.Pattern;
|
import java.util.regex.Pattern;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* TDengine only supports a subset of the standard SQL, thus this implemetation of the
|
* TDengine only supports a subset of the standard SQL, thus this implementation of the
|
||||||
* standard JDBC API contains more or less some adjustments customized for certain
|
* standard JDBC API contains more or less some adjustments customized for certain
|
||||||
* compatibility needs.
|
* compatibility needs.
|
||||||
*/
|
*/
|
||||||
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
|
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
|
||||||
|
|
||||||
private String rawSql;
|
private String rawSql;
|
||||||
private Object[] parameters;
|
private Object[] parameters;
|
||||||
private boolean isPrepared;
|
private boolean isPrepared;
|
||||||
|
|
||||||
|
private ArrayList<ColumnInfo> colData;
|
||||||
|
private String tableName;
|
||||||
|
private long nativeStmtHandle = 0;
|
||||||
|
|
||||||
private volatile TSDBParameterMetaData parameterMetaData;
|
private volatile TSDBParameterMetaData parameterMetaData;
|
||||||
|
|
||||||
TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) {
|
TSDBPreparedStatement(TSDBConnection connection, String sql) {
|
||||||
super(connection, connecter);
|
super(connection);
|
||||||
init(sql);
|
init(sql);
|
||||||
|
|
||||||
if (sql.contains("?")) {
|
|
||||||
int parameterCnt = 0;
|
int parameterCnt = 0;
|
||||||
|
if (sql.contains("?")) {
|
||||||
for (int i = 0; i < sql.length(); i++) {
|
for (int i = 0; i < sql.length(); i++) {
|
||||||
if ('?' == sql.charAt(i)) {
|
if ('?' == sql.charAt(i)) {
|
||||||
parameterCnt++;
|
parameterCnt++;
|
||||||
|
|
@ -52,6 +60,12 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
||||||
parameters = new Object[parameterCnt];
|
parameters = new Object[parameterCnt];
|
||||||
this.isPrepared = true;
|
this.isPrepared = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (parameterCnt > 1) {
|
||||||
|
// the table name is also a parameter, so ignore it.
|
||||||
|
this.colData = new ArrayList<ColumnInfo>(parameterCnt - 1);
|
||||||
|
this.colData.addAll(Collections.nCopies(parameterCnt - 1, null));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private void init(String sql) {
|
private void init(String sql) {
|
||||||
|
|
@ -126,28 +140,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
||||||
* @return a string of the native sql statement for TSDB
|
* @return a string of the native sql statement for TSDB
|
||||||
*/
|
*/
|
||||||
private String getNativeSql(String rawSql) throws SQLException {
|
private String getNativeSql(String rawSql) throws SQLException {
|
||||||
String sql = rawSql;
|
return Utils.getNativeSql(rawSql, this.parameters);
|
||||||
for (int i = 0; i < parameters.length; ++i) {
|
|
||||||
Object para = parameters[i];
|
|
||||||
if (para != null) {
|
|
||||||
String paraStr;
|
|
||||||
if (para instanceof byte[]) {
|
|
||||||
paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
|
|
||||||
} else {
|
|
||||||
paraStr = para.toString();
|
|
||||||
}
|
|
||||||
// if para is timestamp or String or byte[] need to translate ' character
|
|
||||||
if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
|
|
||||||
paraStr = paraStr.replaceAll("'", "\\\\\\\\'");
|
|
||||||
paraStr = "'" + paraStr + "'";
|
|
||||||
}
|
|
||||||
sql = sql.replaceFirst("[?]", paraStr);
|
|
||||||
} else {
|
|
||||||
sql = sql.replaceFirst("[?]", "NULL");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
clearParameters();
|
|
||||||
return sql;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
|
@ -275,15 +268,19 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
||||||
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
|
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
|
||||||
if (isClosed())
|
if (isClosed())
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||||
setObject(parameterIndex,x);
|
setObject(parameterIndex, x);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void setObject(int parameterIndex, Object x) throws SQLException {
|
public void setObject(int parameterIndex, Object x) throws SQLException {
|
||||||
if (isClosed())
|
if (isClosed()) {
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||||
if (parameterIndex < 1 && parameterIndex >= parameters.length)
|
}
|
||||||
|
|
||||||
|
if (parameterIndex < 1 && parameterIndex >= parameters.length) {
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
|
||||||
|
}
|
||||||
|
|
||||||
parameters[parameterIndex - 1] = x;
|
parameters[parameterIndex - 1] = x;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -320,8 +317,9 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void setRef(int parameterIndex, Ref x) throws SQLException {
|
public void setRef(int parameterIndex, Ref x) throws SQLException {
|
||||||
if (isClosed())
|
if (isClosed()) {
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||||
|
}
|
||||||
|
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
|
||||||
}
|
}
|
||||||
|
|
@ -535,4 +533,276 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////////////
|
||||||
|
// NOTE: the following APIs are not JDBC compatible
|
||||||
|
// set the bind table name
|
||||||
|
private static class ColumnInfo {
|
||||||
|
@SuppressWarnings("rawtypes")
|
||||||
|
private ArrayList data;
|
||||||
|
private int type;
|
||||||
|
private int bytes;
|
||||||
|
private boolean typeIsSet;
|
||||||
|
|
||||||
|
public ColumnInfo() {
|
||||||
|
this.typeIsSet = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setType(int type) throws SQLException {
|
||||||
|
if (this.isTypeSet()) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type has been set");
|
||||||
|
}
|
||||||
|
|
||||||
|
this.typeIsSet = true;
|
||||||
|
this.type = type;
|
||||||
|
}
|
||||||
|
|
||||||
|
public boolean isTypeSet() {
|
||||||
|
return this.typeIsSet;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
public void setTableName(String name) {
|
||||||
|
this.tableName = name;
|
||||||
|
}
|
||||||
|
|
||||||
|
public <T> void setValueImpl(int columnIndex, ArrayList<T> list, int type, int bytes) throws SQLException {
|
||||||
|
ColumnInfo col = (ColumnInfo) this.colData.get(columnIndex);
|
||||||
|
if (col == null) {
|
||||||
|
ColumnInfo p = new ColumnInfo();
|
||||||
|
p.setType(type);
|
||||||
|
p.bytes = bytes;
|
||||||
|
p.data = (ArrayList<?>) list.clone();
|
||||||
|
this.colData.set(columnIndex, p);
|
||||||
|
} else {
|
||||||
|
if (col.type != type) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type mismatch");
|
||||||
|
}
|
||||||
|
col.data.addAll(list);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException {
|
||||||
|
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_INT, Integer.BYTES);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException {
|
||||||
|
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_FLOAT, Float.BYTES);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException {
|
||||||
|
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP, Long.BYTES);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException {
|
||||||
|
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BIGINT, Long.BYTES);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException {
|
||||||
|
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_DOUBLE, Double.BYTES);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException {
|
||||||
|
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BOOL, Byte.BYTES);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException {
|
||||||
|
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TINYINT, Byte.BYTES);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException {
|
||||||
|
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_SMALLINT, Short.BYTES);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException {
|
||||||
|
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BINARY, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
// note: expand the required space for each NChar character
|
||||||
|
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException {
|
||||||
|
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_NCHAR, size * Integer.BYTES);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void columnDataAddBatch() throws SQLException {
|
||||||
|
// pass the data block to native code
|
||||||
|
if (rawSql == null) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "sql statement not set yet");
|
||||||
|
}
|
||||||
|
|
||||||
|
// table name is not set yet, abort
|
||||||
|
if (this.tableName == null) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "table name not set yet");
|
||||||
|
}
|
||||||
|
|
||||||
|
int numOfCols = this.colData.size();
|
||||||
|
if (numOfCols == 0) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
|
||||||
|
}
|
||||||
|
|
||||||
|
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
|
||||||
|
this.nativeStmtHandle = connector.prepareStmt(rawSql);
|
||||||
|
connector.setBindTableName(this.nativeStmtHandle, this.tableName);
|
||||||
|
|
||||||
|
ColumnInfo colInfo = (ColumnInfo) this.colData.get(0);
|
||||||
|
if (colInfo == null) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
|
||||||
|
}
|
||||||
|
|
||||||
|
int rows = colInfo.data.size();
|
||||||
|
for (int i = 0; i < numOfCols; ++i) {
|
||||||
|
ColumnInfo col1 = this.colData.get(i);
|
||||||
|
if (col1 == null || !col1.isTypeSet()) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (rows != col1.data.size()) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "the rows in column data not identical");
|
||||||
|
}
|
||||||
|
|
||||||
|
ByteBuffer colDataList = ByteBuffer.allocate(rows * col1.bytes);
|
||||||
|
colDataList.order(ByteOrder.LITTLE_ENDIAN);
|
||||||
|
|
||||||
|
ByteBuffer lengthList = ByteBuffer.allocate(rows * Integer.BYTES);
|
||||||
|
lengthList.order(ByteOrder.LITTLE_ENDIAN);
|
||||||
|
|
||||||
|
ByteBuffer isNullList = ByteBuffer.allocate(rows * Byte.BYTES);
|
||||||
|
isNullList.order(ByteOrder.LITTLE_ENDIAN);
|
||||||
|
|
||||||
|
switch (col1.type) {
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_INT: {
|
||||||
|
for (int j = 0; j < rows; ++j) {
|
||||||
|
Integer val = (Integer) col1.data.get(j);
|
||||||
|
colDataList.putInt(val == null? Integer.MIN_VALUE:val);
|
||||||
|
isNullList.put((byte) (val == null? 1:0));
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_TINYINT: {
|
||||||
|
for (int j = 0; j < rows; ++j) {
|
||||||
|
Byte val = (Byte) col1.data.get(j);
|
||||||
|
colDataList.put(val == null? 0:val);
|
||||||
|
isNullList.put((byte) (val == null? 1:0));
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_BOOL: {
|
||||||
|
for (int j = 0; j < rows; ++j) {
|
||||||
|
Boolean val = (Boolean) col1.data.get(j);
|
||||||
|
if (val == null) {
|
||||||
|
colDataList.put((byte) 0);
|
||||||
|
} else {
|
||||||
|
colDataList.put((byte) (val? 1:0));
|
||||||
|
}
|
||||||
|
|
||||||
|
isNullList.put((byte) (val == null? 1:0));
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: {
|
||||||
|
for (int j = 0; j < rows; ++j) {
|
||||||
|
Short val = (Short) col1.data.get(j);
|
||||||
|
colDataList.putShort(val == null? 0:val);
|
||||||
|
isNullList.put((byte) (val == null? 1:0));
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: {
|
||||||
|
for (int j = 0; j < rows; ++j) {
|
||||||
|
Long val = (Long) col1.data.get(j);
|
||||||
|
colDataList.putLong(val == null? 0:val);
|
||||||
|
isNullList.put((byte) (val == null? 1:0));
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: {
|
||||||
|
for (int j = 0; j < rows; ++j) {
|
||||||
|
Float val = (Float) col1.data.get(j);
|
||||||
|
colDataList.putFloat(val == null? 0:val);
|
||||||
|
isNullList.put((byte) (val == null? 1:0));
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
|
||||||
|
for (int j = 0; j < rows; ++j) {
|
||||||
|
Double val = (Double) col1.data.get(j);
|
||||||
|
colDataList.putDouble(val == null? 0:val);
|
||||||
|
isNullList.put((byte) (val == null? 1:0));
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
|
||||||
|
String charset = TaosGlobalConfig.getCharset();
|
||||||
|
for (int j = 0; j < rows; ++j) {
|
||||||
|
String val = (String) col1.data.get(j);
|
||||||
|
|
||||||
|
colDataList.position(j * col1.bytes); // seek to the correct position
|
||||||
|
if (val != null) {
|
||||||
|
byte[] b = null;
|
||||||
|
try {
|
||||||
|
if (col1.type == TSDBConstants.TSDB_DATA_TYPE_BINARY) {
|
||||||
|
b = val.getBytes();
|
||||||
|
} else {
|
||||||
|
b = val.getBytes(charset);
|
||||||
|
}
|
||||||
|
} catch (UnsupportedEncodingException e) {
|
||||||
|
e.printStackTrace();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (val.length() > col1.bytes) {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "string data too long");
|
||||||
|
}
|
||||||
|
|
||||||
|
colDataList.put(b);
|
||||||
|
lengthList.putInt(b.length);
|
||||||
|
isNullList.put((byte) 0);
|
||||||
|
} else {
|
||||||
|
lengthList.putInt(0);
|
||||||
|
isNullList.put((byte) 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_UTINYINT:
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_USMALLINT:
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_UINT:
|
||||||
|
case TSDBConstants.TSDB_DATA_TYPE_UBIGINT: {
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "not support data types");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
connector.bindColumnDataArray(this.nativeStmtHandle, colDataList, lengthList, isNullList, col1.type, col1.bytes, rows, i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void columnDataExecuteBatch() throws SQLException {
|
||||||
|
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
|
||||||
|
connector.executeBatch(this.nativeStmtHandle);
|
||||||
|
this.columnDataClearBatch();
|
||||||
|
}
|
||||||
|
|
||||||
|
public void columnDataClearBatch() {
|
||||||
|
int size = this.colData.size();
|
||||||
|
this.colData.clear();
|
||||||
|
|
||||||
|
this.colData.addAll(Collections.nCopies(size, null));
|
||||||
|
this.tableName = null; // clear the table name
|
||||||
|
}
|
||||||
|
|
||||||
|
public void columnDataCloseBatch() throws SQLException {
|
||||||
|
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
|
||||||
|
connector.closeBatch(this.nativeStmtHandle);
|
||||||
|
|
||||||
|
this.nativeStmtHandle = 0L;
|
||||||
|
this.tableName = null;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -109,6 +109,8 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
|
||||||
public void close() throws SQLException {
|
public void close() throws SQLException {
|
||||||
if (isClosed)
|
if (isClosed)
|
||||||
return;
|
return;
|
||||||
|
if (this.statement == null)
|
||||||
|
return;
|
||||||
if (this.jniConnector != null) {
|
if (this.jniConnector != null) {
|
||||||
int code = this.jniConnector.freeResultSet(this.resultSetPointer);
|
int code = this.jniConnector.freeResultSet(this.resultSetPointer);
|
||||||
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
|
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
|
||||||
|
|
@ -461,12 +463,13 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
|
||||||
}
|
}
|
||||||
|
|
||||||
public boolean isClosed() throws SQLException {
|
public boolean isClosed() throws SQLException {
|
||||||
if (isClosed)
|
|
||||||
return true;
|
|
||||||
if (jniConnector != null) {
|
|
||||||
isClosed = jniConnector.isResultsetClosed();
|
|
||||||
}
|
|
||||||
return isClosed;
|
return isClosed;
|
||||||
|
// if (isClosed)
|
||||||
|
// return true;
|
||||||
|
// if (jniConnector != null) {
|
||||||
|
// isClosed = jniConnector.isResultsetClosed();
|
||||||
|
// }
|
||||||
|
// return isClosed;
|
||||||
}
|
}
|
||||||
|
|
||||||
public String getNString(int columnIndex) throws SQLException {
|
public String getNString(int columnIndex) throws SQLException {
|
||||||
|
|
|
||||||
|
|
@ -29,6 +29,8 @@ import java.util.ArrayList;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
||||||
|
import com.taosdata.jdbc.utils.NullType;
|
||||||
|
|
||||||
public class TSDBResultSetBlockData {
|
public class TSDBResultSetBlockData {
|
||||||
private int numOfRows = 0;
|
private int numOfRows = 0;
|
||||||
private int rowIndex = 0;
|
private int rowIndex = 0;
|
||||||
|
|
@ -164,59 +166,7 @@ public class TSDBResultSetBlockData {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private static class NullType {
|
|
||||||
private static final byte NULL_BOOL_VAL = 0x2;
|
|
||||||
private static final String NULL_STR = "null";
|
|
||||||
|
|
||||||
public String toString() {
|
|
||||||
return NullType.NULL_STR;
|
|
||||||
}
|
|
||||||
|
|
||||||
public static boolean isBooleanNull(byte val) {
|
|
||||||
return val == NullType.NULL_BOOL_VAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
private static boolean isTinyIntNull(byte val) {
|
|
||||||
return val == Byte.MIN_VALUE;
|
|
||||||
}
|
|
||||||
|
|
||||||
private static boolean isSmallIntNull(short val) {
|
|
||||||
return val == Short.MIN_VALUE;
|
|
||||||
}
|
|
||||||
|
|
||||||
private static boolean isIntNull(int val) {
|
|
||||||
return val == Integer.MIN_VALUE;
|
|
||||||
}
|
|
||||||
|
|
||||||
private static boolean isBigIntNull(long val) {
|
|
||||||
return val == Long.MIN_VALUE;
|
|
||||||
}
|
|
||||||
|
|
||||||
private static boolean isFloatNull(float val) {
|
|
||||||
return Float.isNaN(val);
|
|
||||||
}
|
|
||||||
|
|
||||||
private static boolean isDoubleNull(double val) {
|
|
||||||
return Double.isNaN(val);
|
|
||||||
}
|
|
||||||
|
|
||||||
private static boolean isBinaryNull(byte[] val, int length) {
|
|
||||||
if (length != Byte.BYTES) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
return val[0] == 0xFF;
|
|
||||||
}
|
|
||||||
|
|
||||||
private static boolean isNcharNull(byte[] val, int length) {
|
|
||||||
if (length != Integer.BYTES) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
return (val[0] & val[1] & val[2] & val[3]) == 0xFF;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* The original type may not be a string type, but will be converted to by
|
* The original type may not be a string type, but will be converted to by
|
||||||
|
|
@ -488,8 +438,8 @@ public class TSDBResultSetBlockData {
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
String ss = TaosGlobalConfig.getCharset();
|
String charset = TaosGlobalConfig.getCharset();
|
||||||
return new String(dest, ss);
|
return new String(dest, charset);
|
||||||
} catch (UnsupportedEncodingException e) {
|
} catch (UnsupportedEncodingException e) {
|
||||||
e.printStackTrace();
|
e.printStackTrace();
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -84,6 +84,7 @@ public class TSDBResultSetRowData {
|
||||||
data.set(col, value);
|
data.set(col, value);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@SuppressWarnings("deprecation")
|
||||||
public int getInt(int col, int srcType) throws SQLException {
|
public int getInt(int col, int srcType) throws SQLException {
|
||||||
Object obj = data.get(col);
|
Object obj = data.get(col);
|
||||||
|
|
||||||
|
|
@ -128,7 +129,7 @@ public class TSDBResultSetRowData {
|
||||||
long value = (long) obj;
|
long value = (long) obj;
|
||||||
if (value < 0)
|
if (value < 0)
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
|
||||||
return new Long(value).intValue();
|
return Long.valueOf(value).intValue();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -19,8 +19,6 @@ import java.sql.ResultSet;
|
||||||
import java.sql.SQLException;
|
import java.sql.SQLException;
|
||||||
|
|
||||||
public class TSDBStatement extends AbstractStatement {
|
public class TSDBStatement extends AbstractStatement {
|
||||||
|
|
||||||
private TSDBJNIConnector connector;
|
|
||||||
/**
|
/**
|
||||||
* Status of current statement
|
* Status of current statement
|
||||||
*/
|
*/
|
||||||
|
|
@ -29,29 +27,26 @@ public class TSDBStatement extends AbstractStatement {
|
||||||
private TSDBConnection connection;
|
private TSDBConnection connection;
|
||||||
private TSDBResultSet resultSet;
|
private TSDBResultSet resultSet;
|
||||||
|
|
||||||
public void setConnection(TSDBConnection connection) {
|
TSDBStatement(TSDBConnection connection) {
|
||||||
this.connection = connection;
|
this.connection = connection;
|
||||||
}
|
}
|
||||||
|
|
||||||
TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) {
|
|
||||||
this.connection = connection;
|
|
||||||
this.connector = connector;
|
|
||||||
}
|
|
||||||
|
|
||||||
public ResultSet executeQuery(String sql) throws SQLException {
|
public ResultSet executeQuery(String sql) throws SQLException {
|
||||||
// check if closed
|
// check if closed
|
||||||
if (isClosed())
|
if (isClosed()) {
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||||
|
}
|
||||||
|
|
||||||
//TODO: 如果在executeQuery方法中执行insert语句,那么先执行了SQL,再通过pSql来检查是否为一个insert语句,但这个insert SQL已经执行成功了
|
//TODO: 如果在executeQuery方法中执行insert语句,那么先执行了SQL,再通过pSql来检查是否为一个insert语句,但这个insert SQL已经执行成功了
|
||||||
|
|
||||||
// execute query
|
// execute query
|
||||||
long pSql = this.connector.executeQuery(sql);
|
long pSql = this.connection.getConnector().executeQuery(sql);
|
||||||
// if pSql is create/insert/update/delete/alter SQL
|
// if pSql is create/insert/update/delete/alter SQL
|
||||||
if (this.connector.isUpdateQuery(pSql)) {
|
if (this.connection.getConnector().isUpdateQuery(pSql)) {
|
||||||
this.connector.freeResultSet(pSql);
|
this.connection.getConnector().freeResultSet(pSql);
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY);
|
||||||
}
|
}
|
||||||
TSDBResultSet res = new TSDBResultSet(this, this.connector, pSql);
|
TSDBResultSet res = new TSDBResultSet(this, this.connection.getConnector(), pSql);
|
||||||
res.setBatchFetch(this.connection.getBatchFetch());
|
res.setBatchFetch(this.connection.getBatchFetch());
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
@ -60,14 +55,14 @@ public class TSDBStatement extends AbstractStatement {
|
||||||
if (isClosed())
|
if (isClosed())
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||||
|
|
||||||
long pSql = this.connector.executeQuery(sql);
|
long pSql = this.connection.getConnector().executeQuery(sql);
|
||||||
// if pSql is create/insert/update/delete/alter SQL
|
// if pSql is create/insert/update/delete/alter SQL
|
||||||
if (!this.connector.isUpdateQuery(pSql)) {
|
if (!this.connection.getConnector().isUpdateQuery(pSql)) {
|
||||||
this.connector.freeResultSet(pSql);
|
this.connection.getConnector().freeResultSet(pSql);
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE);
|
||||||
}
|
}
|
||||||
int affectedRows = this.connector.getAffectedRows(pSql);
|
int affectedRows = this.connection.getConnector().getAffectedRows(pSql);
|
||||||
this.connector.freeResultSet(pSql);
|
this.connection.getConnector().freeResultSet(pSql);
|
||||||
return affectedRows;
|
return affectedRows;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -81,30 +76,29 @@ public class TSDBStatement extends AbstractStatement {
|
||||||
|
|
||||||
public boolean execute(String sql) throws SQLException {
|
public boolean execute(String sql) throws SQLException {
|
||||||
// check if closed
|
// check if closed
|
||||||
if (isClosed())
|
if (isClosed()) {
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||||
|
}
|
||||||
|
|
||||||
// execute query
|
// execute query
|
||||||
long pSql = this.connector.executeQuery(sql);
|
long pSql = this.connection.getConnector().executeQuery(sql);
|
||||||
// if pSql is create/insert/update/delete/alter SQL
|
// if pSql is create/insert/update/delete/alter SQL
|
||||||
if (this.connector.isUpdateQuery(pSql)) {
|
if (this.connection.getConnector().isUpdateQuery(pSql)) {
|
||||||
this.affectedRows = this.connector.getAffectedRows(pSql);
|
this.affectedRows = this.connection.getConnector().getAffectedRows(pSql);
|
||||||
this.connector.freeResultSet(pSql);
|
this.connection.getConnector().freeResultSet(pSql);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
this.resultSet = new TSDBResultSet(this, this.connector, pSql);
|
this.resultSet = new TSDBResultSet(this, this.connection.getConnector(), pSql);
|
||||||
this.resultSet.setBatchFetch(this.connection.getBatchFetch());
|
this.resultSet.setBatchFetch(this.connection.getBatchFetch());
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
public ResultSet getResultSet() throws SQLException {
|
public ResultSet getResultSet() throws SQLException {
|
||||||
if (isClosed())
|
if (isClosed()) {
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||||
// long resultSetPointer = connector.getResultSet();
|
}
|
||||||
// TSDBResultSet resSet = null;
|
|
||||||
// if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
|
|
||||||
// resSet = new TSDBResultSet(connector, resultSetPointer);
|
|
||||||
// }
|
|
||||||
return this.resultSet;
|
return this.resultSet;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -115,13 +109,21 @@ public class TSDBStatement extends AbstractStatement {
|
||||||
}
|
}
|
||||||
|
|
||||||
public Connection getConnection() throws SQLException {
|
public Connection getConnection() throws SQLException {
|
||||||
if (isClosed())
|
if (isClosed()) {
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||||
if (this.connector == null)
|
}
|
||||||
|
|
||||||
|
if (this.connection.getConnector() == null) {
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
|
||||||
|
}
|
||||||
|
|
||||||
return this.connection;
|
return this.connection;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void setConnection(TSDBConnection connection) {
|
||||||
|
this.connection = connection;
|
||||||
|
}
|
||||||
|
|
||||||
public boolean isClosed() throws SQLException {
|
public boolean isClosed() throws SQLException {
|
||||||
return isClosed;
|
return isClosed;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -17,7 +17,7 @@ public class RestfulDriver extends AbstractDriver {
|
||||||
|
|
||||||
static {
|
static {
|
||||||
try {
|
try {
|
||||||
java.sql.DriverManager.registerDriver(new RestfulDriver());
|
DriverManager.registerDriver(new RestfulDriver());
|
||||||
} catch (SQLException e) {
|
} catch (SQLException e) {
|
||||||
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_URL_NOT_SET, e);
|
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_URL_NOT_SET, e);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -2,12 +2,12 @@ package com.taosdata.jdbc.rs;
|
||||||
|
|
||||||
import com.taosdata.jdbc.TSDBError;
|
import com.taosdata.jdbc.TSDBError;
|
||||||
import com.taosdata.jdbc.TSDBErrorNumbers;
|
import com.taosdata.jdbc.TSDBErrorNumbers;
|
||||||
|
import com.taosdata.jdbc.utils.Utils;
|
||||||
|
|
||||||
import java.io.InputStream;
|
import java.io.InputStream;
|
||||||
import java.io.Reader;
|
import java.io.Reader;
|
||||||
import java.math.BigDecimal;
|
import java.math.BigDecimal;
|
||||||
import java.net.URL;
|
import java.net.URL;
|
||||||
import java.nio.charset.Charset;
|
|
||||||
import java.sql.*;
|
import java.sql.*;
|
||||||
import java.util.Calendar;
|
import java.util.Calendar;
|
||||||
|
|
||||||
|
|
@ -21,6 +21,7 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
|
||||||
public RestfulPreparedStatement(RestfulConnection conn, String database, String sql) {
|
public RestfulPreparedStatement(RestfulConnection conn, String database, String sql) {
|
||||||
super(conn, database);
|
super(conn, database);
|
||||||
this.rawSql = sql;
|
this.rawSql = sql;
|
||||||
|
|
||||||
if (sql.contains("?")) {
|
if (sql.contains("?")) {
|
||||||
int parameterCnt = 0;
|
int parameterCnt = 0;
|
||||||
for (int i = 0; i < sql.length(); i++) {
|
for (int i = 0; i < sql.length(); i++) {
|
||||||
|
|
@ -58,29 +59,14 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
|
||||||
return executeUpdate(sql);
|
return executeUpdate(sql);
|
||||||
}
|
}
|
||||||
|
|
||||||
private String getNativeSql(String rawSql) throws SQLException {
|
/****
|
||||||
String sql = rawSql;
|
* 将rawSql转换成一条可执行的sql语句,使用属性parameters中的变脸进行替换
|
||||||
for (int i = 0; i < parameters.length; ++i) {
|
* 对于insert into ?.? (?,?,?) using ?.? (?,?,?) tags(?, ?, ?) values(?, ?, ?)
|
||||||
Object para = parameters[i];
|
* @param rawSql,可能是insert、select或其他,使用?做占位符
|
||||||
if (para != null) {
|
* @return
|
||||||
String paraStr;
|
*/
|
||||||
if (para instanceof byte[]) {
|
private String getNativeSql(String rawSql) {
|
||||||
paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
|
return Utils.getNativeSql(rawSql, this.parameters);
|
||||||
} else {
|
|
||||||
paraStr = para.toString();
|
|
||||||
}
|
|
||||||
// if para is timestamp or String or byte[] need to translate ' character
|
|
||||||
if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
|
|
||||||
paraStr = paraStr.replaceAll("'", "\\\\\\\\'");
|
|
||||||
paraStr = "'" + paraStr + "'";
|
|
||||||
}
|
|
||||||
sql = sql.replaceFirst("[?]", paraStr);
|
|
||||||
} else {
|
|
||||||
sql = sql.replaceFirst("[?]", "NULL");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
clearParameters();
|
|
||||||
return sql;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
|
@ -221,7 +207,7 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
|
||||||
if (isClosed())
|
if (isClosed())
|
||||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||||
|
|
||||||
setObject(parameterIndex,x);
|
setObject(parameterIndex, x);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
|
|
||||||
|
|
@ -136,21 +136,21 @@ public class RestfulStatement extends AbstractStatement {
|
||||||
throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc"));
|
throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc"));
|
||||||
}
|
}
|
||||||
this.resultSet = null;
|
this.resultSet = null;
|
||||||
this.affectedRows = checkJsonResultSet(jsonObject);
|
this.affectedRows = getAffectedRows(jsonObject);
|
||||||
return this.affectedRows;
|
return this.affectedRows;
|
||||||
}
|
}
|
||||||
|
|
||||||
private int checkJsonResultSet(JSONObject jsonObject) {
|
private int getAffectedRows(JSONObject jsonObject) throws SQLException {
|
||||||
// create ... SQLs should return 0 , and Restful result is this:
|
// create ... SQLs should return 0 , and Restful result is this:
|
||||||
// {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1}
|
// {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1}
|
||||||
JSONArray head = jsonObject.getJSONArray("head");
|
JSONArray head = jsonObject.getJSONArray("head");
|
||||||
|
if (head.size() != 1 || !"affected_rows".equals(head.getString(0)))
|
||||||
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
|
||||||
JSONArray data = jsonObject.getJSONArray("data");
|
JSONArray data = jsonObject.getJSONArray("data");
|
||||||
int rows = Integer.parseInt(jsonObject.getString("rows"));
|
if (data != null)
|
||||||
if (head.size() == 1 && "affected_rows".equals(head.getString(0))
|
return data.getJSONArray(0).getInteger(0);
|
||||||
&& data.size() == 1 && data.getJSONArray(0).getInteger(0) == 0 && rows == 1) {
|
|
||||||
return 0;
|
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
|
||||||
}
|
|
||||||
return rows;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,91 @@
|
||||||
|
package com.taosdata.jdbc.utils;
|
||||||
|
|
||||||
|
public class NullType {
|
||||||
|
private static final byte NULL_BOOL_VAL = 0x2;
|
||||||
|
private static final String NULL_STR = "null";
|
||||||
|
|
||||||
|
public String toString() {
|
||||||
|
return NullType.NULL_STR;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static boolean isBooleanNull(byte val) {
|
||||||
|
return val == NullType.NULL_BOOL_VAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static boolean isTinyIntNull(byte val) {
|
||||||
|
return val == Byte.MIN_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static boolean isSmallIntNull(short val) {
|
||||||
|
return val == Short.MIN_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static boolean isIntNull(int val) {
|
||||||
|
return val == Integer.MIN_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static boolean isBigIntNull(long val) {
|
||||||
|
return val == Long.MIN_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static boolean isFloatNull(float val) {
|
||||||
|
return Float.isNaN(val);
|
||||||
|
}
|
||||||
|
|
||||||
|
public static boolean isDoubleNull(double val) {
|
||||||
|
return Double.isNaN(val);
|
||||||
|
}
|
||||||
|
|
||||||
|
public static boolean isBinaryNull(byte[] val, int length) {
|
||||||
|
if (length != Byte.BYTES) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
return val[0] == 0xFF;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static boolean isNcharNull(byte[] val, int length) {
|
||||||
|
if (length != Integer.BYTES) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (val[0] & val[1] & val[2] & val[3]) == 0xFF;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static byte getBooleanNull() {
|
||||||
|
return NullType.NULL_BOOL_VAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static byte getTinyintNull() {
|
||||||
|
return Byte.MIN_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static int getIntNull() {
|
||||||
|
return Integer.MIN_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static short getSmallIntNull() {
|
||||||
|
return Short.MIN_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static long getBigIntNull() {
|
||||||
|
return Long.MIN_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static int getFloatNull() {
|
||||||
|
return 0x7FF00000;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static long getDoubleNull() {
|
||||||
|
return 0x7FFFFF0000000000L;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static byte getBinaryNull() {
|
||||||
|
return (byte) 0xFF;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static byte[] getNcharNull() {
|
||||||
|
return new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF};
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,136 @@
|
||||||
|
package com.taosdata.jdbc.utils;
|
||||||
|
|
||||||
|
import com.google.common.collect.Range;
|
||||||
|
import com.google.common.collect.RangeSet;
|
||||||
|
import com.google.common.collect.TreeRangeSet;
|
||||||
|
|
||||||
|
import java.nio.charset.Charset;
|
||||||
|
import java.sql.Timestamp;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.regex.Matcher;
|
||||||
|
import java.util.regex.Pattern;
|
||||||
|
import java.util.stream.Collectors;
|
||||||
|
import java.util.stream.IntStream;
|
||||||
|
|
||||||
|
public class Utils {
|
||||||
|
|
||||||
|
private static Pattern ptn = Pattern.compile(".*?'");
|
||||||
|
|
||||||
|
public static String escapeSingleQuota(String origin) {
|
||||||
|
Matcher m = ptn.matcher(origin);
|
||||||
|
StringBuffer sb = new StringBuffer();
|
||||||
|
int end = 0;
|
||||||
|
while (m.find()) {
|
||||||
|
end = m.end();
|
||||||
|
String seg = origin.substring(m.start(), end);
|
||||||
|
int len = seg.length();
|
||||||
|
if (len == 1) {
|
||||||
|
if ('\'' == seg.charAt(0)) {
|
||||||
|
sb.append("\\'");
|
||||||
|
} else {
|
||||||
|
sb.append(seg);
|
||||||
|
}
|
||||||
|
} else { // len > 1
|
||||||
|
sb.append(seg.substring(0, seg.length() - 2));
|
||||||
|
char lastcSec = seg.charAt(seg.length() - 2);
|
||||||
|
if (lastcSec == '\\') {
|
||||||
|
sb.append("\\'");
|
||||||
|
} else {
|
||||||
|
sb.append(lastcSec);
|
||||||
|
sb.append("\\'");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (end < origin.length()) {
|
||||||
|
sb.append(origin.substring(end));
|
||||||
|
}
|
||||||
|
return sb.toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
public static String getNativeSql(String rawSql, Object[] parameters) {
|
||||||
|
// toLowerCase
|
||||||
|
String preparedSql = rawSql.trim().toLowerCase();
|
||||||
|
|
||||||
|
String[] clause = new String[0];
|
||||||
|
if (SqlSyntaxValidator.isInsertSql(preparedSql)) {
|
||||||
|
// insert or import
|
||||||
|
clause = new String[]{"values\\s*\\(.*?\\)", "tags\\s*\\(.*?\\)"};
|
||||||
|
}
|
||||||
|
if (SqlSyntaxValidator.isSelectSql(preparedSql)) {
|
||||||
|
// select
|
||||||
|
clause = new String[]{"where\\s*.*"};
|
||||||
|
}
|
||||||
|
Map<Integer, Integer> placeholderPositions = new HashMap<>();
|
||||||
|
RangeSet<Integer> clauseRangeSet = TreeRangeSet.create();
|
||||||
|
findPlaceholderPosition(preparedSql, placeholderPositions);
|
||||||
|
findClauseRangeSet(preparedSql, clause, clauseRangeSet);
|
||||||
|
|
||||||
|
return transformSql(rawSql, parameters, placeholderPositions, clauseRangeSet);
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void findClauseRangeSet(String preparedSql, String[] regexArr, RangeSet<Integer> clauseRangeSet) {
|
||||||
|
clauseRangeSet.clear();
|
||||||
|
for (String regex : regexArr) {
|
||||||
|
Matcher matcher = Pattern.compile(regex).matcher(preparedSql);
|
||||||
|
while (matcher.find()) {
|
||||||
|
int start = matcher.start();
|
||||||
|
int end = matcher.end();
|
||||||
|
clauseRangeSet.add(Range.closed(start, end));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void findPlaceholderPosition(String preparedSql, Map<Integer, Integer> placeholderPosition) {
|
||||||
|
placeholderPosition.clear();
|
||||||
|
Matcher matcher = Pattern.compile("\\?").matcher(preparedSql);
|
||||||
|
int index = 0;
|
||||||
|
while (matcher.find()) {
|
||||||
|
int pos = matcher.start();
|
||||||
|
placeholderPosition.put(index, pos);
|
||||||
|
index++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/***
|
||||||
|
*
|
||||||
|
* @param rawSql
|
||||||
|
* @param paramArr
|
||||||
|
* @param placeholderPosition
|
||||||
|
* @param clauseRangeSet
|
||||||
|
* @return
|
||||||
|
*/
|
||||||
|
private static String transformSql(String rawSql, Object[] paramArr, Map<Integer, Integer> placeholderPosition, RangeSet<Integer> clauseRangeSet) {
|
||||||
|
String[] sqlArr = rawSql.split("\\?");
|
||||||
|
|
||||||
|
return IntStream.range(0, sqlArr.length).mapToObj(index -> {
|
||||||
|
if (index == paramArr.length)
|
||||||
|
return sqlArr[index];
|
||||||
|
|
||||||
|
Object para = paramArr[index];
|
||||||
|
String paraStr;
|
||||||
|
if (para != null) {
|
||||||
|
if (para instanceof byte[]) {
|
||||||
|
paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
|
||||||
|
} else {
|
||||||
|
paraStr = para.toString();
|
||||||
|
}
|
||||||
|
// if para is timestamp or String or byte[] need to translate ' character
|
||||||
|
if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
|
||||||
|
paraStr = Utils.escapeSingleQuota(paraStr);
|
||||||
|
|
||||||
|
Integer pos = placeholderPosition.get(index);
|
||||||
|
boolean contains = clauseRangeSet.contains(pos);
|
||||||
|
if (contains) {
|
||||||
|
paraStr = "'" + paraStr + "'";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
paraStr = "NULL";
|
||||||
|
}
|
||||||
|
return sqlArr[index] + paraStr;
|
||||||
|
}).collect(Collectors.joining());
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
@ -1,6 +1,7 @@
|
||||||
package com.taosdata.jdbc;
|
package com.taosdata.jdbc;
|
||||||
|
|
||||||
import org.junit.After;
|
import org.junit.After;
|
||||||
|
import org.junit.Assert;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
|
||||||
|
|
@ -12,17 +13,61 @@ import java.util.Properties;
|
||||||
import java.util.concurrent.TimeUnit;
|
import java.util.concurrent.TimeUnit;
|
||||||
|
|
||||||
public class SubscribeTest {
|
public class SubscribeTest {
|
||||||
|
|
||||||
Connection connection;
|
Connection connection;
|
||||||
Statement statement;
|
Statement statement;
|
||||||
String dbName = "test";
|
String dbName = "test";
|
||||||
String tName = "t0";
|
String tName = "t0";
|
||||||
String host = "127.0.0.1";
|
String host = "127.0.0.1";
|
||||||
String topic = "test";
|
String topic = "test";
|
||||||
|
private long ts;
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void subscribe() {
|
||||||
|
try {
|
||||||
|
String rawSql = "select * from " + dbName + "." + tName + ";";
|
||||||
|
TSDBConnection conn = connection.unwrap(TSDBConnection.class);
|
||||||
|
TSDBSubscribe subscribe = conn.subscribe(topic, rawSql, false);
|
||||||
|
|
||||||
|
for (int j = 0; j < 10; j++) {
|
||||||
|
TimeUnit.SECONDS.sleep(1);
|
||||||
|
TSDBResultSet resSet = subscribe.consume();
|
||||||
|
|
||||||
|
int rowCnt = 0;
|
||||||
|
while (resSet.next()) {
|
||||||
|
if (rowCnt == 0) {
|
||||||
|
long cur_ts = resSet.getTimestamp(1).getTime();
|
||||||
|
int k = resSet.getInt(2);
|
||||||
|
int v = resSet.getInt(3);
|
||||||
|
Assert.assertEquals(ts, cur_ts);
|
||||||
|
Assert.assertEquals(100, k);
|
||||||
|
Assert.assertEquals(1, v);
|
||||||
|
}
|
||||||
|
if (rowCnt == 1) {
|
||||||
|
long cur_ts = resSet.getTimestamp(1).getTime();
|
||||||
|
int k = resSet.getInt(2);
|
||||||
|
int v = resSet.getInt(3);
|
||||||
|
Assert.assertEquals(ts + 1, cur_ts);
|
||||||
|
Assert.assertEquals(101, k);
|
||||||
|
Assert.assertEquals(2, v);
|
||||||
|
|
||||||
|
}
|
||||||
|
rowCnt++;
|
||||||
|
}
|
||||||
|
if (j == 0)
|
||||||
|
Assert.assertEquals(2, rowCnt);
|
||||||
|
resSet.close();
|
||||||
|
}
|
||||||
|
subscribe.close(true);
|
||||||
|
|
||||||
|
|
||||||
|
} catch (SQLException | InterruptedException throwables) {
|
||||||
|
throwables.printStackTrace();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
@Before
|
@Before
|
||||||
public void createDatabase() {
|
public void createDatabase() throws SQLException {
|
||||||
try {
|
|
||||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
|
||||||
Properties properties = new Properties();
|
Properties properties = new Properties();
|
||||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||||
|
|
@ -33,46 +78,9 @@ public class SubscribeTest {
|
||||||
statement.execute("drop database if exists " + dbName);
|
statement.execute("drop database if exists " + dbName);
|
||||||
statement.execute("create database if not exists " + dbName);
|
statement.execute("create database if not exists " + dbName);
|
||||||
statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
|
statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
|
||||||
long ts = System.currentTimeMillis();
|
ts = System.currentTimeMillis();
|
||||||
for (int i = 0; i < 2; i++) {
|
statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 100, 1)");
|
||||||
ts += i;
|
statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + (ts + 1) + ", 101, 2)");
|
||||||
String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")";
|
|
||||||
statement.executeUpdate(sql);
|
|
||||||
}
|
|
||||||
|
|
||||||
} catch (ClassNotFoundException | SQLException e) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void subscribe() {
|
|
||||||
try {
|
|
||||||
String rawSql = "select * from " + dbName + "." + tName + ";";
|
|
||||||
System.out.println(rawSql);
|
|
||||||
// TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);
|
|
||||||
|
|
||||||
// int a = 0;
|
|
||||||
// while (true) {
|
|
||||||
// TimeUnit.MILLISECONDS.sleep(1000);
|
|
||||||
// TSDBResultSet resSet = subscribe.consume();
|
|
||||||
// while (resSet.next()) {
|
|
||||||
// for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
|
|
||||||
// System.out.printf(i + ": " + resSet.getString(i) + "\t");
|
|
||||||
// }
|
|
||||||
// System.out.println("\n======" + a + "==========");
|
|
||||||
// }
|
|
||||||
// a++;
|
|
||||||
// if (a >= 2) {
|
|
||||||
// break;
|
|
||||||
// }
|
|
||||||
// resSet.close();
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// subscribe.close(true);
|
|
||||||
} catch (Exception e) {
|
|
||||||
e.printStackTrace();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@After
|
@After
|
||||||
|
|
@ -86,6 +94,5 @@ public class SubscribeTest {
|
||||||
} catch (SQLException e) {
|
} catch (SQLException e) {
|
||||||
e.printStackTrace();
|
e.printStackTrace();
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -3,7 +3,6 @@ package com.taosdata.jdbc;
|
||||||
import com.google.common.primitives.Ints;
|
import com.google.common.primitives.Ints;
|
||||||
import com.google.common.primitives.Longs;
|
import com.google.common.primitives.Longs;
|
||||||
import com.google.common.primitives.Shorts;
|
import com.google.common.primitives.Shorts;
|
||||||
import com.taosdata.jdbc.rs.RestfulResultSet;
|
|
||||||
import org.junit.AfterClass;
|
import org.junit.AfterClass;
|
||||||
import org.junit.Assert;
|
import org.junit.Assert;
|
||||||
import org.junit.BeforeClass;
|
import org.junit.BeforeClass;
|
||||||
|
|
@ -177,6 +176,7 @@ public class TSDBResultSetTest {
|
||||||
rs.getAsciiStream("f1");
|
rs.getAsciiStream("f1");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@SuppressWarnings("deprecation")
|
||||||
@Test(expected = SQLFeatureNotSupportedException.class)
|
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||||
public void getUnicodeStream() throws SQLException {
|
public void getUnicodeStream() throws SQLException {
|
||||||
rs.getUnicodeStream("f1");
|
rs.getUnicodeStream("f1");
|
||||||
|
|
@ -326,7 +326,7 @@ public class TSDBResultSetTest {
|
||||||
|
|
||||||
@Test(expected = SQLFeatureNotSupportedException.class)
|
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||||
public void getRow() throws SQLException {
|
public void getRow() throws SQLException {
|
||||||
int row = rs.getRow();
|
rs.getRow();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test(expected = SQLFeatureNotSupportedException.class)
|
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||||
|
|
@ -405,12 +405,12 @@ public class TSDBResultSetTest {
|
||||||
|
|
||||||
@Test(expected = SQLFeatureNotSupportedException.class)
|
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||||
public void updateByte() throws SQLException {
|
public void updateByte() throws SQLException {
|
||||||
rs.updateByte(1, new Byte("0"));
|
rs.updateByte(1, (byte) 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test(expected = SQLFeatureNotSupportedException.class)
|
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||||
public void updateShort() throws SQLException {
|
public void updateShort() throws SQLException {
|
||||||
rs.updateShort(1, new Short("0"));
|
rs.updateShort(1, (short) 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test(expected = SQLFeatureNotSupportedException.class)
|
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,401 @@
|
||||||
|
package com.taosdata.jdbc.cases;
|
||||||
|
|
||||||
|
import org.junit.*;
|
||||||
|
|
||||||
|
import java.sql.*;
|
||||||
|
|
||||||
|
public class InsertSpecialCharacterJniTest {
|
||||||
|
|
||||||
|
private static final String host = "127.0.0.1";
|
||||||
|
private static Connection conn;
|
||||||
|
private static String dbName = "spec_char_test";
|
||||||
|
private static String tbname1 = "test";
|
||||||
|
private static String tbname2 = "weather";
|
||||||
|
private static String special_character_str_1 = "$asd$$fsfsf$";
|
||||||
|
private static String special_character_str_2 = "\\\\asdfsfsf\\\\";
|
||||||
|
private static String special_character_str_3 = "\\\\asdfsfsf\\";
|
||||||
|
private static String special_character_str_4 = "?asd??fsf?sf?";
|
||||||
|
private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase01() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setBytes(2, special_character_str_1.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from ?";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||||
|
pstmt.setString(1, tbname1);
|
||||||
|
|
||||||
|
ResultSet rs = pstmt.executeQuery();
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_1, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase02() throws SQLException {
|
||||||
|
//TODO:
|
||||||
|
// Expected :\asdfsfsf\\
|
||||||
|
// Actual :\asdfsfsf\
|
||||||
|
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setBytes(2, special_character_str_2.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from " + tbname1;
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||||
|
ResultSet rs = pstmt.executeQuery();
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
//TODO: bug to be fixed
|
||||||
|
// Assert.assertEquals(special_character_str_2, f1);
|
||||||
|
Assert.assertEquals(special_character_str_2.substring(1, special_character_str_1.length() - 1), f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(expected = SQLException.class)
|
||||||
|
public void testCase03() throws SQLException {
|
||||||
|
//TODO:
|
||||||
|
// TDengine ERROR (216): Syntax error in SQL
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setBytes(2, special_character_str_3.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from " + tbname1;
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||||
|
ResultSet rs = pstmt.executeQuery();
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_3, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase04() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setBytes(2, special_character_str_4.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from " + tbname1;
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
ResultSet rs = stmt.executeQuery(query);
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_4, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase05() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setBytes(2, special_character_str_5.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from " + tbname1;
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
ResultSet rs = stmt.executeQuery(query);
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_5, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase06() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?)";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setInt(1, 1);
|
||||||
|
pstmt.setString(2, special_character_str_4);
|
||||||
|
pstmt.setTimestamp(3, new Timestamp(now));
|
||||||
|
pstmt.setBytes(4, special_character_str_4.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query t1
|
||||||
|
final String query = "select * from t1";
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
ResultSet rs = stmt.executeQuery(query);
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_4, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase07() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, ?, ?) ; ";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setBytes(2, special_character_str_4.getBytes());
|
||||||
|
pstmt.setString(3, special_character_str_4);
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from " + tbname1;
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
ResultSet rs = stmt.executeQuery(query);
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_4, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertEquals(special_character_str_4, f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(expected = SQLException.class)
|
||||||
|
public void testCase08() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?) ? ";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setInt(1, 1);
|
||||||
|
pstmt.setString(2, special_character_str_5);
|
||||||
|
pstmt.setTimestamp(3, new Timestamp(now));
|
||||||
|
pstmt.setBytes(4, special_character_str_5.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase09() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into ?.t? using " + tbname2 + " tags(?) values(?, ?, ?) t? using weather tags(?) values(?,?,?) ";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
// t1
|
||||||
|
pstmt.setString(1, dbName);
|
||||||
|
pstmt.setInt(2, 1);
|
||||||
|
pstmt.setString(3, special_character_str_5);
|
||||||
|
pstmt.setTimestamp(4, new Timestamp(now));
|
||||||
|
pstmt.setBytes(5, special_character_str_5.getBytes());
|
||||||
|
// t2
|
||||||
|
pstmt.setInt(7, 2);
|
||||||
|
pstmt.setString(8, special_character_str_5);
|
||||||
|
pstmt.setTimestamp(9, new Timestamp(now));
|
||||||
|
pstmt.setString(11, special_character_str_5);
|
||||||
|
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(2, ret);
|
||||||
|
}
|
||||||
|
// query t1
|
||||||
|
String query = "select * from t?";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||||
|
pstmt.setInt(1, 1);
|
||||||
|
|
||||||
|
ResultSet rs = pstmt.executeQuery();
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_5, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
// query t2
|
||||||
|
query = "select * from t2";
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
ResultSet rs = stmt.executeQuery(query);
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
byte[] f1 = rs.getBytes(2);
|
||||||
|
Assert.assertNull(f1);
|
||||||
|
String f2 = new String(rs.getBytes(3));
|
||||||
|
Assert.assertEquals(special_character_str_5, f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase10() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
// t1
|
||||||
|
pstmt.setInt(1, 1);
|
||||||
|
pstmt.setString(2, tbname2);
|
||||||
|
pstmt.setString(3, special_character_str_5);
|
||||||
|
pstmt.setTimestamp(4, new Timestamp(now));
|
||||||
|
pstmt.setBytes(5, special_character_str_5.getBytes());
|
||||||
|
// t2
|
||||||
|
pstmt.setInt(7, 2);
|
||||||
|
pstmt.setString(8, special_character_str_5);
|
||||||
|
pstmt.setTimestamp(9, new Timestamp(now));
|
||||||
|
pstmt.setString(11, special_character_str_5);
|
||||||
|
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(2, ret);
|
||||||
|
}
|
||||||
|
//query t1
|
||||||
|
String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||||
|
pstmt.setString(1, dbName);
|
||||||
|
pstmt.setInt(2, 1);
|
||||||
|
pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
|
||||||
|
pstmt.setTimestamp(4, new Timestamp(0));
|
||||||
|
pstmt.setString(5, "f1");
|
||||||
|
|
||||||
|
ResultSet rs = pstmt.executeQuery();
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_5, f1);
|
||||||
|
byte[] f2 = rs.getBytes(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
// query t2
|
||||||
|
query = "select * from t? where ts < ? and ts >= ? and ? is not null";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||||
|
pstmt.setInt(1, 2);
|
||||||
|
pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
|
||||||
|
pstmt.setTimestamp(3, new Timestamp(0));
|
||||||
|
pstmt.setString(4, "f2");
|
||||||
|
|
||||||
|
ResultSet rs = pstmt.executeQuery();
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
byte[] f1 = rs.getBytes(2);
|
||||||
|
Assert.assertNull(f1);
|
||||||
|
String f2 = new String(rs.getBytes(3));
|
||||||
|
Assert.assertEquals(special_character_str_5, f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(expected = SQLException.class)
|
||||||
|
public void testCase11() throws SQLException {
|
||||||
|
final String speicalCharacterStr = "?#sd@$f(((s[P)){]}f?s[]{}%vs^a&d*jhg)(j))(f@~!?$";
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
|
||||||
|
final String sql = "insert into t? using " + tbname2 + " values(?, ?, 'abc?abc') ";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setInt(1, 1);
|
||||||
|
pstmt.setTimestamp(2, new Timestamp(now));
|
||||||
|
pstmt.setBytes(3, speicalCharacterStr.getBytes());
|
||||||
|
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase12() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setString(2, special_character_str_4);
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from " + tbname1;
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
ResultSet rs = stmt.executeQuery(query);
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals("HelloTDengine", f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertEquals(special_character_str_4, f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void before() throws SQLException {
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
stmt.execute("drop table if exists " + tbname1 + "");
|
||||||
|
stmt.execute("create table " + tbname1 + "(ts timestamp,f1 binary(64),f2 nchar(64))");
|
||||||
|
stmt.execute("drop table if exists " + tbname2);
|
||||||
|
stmt.execute("create table " + tbname2 + "(ts timestamp, f1 binary(64), f2 nchar(64)) tags(loc nchar(64))");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void beforeClass() throws SQLException {
|
||||||
|
String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
|
||||||
|
conn = DriverManager.getConnection(url);
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
stmt.execute("drop database if exists " + dbName);
|
||||||
|
stmt.execute("create database if not exists " + dbName);
|
||||||
|
stmt.execute("use " + dbName);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@AfterClass
|
||||||
|
public static void afterClass() throws SQLException {
|
||||||
|
if (conn != null)
|
||||||
|
conn.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,401 @@
|
||||||
|
package com.taosdata.jdbc.cases;
|
||||||
|
|
||||||
|
import org.junit.*;
|
||||||
|
|
||||||
|
import java.sql.*;
|
||||||
|
|
||||||
|
public class InsertSpecialCharacterRestfulTest {
|
||||||
|
|
||||||
|
private static final String host = "127.0.0.1";
|
||||||
|
// private static final String host = "master";
|
||||||
|
private static Connection conn;
|
||||||
|
private static String dbName = "spec_char_test";
|
||||||
|
private static String tbname1 = "test";
|
||||||
|
private static String tbname2 = "weather";
|
||||||
|
private static String special_character_str_1 = "$asd$$fsfsf$";
|
||||||
|
private static String special_character_str_2 = "\\\\asdfsfsf\\\\";
|
||||||
|
private static String special_character_str_3 = "\\\\asdfsfsf\\";
|
||||||
|
private static String special_character_str_4 = "?asd??fsf?sf?";
|
||||||
|
private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase01() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setBytes(2, special_character_str_1.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from ?";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||||
|
pstmt.setString(1, tbname1);
|
||||||
|
|
||||||
|
ResultSet rs = pstmt.executeQuery();
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_1, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase02() throws SQLException {
|
||||||
|
//TODO:
|
||||||
|
// Expected :\asdfsfsf\
|
||||||
|
// Actual :\asdfsfsf\
|
||||||
|
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setBytes(2, special_character_str_2.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from " + tbname1;
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||||
|
ResultSet rs = pstmt.executeQuery();
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
//TODO: bug to be fixed
|
||||||
|
// Assert.assertEquals(special_character_str_2, f1);
|
||||||
|
Assert.assertEquals(special_character_str_2.substring(1, special_character_str_1.length() - 1), f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(expected = SQLException.class)
|
||||||
|
public void testCase03() throws SQLException {
|
||||||
|
//TODO:
|
||||||
|
// TDengine ERROR (216): Syntax error in SQL
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setBytes(2, special_character_str_3.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from " + tbname1;
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||||
|
ResultSet rs = pstmt.executeQuery();
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_3, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase04() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setBytes(2, special_character_str_4.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from " + tbname1;
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
ResultSet rs = stmt.executeQuery(query);
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_4, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase05() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setBytes(2, special_character_str_5.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from " + tbname1;
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
ResultSet rs = stmt.executeQuery(query);
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_5, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase06() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?)";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setInt(1, 1);
|
||||||
|
pstmt.setString(2, special_character_str_4);
|
||||||
|
pstmt.setTimestamp(3, new Timestamp(now));
|
||||||
|
pstmt.setBytes(4, special_character_str_4.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query t1
|
||||||
|
final String query = "select * from t1";
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
ResultSet rs = stmt.executeQuery(query);
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_4, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase07() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, ?, ?) ; ";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setBytes(2, special_character_str_4.getBytes());
|
||||||
|
pstmt.setString(3, special_character_str_4);
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from " + tbname1;
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
ResultSet rs = stmt.executeQuery(query);
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_4, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertEquals(special_character_str_4, f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(expected = SQLException.class)
|
||||||
|
public void testCase08() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?) ? ";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setInt(1, 1);
|
||||||
|
pstmt.setString(2, special_character_str_5);
|
||||||
|
pstmt.setTimestamp(3, new Timestamp(now));
|
||||||
|
pstmt.setBytes(4, special_character_str_5.getBytes());
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase09() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into ?.t? using " + tbname2 + " tags(?) values(?, ?, ?) t? using weather tags(?) values(?,?,?) ";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
// t1
|
||||||
|
pstmt.setString(1, dbName);
|
||||||
|
pstmt.setInt(2, 1);
|
||||||
|
pstmt.setString(3, special_character_str_5);
|
||||||
|
pstmt.setTimestamp(4, new Timestamp(now));
|
||||||
|
pstmt.setBytes(5, special_character_str_5.getBytes());
|
||||||
|
// t2
|
||||||
|
pstmt.setInt(7, 2);
|
||||||
|
pstmt.setString(8, special_character_str_5);
|
||||||
|
pstmt.setTimestamp(9, new Timestamp(now));
|
||||||
|
pstmt.setString(11, special_character_str_5);
|
||||||
|
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(2, ret);
|
||||||
|
}
|
||||||
|
// query t1
|
||||||
|
String query = "select * from t?";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||||
|
pstmt.setInt(1, 1);
|
||||||
|
|
||||||
|
ResultSet rs = pstmt.executeQuery();
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_5, f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
// query t2
|
||||||
|
query = "select * from t2";
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
ResultSet rs = stmt.executeQuery(query);
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
byte[] f1 = rs.getBytes(2);
|
||||||
|
Assert.assertNull(f1);
|
||||||
|
String f2 = new String(rs.getBytes(3));
|
||||||
|
Assert.assertEquals(special_character_str_5, f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase10() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
// t1
|
||||||
|
pstmt.setInt(1, 1);
|
||||||
|
pstmt.setString(2, tbname2);
|
||||||
|
pstmt.setString(3, special_character_str_5);
|
||||||
|
pstmt.setTimestamp(4, new Timestamp(now));
|
||||||
|
pstmt.setBytes(5, special_character_str_5.getBytes());
|
||||||
|
// t2
|
||||||
|
pstmt.setInt(7, 2);
|
||||||
|
pstmt.setString(8, special_character_str_5);
|
||||||
|
pstmt.setTimestamp(9, new Timestamp(now));
|
||||||
|
pstmt.setString(11, special_character_str_5);
|
||||||
|
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(2, ret);
|
||||||
|
}
|
||||||
|
//query t1
|
||||||
|
String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||||
|
pstmt.setString(1, dbName);
|
||||||
|
pstmt.setInt(2, 1);
|
||||||
|
pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
|
||||||
|
pstmt.setTimestamp(4, new Timestamp(0));
|
||||||
|
pstmt.setString(5, "f1");
|
||||||
|
|
||||||
|
ResultSet rs = pstmt.executeQuery();
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals(special_character_str_5, f1);
|
||||||
|
byte[] f2 = rs.getBytes(3);
|
||||||
|
Assert.assertNull(f2);
|
||||||
|
}
|
||||||
|
// query t2
|
||||||
|
query = "select * from t? where ts < ? and ts >= ? and ? is not null";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||||
|
pstmt.setInt(1, 2);
|
||||||
|
pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
|
||||||
|
pstmt.setTimestamp(3, new Timestamp(0));
|
||||||
|
pstmt.setString(4, "f2");
|
||||||
|
|
||||||
|
ResultSet rs = pstmt.executeQuery();
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
byte[] f1 = rs.getBytes(2);
|
||||||
|
Assert.assertNull(f1);
|
||||||
|
String f2 = new String(rs.getBytes(3));
|
||||||
|
Assert.assertEquals(special_character_str_5, f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(expected = SQLException.class)
|
||||||
|
public void testCase11() throws SQLException {
|
||||||
|
final String speicalCharacterStr = "?#sd@$f(((s[P)){]}f?s[]{}%vs^a&d*jhg)(j))(f@~!?$";
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
|
||||||
|
final String sql = "insert into t? using " + tbname2 + " values(?, ?, 'abc?abc') ";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setInt(1, 1);
|
||||||
|
pstmt.setTimestamp(2, new Timestamp(now));
|
||||||
|
pstmt.setBytes(3, speicalCharacterStr.getBytes());
|
||||||
|
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCase12() throws SQLException {
|
||||||
|
final long now = System.currentTimeMillis();
|
||||||
|
// insert
|
||||||
|
final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
|
||||||
|
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(now));
|
||||||
|
pstmt.setString(2, special_character_str_4);
|
||||||
|
int ret = pstmt.executeUpdate();
|
||||||
|
Assert.assertEquals(1, ret);
|
||||||
|
}
|
||||||
|
// query
|
||||||
|
final String query = "select * from " + tbname1;
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
ResultSet rs = stmt.executeQuery(query);
|
||||||
|
rs.next();
|
||||||
|
long timestamp = rs.getTimestamp(1).getTime();
|
||||||
|
Assert.assertEquals(now, timestamp);
|
||||||
|
String f1 = new String(rs.getBytes(2));
|
||||||
|
Assert.assertEquals("HelloTDengine", f1);
|
||||||
|
String f2 = rs.getString(3);
|
||||||
|
Assert.assertEquals(special_character_str_4, f2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void before() throws SQLException {
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
stmt.execute("drop table if exists " + tbname1 + "");
|
||||||
|
stmt.execute("create table " + tbname1 + "(ts timestamp,f1 binary(64),f2 nchar(64))");
|
||||||
|
stmt.execute("drop table if exists " + tbname2);
|
||||||
|
stmt.execute("create table " + tbname2 + "(ts timestamp, f1 binary(64), f2 nchar(64)) tags(loc nchar(64))");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void beforeClass() throws SQLException {
|
||||||
|
String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
|
||||||
|
conn = DriverManager.getConnection(url);
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
stmt.execute("drop database if exists " + dbName);
|
||||||
|
stmt.execute("create database if not exists " + dbName);
|
||||||
|
stmt.execute("use " + dbName);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@AfterClass
|
||||||
|
public static void afterClass() throws SQLException {
|
||||||
|
if (conn != null)
|
||||||
|
conn.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
@ -6,11 +6,11 @@ import org.junit.BeforeClass;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.io.Serializable;
|
|
||||||
import java.sql.*;
|
import java.sql.*;
|
||||||
|
|
||||||
public class RestfulPreparedStatementTest {
|
public class RestfulPreparedStatementTest {
|
||||||
private static final String host = "127.0.0.1";
|
private static final String host = "127.0.0.1";
|
||||||
|
// private static final String host = "master";
|
||||||
private static Connection conn;
|
private static Connection conn;
|
||||||
private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
|
private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
|
||||||
private static PreparedStatement pstmt_insert;
|
private static PreparedStatement pstmt_insert;
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,24 @@
|
||||||
|
package com.taosdata.jdbc.utils;
|
||||||
|
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import static org.junit.Assert.*;
|
||||||
|
|
||||||
|
public class UtilsTest {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void escapeSingleQuota() {
|
||||||
|
String s = "'''''a\\'";
|
||||||
|
String news = Utils.escapeSingleQuota(s);
|
||||||
|
Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
|
||||||
|
|
||||||
|
s = "\'''''a\\'";
|
||||||
|
news = Utils.escapeSingleQuota(s);
|
||||||
|
Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
|
||||||
|
|
||||||
|
s = "\'\'\'\''a\\'";
|
||||||
|
news = Utils.escapeSingleQuota(s);
|
||||||
|
Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,154 @@
|
||||||
|
|
||||||
|
# Created by https://www.toptal.com/developers/gitignore/api/python
|
||||||
|
# Edit at https://www.toptal.com/developers/gitignore?templates=python
|
||||||
|
|
||||||
|
### Python ###
|
||||||
|
# Byte-compiled / optimized / DLL files
|
||||||
|
__pycache__/
|
||||||
|
*.py[cod]
|
||||||
|
*$py.class
|
||||||
|
|
||||||
|
# C extensions
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Distribution / packaging
|
||||||
|
.Python
|
||||||
|
build/
|
||||||
|
develop-eggs/
|
||||||
|
dist/
|
||||||
|
downloads/
|
||||||
|
eggs/
|
||||||
|
.eggs/
|
||||||
|
parts/
|
||||||
|
sdist/
|
||||||
|
var/
|
||||||
|
wheels/
|
||||||
|
pip-wheel-metadata/
|
||||||
|
share/python-wheels/
|
||||||
|
*.egg-info/
|
||||||
|
.installed.cfg
|
||||||
|
*.egg
|
||||||
|
MANIFEST
|
||||||
|
|
||||||
|
# PyInstaller
|
||||||
|
# Usually these files are written by a python script from a template
|
||||||
|
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||||
|
*.manifest
|
||||||
|
*.spec
|
||||||
|
|
||||||
|
# Installer logs
|
||||||
|
pip-log.txt
|
||||||
|
pip-delete-this-directory.txt
|
||||||
|
|
||||||
|
# Unit test / coverage reports
|
||||||
|
htmlcov/
|
||||||
|
.tox/
|
||||||
|
.nox/
|
||||||
|
.coverage
|
||||||
|
.coverage.*
|
||||||
|
.cache
|
||||||
|
nosetests.xml
|
||||||
|
coverage.xml
|
||||||
|
*.cover
|
||||||
|
*.py,cover
|
||||||
|
.hypothesis/
|
||||||
|
.pytest_cache/
|
||||||
|
pytestdebug.log
|
||||||
|
|
||||||
|
# Translations
|
||||||
|
*.mo
|
||||||
|
*.pot
|
||||||
|
|
||||||
|
# Django stuff:
|
||||||
|
*.log
|
||||||
|
local_settings.py
|
||||||
|
db.sqlite3
|
||||||
|
db.sqlite3-journal
|
||||||
|
|
||||||
|
# Flask stuff:
|
||||||
|
instance/
|
||||||
|
.webassets-cache
|
||||||
|
|
||||||
|
# Scrapy stuff:
|
||||||
|
.scrapy
|
||||||
|
|
||||||
|
# Sphinx documentation
|
||||||
|
docs/_build/
|
||||||
|
doc/_build/
|
||||||
|
|
||||||
|
# PyBuilder
|
||||||
|
target/
|
||||||
|
|
||||||
|
# Jupyter Notebook
|
||||||
|
.ipynb_checkpoints
|
||||||
|
|
||||||
|
# IPython
|
||||||
|
profile_default/
|
||||||
|
ipython_config.py
|
||||||
|
|
||||||
|
# pyenv
|
||||||
|
.python-version
|
||||||
|
|
||||||
|
# pipenv
|
||||||
|
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||||
|
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||||
|
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||||
|
# install all needed dependencies.
|
||||||
|
#Pipfile.lock
|
||||||
|
|
||||||
|
# poetry
|
||||||
|
#poetry.lock
|
||||||
|
|
||||||
|
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
||||||
|
__pypackages__/
|
||||||
|
|
||||||
|
# Celery stuff
|
||||||
|
celerybeat-schedule
|
||||||
|
celerybeat.pid
|
||||||
|
|
||||||
|
# SageMath parsed files
|
||||||
|
*.sage.py
|
||||||
|
|
||||||
|
# Environments
|
||||||
|
# .env
|
||||||
|
.env/
|
||||||
|
.venv/
|
||||||
|
env/
|
||||||
|
venv/
|
||||||
|
ENV/
|
||||||
|
env.bak/
|
||||||
|
venv.bak/
|
||||||
|
pythonenv*
|
||||||
|
|
||||||
|
# Spyder project settings
|
||||||
|
.spyderproject
|
||||||
|
.spyproject
|
||||||
|
|
||||||
|
# Rope project settings
|
||||||
|
.ropeproject
|
||||||
|
|
||||||
|
# mkdocs documentation
|
||||||
|
/site
|
||||||
|
|
||||||
|
# mypy
|
||||||
|
.mypy_cache/
|
||||||
|
.dmypy.json
|
||||||
|
dmypy.json
|
||||||
|
|
||||||
|
# Pyre type checker
|
||||||
|
.pyre/
|
||||||
|
|
||||||
|
# pytype static type analyzer
|
||||||
|
.pytype/
|
||||||
|
|
||||||
|
# operating system-related files
|
||||||
|
# file properties cache/storage on macOS
|
||||||
|
*.DS_Store
|
||||||
|
# thumbnail cache on Windows
|
||||||
|
Thumbs.db
|
||||||
|
|
||||||
|
# profiling data
|
||||||
|
.prof
|
||||||
|
|
||||||
|
|
||||||
|
# End of https://www.toptal.com/developers/gitignore/api/python
|
||||||
|
|
@ -0,0 +1,17 @@
|
||||||
|
# TDengine Connector for Python
|
||||||
|
|
||||||
|
[TDengine] connector for Python enables python programs to access TDengine, using an API which is compliant with the Python DB API 2.0 (PEP-249). It uses TDengine C client library for client server communications.
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
```sh
|
||||||
|
pip install git+https://github.com/taosdata/TDengine-connector-python
|
||||||
|
```
|
||||||
|
|
||||||
|
## Source Code
|
||||||
|
|
||||||
|
[TDengine] connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine-connector-python).
|
||||||
|
|
||||||
|
## License - AGPL
|
||||||
|
|
||||||
|
Keep same with [TDengine](https://github.com/taosdata/TDengine).
|
||||||
|
|
@ -0,0 +1,12 @@
|
||||||
|
import taos
|
||||||
|
|
||||||
|
conn = taos.connect(host='127.0.0.1',
|
||||||
|
user='root',
|
||||||
|
passworkd='taodata',
|
||||||
|
database='log')
|
||||||
|
cursor = conn.cursor()
|
||||||
|
|
||||||
|
sql = "select * from log.log limit 10"
|
||||||
|
cursor.execute(sql)
|
||||||
|
for row in cursor:
|
||||||
|
print(row)
|
||||||
|
|
@ -0,0 +1 @@
|
||||||
|
../
|
||||||
|
|
@ -1 +0,0 @@
|
||||||
# TDengine python client interface
|
|
||||||
|
|
@ -1,20 +0,0 @@
|
||||||
import setuptools
|
|
||||||
|
|
||||||
with open("README.md", "r") as fh:
|
|
||||||
long_description = fh.read()
|
|
||||||
|
|
||||||
setuptools.setup(
|
|
||||||
name="taos",
|
|
||||||
version="2.0.8",
|
|
||||||
author="Taosdata Inc.",
|
|
||||||
author_email="support@taosdata.com",
|
|
||||||
description="TDengine python client package",
|
|
||||||
long_description=long_description,
|
|
||||||
long_description_content_type="text/markdown",
|
|
||||||
url="https://github.com/pypa/sampleproject",
|
|
||||||
packages=setuptools.find_packages(),
|
|
||||||
classifiers=[
|
|
||||||
"Programming Language :: Python :: 2",
|
|
||||||
"Operating System :: Linux",
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
@ -1,642 +0,0 @@
|
||||||
import ctypes
|
|
||||||
from .constants import FieldType
|
|
||||||
from .error import *
|
|
||||||
import math
|
|
||||||
import datetime
|
|
||||||
|
|
||||||
|
|
||||||
def _convert_millisecond_to_datetime(milli):
|
|
||||||
return datetime.datetime.fromtimestamp(milli / 1000.0)
|
|
||||||
|
|
||||||
|
|
||||||
def _convert_microsecond_to_datetime(micro):
|
|
||||||
return datetime.datetime.fromtimestamp(micro / 1000000.0)
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C bool row to python row
|
|
||||||
"""
|
|
||||||
_timestamp_converter = _convert_millisecond_to_datetime
|
|
||||||
if micro:
|
|
||||||
_timestamp_converter = _convert_microsecond_to_datetime
|
|
||||||
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return list(map(_timestamp_converter, ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
|
||||||
else:
|
|
||||||
return list(map(_timestamp_converter, ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C bool row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_byte))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_bool))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C tinyint row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_tinyint_unsigned_to_python(
|
|
||||||
data,
|
|
||||||
num_of_rows,
|
|
||||||
nbytes=None,
|
|
||||||
micro=False):
|
|
||||||
"""Function to convert C tinyint row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_ubyte))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_ubyte))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
    """Convert a C smallint column to a list of Python ints.

    The NULL sentinel (FieldType.C_SMALLINT_NULL) maps to None.  Both
    sign branches of the original decoded identically (c_short), so a
    single path suffices.  nbytes/micro are unused, kept for interface
    uniformity.
    """
    return [None if ele == FieldType.C_SMALLINT_NULL else ele
            for ele in ctypes.cast(
                data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_smallint_unsigned_to_python(
        data, num_of_rows, nbytes=None, micro=False):
    """Convert a C unsigned smallint column to a list of Python ints.

    The NULL sentinel (FieldType.C_SMALLINT_UNSIGNED_NULL) maps to None.
    Both sign branches of the original decoded identically (c_ushort),
    so a single path suffices.  nbytes/micro are unused, kept for
    interface uniformity.
    """
    return [None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele
            for ele in ctypes.cast(
                data, ctypes.POINTER(ctypes.c_ushort))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
    """Convert a C int column to a list of Python ints.

    The NULL sentinel (FieldType.C_INT_NULL) maps to None.  Both sign
    branches of the original decoded identically (c_int), so a single
    path suffices.  nbytes/micro are unused, kept for interface
    uniformity.
    """
    return [None if ele == FieldType.C_INT_NULL else ele
            for ele in ctypes.cast(
                data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
    """Convert a C unsigned int column to a list of Python ints.

    The NULL sentinel (FieldType.C_INT_UNSIGNED_NULL) maps to None.
    Both sign branches of the original decoded identically (c_uint), so
    a single path suffices.  nbytes/micro are unused, kept for interface
    uniformity.
    """
    return [None if ele == FieldType.C_INT_UNSIGNED_NULL else ele
            for ele in ctypes.cast(
                data, ctypes.POINTER(ctypes.c_uint))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
    """Convert a C bigint column to a list of Python ints.

    The NULL sentinel (FieldType.C_BIGINT_NULL) maps to None.  Both sign
    branches of the original decoded identically (c_int64), so a single
    path suffices.  nbytes/micro are unused, kept for interface
    uniformity.
    """
    return [None if ele == FieldType.C_BIGINT_NULL else ele
            for ele in ctypes.cast(
                data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_bigint_unsigned_to_python(
        data,
        num_of_rows,
        nbytes=None,
        micro=False):
    """Convert a C unsigned bigint column to a list of Python ints.

    The NULL sentinel (FieldType.C_BIGINT_UNSIGNED_NULL) maps to None.
    Both sign branches of the original decoded identically (c_uint64),
    so a single path suffices.  nbytes/micro are unused, kept for
    interface uniformity.
    """
    return [None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele
            for ele in ctypes.cast(
                data, ctypes.POINTER(ctypes.c_uint64))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C float row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [None if math.isnan(ele) else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [None if math.isnan(ele) else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C double row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [None if math.isnan(ele) else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [None if math.isnan(ele) else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
    """Convert a C binary (fixed-slot) column to a list of Python strings.

    Each row occupies an nbytes-wide slot; a row whose first byte equals
    FieldType.C_BINARY_NULL is treated as NULL (None).  Both sign
    branches of the original were byte-identical, so they are collapsed
    into one path.  micro is unused, kept for interface uniformity.
    """
    assert(nbytes is not None)
    return [None if ele.value[0:1] == FieldType.C_BINARY_NULL
            else ele.value.decode('utf-8')
            for ele in (ctypes.cast(
                data,
                ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
    """Function to convert C nchar row to python row

    Decodes abs(num_of_rows) nchar cells starting at address *data*;
    cells that fail to decode append None.  nbytes is the per-row slot
    width; micro is unused here.
    """
    assert(nbytes is not None)
    res = []
    for i in range(abs(num_of_rows)):
        try:
            if num_of_rows >= 0:
                # NOTE(review): this branch reads from `data` without
                # using the loop index i, so every iteration decodes the
                # same NUL-terminated string -- confirm against the C
                # layout whether this is intentional.
                tmpstr = ctypes.c_char_p(data)
                res.append(tmpstr.value.decode())
            else:
                # Negative counts: reinterpret each nbytes-wide slot as
                # wide characters (nbytes // 4 suggests a 4-byte wchar
                # layout -- TODO confirm on non-Linux platforms).
                res.append((ctypes.cast(data + nbytes * i,
                                        ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
        except ValueError:
            # Undecodable cell (UnicodeDecodeError is a ValueError).
            res.append(None)

    return res
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C binary row to python row
|
|
||||||
"""
|
|
||||||
assert(nbytes is not None)
|
|
||||||
res = []
|
|
||||||
if num_of_rows > 0:
|
|
||||||
for i in range(abs(num_of_rows)):
|
|
||||||
try:
|
|
||||||
rbyte = ctypes.cast(
|
|
||||||
data + nbytes * i,
|
|
||||||
ctypes.POINTER(
|
|
||||||
ctypes.c_short))[
|
|
||||||
:1].pop()
|
|
||||||
tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
|
|
||||||
res.append(tmpstr.value.decode()[0:rbyte])
|
|
||||||
except ValueError:
|
|
||||||
res.append(None)
|
|
||||||
else:
|
|
||||||
for i in range(abs(num_of_rows)):
|
|
||||||
try:
|
|
||||||
rbyte = ctypes.cast(
|
|
||||||
data + nbytes * i,
|
|
||||||
ctypes.POINTER(
|
|
||||||
ctypes.c_short))[
|
|
||||||
:1].pop()
|
|
||||||
tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
|
|
||||||
res.append(tmpstr.value.decode()[0:rbyte])
|
|
||||||
except ValueError:
|
|
||||||
res.append(None)
|
|
||||||
return res
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C nchar row to python row
|
|
||||||
"""
|
|
||||||
assert(nbytes is not None)
|
|
||||||
res = []
|
|
||||||
if num_of_rows >= 0:
|
|
||||||
for i in range(abs(num_of_rows)):
|
|
||||||
try:
|
|
||||||
tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
|
|
||||||
res.append(tmpstr.value.decode())
|
|
||||||
except ValueError:
|
|
||||||
res.append(None)
|
|
||||||
else:
|
|
||||||
for i in range(abs(num_of_rows)):
|
|
||||||
try:
|
|
||||||
res.append((ctypes.cast(data + nbytes * i + 2,
|
|
||||||
ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
|
|
||||||
except ValueError:
|
|
||||||
res.append(None)
|
|
||||||
return res
|
|
||||||
|
|
||||||
|
|
||||||
# Maps TDengine column type codes to the converter used for row-wise
# fetches (taos_fetch_row).
_CONVERT_FUNC = {
    FieldType.C_BOOL: _crow_bool_to_python,
    FieldType.C_TINYINT: _crow_tinyint_to_python,
    FieldType.C_SMALLINT: _crow_smallint_to_python,
    FieldType.C_INT: _crow_int_to_python,
    FieldType.C_BIGINT: _crow_bigint_to_python,
    FieldType.C_FLOAT: _crow_float_to_python,
    FieldType.C_DOUBLE: _crow_double_to_python,
    FieldType.C_BINARY: _crow_binary_to_python,
    FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
    FieldType.C_NCHAR: _crow_nchar_to_python,
    FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
    FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
    FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
    FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
}
|
|
||||||
|
|
||||||
# Maps TDengine column type codes to the converter used for block-wise
# fetches (taos_fetch_block).  Differs from _CONVERT_FUNC only for the
# variable-length BINARY and NCHAR types, which have a different block
# memory layout.
_CONVERT_FUNC_BLOCK = {
    FieldType.C_BOOL: _crow_bool_to_python,
    FieldType.C_TINYINT: _crow_tinyint_to_python,
    FieldType.C_SMALLINT: _crow_smallint_to_python,
    FieldType.C_INT: _crow_int_to_python,
    FieldType.C_BIGINT: _crow_bigint_to_python,
    FieldType.C_FLOAT: _crow_float_to_python,
    FieldType.C_DOUBLE: _crow_double_to_python,
    FieldType.C_BINARY: _crow_binary_to_python_block,
    FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
    FieldType.C_NCHAR: _crow_nchar_to_python_block,
    FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
    FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
    FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
    FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
}
|
|
||||||
|
|
||||||
# Corresponding TAOS_FIELD structure in C
|
|
||||||
|
|
||||||
|
|
||||||
class TaosField(ctypes.Structure):
    """ctypes mirror of the C TAOS_FIELD struct (column metadata)."""
    # name: column name buffer; type: 1-byte column type code;
    # bytes: declared column width in bytes.
    _fields_ = [('name', ctypes.c_char * 65),
                ('type', ctypes.c_char),
                ('bytes', ctypes.c_short)]
|
|
||||||
|
|
||||||
# C interface class
|
|
||||||
|
|
||||||
|
|
||||||
class CTaosInterface(object):
    """Thin ctypes bridge to the native TDengine client library (libtaos).

    Loads libtaos.so once at class-definition time and declares the C
    return types so ctypes does not truncate pointers on 64-bit systems.
    """

    libtaos = ctypes.CDLL('libtaos.so')

    # Declare native return types; without these ctypes assumes c_int.
    libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
    libtaos.taos_init.restype = None
    libtaos.taos_connect.restype = ctypes.c_void_p
    #libtaos.taos_use_result.restype = ctypes.c_void_p
    libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
    libtaos.taos_errstr.restype = ctypes.c_char_p
    libtaos.taos_subscribe.restype = ctypes.c_void_p
    libtaos.taos_consume.restype = ctypes.c_void_p
    libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
    libtaos.taos_free_result.restype = None
    libtaos.taos_errno.restype = ctypes.c_int
    libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)

    def __init__(self, config=None):
        '''
        Function to initialize the class
        @host     : str, hostname to connect
        @user     : str, username to connect to server
        @password : str, password to connect to server
        @db       : str, default db to use when log in
        @config   : str, config directory

        @rtype    : None
        '''
        if config is None:
            self._config = ctypes.c_char_p(None)
        else:
            try:
                self._config = ctypes.c_char_p(config.encode('utf-8'))
            except AttributeError:
                raise AttributeError("config is expected as a str")

        if config is not None:
            # 3 is the taos_options() option id for the config directory
            # -- NOTE(review): magic constant, confirm against taos.h.
            CTaosInterface.libtaos.taos_options(3, self._config)

        # One-time native library initialization.
        CTaosInterface.libtaos.taos_init()

    @property
    def config(self):
        """ Get current config
        """
        return self._config

    def connect(
            self,
            host=None,
            user="root",
            password="taosdata",
            db=None,
            port=0):
        '''
        Function to connect to server

        @rtype: c_void_p, TDengine handle

        Raises AttributeError/TypeError for wrongly-typed arguments and
        ConnectionError when the native connect returns NULL.
        '''
        # host (None is passed through as a NULL char pointer)
        try:
            _host = ctypes.c_char_p(host.encode(
                "utf-8")) if host is not None else ctypes.c_char_p(None)
        except AttributeError:
            raise AttributeError("host is expected as a str")

        # user
        try:
            _user = ctypes.c_char_p(user.encode("utf-8"))
        except AttributeError:
            raise AttributeError("user is expected as a str")

        # password
        try:
            _password = ctypes.c_char_p(password.encode("utf-8"))
        except AttributeError:
            raise AttributeError("password is expected as a str")

        # db
        try:
            _db = ctypes.c_char_p(
                db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
        except AttributeError:
            raise AttributeError("db is expected as a str")

        # port
        try:
            _port = ctypes.c_int(port)
        except TypeError:
            raise TypeError("port is expected as an int")

        connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
            _host, _user, _password, _db, _port))

        if connection.value is None:
            print('connect to TDengine failed')
            raise ConnectionError("connect to TDengine failed")
            # sys.exit(1)
        # else:
        #    print('connect to TDengine success')

        return connection

    @staticmethod
    def close(connection):
        '''Close the TDengine handle
        '''
        CTaosInterface.libtaos.taos_close(connection)
        #print('connection is closed')

    @staticmethod
    def query(connection, sql):
        '''Run SQL

        @sql: str, sql string to run

        @rtype: 0 on success and -1 on failure
        '''
        try:
            return CTaosInterface.libtaos.taos_query(
                connection, ctypes.c_char_p(sql.encode('utf-8')))
        except AttributeError:
            raise AttributeError("sql is expected as a string")
        # finally:
        #     CTaosInterface.libtaos.close(connection)

    @staticmethod
    def affectedRows(result):
        """The affected rows after runing query
        """
        return CTaosInterface.libtaos.taos_affected_rows(result)

    @staticmethod
    def subscribe(connection, restart, topic, sql, interval):
        """Create a subscription
         @restart boolean,
         @sql string, sql statement for data query, must be a 'select' statement.
         @topic string, name of this subscription
        """
        return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
            connection,
            1 if restart else 0,
            ctypes.c_char_p(topic.encode('utf-8')),
            ctypes.c_char_p(sql.encode('utf-8')),
            None,
            None,
            interval))

    @staticmethod
    def consume(sub):
        """Consume data of a subscription

        Returns a (result, fields) pair where fields is a list of
        {'name', 'bytes', 'type'} dicts describing the columns.
        """
        result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
        fields = []
        pfields = CTaosInterface.fetchFields(result)
        for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
            fields.append({'name': pfields[i].name.decode('utf-8'),
                           'bytes': pfields[i].bytes,
                           'type': ord(pfields[i].type)})
        return result, fields

    @staticmethod
    def unsubscribe(sub, keepProgress):
        """Cancel a subscription
        """
        CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)

    @staticmethod
    def useResult(result):
        '''Use result after calling self.query

        Returns the column metadata of *result* as a list of
        {'name', 'bytes', 'type'} dicts.
        '''
        fields = []
        pfields = CTaosInterface.fetchFields(result)
        for i in range(CTaosInterface.fieldsCount(result)):
            fields.append({'name': pfields[i].name.decode('utf-8'),
                           'bytes': pfields[i].bytes,
                           'type': ord(pfields[i].type)})

        return fields

    @staticmethod
    def fetchBlock(result, fields):
        # Fetch one block of rows and convert each column with the
        # block-wise converter table; returns (columns, row_count) or
        # (None, 0) at end of data.
        pblock = ctypes.c_void_p(0)
        num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
            result, ctypes.byref(pblock))
        if num_of_rows == 0:
            return None, 0
        # Timestamp precision flag forwarded to the converters.
        isMicro = (CTaosInterface.libtaos.taos_result_precision(
            result) == FieldType.C_TIMESTAMP_MICRO)
        blocks = [None] * len(fields)
        fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
        fieldLen = [
            ele for ele in ctypes.cast(
                fieldL, ctypes.POINTER(
                    ctypes.c_int))[
                :len(fields)]]
        for i in range(len(fields)):
            data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
            if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
                raise DatabaseError("Invalid data type returned from database")
            blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
                data, num_of_rows, fieldLen[i], isMicro)

        return blocks, abs(num_of_rows)

    @staticmethod
    def fetchRow(result, fields):
        # Fetch a single row and convert each column with the row-wise
        # converter table; returns (columns, 1) or (None, 0) at end.
        pblock = ctypes.c_void_p(0)
        pblock = CTaosInterface.libtaos.taos_fetch_row(result)
        if pblock:
            num_of_rows = 1
            isMicro = (CTaosInterface.libtaos.taos_result_precision(
                result) == FieldType.C_TIMESTAMP_MICRO)
            blocks = [None] * len(fields)
            fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
            fieldLen = [
                ele for ele in ctypes.cast(
                    fieldL, ctypes.POINTER(
                        ctypes.c_int))[
                    :len(fields)]]
            for i in range(len(fields)):
                data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
                if fields[i]['type'] not in _CONVERT_FUNC:
                    raise DatabaseError(
                        "Invalid data type returned from database")
                # A NULL column pointer means the cell is NULL.
                if data is None:
                    blocks[i] = [None]
                else:
                    blocks[i] = _CONVERT_FUNC[fields[i]['type']](
                        data, num_of_rows, fieldLen[i], isMicro)
        else:
            return None, 0
        return blocks, abs(num_of_rows)

    @staticmethod
    def freeResult(result):
        # Release the native result set and clear the Python-side handle.
        CTaosInterface.libtaos.taos_free_result(result)
        result.value = None

    @staticmethod
    def fieldsCount(result):
        return CTaosInterface.libtaos.taos_field_count(result)

    @staticmethod
    def fetchFields(result):
        return CTaosInterface.libtaos.taos_fetch_fields(result)

    # @staticmethod
    # def fetchRow(result, fields):
    #     l = []
    #     row = CTaosInterface.libtaos.taos_fetch_row(result)
    #     if not row:
    #         return None

    #     for i in range(len(fields)):
    #         l.append(CTaosInterface.getDataValue(
    #             row[i], fields[i]['type'], fields[i]['bytes']))

    #     return tuple(l)

    # @staticmethod
    # def getDataValue(data, dtype, byte):
    #     '''
    #     '''
    #     if not data:
    #         return None

    #     if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
    #         return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
    #     elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
    #         return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
    #     elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
    #         return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
    #     elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
    #         return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
    #     elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
    #         return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
    #     elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
    #         return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
    #     elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
    #         return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
    #     elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
    #         return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
    #     elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
    #         return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
    #     elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
    #         return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')

    @staticmethod
    def errno(result):
        """Return the error number.
        """
        return CTaosInterface.libtaos.taos_errno(result)

    @staticmethod
    def errStr(result):
        """Return the error styring
        """
        return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Manual smoke test: connect with default credentials, run a query,
    # fetch one block, print it, then release the result and connection.
    cinter = CTaosInterface()
    conn = cinter.connect()
    result = cinter.query(conn, 'show databases')

    print('Query Affected rows: {}'.format(cinter.affectedRows(result)))

    fields = CTaosInterface.useResult(result)

    data, num_of_rows = CTaosInterface.fetchBlock(result, fields)

    print(data)

    cinter.freeResult(result)
    cinter.close(conn)
|
|
||||||
|
|
@ -1,278 +0,0 @@
|
||||||
from .cinterface import CTaosInterface
|
|
||||||
from .error import *
|
|
||||||
from .constants import FieldType
|
|
||||||
|
|
||||||
|
|
||||||
class TDengineCursor(object):
    """Database cursor which is used to manage the context of a fetch operation.

    Attributes:
        .description: Read-only attribute consists of 7-item sequences:

            > name (mondatory)
            > type_code (mondatory)
            > display_size
            > internal_size
            > precision
            > scale
            > null_ok

            This attribute will be None for operations that do not return rows or
            if the cursor has not had an operation invoked via the .execute*() method yet.

        .rowcount:This read-only attribute specifies the number of rows that the last
            .execute*() produced (for DQL statements like SELECT) or affected
    """

    def __init__(self, connection=None):
        # Per-cursor state; reset by _reset_result() between executions.
        self._description = []
        self._rowcount = -1
        self._connection = None
        self._result = None          # native result handle from taos_query
        self._fields = None          # column metadata for the current result
        self._block = None           # current fetched block, as row tuples
        self._block_rows = -1
        self._block_iter = 0
        self._affected_rows = 0
        self._logfile = ""

        if connection is not None:
            self._connection = connection

    def __iter__(self):
        return self

    # NOTE(review): Python 2 iterator protocol -- Python 3 expects
    # __next__; confirm whether a __next__ alias exists elsewhere.
    def next(self):
        if self._result is None or self._fields is None:
            raise OperationalError("Invalid use of fetch iterator")

        # Refill the buffered block once it has been fully consumed.
        if self._block_rows <= self._block_iter:
            block, self._block_rows = CTaosInterface.fetchRow(
                self._result, self._fields)
            if self._block_rows == 0:
                raise StopIteration
            # Transpose column-major data into row tuples.
            self._block = list(map(tuple, zip(*block)))
            self._block_iter = 0

        data = self._block[self._block_iter]
        self._block_iter += 1

        return data

    @property
    def description(self):
        """Return the description of the object.
        """
        return self._description

    @property
    def rowcount(self):
        """Return the rowcount of the object
        """
        return self._rowcount

    @property
    def affected_rows(self):
        """Return the affected_rows of the object
        """
        return self._affected_rows

    def callproc(self, procname, *args):
        """Call a stored database procedure with the given name.

        Void functionality since no stored procedures.
        """
        pass

    def log(self, logfile):
        # Enable statement logging: every executed SQL is appended here.
        self._logfile = logfile

    def close(self):
        """Close the cursor.

        Returns False when the cursor is already detached from a
        connection, True after a successful close.
        """
        if self._connection is None:
            return False

        self._reset_result()
        self._connection = None

        return True

    def execute(self, operation, params=None):
        """Prepare and execute a database operation (query or command).

        Returns the affected row count for non-SELECT statements, or the
        native result handle (via _handle_result) for queries.  Raises
        ProgrammingError when the cursor is detached or the native call
        reports an error.
        """
        if not operation:
            return None

        if not self._connection:
            # TODO : change the exception raised here
            raise ProgrammingError("Cursor is not connected")

        self._reset_result()

        stmt = operation
        # NOTE(review): parameter binding is not implemented; params is
        # silently ignored.
        if params is not None:
            pass

        # global querySeqNum
        # querySeqNum += 1
        # localSeqNum = querySeqNum # avoid raice condition
        # print("   >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
        self._result = CTaosInterface.query(self._connection._conn, stmt)
        # print("   << Query ({}) Exec Done".format(localSeqNum))
        if (self._logfile):
            with open(self._logfile, "a") as logfile:
                logfile.write("%s;\n" % operation)

        errno = CTaosInterface.libtaos.taos_errno(self._result)
        if errno == 0:
            # Zero result columns means a DML/DDL statement.
            if CTaosInterface.fieldsCount(self._result) == 0:
                self._affected_rows += CTaosInterface.affectedRows(
                    self._result)
                return CTaosInterface.affectedRows(self._result)
            else:
                self._fields = CTaosInterface.useResult(
                    self._result)
                return self._handle_result()
        else:
            raise ProgrammingError(
                CTaosInterface.errStr(
                    self._result), errno)

    def executemany(self, operation, seq_of_parameters):
        """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
        """
        pass

    def fetchone(self):
        """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
        """
        pass

    def fetchmany(self):
        pass

    def istype(self, col, dataType):
        # Return True when column *col*'s type code matches the named
        # TDengine type; False otherwise.
        if (dataType.upper() == "BOOL"):
            if (self._description[col][1] == FieldType.C_BOOL):
                return True
        if (dataType.upper() == "TINYINT"):
            if (self._description[col][1] == FieldType.C_TINYINT):
                return True
        if (dataType.upper() == "TINYINT UNSIGNED"):
            if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
                return True
        if (dataType.upper() == "SMALLINT"):
            if (self._description[col][1] == FieldType.C_SMALLINT):
                return True
        if (dataType.upper() == "SMALLINT UNSIGNED"):
            if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
                return True
        if (dataType.upper() == "INT"):
            if (self._description[col][1] == FieldType.C_INT):
                return True
        if (dataType.upper() == "INT UNSIGNED"):
            if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
                return True
        if (dataType.upper() == "BIGINT"):
            if (self._description[col][1] == FieldType.C_BIGINT):
                return True
        if (dataType.upper() == "BIGINT UNSIGNED"):
            if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
                return True
        if (dataType.upper() == "FLOAT"):
            if (self._description[col][1] == FieldType.C_FLOAT):
                return True
        if (dataType.upper() == "DOUBLE"):
            if (self._description[col][1] == FieldType.C_DOUBLE):
                return True
        if (dataType.upper() == "BINARY"):
            if (self._description[col][1] == FieldType.C_BINARY):
                return True
        if (dataType.upper() == "TIMESTAMP"):
            if (self._description[col][1] == FieldType.C_TIMESTAMP):
                return True
        if (dataType.upper() == "NCHAR"):
            if (self._description[col][1] == FieldType.C_NCHAR):
                return True

        return False

    def fetchall_row(self):
        """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
        """
        if self._result is None or self._fields is None:
            raise OperationalError("Invalid use of fetchall")

        # Accumulate column-major data, then transpose to row tuples.
        buffer = [[] for i in range(len(self._fields))]
        self._rowcount = 0
        while True:
            block, num_of_fields = CTaosInterface.fetchRow(
                self._result, self._fields)
            errno = CTaosInterface.libtaos.taos_errno(self._result)
            if errno != 0:
                raise ProgrammingError(
                    CTaosInterface.errStr(
                        self._result), errno)
            if num_of_fields == 0:
                break
            self._rowcount += num_of_fields
            for i in range(len(self._fields)):
                buffer[i].extend(block[i])
        return list(map(tuple, zip(*buffer)))

    def fetchall(self):
        # Same as fetchall_row but pulls whole blocks per native call.
        if self._result is None or self._fields is None:
            raise OperationalError("Invalid use of fetchall")

        buffer = [[] for i in range(len(self._fields))]
        self._rowcount = 0
        while True:
            block, num_of_fields = CTaosInterface.fetchBlock(
                self._result, self._fields)
            errno = CTaosInterface.libtaos.taos_errno(self._result)
            if errno != 0:
                raise ProgrammingError(
                    CTaosInterface.errStr(
                        self._result), errno)
            if num_of_fields == 0:
                break
            self._rowcount += num_of_fields
            for i in range(len(self._fields)):
                buffer[i].extend(block[i])
        return list(map(tuple, zip(*buffer)))

    def nextset(self):
        """
        """
        pass

    def setinputsize(self, sizes):
        pass

    def setutputsize(self, size, column=None):
        pass

    def _reset_result(self):
        """Reset the result to unused version.
        """
        self._description = []
        self._rowcount = -1
        if self._result is not None:
            # Free the native result before dropping the reference.
            CTaosInterface.freeResult(self._result)
        self._result = None
        self._fields = None
        self._block = None
        self._block_rows = -1
        self._block_iter = 0
        self._affected_rows = 0

    def _handle_result(self):
        """Handle the return result from query.
        """
        # Build the DB-API description tuples from the column metadata.
        self._description = []
        for ele in self._fields:
            self._description.append(
                (ele['name'], ele['type'], None, None, None, None, False))

        return self._result
|
|
||||||
|
|
@ -0,0 +1 @@
|
||||||
|
../
|
||||||
|
|
@ -1,12 +0,0 @@
|
||||||
Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
|
||||||
|
|
||||||
This program is free software: you can use, redistribute, and/or modify
|
|
||||||
it under the terms of the GNU Affero General Public License, version 3
|
|
||||||
or later ("AGPL"), as published by the Free Software Foundation.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful, but WITHOUT
|
|
||||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU Affero General Public License
|
|
||||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
@ -1 +0,0 @@
|
||||||
# TDengine python client interface
|
|
||||||
|
|
@ -1,20 +0,0 @@
|
||||||
import setuptools
|
|
||||||
|
|
||||||
with open("README.md", "r") as fh:
|
|
||||||
long_description = fh.read()
|
|
||||||
|
|
||||||
setuptools.setup(
|
|
||||||
name="taos",
|
|
||||||
version="2.0.7",
|
|
||||||
author="Taosdata Inc.",
|
|
||||||
author_email="support@taosdata.com",
|
|
||||||
description="TDengine python client package",
|
|
||||||
long_description=long_description,
|
|
||||||
long_description_content_type="text/markdown",
|
|
||||||
url="https://github.com/pypa/sampleproject",
|
|
||||||
packages=setuptools.find_packages(),
|
|
||||||
classifiers=[
|
|
||||||
"Programming Language :: Python :: 3",
|
|
||||||
"Operating System :: Linux",
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
@ -1,24 +0,0 @@
|
||||||
|
|
||||||
from .connection import TDengineConnection
|
|
||||||
from .cursor import TDengineCursor
|
|
||||||
|
|
||||||
# Globals
|
|
||||||
threadsafety = 0
|
|
||||||
paramstyle = 'pyformat'
|
|
||||||
|
|
||||||
__all__ = ['connection', 'cursor']
|
|
||||||
|
|
||||||
|
|
||||||
def connect(*args, **kwargs):
|
|
||||||
""" Function to return a TDengine connector object
|
|
||||||
|
|
||||||
Current supporting keyword parameters:
|
|
||||||
@dsn: Data source name as string
|
|
||||||
@user: Username as string(optional)
|
|
||||||
@password: Password as string(optional)
|
|
||||||
@host: Hostname(optional)
|
|
||||||
@database: Database name(optional)
|
|
||||||
|
|
||||||
@rtype: TDengineConnector
|
|
||||||
"""
|
|
||||||
return TDengineConnection(*args, **kwargs)
|
|
||||||
|
|
@ -1,95 +0,0 @@
|
||||||
from .cursor import TDengineCursor
|
|
||||||
from .subscription import TDengineSubscription
|
|
||||||
from .cinterface import CTaosInterface
|
|
||||||
|
|
||||||
|
|
||||||
class TDengineConnection(object):
|
|
||||||
""" TDengine connection object
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
self._conn = None
|
|
||||||
self._host = None
|
|
||||||
self._user = "root"
|
|
||||||
self._password = "taosdata"
|
|
||||||
self._database = None
|
|
||||||
self._port = 0
|
|
||||||
self._config = None
|
|
||||||
self._chandle = None
|
|
||||||
|
|
||||||
self.config(**kwargs)
|
|
||||||
|
|
||||||
def config(self, **kwargs):
|
|
||||||
# host
|
|
||||||
if 'host' in kwargs:
|
|
||||||
self._host = kwargs['host']
|
|
||||||
|
|
||||||
# user
|
|
||||||
if 'user' in kwargs:
|
|
||||||
self._user = kwargs['user']
|
|
||||||
|
|
||||||
# password
|
|
||||||
if 'password' in kwargs:
|
|
||||||
self._password = kwargs['password']
|
|
||||||
|
|
||||||
# database
|
|
||||||
if 'database' in kwargs:
|
|
||||||
self._database = kwargs['database']
|
|
||||||
|
|
||||||
# port
|
|
||||||
if 'port' in kwargs:
|
|
||||||
self._port = kwargs['port']
|
|
||||||
|
|
||||||
# config
|
|
||||||
if 'config' in kwargs:
|
|
||||||
self._config = kwargs['config']
|
|
||||||
|
|
||||||
self._chandle = CTaosInterface(self._config)
|
|
||||||
self._conn = self._chandle.connect(
|
|
||||||
self._host,
|
|
||||||
self._user,
|
|
||||||
self._password,
|
|
||||||
self._database,
|
|
||||||
self._port)
|
|
||||||
|
|
||||||
def close(self):
|
|
||||||
"""Close current connection.
|
|
||||||
"""
|
|
||||||
return CTaosInterface.close(self._conn)
|
|
||||||
|
|
||||||
def subscribe(self, restart, topic, sql, interval):
|
|
||||||
"""Create a subscription.
|
|
||||||
"""
|
|
||||||
if self._conn is None:
|
|
||||||
return None
|
|
||||||
sub = CTaosInterface.subscribe(
|
|
||||||
self._conn, restart, topic, sql, interval)
|
|
||||||
return TDengineSubscription(sub)
|
|
||||||
|
|
||||||
def cursor(self):
|
|
||||||
"""Return a new Cursor object using the connection.
|
|
||||||
"""
|
|
||||||
return TDengineCursor(self)
|
|
||||||
|
|
||||||
def commit(self):
|
|
||||||
"""Commit any pending transaction to the database.
|
|
||||||
|
|
||||||
Since TDengine do not support transactions, the implement is void functionality.
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
def rollback(self):
|
|
||||||
"""Void functionality
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
def clear_result_set(self):
|
|
||||||
"""Clear unused result set on this connection.
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
conn = TDengineConnection(host='192.168.1.107')
|
|
||||||
conn.close()
|
|
||||||
print("Hello world")
|
|
||||||
|
|
@ -1,42 +0,0 @@
|
||||||
"""Constants in TDengine python
|
|
||||||
"""
|
|
||||||
|
|
||||||
from .dbapi import *
|
|
||||||
|
|
||||||
|
|
||||||
class FieldType(object):
|
|
||||||
"""TDengine Field Types
|
|
||||||
"""
|
|
||||||
# type_code
|
|
||||||
C_NULL = 0
|
|
||||||
C_BOOL = 1
|
|
||||||
C_TINYINT = 2
|
|
||||||
C_SMALLINT = 3
|
|
||||||
C_INT = 4
|
|
||||||
C_BIGINT = 5
|
|
||||||
C_FLOAT = 6
|
|
||||||
C_DOUBLE = 7
|
|
||||||
C_BINARY = 8
|
|
||||||
C_TIMESTAMP = 9
|
|
||||||
C_NCHAR = 10
|
|
||||||
C_TINYINT_UNSIGNED = 11
|
|
||||||
C_SMALLINT_UNSIGNED = 12
|
|
||||||
C_INT_UNSIGNED = 13
|
|
||||||
C_BIGINT_UNSIGNED = 14
|
|
||||||
# NULL value definition
|
|
||||||
# NOTE: These values should change according to C definition in tsdb.h
|
|
||||||
C_BOOL_NULL = 0x02
|
|
||||||
C_TINYINT_NULL = -128
|
|
||||||
C_TINYINT_UNSIGNED_NULL = 255
|
|
||||||
C_SMALLINT_NULL = -32768
|
|
||||||
C_SMALLINT_UNSIGNED_NULL = 65535
|
|
||||||
C_INT_NULL = -2147483648
|
|
||||||
C_INT_UNSIGNED_NULL = 4294967295
|
|
||||||
C_BIGINT_NULL = -9223372036854775808
|
|
||||||
C_BIGINT_UNSIGNED_NULL = 18446744073709551615
|
|
||||||
C_FLOAT_NULL = float('nan')
|
|
||||||
C_DOUBLE_NULL = float('nan')
|
|
||||||
C_BINARY_NULL = bytearray([int('0xff', 16)])
|
|
||||||
# Timestamp precision definition
|
|
||||||
C_TIMESTAMP_MILLI = 0
|
|
||||||
C_TIMESTAMP_MICRO = 1
|
|
||||||
|
|
@ -1,44 +0,0 @@
|
||||||
"""Type Objects and Constructors.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import time
|
|
||||||
import datetime
|
|
||||||
|
|
||||||
|
|
||||||
class DBAPITypeObject(object):
|
|
||||||
def __init__(self, *values):
|
|
||||||
self.values = values
|
|
||||||
|
|
||||||
def __com__(self, other):
|
|
||||||
if other in self.values:
|
|
||||||
return 0
|
|
||||||
if other < self.values:
|
|
||||||
return 1
|
|
||||||
else:
|
|
||||||
return -1
|
|
||||||
|
|
||||||
|
|
||||||
Date = datetime.date
|
|
||||||
Time = datetime.time
|
|
||||||
Timestamp = datetime.datetime
|
|
||||||
|
|
||||||
|
|
||||||
def DataFromTicks(ticks):
|
|
||||||
return Date(*time.localtime(ticks)[:3])
|
|
||||||
|
|
||||||
|
|
||||||
def TimeFromTicks(ticks):
|
|
||||||
return Time(*time.localtime(ticks)[3:6])
|
|
||||||
|
|
||||||
|
|
||||||
def TimestampFromTicks(ticks):
|
|
||||||
return Timestamp(*time.localtime(ticks)[:6])
|
|
||||||
|
|
||||||
|
|
||||||
Binary = bytes
|
|
||||||
|
|
||||||
# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
|
|
||||||
# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
|
|
||||||
# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
|
|
||||||
# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
|
|
||||||
# ROWID = DBAPITypeObject()
|
|
||||||
|
|
@ -1,66 +0,0 @@
|
||||||
"""Python exceptions
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
class Error(Exception):
|
|
||||||
def __init__(self, msg=None, errno=None):
|
|
||||||
self.msg = msg
|
|
||||||
self._full_msg = self.msg
|
|
||||||
self.errno = errno
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
return self._full_msg
|
|
||||||
|
|
||||||
|
|
||||||
class Warning(Exception):
|
|
||||||
"""Exception raised for important warnings like data truncations while inserting.
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class InterfaceError(Error):
|
|
||||||
"""Exception raised for errors that are related to the database interface rather than the database itself.
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class DatabaseError(Error):
|
|
||||||
"""Exception raised for errors that are related to the database.
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class DataError(DatabaseError):
|
|
||||||
"""Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class OperationalError(DatabaseError):
|
|
||||||
"""Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class IntegrityError(DatabaseError):
|
|
||||||
"""Exception raised when the relational integrity of the database is affected.
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class InternalError(DatabaseError):
|
|
||||||
"""Exception raised when the database encounters an internal error.
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class ProgrammingError(DatabaseError):
|
|
||||||
"""Exception raised for programming errors.
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class NotSupportedError(DatabaseError):
|
|
||||||
"""Exception raised in case a method or database API was used which is not supported by the database,.
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
@ -1,57 +0,0 @@
|
||||||
from .cinterface import CTaosInterface
|
|
||||||
from .error import *
|
|
||||||
|
|
||||||
|
|
||||||
class TDengineSubscription(object):
|
|
||||||
"""TDengine subscription object
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, sub):
|
|
||||||
self._sub = sub
|
|
||||||
|
|
||||||
def consume(self):
|
|
||||||
"""Consume rows of a subscription
|
|
||||||
"""
|
|
||||||
if self._sub is None:
|
|
||||||
raise OperationalError("Invalid use of consume")
|
|
||||||
|
|
||||||
result, fields = CTaosInterface.consume(self._sub)
|
|
||||||
buffer = [[] for i in range(len(fields))]
|
|
||||||
while True:
|
|
||||||
block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
|
|
||||||
if num_of_fields == 0:
|
|
||||||
break
|
|
||||||
for i in range(len(fields)):
|
|
||||||
buffer[i].extend(block[i])
|
|
||||||
|
|
||||||
self.fields = fields
|
|
||||||
return list(map(tuple, zip(*buffer)))
|
|
||||||
|
|
||||||
def close(self, keepProgress=True):
|
|
||||||
"""Close the Subscription.
|
|
||||||
"""
|
|
||||||
if self._sub is None:
|
|
||||||
return False
|
|
||||||
|
|
||||||
CTaosInterface.unsubscribe(self._sub, keepProgress)
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
from .connection import TDengineConnection
|
|
||||||
conn = TDengineConnection(
|
|
||||||
host="127.0.0.1",
|
|
||||||
user="root",
|
|
||||||
password="taosdata",
|
|
||||||
database="test")
|
|
||||||
|
|
||||||
# Generate a cursor object to run SQL commands
|
|
||||||
sub = conn.subscribe(True, "test", "select * from meters;", 1000)
|
|
||||||
|
|
||||||
for i in range(0, 10):
|
|
||||||
data = sub.consume()
|
|
||||||
for d in data:
|
|
||||||
print(d)
|
|
||||||
|
|
||||||
sub.close()
|
|
||||||
conn.close()
|
|
||||||
|
|
@ -0,0 +1 @@
|
||||||
|
../
|
||||||
|
|
@ -1,12 +0,0 @@
|
||||||
Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
|
||||||
|
|
||||||
This program is free software: you can use, redistribute, and/or modify
|
|
||||||
it under the terms of the GNU Affero General Public License, version 3
|
|
||||||
or later ("AGPL"), as published by the Free Software Foundation.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful, but WITHOUT
|
|
||||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU Affero General Public License
|
|
||||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
@ -1 +0,0 @@
|
||||||
# TDengine python client interface
|
|
||||||
|
|
@ -1,20 +0,0 @@
|
||||||
import setuptools
|
|
||||||
|
|
||||||
with open("README.md", "r") as fh:
|
|
||||||
long_description = fh.read()
|
|
||||||
|
|
||||||
setuptools.setup(
|
|
||||||
name="taos",
|
|
||||||
version="2.0.7",
|
|
||||||
author="Taosdata Inc.",
|
|
||||||
author_email="support@taosdata.com",
|
|
||||||
description="TDengine python client package",
|
|
||||||
long_description=long_description,
|
|
||||||
long_description_content_type="text/markdown",
|
|
||||||
url="https://github.com/pypa/sampleproject",
|
|
||||||
packages=setuptools.find_packages(),
|
|
||||||
classifiers=[
|
|
||||||
"Programming Language :: Python :: 3",
|
|
||||||
"Operating System :: MacOS X",
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
@ -1,24 +0,0 @@
|
||||||
|
|
||||||
from .connection import TDengineConnection
|
|
||||||
from .cursor import TDengineCursor
|
|
||||||
|
|
||||||
# Globals
|
|
||||||
threadsafety = 0
|
|
||||||
paramstyle = 'pyformat'
|
|
||||||
|
|
||||||
__all__ = ['connection', 'cursor']
|
|
||||||
|
|
||||||
|
|
||||||
def connect(*args, **kwargs):
|
|
||||||
""" Function to return a TDengine connector object
|
|
||||||
|
|
||||||
Current supporting keyword parameters:
|
|
||||||
@dsn: Data source name as string
|
|
||||||
@user: Username as string(optional)
|
|
||||||
@password: Password as string(optional)
|
|
||||||
@host: Hostname(optional)
|
|
||||||
@database: Database name(optional)
|
|
||||||
|
|
||||||
@rtype: TDengineConnector
|
|
||||||
"""
|
|
||||||
return TDengineConnection(*args, **kwargs)
|
|
||||||
|
|
@ -1,642 +0,0 @@
|
||||||
import ctypes
|
|
||||||
from .constants import FieldType
|
|
||||||
from .error import *
|
|
||||||
import math
|
|
||||||
import datetime
|
|
||||||
|
|
||||||
|
|
||||||
def _convert_millisecond_to_datetime(milli):
|
|
||||||
return datetime.datetime.fromtimestamp(milli / 1000.0)
|
|
||||||
|
|
||||||
|
|
||||||
def _convert_microsecond_to_datetime(micro):
|
|
||||||
return datetime.datetime.fromtimestamp(micro / 1000000.0)
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C bool row to python row
|
|
||||||
"""
|
|
||||||
_timestamp_converter = _convert_millisecond_to_datetime
|
|
||||||
if micro:
|
|
||||||
_timestamp_converter = _convert_microsecond_to_datetime
|
|
||||||
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return list(map(_timestamp_converter, ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
|
||||||
else:
|
|
||||||
return list(map(_timestamp_converter, ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C bool row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_byte))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_bool))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C tinyint row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_tinyint_unsigned_to_python(
|
|
||||||
data,
|
|
||||||
num_of_rows,
|
|
||||||
nbytes=None,
|
|
||||||
micro=False):
|
|
||||||
"""Function to convert C tinyint row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_ubyte))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_ubyte))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C smallint row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_short))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_short))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_smallint_unsigned_to_python(
|
|
||||||
data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C smallint row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_ushort))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_ushort))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C int row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C int row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_uint))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_uint))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C bigint row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_bigint_unsigned_to_python(
|
|
||||||
data,
|
|
||||||
num_of_rows,
|
|
||||||
nbytes=None,
|
|
||||||
micro=False):
|
|
||||||
"""Function to convert C bigint row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_uint64))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [
|
|
||||||
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(
|
|
||||||
ctypes.c_uint64))[
|
|
||||||
:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C float row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [None if math.isnan(ele) else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [None if math.isnan(ele) else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C double row to python row
|
|
||||||
"""
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [None if math.isnan(ele) else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [None if math.isnan(ele) else ele for ele in ctypes.cast(
|
|
||||||
data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C binary row to python row
|
|
||||||
"""
|
|
||||||
assert(nbytes is not None)
|
|
||||||
if num_of_rows > 0:
|
|
||||||
return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
|
|
||||||
'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
|
|
||||||
else:
|
|
||||||
return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
|
|
||||||
'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C nchar row to python row
|
|
||||||
"""
|
|
||||||
assert(nbytes is not None)
|
|
||||||
res = []
|
|
||||||
for i in range(abs(num_of_rows)):
|
|
||||||
try:
|
|
||||||
if num_of_rows >= 0:
|
|
||||||
tmpstr = ctypes.c_char_p(data)
|
|
||||||
res.append(tmpstr.value.decode())
|
|
||||||
else:
|
|
||||||
res.append((ctypes.cast(data + nbytes * i,
|
|
||||||
ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
|
|
||||||
except ValueError:
|
|
||||||
res.append(None)
|
|
||||||
|
|
||||||
return res
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C binary row to python row
|
|
||||||
"""
|
|
||||||
assert(nbytes is not None)
|
|
||||||
res = []
|
|
||||||
if num_of_rows > 0:
|
|
||||||
for i in range(abs(num_of_rows)):
|
|
||||||
try:
|
|
||||||
rbyte = ctypes.cast(
|
|
||||||
data + nbytes * i,
|
|
||||||
ctypes.POINTER(
|
|
||||||
ctypes.c_short))[
|
|
||||||
:1].pop()
|
|
||||||
tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
|
|
||||||
res.append(tmpstr.value.decode()[0:rbyte])
|
|
||||||
except ValueError:
|
|
||||||
res.append(None)
|
|
||||||
else:
|
|
||||||
for i in range(abs(num_of_rows)):
|
|
||||||
try:
|
|
||||||
rbyte = ctypes.cast(
|
|
||||||
data + nbytes * i,
|
|
||||||
ctypes.POINTER(
|
|
||||||
ctypes.c_short))[
|
|
||||||
:1].pop()
|
|
||||||
tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
|
|
||||||
res.append(tmpstr.value.decode()[0:rbyte])
|
|
||||||
except ValueError:
|
|
||||||
res.append(None)
|
|
||||||
return res
|
|
||||||
|
|
||||||
|
|
||||||
def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
|
|
||||||
"""Function to convert C nchar row to python row
|
|
||||||
"""
|
|
||||||
assert(nbytes is not None)
|
|
||||||
res = []
|
|
||||||
if num_of_rows >= 0:
|
|
||||||
for i in range(abs(num_of_rows)):
|
|
||||||
try:
|
|
||||||
tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
|
|
||||||
res.append(tmpstr.value.decode())
|
|
||||||
except ValueError:
|
|
||||||
res.append(None)
|
|
||||||
else:
|
|
||||||
for i in range(abs(num_of_rows)):
|
|
||||||
try:
|
|
||||||
res.append((ctypes.cast(data + nbytes * i + 2,
|
|
||||||
ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
|
|
||||||
except ValueError:
|
|
||||||
res.append(None)
|
|
||||||
return res
|
|
||||||
|
|
||||||
|
|
||||||
_CONVERT_FUNC = {
|
|
||||||
FieldType.C_BOOL: _crow_bool_to_python,
|
|
||||||
FieldType.C_TINYINT: _crow_tinyint_to_python,
|
|
||||||
FieldType.C_SMALLINT: _crow_smallint_to_python,
|
|
||||||
FieldType.C_INT: _crow_int_to_python,
|
|
||||||
FieldType.C_BIGINT: _crow_bigint_to_python,
|
|
||||||
FieldType.C_FLOAT: _crow_float_to_python,
|
|
||||||
FieldType.C_DOUBLE: _crow_double_to_python,
|
|
||||||
FieldType.C_BINARY: _crow_binary_to_python,
|
|
||||||
FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
|
|
||||||
FieldType.C_NCHAR: _crow_nchar_to_python,
|
|
||||||
FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
|
|
||||||
FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
|
|
||||||
FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
|
|
||||||
FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
|
|
||||||
}
|
|
||||||
|
|
||||||
_CONVERT_FUNC_BLOCK = {
|
|
||||||
FieldType.C_BOOL: _crow_bool_to_python,
|
|
||||||
FieldType.C_TINYINT: _crow_tinyint_to_python,
|
|
||||||
FieldType.C_SMALLINT: _crow_smallint_to_python,
|
|
||||||
FieldType.C_INT: _crow_int_to_python,
|
|
||||||
FieldType.C_BIGINT: _crow_bigint_to_python,
|
|
||||||
FieldType.C_FLOAT: _crow_float_to_python,
|
|
||||||
FieldType.C_DOUBLE: _crow_double_to_python,
|
|
||||||
FieldType.C_BINARY: _crow_binary_to_python_block,
|
|
||||||
FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
|
|
||||||
FieldType.C_NCHAR: _crow_nchar_to_python_block,
|
|
||||||
FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
|
|
||||||
FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
|
|
||||||
FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
|
|
||||||
FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
|
|
||||||
}
|
|
||||||
|
|
||||||
# Corresponding TAOS_FIELD structure in C
|
|
||||||
|
|
||||||
|
|
||||||
class TaosField(ctypes.Structure):
|
|
||||||
_fields_ = [('name', ctypes.c_char * 65),
|
|
||||||
('type', ctypes.c_char),
|
|
||||||
('bytes', ctypes.c_short)]
|
|
||||||
|
|
||||||
# C interface class
|
|
||||||
|
|
||||||
|
|
||||||
class CTaosInterface(object):
|
|
||||||
|
|
||||||
libtaos = ctypes.CDLL('libtaos.dylib')
|
|
||||||
|
|
||||||
libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
|
|
||||||
libtaos.taos_init.restype = None
|
|
||||||
libtaos.taos_connect.restype = ctypes.c_void_p
|
|
||||||
#libtaos.taos_use_result.restype = ctypes.c_void_p
|
|
||||||
libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
|
|
||||||
libtaos.taos_errstr.restype = ctypes.c_char_p
|
|
||||||
libtaos.taos_subscribe.restype = ctypes.c_void_p
|
|
||||||
libtaos.taos_consume.restype = ctypes.c_void_p
|
|
||||||
libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
|
|
||||||
libtaos.taos_free_result.restype = None
|
|
||||||
libtaos.taos_errno.restype = ctypes.c_int
|
|
||||||
libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
|
|
||||||
|
|
||||||
def __init__(self, config=None):
|
|
||||||
'''
|
|
||||||
Function to initialize the class
|
|
||||||
@host : str, hostname to connect
|
|
||||||
@user : str, username to connect to server
|
|
||||||
@password : str, password to connect to server
|
|
||||||
@db : str, default db to use when log in
|
|
||||||
@config : str, config directory
|
|
||||||
|
|
||||||
@rtype : None
|
|
||||||
'''
|
|
||||||
if config is None:
|
|
||||||
self._config = ctypes.c_char_p(None)
|
|
||||||
else:
|
|
||||||
try:
|
|
||||||
self._config = ctypes.c_char_p(config.encode('utf-8'))
|
|
||||||
except AttributeError:
|
|
||||||
raise AttributeError("config is expected as a str")
|
|
||||||
|
|
||||||
if config is not None:
|
|
||||||
CTaosInterface.libtaos.taos_options(3, self._config)
|
|
||||||
|
|
||||||
CTaosInterface.libtaos.taos_init()
|
|
||||||
|
|
||||||
@property
|
|
||||||
def config(self):
|
|
||||||
""" Get current config
|
|
||||||
"""
|
|
||||||
return self._config
|
|
||||||
|
|
||||||
def connect(
|
|
||||||
self,
|
|
||||||
host=None,
|
|
||||||
user="root",
|
|
||||||
password="taosdata",
|
|
||||||
db=None,
|
|
||||||
port=0):
|
|
||||||
'''
|
|
||||||
Function to connect to server
|
|
||||||
|
|
||||||
@rtype: c_void_p, TDengine handle
|
|
||||||
'''
|
|
||||||
# host
|
|
||||||
try:
|
|
||||||
_host = ctypes.c_char_p(host.encode(
|
|
||||||
"utf-8")) if host is not None else ctypes.c_char_p(None)
|
|
||||||
except AttributeError:
|
|
||||||
raise AttributeError("host is expected as a str")
|
|
||||||
|
|
||||||
# user
|
|
||||||
try:
|
|
||||||
_user = ctypes.c_char_p(user.encode("utf-8"))
|
|
||||||
except AttributeError:
|
|
||||||
raise AttributeError("user is expected as a str")
|
|
||||||
|
|
||||||
# password
|
|
||||||
try:
|
|
||||||
_password = ctypes.c_char_p(password.encode("utf-8"))
|
|
||||||
except AttributeError:
|
|
||||||
raise AttributeError("password is expected as a str")
|
|
||||||
|
|
||||||
# db
|
|
||||||
try:
|
|
||||||
_db = ctypes.c_char_p(
|
|
||||||
db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
|
|
||||||
except AttributeError:
|
|
||||||
raise AttributeError("db is expected as a str")
|
|
||||||
|
|
||||||
# port
|
|
||||||
try:
|
|
||||||
_port = ctypes.c_int(port)
|
|
||||||
except TypeError:
|
|
||||||
raise TypeError("port is expected as an int")
|
|
||||||
|
|
||||||
connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
|
|
||||||
_host, _user, _password, _db, _port))
|
|
||||||
|
|
||||||
if connection.value is None:
|
|
||||||
print('connect to TDengine failed')
|
|
||||||
raise ConnectionError("connect to TDengine failed")
|
|
||||||
# sys.exit(1)
|
|
||||||
# else:
|
|
||||||
# print('connect to TDengine success')
|
|
||||||
|
|
||||||
return connection
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def close(connection):
|
|
||||||
'''Close the TDengine handle
|
|
||||||
'''
|
|
||||||
CTaosInterface.libtaos.taos_close(connection)
|
|
||||||
#print('connection is closed')
|
|
||||||
|
|
||||||
@staticmethod
def query(connection, sql):
    '''Run SQL

    @sql: str, sql string to run

    @rtype: 0 on success and -1 on failure
    @raises AttributeError: when *sql* is not a str.
    '''
    try:
        # Encode first, so that only a bad *sql* argument — and not an
        # unrelated AttributeError (e.g. a missing symbol on libtaos)
        # raised by the call below — gets reported as a bad sql string.
        _sql = ctypes.c_char_p(sql.encode('utf-8'))
    except AttributeError:
        raise AttributeError("sql is expected as a string")
    return CTaosInterface.libtaos.taos_query(connection, _sql)
|
|
||||||
|
|
||||||
@staticmethod
def affectedRows(result):
    """Number of rows affected by the query that produced *result*."""
    lib = CTaosInterface.libtaos
    return lib.taos_affected_rows(result)
|
|
||||||
|
|
||||||
@staticmethod
def subscribe(connection, restart, topic, sql, interval):
    """Create a subscription.

    @restart: boolean, restart the subscription from scratch when truthy.
    @topic: string, name of this subscription.
    @sql: string, sql statement for data query, must be a 'select' statement.
    @interval: polling interval handed to the C layer.
    """
    restart_flag = 1 if restart else 0
    topic_p = ctypes.c_char_p(topic.encode('utf-8'))
    sql_p = ctypes.c_char_p(sql.encode('utf-8'))
    handle = CTaosInterface.libtaos.taos_subscribe(
        connection, restart_flag, topic_p, sql_p, None, None, interval)
    return ctypes.c_void_p(handle)
|
|
||||||
|
|
||||||
@staticmethod
def consume(sub):
    """Consume data of a subscription.

    Returns (result_handle, fields), where *fields* is a list of
    {'name', 'bytes', 'type'} dicts describing the result columns.
    """
    result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
    pfields = CTaosInterface.fetchFields(result)
    column_count = CTaosInterface.libtaos.taos_num_fields(result)
    fields = [
        {
            'name': pfields[idx].name.decode('utf-8'),
            'bytes': pfields[idx].bytes,
            'type': ord(pfields[idx].type),
        }
        for idx in range(column_count)
    ]
    return result, fields
|
|
||||||
|
|
||||||
@staticmethod
def unsubscribe(sub, keepProgress):
    """Cancel a subscription.

    @keepProgress: truthy to retain the consumption progress.
    """
    progress_flag = 1 if keepProgress else 0
    CTaosInterface.libtaos.taos_unsubscribe(sub, progress_flag)
|
|
||||||
|
|
||||||
@staticmethod
def useResult(result):
    '''Use result after calling self.query

    Returns a list of {'name', 'bytes', 'type'} dicts, one per column.
    '''
    pfields = CTaosInterface.fetchFields(result)
    column_count = CTaosInterface.fieldsCount(result)
    return [
        {
            'name': pfields[idx].name.decode('utf-8'),
            'bytes': pfields[idx].bytes,
            'type': ord(pfields[idx].type),
        }
        for idx in range(column_count)
    ]
|
|
||||||
|
|
||||||
@staticmethod
def fetchBlock(result, fields):
    """Fetch one block of rows from *result*.

    @fields: column descriptions as returned by useResult().
    Returns (per-column value lists, row count), or (None, 0) when no
    more data is available.
    """
    pblock = ctypes.c_void_p(0)
    num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
        result, ctypes.byref(pblock))
    if num_of_rows == 0:
        return None, 0

    # Result precision decides how timestamp columns are decoded.
    isMicro = (CTaosInterface.libtaos.taos_result_precision(result)
               == FieldType.C_TIMESTAMP_MICRO)
    raw_lengths = CTaosInterface.libtaos.taos_fetch_lengths(result)
    # Slicing a ctypes pointer already yields a plain Python list.
    fieldLen = ctypes.cast(
        raw_lengths, ctypes.POINTER(ctypes.c_int))[:len(fields)]

    blocks = [None] * len(fields)
    col_data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))
    for col, field in enumerate(fields):
        if field['type'] not in _CONVERT_FUNC_BLOCK:
            raise DatabaseError("Invalid data type returned from database")
        blocks[col] = _CONVERT_FUNC_BLOCK[field['type']](
            col_data[col], num_of_rows, fieldLen[col], isMicro)

    return blocks, abs(num_of_rows)
|
|
||||||
|
|
||||||
@staticmethod
def fetchRow(result, fields):
    """Fetch a single row from *result*.

    @fields: column descriptions as returned by useResult().
    Returns (per-column single-element lists, 1), or (None, 0) when no
    more rows are available.
    @raises DatabaseError: on a column type with no converter.
    """
    # NOTE: the previous version pre-assigned pblock = ctypes.c_void_p(0)
    # and immediately overwrote it — dead code, removed.
    pblock = CTaosInterface.libtaos.taos_fetch_row(result)
    if not pblock:
        # No more rows to fetch.
        return None, 0

    num_of_rows = 1
    # Result precision decides how timestamp columns are decoded.
    isMicro = (CTaosInterface.libtaos.taos_result_precision(result)
               == FieldType.C_TIMESTAMP_MICRO)
    raw_lengths = CTaosInterface.libtaos.taos_fetch_lengths(result)
    # Slicing a ctypes pointer already yields a plain Python list.
    fieldLen = ctypes.cast(
        raw_lengths, ctypes.POINTER(ctypes.c_int))[:len(fields)]

    blocks = [None] * len(fields)
    row_data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))
    for i, field in enumerate(fields):
        if field['type'] not in _CONVERT_FUNC:
            raise DatabaseError(
                "Invalid data type returned from database")
        data = row_data[i]
        if data is None:
            # NULL column value.
            blocks[i] = [None]
        else:
            blocks[i] = _CONVERT_FUNC[field['type']](
                data, num_of_rows, fieldLen[i], isMicro)
    return blocks, abs(num_of_rows)
|
|
||||||
|
|
||||||
@staticmethod
def freeResult(result):
    """Free the C result set and invalidate the Python-side handle."""
    CTaosInterface.libtaos.taos_free_result(result)
    result.value = None
|
|
||||||
|
|
||||||
@staticmethod
def fieldsCount(result):
    """Number of columns in *result*."""
    lib = CTaosInterface.libtaos
    return lib.taos_field_count(result)
|
|
||||||
|
|
||||||
@staticmethod
def fetchFields(result):
    """Pointer to the C field descriptors of *result*."""
    lib = CTaosInterface.libtaos
    return lib.taos_fetch_fields(result)
|
|
||||||
|
|
||||||
# @staticmethod
|
|
||||||
# def fetchRow(result, fields):
|
|
||||||
# l = []
|
|
||||||
# row = CTaosInterface.libtaos.taos_fetch_row(result)
|
|
||||||
# if not row:
|
|
||||||
# return None
|
|
||||||
|
|
||||||
# for i in range(len(fields)):
|
|
||||||
# l.append(CTaosInterface.getDataValue(
|
|
||||||
# row[i], fields[i]['type'], fields[i]['bytes']))
|
|
||||||
|
|
||||||
# return tuple(l)
|
|
||||||
|
|
||||||
# @staticmethod
|
|
||||||
# def getDataValue(data, dtype, byte):
|
|
||||||
# '''
|
|
||||||
# '''
|
|
||||||
# if not data:
|
|
||||||
# return None
|
|
||||||
|
|
||||||
# if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
|
|
||||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
|
|
||||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
|
|
||||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
|
|
||||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
|
|
||||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
|
|
||||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
|
|
||||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
|
|
||||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
|
|
||||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
|
|
||||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
|
|
||||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
|
|
||||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
|
|
||||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
|
|
||||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
|
|
||||||
# return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
|
|
||||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
|
|
||||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
|
|
||||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
|
|
||||||
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
|
|
||||||
|
|
||||||
@staticmethod
def errno(result):
    """Return the error number.
    """
    lib = CTaosInterface.libtaos
    return lib.taos_errno(result)
|
|
||||||
|
|
||||||
@staticmethod
def errStr(result):
    """Return the error string for *result*, decoded as UTF-8.
    """
    raw = CTaosInterface.libtaos.taos_errstr(result)
    return raw.decode('utf-8')
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Smoke test: connect, run a query, print the first fetched block.
    interface = CTaosInterface()
    connection = interface.connect()
    result = interface.query(connection, 'show databases')

    print('Query Affected rows: {}'.format(interface.affectedRows(result)))

    fields = CTaosInterface.useResult(result)
    data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
    print(data)

    interface.freeResult(result)
    interface.close(connection)
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue