[TD-5797]<fix> merge develop
commit 90aba21f27

.drone.yml (124 lines changed)
@@ -15,7 +15,7 @@ steps:
   - mkdir debug
   - cd debug
   - cmake ..
-  - make
+  - make -j4
 trigger:
   event:
   - pull_request
@@ -25,7 +25,32 @@ steps:
     - master
 ---
 kind: pipeline
-name: test_arm64
+name: test_arm64_bionic
 
 platform:
   os: linux
+  arch: arm64
+
+steps:
+- name: build
+  image: arm64v8/ubuntu:bionic
+  commands:
+  - apt-get update
+  - apt-get install -y cmake build-essential
+  - mkdir debug
+  - cd debug
+  - cmake .. -DCPUTYPE=aarch64 > /dev/null
+  - make -j4
+trigger:
+  event:
+  - pull_request
+  when:
+    branch:
+    - develop
+    - master
+    - 2.0
+---
+kind: pipeline
+name: test_arm64_focal
+
+platform:
+  os: linux
@@ -33,14 +58,15 @@ platform:
 
 steps:
 - name: build
-  image: gcc
+  image: arm64v8/ubuntu:focal
   commands:
-  - apt-get update
-  - apt-get install -y cmake build-essential
+  - echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
+  - apt-get update
+  - apt-get install -y -qq cmake build-essential
   - mkdir debug
   - cd debug
   - cmake .. -DCPUTYPE=aarch64 > /dev/null
-  - make
+  - make -j4
 trigger:
   event:
   - pull_request
@@ -48,9 +74,60 @@ steps:
     branch:
     - develop
     - master
+    - 2.0
 ---
 kind: pipeline
-name: test_arm
+name: test_arm64_centos7
 
 platform:
   os: linux
+  arch: arm64
+
+steps:
+- name: build
+  image: arm64v8/centos:7
+  commands:
+  - yum install -y gcc gcc-c++ make cmake git
+  - mkdir debug
+  - cd debug
+  - cmake .. -DCPUTYPE=aarch64 > /dev/null
+  - make -j4
+trigger:
+  event:
+  - pull_request
+  when:
+    branch:
+    - develop
+    - master
+    - 2.0
+---
+kind: pipeline
+name: test_arm64_centos8
+
+platform:
+  os: linux
+  arch: arm64
+
+steps:
+- name: build
+  image: arm64v8/centos:8
+  commands:
+  - dnf install -y gcc gcc-c++ make cmake epel-release git libarchive
+  - mkdir debug
+  - cd debug
+  - cmake .. -DCPUTYPE=aarch64 > /dev/null
+  - make -j4
+trigger:
+  event:
+  - pull_request
+  when:
+    branch:
+    - develop
+    - master
+    - 2.0
+---
+kind: pipeline
+name: test_arm_bionic
+
+platform:
+  os: linux
@@ -65,7 +142,7 @@ steps:
   - mkdir debug
   - cd debug
   - cmake .. -DCPUTYPE=aarch32 > /dev/null
-  - make
+  - make -j4
 trigger:
   event:
   - pull_request
@@ -73,7 +150,6 @@ steps:
     branch:
     - develop
     - master
-
 ---
 kind: pipeline
 name: build_trusty
@@ -92,7 +168,7 @@ steps:
   - mkdir debug
   - cd debug
   - cmake ..
-  - make
+  - make -j4
 trigger:
   event:
   - pull_request
@@ -117,7 +193,7 @@ steps:
   - mkdir debug
   - cd debug
   - cmake ..
-  - make
+  - make -j4
 trigger:
   event:
   - pull_request
@@ -142,7 +218,7 @@ steps:
   - mkdir debug
   - cd debug
   - cmake ..
-  - make
+  - make -j4
 trigger:
   event:
   - pull_request
@@ -165,7 +241,7 @@ steps:
   - mkdir debug
   - cd debug
   - cmake ..
-  - make
+  - make -j4
 trigger:
   event:
   - pull_request
@@ -174,25 +250,3 @@ steps:
     - develop
     - master
 
----
-kind: pipeline
-name: goodbye
-
-platform:
-  os: linux
-  arch: amd64
-
-steps:
-- name: 64-bit
-  image: alpine
-  commands:
-  - echo 64-bit is good.
-  when:
-    branch:
-    - develop
-    - master
-
-
-depends_on:
-- test_arm64
-- test_amd64
Jenkinsfile

@@ -5,7 +5,7 @@ node {
     git url: 'https://github.com/taosdata/TDengine.git'
 }
 
-def skipstage=0
+def skipbuild=0
 
 def abortPreviousBuilds() {
   def currentJobName = env.JOB_NAME
@@ -33,8 +33,7 @@ def abort_previous(){
     milestone(buildNumber)
 }
 def pre_test(){
-
 
     sh'hostname'
     sh '''
     sudo rmtaos || echo "taosd has not installed"
     '''
@@ -52,12 +51,18 @@ def pre_test(){
       git checkout master
       '''
     }
-    else {
+    else if(env.CHANGE_TARGET == '2.0'){
+      sh '''
+      cd ${WKC}
+      git checkout 2.0
+      '''
+    }
+    else{
       sh '''
       cd ${WKC}
       git checkout develop
       '''
     }
   }
 }
 sh'''
 cd ${WKC}
@@ -75,7 +80,13 @@ def pre_test(){
       git checkout master
       '''
     }
-    else {
+    else if(env.CHANGE_TARGET == '2.0'){
+      sh '''
+      cd ${WK}
+      git checkout 2.0
+      '''
+    }
+    else{
       sh '''
       cd ${WK}
       git checkout develop
@@ -95,19 +106,17 @@ def pre_test(){
     make > /dev/null
     make install > /dev/null
     cd ${WKC}/tests
-    pip3 install ${WKC}/src/connector/python
+    pip3 install ${WKC}/src/connector/python/
     '''
-    return 1
 }
-
 pipeline {
   agent none
 
   environment{
       WK = '/var/lib/jenkins/workspace/TDinternal'
       WKC= '/var/lib/jenkins/workspace/TDinternal/community'
   }
 
   stages {
       stage('pre_build'){
           agent{label 'master'}
@@ -123,19 +132,22 @@ pipeline {
               rm -rf ${WORKSPACE}.tes
               cp -r ${WORKSPACE} ${WORKSPACE}.tes
               cd ${WORKSPACE}.tes
 
               git fetch
               '''
               script {
                 if (env.CHANGE_TARGET == 'master') {
                   sh '''
                   git checkout master
                   git pull origin master
                   '''
                 }
-                else {
+                else if(env.CHANGE_TARGET == '2.0'){
+                  sh '''
+                  git checkout 2.0
+                  '''
+                }
+                else{
                   sh '''
                   git checkout develop
                   git pull origin develop
                   '''
                 }
               }
@@ -143,32 +155,34 @@ pipeline {
               git fetch origin +refs/pull/${CHANGE_ID}/merge
               git checkout -qf FETCH_HEAD
               '''
 
               script{
-                env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true)
+                skipbuild='2'
+                skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true)
+                println skipbuild
               }
-              println env.skipstage
               sh'''
               rm -rf ${WORKSPACE}.tes
               '''
           }
       }
 
       stage('Parallel test stage') {
           //only build pr
           when {
              allOf{
                   changeRequest()
-                  expression {
-                      env.skipstage != 0
+                  expression{
+                      return skipbuild.trim() == '2'
                   }
              }
           }
           parallel {
               stage('python_1_s1') {
-                  agent{label 'p1'}
+                  agent{label " slave1 || slave11 "}
                   steps {
 
                       pre_test()
-                      timeout(time: 45, unit: 'MINUTES'){
+                      timeout(time: 55, unit: 'MINUTES'){
                           sh '''
                           date
                           cd ${WKC}/tests
@@ -179,11 +193,11 @@ pipeline {
                   }
               }
               stage('python_2_s5') {
-                  agent{label 'p2'}
+                  agent{label " slave5 || slave15 "}
                   steps {
 
                       pre_test()
-                      timeout(time: 45, unit: 'MINUTES'){
+                      timeout(time: 55, unit: 'MINUTES'){
                           sh '''
                           date
                           cd ${WKC}/tests
@@ -193,9 +207,9 @@ pipeline {
                   }
               }
               stage('python_3_s6') {
-                  agent{label 'p3'}
+                  agent{label " slave6 || slave16 "}
                   steps {
-                      timeout(time: 45, unit: 'MINUTES'){
+                      timeout(time: 55, unit: 'MINUTES'){
                           pre_test()
                           sh '''
                           date
@@ -206,9 +220,9 @@ pipeline {
                   }
               }
               stage('test_b1_s2') {
-                  agent{label 'b1'}
+                  agent{label " slave2 || slave12 "}
                   steps {
-                      timeout(time: 45, unit: 'MINUTES'){
+                      timeout(time: 55, unit: 'MINUTES'){
                           pre_test()
                           sh '''
                           cd ${WKC}/tests
@@ -217,9 +231,8 @@ pipeline {
                   }
               }
           }
-
           stage('test_crash_gen_s3') {
-              agent{label "b2"}
+              agent{label " slave3 || slave13 "}
 
               steps {
                   pre_test()
@@ -245,20 +258,18 @@ pipeline {
                       ./handle_taosd_val_log.sh
                       '''
                   }
-                  timeout(time: 45, unit: 'MINUTES'){
+                  timeout(time: 55, unit: 'MINUTES'){
                       sh '''
                       date
                       cd ${WKC}/tests
                       ./test-all.sh b2fq
                       date
                       '''
                   }
-
               }
           }
 
           stage('test_valgrind_s4') {
-              agent{label "b3"}
+              agent{label " slave4 || slave14 "}
 
               steps {
                   pre_test()
@@ -269,7 +280,7 @@ pipeline {
                       ./handle_val_log.sh
                       '''
                   }
-                  timeout(time: 45, unit: 'MINUTES'){
+                  timeout(time: 55, unit: 'MINUTES'){
                       sh '''
                       date
                       cd ${WKC}/tests
@@ -284,9 +295,9 @@ pipeline {
                   }
               }
               stage('test_b4_s7') {
-                  agent{label 'b4'}
+                  agent{label " slave7 || slave17 "}
                   steps {
-                      timeout(time: 45, unit: 'MINUTES'){
+                      timeout(time: 55, unit: 'MINUTES'){
                           pre_test()
                           sh '''
                           date
@@ -303,9 +314,9 @@ pipeline {
                   }
               }
               stage('test_b5_s8') {
-                  agent{label 'b5'}
+                  agent{label " slave8 || slave18 "}
                   steps {
-                      timeout(time: 45, unit: 'MINUTES'){
+                      timeout(time: 55, unit: 'MINUTES'){
                           pre_test()
                           sh '''
                           date
@@ -316,9 +327,9 @@ pipeline {
                   }
               }
               stage('test_b6_s9') {
-                  agent{label 'b6'}
+                  agent{label " slave9 || slave19 "}
                   steps {
-                      timeout(time: 45, unit: 'MINUTES'){
+                      timeout(time: 55, unit: 'MINUTES'){
                           pre_test()
                           sh '''
                           date
@@ -329,9 +340,9 @@ pipeline {
                   }
               }
               stage('test_b7_s10') {
-                  agent{label 'b7'}
+                  agent{label " slave10 || slave20 "}
                   steps {
-                      timeout(time: 45, unit: 'MINUTES'){
+                      timeout(time: 55, unit: 'MINUTES'){
                           pre_test()
                           sh '''
                           date
@@ -421,6 +432,5 @@ pipeline {
                   from: "support@taosdata.com"
               )
           }
       }
-
   }
 }
@@ -1,3 +1,18 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
 package app
 
 import (

The same fifteen-line AGPL license header is prepended, with an identical @@ -1,3 +1,18 @@ hunk, to each of the following Go source files as well:

 package expr
 
 import (

 package expr
 
 import "testing"

 package expr
 
 import (

 package expr
 
 import (

 package app
 
 import (

 package app
 
 import (

 package app
 
 import (

 package main
 
 import (

 package models
 
 import (

 package models
 
 import "time"

 package utils
 
 import (

 package log
 
 import (
@@ -4,7 +4,7 @@ PROJECT(TDengine)
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "2.1.5.0")
+  SET(TD_VER_NUMBER "2.1.6.0")
 ENDIF ()
 
 IF (DEFINED VERCOMPATIBLE)
@@ -126,7 +126,7 @@ taos> source <filename>;
 $ taosdemo
 ```
 
-This command automatically creates a super table meters in the database test, with 10,000 tables named "d0" to "d9999" under it. Each table has 10,000 records, and each record has four fields (ts, current, voltage, phase), with timestamps ranging from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupdId, where groupdId is set to 1 through 10 and location is set to "beijing" or "shanghai".
+This command automatically creates a super table meters in the database test, with 10,000 tables named "d0" to "d9999" under it. Each table has 10,000 records, and each record has four fields (ts, current, voltage, phase), with timestamps ranging from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupId, where groupId is set to 1 through 10 and location is set to "beijing" or "shanghai".
 
 Running this command takes a few minutes and inserts 100 million records in total.
@@ -150,10 +150,10 @@ taos> select avg(current), max(voltage), min(phase) from test.meters;
 taos> select count(*) from test.meters where location="beijing";
 ```
 
-- Query the average, maximum, minimum, and so on of all records with groupdId=10:
+- Query the average, maximum, minimum, and so on of all records with groupId=10:
 
 ```mysql
-taos> select avg(current), max(voltage), min(phase) from test.meters where groupdId=10;
+taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
 ```
 
 - Aggregate the average, maximum, and minimum for table d10 in 10-second windows:
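The 10-second aggregation that the trailing context line refers to continues past the end of this hunk; it would look roughly like the following sketch (the exact statement is not part of the excerpt):

```mysql
taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
```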
@@ -33,7 +33,7 @@ USE power;
 An IoT system often has multiple types of devices; a power grid, for example, has smart meters, transformers, buses, switches, and so on. To make aggregation across tables easy, TDengine requires one super table to be created for each type of data collection point. Taking the smart meters of Table 1 as an example, the super table can be created with the following SQL command:
 
 ```mysql
-CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);
+CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
 ```
 
 **Note:** the STABLE keyword in this statement must be written as TABLE in versions earlier than 2.0.15.
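For context, individual collection points are then created as subtables of this super table; a minimal sketch (the table name and tag values here are illustrative, not part of the change):

```mysql
CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
```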
@@ -17,7 +17,7 @@ Compared with the time-window computation of ordinary stream processing, the continuous queries provided by TDengine...
 The following uses the smart-meter scenario to show how continuous queries are used. Assume the super table and subtables are created with the following SQL statements:
 
 ```sql
-create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupdId int);
+create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
 create table D1001 using meters tags ("Beijing.Chaoyang", 2);
 create table D1002 using meters tags ("Beijing.Haidian", 2);
 ...
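A continuous query over these tables is then defined with a time window; a minimal sketch under the syntax this chapter goes on to describe (the result-table name and window sizes are illustrative):

```sql
create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s);
```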
@@ -213,7 +213,7 @@ The C/C++ API is similar to MySQL's C API; applications must include the TDengine...
 
 - `int taos_result_precision(TAOS_RES *res)`
 
-  Returns the precision of the result set's timestamp field: `0` means milliseconds, `1` means microseconds.
+  Returns the precision of the result set's timestamp field: `0` means milliseconds, `1` means microseconds, `2` means nanoseconds.
 
 - `TAOS_ROW taos_fetch_row(TAOS_RES *res)`
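The new `2` return value corresponds to nanosecond-precision databases, which (per the storage-parameter table later in this commit) are supported from version 2.1.5.0; a sketch with an illustrative database name:

```mysql
CREATE DATABASE nsdemo PRECISION 'ns';
```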
@@ -3,17 +3,17 @@
 
 ## <a class="anchor" id="grafana"></a>Grafana
 
 TDengine integrates quickly with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process requires no code development, and the contents of TDengine data tables can be visualized on dashboards (DashBoard).
 
 ### Installing Grafana
 
-TDengine currently supports Grafana 5.2.4 and above. Users can download the installation package for their operating system from the Grafana website and install it. Download address: https://grafana.com/grafana/download.
+TDengine currently supports Grafana 6.2 and above. Users can download the installation package for their operating system from the Grafana website and install it. Download address: https://grafana.com/grafana/download.
 
 ### Configuring Grafana
 
 The TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory of the installation package.
 
 Taking the CentOS 7.2 operating system as an example, copy the grafanaplugin directory to /var/lib/grafana/plugins and restart grafana.
 
 ```bash
 sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
@@ -1,6 +1,6 @@
 # TDengine Cluster Installation and Management
 
-Multiple TDengine servers, i.e. multiple running instances of taosd, can form a cluster, guaranteeing highly reliable operation of TDengine and providing horizontal scalability. To understand cluster management in TDengine 2.0, you need to be familiar with the basic cluster concepts; see the chapter on the TDengine 2.0 overall architecture. Before installing a cluster, first install and try out single-node operation following the chapter [Getting Started](https://www.taosdata.com/cn/documentation/getting-started/).
+Multiple TDengine servers, i.e. multiple running instances of taosd, can form a cluster, guaranteeing highly reliable operation of TDengine and providing horizontal scalability. To understand cluster management in TDengine 2.0, you need to be familiar with the basic cluster concepts; see the chapter "TDengine Overall Architecture". Before installing a cluster, it is recommended to first install and try out single-node operation following the chapter [Getting Started](https://www.taosdata.com/cn/documentation/getting-started/).
 
 Each data node of the cluster is uniquely identified by its End Point, which is its FQDN (Fully Qualified Domain Name) plus a port, e.g. h1.taosdata.com:6030. An FQDN is generally the server's hostname, obtainable on Linux with `hostname -f` (for configuring FQDN, see: [A clear explanation of TDengine's FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)). The port is the port this data node uses for external service, 6030 by default, configurable via the serverPort parameter in taos.cfg. A physical node may have multiple hostnames; TDengine automatically picks the first one, but it can also be specified via the fqdn parameter in taos.cfg. If you prefer direct IP access, set fqdn to the node's IP address.
@@ -12,7 +12,7 @@ Cluster management in TDengine is extremely simple; apart from adding and removing nodes, which require manual intervention...
 
 **Step 0**: Plan the FQDNs of all physical nodes in the cluster; add each planned FQDN to its node's /etc/hostname; edit /etc/hosts on every physical node so that the IP-to-FQDN mappings of all cluster nodes are present. [If DNS is deployed, ask your network administrator to configure it there.]
 
-**Step 1**: If any physical node used for the cluster holds previous test data, had a 1.x version installed, or had any other TDengine version installed, remove it first and wipe all data. For detailed steps see the blog post [Installing and uninstalling the various TDengine packages](https://www.taosdata.com/blog/2019/08/09/566.html).
+**Step 1**: If any physical node used for the cluster holds previous test data, had a 1.x version installed, or had any other TDengine version installed, remove it first and wipe all data (if the existing data needs to be preserved, contact the TAOS delivery team for an old-version upgrade and data migration). For detailed steps see the blog post [Installing and uninstalling the various TDengine packages](https://www.taosdata.com/blog/2019/08/09/566.html).
 **Note 1:** Because FQDN information is written to files, if FQDN was not configured or was changed before, and TDengine was started, be sure to clean up the previous data (`rm -rf /var/lib/taos/*`) once the data is confirmed useless or backed up;
 **Note 2:** The client also needs to be configured so that it can correctly resolve every node's FQDN, whether via DNS or the hosts file.
@@ -23,23 +23,23 @@ Cluster management in TDengine is extremely simple; apart from adding and removing nodes...
 **Step 4**: Check the network settings of all data nodes and of the physical node running the application:
 
 1. Run `hostname -f` on every physical node and confirm that all hostnames are distinct (the node hosting the application driver does not need this check);
 2. Run `ping host` on every physical node, where host is the hostname of another physical node, and verify the other nodes can be pinged; if not, check the network settings, the /etc/hosts file (default path on Windows: C:\Windows\system32\drivers\etc\hosts), or the DNS configuration. If the nodes cannot ping each other, the cluster cannot be formed;
 3. From the physical node running the application, ping the data nodes running taosd; if they cannot be pinged, the application cannot connect to taosd; check the DNS settings or hosts file of the application's node;
 4. Each data node's End Point is the reported hostname plus the port, e.g. h1.taosdata.com:6030.
 
 **Step 5**: Edit TDengine's configuration file (the file /etc/taos/taos.cfg must be modified on all nodes). Assume the first data node to be started has End Point h1.taosdata.com:6030; its cluster-related parameters are as follows:
 
 ```
 // firstEp is the first data node each data node connects to after its initial start
 firstEp               h1.taosdata.com:6030
 
-// must be set to this data node's FQDN; if the host has only one hostname, this setting can be commented out
+// must be set to this data node's FQDN; if the host has only one hostname, this item can be commented out
 fqdn                  h1.taosdata.com
 
 // the port this data node serves on, 6030 by default
 serverPort            6030
 
-// for usage scenarios, see the section "Using an Arbitrator"
+// needed when the replica count is even; see the section "Using an Arbitrator"
 arbitrator            ha.taosdata.com:6042
 ```
@@ -53,7 +53,7 @@ arbitrator ha.taosdata.com:6042
 | 2 | mnodeEqualVnodeNum | the number of vnodes one mnode is counted as consuming |
 | 3 | offlineThreshold | dnode offline threshold; exceeding it takes the dnode offline |
 | 4 | statusInterval | interval at which a dnode reports its status to the mnode |
-| 5 | arbitrator | end point of the arbitrator in the system |
+| 5 | arbitrator | End Point of the arbitrator in the system |
 | 6 | timezone | time zone |
 | 7 | balance | whether load balancing is enabled |
 | 8 | maxTablesPerVnode | maximum number of tables that can be created in each vnode |
@@ -87,7 +87,7 @@ taos>
 
 1. Start taosd on every physical node following the chapter [Getting Started](https://www.taosdata.com/cn/documentation/getting-started/); (note: every physical node must set the firstEp parameter in taos.cfg to the End Point of the new cluster's first node, h1.taos.com:6030 in this example)
 
 2. On the first data node, use the CLI program taos to log in to TDengine and run:
 
 ```
 CREATE DNODE "h2.taos.com:6030";
@@ -101,7 +101,7 @@ taos>
 SHOW DNODES;
 ```
 
-to check whether the new node joined successfully. If the added data node is offline, perform two checks
+to check whether the new node joined successfully. If the added data node is offline, perform two checks:
 
 - Check whether taosd on that data node is working properly; if it is not running, find out why first
 - Check the first few lines of that node's taosd log file taosdlog.0 (usually under /var/log/taos) to see whether the fqdn and port the node reports match the End Point just added. If they differ, add the correct End Point.
@@ -121,7 +121,7 @@ taos>
 
 ### Adding a data node
 
 Run the CLI program taos, log in as root, and run:
 
 ```
 CREATE DNODE "fqdn:port";
@@ -131,13 +131,13 @@ CREATE DNODE "fqdn:port";
 
 ### Removing a data node
 
 Run the CLI program taos, log in to TDengine as root, and run:
 
-```
-DROP DNODE "fqdn:port";
+```mysql
+DROP DNODE "fqdn:port | dnodeID";
 ```
 
-where fqdn is the FQDN of the node being removed and port is its external service port
+The node can be specified either by "fqdn:port" or by "dnodeID". fqdn is the FQDN of the node being removed and port is its external service port; the dnodeID can be obtained with SHOW DNODES.
 
 <font color=green>**[Note]**</font>
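For example, removing the node added earlier in this chapter by its End Point (the ID form would use a value reported by SHOW DNODES):

```mysql
DROP DNODE "h2.taos.com:6030";
```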
@@ -147,25 +147,41 @@ DROP DNODE "fqdn:port";
 
 - After a data node is dropped, all other nodes learn of this dnodeID's removal, and no node in the cluster will accept requests from that dnodeID again.
 
-- The dnodeID's is assigned automatically by the cluster and cannot be specified manually. It increases monotonically when generated and never repeats.
+- The dnodeID is assigned automatically by the cluster and cannot be specified manually. It increases monotonically when generated and never repeats.
 
+### Manually migrating a data node
+
+A vnode can be migrated manually to a specified dnode.
+
+Run the CLI program taos, log in to TDengine as root, and run:
+
+```mysql
+ALTER DNODE <source-dnodeId> BALANCE "VNODE:<vgId>-DNODE:<dest-dnodeId>";
+```
+
+Here source-dnodeId is the source dnodeID, i.e. the dnodeID on which the vnode to be migrated currently resides; vgId can be obtained from the first column of the SHOW VGROUPS listing; dest-dnodeId is the target dnodeID.
+
+<font color=green>**[Note]**</font>
+
+- Manual migration is allowed only when the cluster's automatic load balancing is disabled (balance set to 0).
+- Only a vnode in a normal working state, master/slave, can be migrated; in the offline/unsynced/syncing states it cannot.
+- Before migrating, verify that the target dnode has sufficient resources: CPU, memory, and disk.
+
 ### Viewing data nodes
 
 Run the CLI program taos, log in to TDengine as root, and run:
-
-```
+```mysql
 SHOW DNODES;
 ```
 
-It lists all dnodes in the cluster with each dnode's fqdn:port, status (ready, offline, etc.), vnode count, unused vnode count, and so on. Run it after adding or removing a data node.
+It lists all dnodes in the cluster with each dnode's ID, end_point (fqdn:port), status (ready, offline, etc.), vnode count, unused vnode count, and so on. Run it after adding or removing a data node.
 
 ### Viewing virtual node groups
 
 To make full use of multi-core hardware and provide scalability, data needs to be sharded. TDengine therefore splits a DB's data into multiple parts stored in multiple vnodes. The vnodes may be distributed across multiple dnodes, achieving horizontal scale-out. A vnode belongs to exactly one DB, but a DB can have multiple vnodes. vnodes are allocated automatically by the mnode according to current system resources, without any manual intervention.
 
 Run the CLI program taos, log in to TDengine as root, and run:
-
-```
+```mysql
 SHOW VGROUPS;
 ```
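As an illustration of the new migration command, with hypothetical IDs (real values come from SHOW DNODES and SHOW VGROUPS):

```mysql
ALTER DNODE 1 BALANCE "VNODE:2-DNODE:3";
```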
@@ -173,9 +189,9 @@ SHOW VGROUPS;
 
 TDengine provides high availability of the system, for both vnodes and the mnode, through a multi-replica mechanism.
 
-The number of vnode replicas is tied to a DB. A cluster can hold multiple DBs, and each DB can be configured with its own replica count as operations demand. When creating a database, the replica parameter specifies the replica count (default 1). With a single replica the reliability of the system cannot be guaranteed: as soon as the node holding the data goes down, service is lost. The number of cluster nodes must be greater than or equal to the replica count, otherwise creating a table returns the error “more dnodes are needed". For example, the following command creates a database demo with 3 replicas:
+The number of vnode replicas is tied to a DB. A cluster can hold multiple DBs, and each DB can be configured with its own replica count as operations demand. When creating a database, the replica parameter specifies the replica count (default 1). With a single replica the reliability of the system cannot be guaranteed: as soon as the node holding the data goes down, service is lost. The number of cluster nodes must be greater than or equal to the replica count, otherwise creating a table returns the error "more dnodes are needed". For example, the following command creates a database demo with 3 replicas:
 
-```
+```mysql
 CREATE DATABASE demo replica 3;
 ```
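The replica count of an existing database can also be changed online, per the ALTER DATABASE table later in this commit; a sketch:

```mysql
ALTER DATABASE demo REPLICA 2;
```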
@@ -183,20 +199,19 @@ CREATE DATABASE demo replica 3;
 
 A dnode may hold data of multiple DBs, so when a dnode goes offline, multiple DBs may be affected. If half or more of the vnodes in a vnode group stop working, that vnode group cannot serve reads or writes, which affects the read and write operations of some tables of the DB it belongs to.
 
 Because of vnodes, one cannot simply conclude that "the cluster works as long as more than half of the dnodes work". For simple cases, though, the conclusion is easy: with 3 replicas and only three dnodes, the cluster still works if one node is down, but not if two data nodes are down.
 
 ## <a class="anchor" id="mnode"></a>High availability of the mnode
 
 A TDengine cluster is managed by the mnode (a taosd module, the management node). To keep the mnode highly available, multiple mnode replicas can be configured; the replica count is set by the numOfMnodes system parameter, with a valid range of 1-3. To guarantee strong consistency of metadata, the mnode replicas replicate data synchronously.
 
 A cluster has multiple dnodes, but a dnode runs at most one mnode instance. With multiple dnodes, which dnode acts as the mnode? This is decided automatically by the system based on overall resource usage. The user can run the following command in the TDengine console via the CLI program taos:
 
-```
+```mysql
 SHOW MNODES;
 ```
 
-to view the mnode list, which shows the End Point and role (master, slave, unsynced, or offline) of the dnode hosting each mnode.
-When the first data node of a cluster starts, that node must run an mnode instance; otherwise the dnode cannot work, since a system must have at least one mnode. If numOfMnodes is 2, then when the second dnode starts, it also runs an mnode instance.
+to view the mnode list, which shows the End Point and role (master, slave, unsynced, or offline) of the dnode hosting each mnode. When the first data node of a cluster starts, that node must run an mnode instance; otherwise the dnode cannot work, since a system must have at least one mnode. If numOfMnodes is 2, then when the second dnode starts, it also runs an mnode instance.
 
 To keep the mnode service highly available, numOfMnodes must be 2 or greater. Because the metadata kept by the mnode must be strongly consistent, if numOfMnodes is greater than 2 the replication parameter quorum is automatically set to 2: at least two replicas must acknowledge a write before the client is told the write succeeded.
@@ -210,7 +225,7 @@ SHOW MNODES;
 - When a data node is removed from the cluster, the system automatically moves its data to other data nodes, without any manual intervention.
 - If a data node is overloaded (its data volume is too large), the system automatically rebalances the load by moving some of its vnodes to other nodes.
 
-When any of the above happens, the system starts a load computation over the each data node to decide how to move.
+When any of the above happens, the system starts a load computation over the data nodes to decide how to move.
 
 **[Hint] Load balancing is controlled by the balance parameter, which decides whether automatic load balancing is enabled.**
@@ -225,7 +240,7 @@ SHOW MNODES;
 
 ## <a class="anchor" id="arbitrator"></a>Using an Arbitrator
 
-If the replica count is even, a master cannot be elected when half of the vnodes in a vnode group are not working. Likewise, a master mnode cannot be elected when half of the mnodes are not working, because of the "split brain" problem. To solve this, TDengine introduces the concept of an Arbitrator. The Arbitrator simulates a vnode or mnode at work, but is only responsible for the network connection and does not handle any data insertion or access. As long as more than half of the vnodes or mnodes, counting the Arbitrator, are working, the vnode group or mnode group can still serve data writes and queries normally. For example, with 2 replicas, if node A is offline but node B is up and can reach the Arbitrator, node B keeps working normally.
+If the replica count is even, a master cannot be elected when half or more of the vnodes in a vnode group are not working. Likewise, a master mnode cannot be elected when half or more of the mnodes are not working, because of the "split brain" problem. To solve this, TDengine introduces the concept of an Arbitrator. The Arbitrator simulates a vnode or mnode at work, but is only responsible for the network connection and does not handle any data insertion or access. As long as more than half of the vnodes or mnodes, counting the Arbitrator, are working, the vnode group or mnode group can still serve data writes and queries normally. For example, with 2 replicas, if node A is offline but node B is up and can reach the Arbitrator, node B keeps working normally.
 
 In short, in the current version TDengine recommends configuring an Arbitrator in dual-replica environments to improve system availability.
@@ -235,3 +250,9 @@ The Arbitrator executable is named tarbitrator; it consumes almost no system resources...
 3. Edit the configuration file of every taosd instance, setting the arbitrator parameter in taos.cfg to the End Point of the tarbitrator program. (If this parameter is configured, the system automatically connects to the configured Arbitrator when the replica count is even; with an odd replica count no connection is made even if it is configured.)
 4. An Arbitrator configured in the configuration file appears in the output of the `SHOW DNODES;` command, with "arb" in its role column.
+
+Viewing the state of the cluster Arbitrator [supported since 2.0.14.0]:
+
+```mysql
+SHOW DNODES;
+```
@@ -1,4 +1,4 @@
-# TDengine Operation and Maintenance
+# TDengine Operation and Administration
 
 ## <a class="anchor" id="planning"></a>Capacity planning
@@ -28,12 +28,28 @@ total taosd memory = vnode memory + mnode memory + query memory
 
 Finally, if memory is plentiful, consider raising the Blocks setting so that more data stays in memory, speeding up queries.
 
+#### Client memory requirements
+
+Client applications connect to the server through the taosc client driver, which has its own memory overhead.
+
+The client's memory overhead consists of the SQL statements used during writes, the metadata cache for tables, and structural overhead. Let N be the maximum number of tables the system holds (the metadata overhead per table created via a super table is about 256 bytes), T the maximum number of parallel write threads, and S the maximum SQL statement length (typically 1 MByte). The client memory overhead can then be estimated (in MBytes) as:
+```
+M = (T * S * 3 + (N / 4096) + 100)
+```
+
+For example: with at most 100 concurrent write threads and 10,000,000 subtables in total, the minimum client memory requirement is:
+```
+100 * 3 + (10000000 / 4096) + 100 = 2741 (MBytes)
+```
+
+That is, 3 GBytes of memory is the minimum requirement.
+
 ### CPU requirements
 
 The CPU requirement depends on two things:
 
-* __Data insertion__ A single TDengine core can handle at least 10,000 insert requests per second. Each insert request can carry multiple records, and inserting one record costs about the same computing resources as inserting 10 records at once; so the more records per insert, the higher the insert efficiency. With 200+ records per request, a single core can insert 1 million records per second. But this demands more from the front-end data collection, which must buffer records and insert them in batches.
-* __Query requirements__ TDengine provides efficient queries, but queries differ greatly from scenario to scenario and query frequency varies, so it is hard to give an objective number. Users need to write some queries for their own scenario to determine it.
+* **Data insertion** A single TDengine core can handle at least 10,000 insert requests per second. Each insert request can carry multiple records, and inserting one record costs about the same computing resources as inserting 10 records at once; so the more records per insert, the higher the insert efficiency. With 200+ records per request, a single core can insert 1 million records per second. But this demands more from the front-end data collection, which must buffer records and insert them in batches.
+* **Query requirements** TDengine provides efficient queries, but queries differ greatly from scenario to scenario and query frequency varies, so it is hard to give an objective number. Users need to write some queries for their own scenario to determine it.
 
 So for insertion alone the CPU can be estimated, but the compute consumed by queries cannot. In practice, keep CPU usage below 50%; beyond that, add new nodes for more computing resources.
@@ -96,51 +112,170 @@ TDengine's background service is provided by taosd, and can be modified in the configuration file taos.cfg...
 taosd -C
 ```
 
-Only some important configuration parameters are listed below; see the comments in the configuration file for more. For details on each parameter see the earlier chapters. The defaults all work and generally need no changes. **Note: after the configuration is changed, the *taosd* service must be restarted to take effect.**
+Only some important configuration parameters are listed below; see the comments in the configuration file for more. For details on each parameter see the earlier chapters. The defaults all work and generally need no changes. **Note: after a configuration-file parameter is changed, the *taosd* service, or the client application, must be restarted to take effect.**
 
-- firstEp: the end point of the first dnode in the cluster that taosd actively connects to at startup; default localhost:6030.
-- fqdn: the data node's FQDN; defaults to the first hostname configured by the operating system. If you prefer IP access, set it to the node's IP address. The value must be at most 96 characters long.
-- serverPort: the port taosd serves on after startup; default 6030. (The RESTful service uses this port plus 11, i.e. 6041 by default.)
-- dataDir: the data directory, where all data files are written. Default: /var/lib/taos.
-- logDir: the log directory, where client and server run logs are written. Default: /var/log/taos.
-- arbitrator: the end point of the arbitrator in the system; empty by default.
-- role: the dnode's optional role. 0 - any: may act as mnode and be assigned vnodes; 1 - mgmt: may only act as mnode, no vnodes; 2 - dnode: may not act as mnode, only vnodes
-- debugFlag: run-log level. 131 (errors and warnings), 135 (errors, warnings, and debug), 143 (errors, warnings, debug, and trace). Default: 131 or 135 (different modules have different defaults).
-- numOfLogLines: maximum number of lines per log file. Default: 10,000,000.
-- logKeepDays: maximum retention of log files. When greater than 0, log files are renamed taosdlog.xxx, where xxx is the timestamp of the last modification in seconds. Default: 0 days.
-- maxSQLLength: maximum allowed length of a single SQL statement. Default: 65380 bytes.
-- telemetryReporting: whether TDengine may collect and report basic usage information; 0 forbids, 1 allows. Default: 1.
-- stream: whether continuous queries (stream computing) are enabled; 0 forbids, 1 allows. Default: 1.
-- queryBufferSize: memory reserved for all concurrent queries. Estimate by multiplying the application's likely maximum concurrency by the number of tables, then by 170. In MB (before 2.0.15 the unit was bytes).
-- ratioOfQueryCores: maximum number of query threads. Minimum 0 means a single query thread; maximum 2 creates up to twice as many query threads as CPU cores. Default 1: as many query threads as cores. The value may be fractional: 0.5 creates up to half as many query threads as cores.
+| **#** | **Parameter** | **Internal** | **S\|C** | **Unit** | **Meaning** | **Range** | **Default** | **Note** |
+| ----- | ----------------------- | -------- | -------- | -------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| 1 | firstEP | | **SC** | | end point of the first dnode in the cluster that taosd actively connects to at startup | | localhost:6030 | |
+| 2 | secondEP | YES | **SC** | | end point of the second dnode in the cluster to try if firstEp cannot be reached at startup | | none | |
+| 3 | fqdn | | **SC** | | the data node's FQDN; if you prefer IP access, set it to the node's IP address | | the first hostname configured by the OS | keep the value within 96 characters |
+| 4 | serverPort | | **SC** | | port taosd serves on after startup | | 6030 | the RESTful service uses this port plus 11, i.e. 6041 by default |
+| 5 | logDir | | **SC** | | log directory; client and server run logs are written here | | /var/log/taos | |
+| 6 | scriptDir | YES | **S** | | | | | |
+| 7 | dataDir | | **S** | | data directory; all data files are written here | | /var/lib/taos | |
+| 8 | arbitrator | | **S** | | end point of the arbitrator in the system | | empty | |
+| 9 | numOfThreadsPerCore | | **SC** | | queue consumer threads created per CPU core | | 1.0 | |
+| 10 | ratioOfQueryThreads | | **S** | | maximum number of query threads | 0: one query thread only; 1: as many as CPU cores; 2: up to twice the CPU cores | 1 | may be fractional; 0.5 means up to half the cores |
+| 11 | numOfMnodes | | **S** | | number of management nodes in the system | | 3 | |
+| 12 | vnodeBak | | **S** | | whether to back up the vnode directory when a vnode is deleted | 0: no, 1: yes | 1 | |
+| 13 | telemetryRePorting | | **S** | | whether TDengine may collect and report basic usage information | 0: no; 1: yes | 1 | |
+| 14 | balance | | **S** | | whether load balancing is enabled | 0, 1 | 1 | |
+| 15 | balanceInterval | YES | **S** | seconds | interval at which the mnode checks load balancing in normal operation | 1-30000 | 300 | |
+| 16 | role | | **S** | | the dnode's optional role | 0: any (may act as mnode and be assigned vnodes); 1: mgmt (mnode only, no vnodes); 2: dnode (vnodes only, no mnode) | 0 | |
+| 17 | maxTmerCtrl | | **SC** | count | number of timers | 8-2048 | 512 | |
+| 18 | monitorInterval | | **S** | seconds | interval at which the monitoring database records system parameters (CPU/memory) | 1-600 | 30 | |
+| 19 | offlineThreshold | | **S** | seconds | dnode offline threshold; exceeding it takes the dnode offline | 5-7200000 | 86400*10 (10 days) | |
+| 20 | rpcTimer | | **SC** | ms | rpc retry interval | 100-3000 | 300 | |
+| 21 | rpcMaxTime | | **SC** | seconds | maximum rpc wait for a reply | 100-7200 | 600 | |
+| 22 | statusInterval | | **S** | seconds | interval at which a dnode reports its status to the mnode | 1-10 | 1 | |
+| 23 | shellActivityTimer | | **SC** | seconds | interval at which the shell client sends heartbeats to the mnode | 1-120 | 3 | |
+| 24 | tableMetaKeepTimer | | **S** | seconds | how long table metadata stays cached | 1-8640000 | 7200 | |
+| 25 | minSlidingTime | | **S** | ms | minimum sliding-window length | 10-1000000 | 10 | becomes 1 us once us interpolation is supported |
+| 26 | minIntervalTime | | **S** | ms | minimum time window | 1-1000000 | 10 | |
+| 27 | stream | | **S** | | whether continuous queries (stream computing) are enabled | 0: no; 1: yes | 1 | |
+| 28 | maxStreamCompDelay | | **S** | ms | maximum startup delay of a continuous query | 10-1000000000 | 20000 | random delays are inserted so that multiple streams do not all run at once and consume too many resources: maxFirstStreamCompDelay is the minimum wait before a stream's first run; streamCompDelayRatio times the query interval gives the delay base; maxStreamCompDelay caps that base; the actual delay is a random value not exceeding the base; retryStreamCompDelay is the wait base when a stream computation fails and is retried |
+| 29 | maxFirstStreamCompDelay | | **S** | ms | maximum startup delay of a continuous query's first run | 10-1000000000 | 10000 | |
+| 30 | retryStreamCompDelay | | **S** | ms | retry interval for continuous queries | 10-1000000000 | 10 | |
+| 31 | streamCompDelayRatio | | **S** | | delay coefficient for continuous queries | 0.1-0.9 | 0.1 | |
+| 32 | maxVgroupsPerDb | | **S** | | maximum number of vnodes usable per DB | 0-8192 | | |
+| 33 | maxTablesPerVnode | | **S** | | maximum number of tables that can be created per vnode | | 1000000 | |
+| 34 | minTablesPerVnode | YES | **S** | | minimum number of tables that must be created per vnode | | 100 | |
+| 35 | tableIncStepPerVnode | YES | **S** | | increment step once a vnode exceeds the minimum table count | | 1000 | |
+| 36 | cache | | **S** | MB | memory block size | | 16 | |
+| 37 | blocks | | **S** | | number of cache-sized memory blocks per vnode (tsdb); a vnode's memory use is therefore roughly (cache * blocks) | | 6 | |
+| 38 | days | | **S** | days | time span of the data stored in one data file | | 10 | |
+| 39 | keep | | **S** | days | number of days data is retained | | 3650 | |
+| 40 | minRows | | **S** | | minimum records per file block | | 100 | |
+| 41 | maxRows | | **S** | | maximum records per file block | | 4096 | |
+| 42 | quorum | | **S** | | quorum of acknowledgements required for asynchronous writes to succeed | 1-3 | 1 | |
+| 43 | comp | | **S** | | file compression flag | 0: off, 1: one-stage compression, 2: two-stage compression | 2 | |
+| 44 | walLevel | | **S** | | WAL level | 1: write wal without fsync; 2: write wal with fsync | 1 | |
+| 45 | fsync | | **S** | ms | fsync period when wal is set to 2 | minimum 0: fsync on every write; maximum 180000 (three minutes) | 3000 | |
+| 46 | replica | | **S** | | replica count | 1-3 | 1 | |
+| 47 | mqttHostName | YES | **S** | | mqtt uri | | | [mqtt://username:password@hostname:1883/taos/](mqtt://username:password@hostname:1883/taos/) |
+| 48 | mqttPort | YES | **S** | | mqtt client name | | | 1883 |
+| 49 | mqttTopic | YES | **S** | | | | | /test |
+| 50 | compressMsgSize | | **S** | bytes | threshold above which messages between client and server are compressed; to compress, 64330 bytes is recommended, i.e. only bodies larger than 64330 bytes are compressed | `0`: compress all messages; >0: compress messages above the value; -1: no compression | -1 | |
+| 51 | maxSQLLength | | **C** | bytes | maximum allowed length of a single SQL statement | 65480-1048576 | 65380 | |
+| 52 | maxNumOfOrderedRes | | **SC** | | maximum records allowed for time-ordered super table queries | | 100,000 | |
+| 53 | timezone | | **SC** | | time zone | | obtained dynamically from the current system setting | |
+| 54 | locale | | **SC** | | system locale and encoding format | | obtained dynamically from the system; if that fails, set it in the configuration file or via the API | |
+| 55 | charset | | **SC** | | character set encoding | | obtained dynamically from the system; if that fails, set it in the configuration file or via the API | |
+| 56 | maxShellConns | | **S** | | connections allowed per dnode | 10-50000000 | 5000 | |
+| 57 | maxConnections | | **S** | | dnode connections allowed per database connection | 1-100000 | 5000 | in practice, if left unconfigured, choosing 50 worker threads produces Network unavailable |
+| 58 | minimalLogDirGB | | **SC** | GB | stop writing logs when the disk space of the log folder falls below this value | | 0.1 | |
+| 59 | minimalTmpDirGB | | **SC** | GB | stop writing temporary files when the disk space of the log folder falls below this value | | 0.1 | |
+| 60 | minimalDataDirGB | | **S** | GB | stop writing time-series data when the disk space of the log folder falls below this value | | 0.1 | |
+| 61 | mnodeEqualVnodeNum | | **S** | | the number of vnodes one mnode is counted as consuming | | 4 | |
+| 62 | http | | **S** | | switch for the server's internal http service | 0: disable the http service, 1: enable it | 1 | |
+| 63 | mqtt | YES | **S** | | switch for the server's internal mqtt service | 0: disable the mqtt service, 1: enable it | 0 | |
+| 64 | monitor | | **S** | | switch for the server's internal system monitoring, which records the physical node's load (CPU, memory, disk, network bandwidth, HTTP request count) into the `LOG` database | 0: disable monitoring, 1: enable it | 0 | |
+| 65 | httpEnableRecordSql | | **S** | | internal use; record SQL calls made through the RESTful interface | | 0 | generated files (httpnote.0/httpnote.1) live in the same directory as the server logs |
+| 66 | httpMaxThreads | | **S** | | number of threads for the RESTful interface | | 2 | |
+| 67 | telegrafUseFieldNum | YES | | | | | | |
+| 68 | restfulRowLimit | | **S** | | records returned per RESTful call | | 10240 | maximum 10,000,000 |
+| 69 | numOfLogLines | | **SC** | | maximum lines per log file | | 10,000,000 | |
+| 70 | asyncLog | | **SC** | | log write mode | 0: synchronous, 1: asynchronous | 1 | |
+| 71 | logKeepDays | | **SC** | days | maximum retention of log files | | 0 | when >0, log files are renamed taosdlog.xxx, where xxx is the timestamp of the last modification |
+| 72 | debugFlag | | **SC** | | run-log level | 131 (errors and warnings), 135 (errors, warnings, debug), 143 (errors, warnings, debug, trace) | 131 or 135 (modules differ) | |
+| 73 | mDebugFlag | | **S** | | management module log level | as above | 135 | |
+| 74 | dDebugFlag | | **SC** | | dnode module log level | as above | 135 | |
+| 75 | sDebugFlag | | **SC** | | sync module log level | as above | 135 | |
+| 76 | wDebugFlag | | **SC** | | wal module log level | as above | 135 | |
+| 77 | sdbDebugFlag | | **SC** | | sdb module log level | as above | 135 | |
+| 78 | rpcDebugFlag | | **SC** | | rpc module log level | as above | | |
+| 79 | tmrDebugFlag | | **SC** | | timer module log level | as above | | |
+| 80 | cDebugFlag | | **C** | | client module log level | as above | | |
+| 81 | jniDebugFlag | | **C** | | jni module log level | as above | | |
+| 82 | odbcDebugFlag | | **C** | | odbc module log level | as above | | |
+| 83 | uDebugFlag | | **SC** | | shared utility module log level | as above | | |
+| 84 | httpDebugFlag | | **S** | | http module log level | as above | | |
+| 85 | mqttDebugFlag | | **S** | | mqtt module log level | as above | | |
+| 86 | monitorDebugFlag | | **S** | | monitoring module log level | as above | | |
+| 87 | qDebugFlag | | **SC** | | query module log level | as above | | |
+| 88 | vDebugFlag | | **SC** | | vnode module log level | as above | | |
+| 89 | tsdbDebugFlag | | **S** | | TSDB module log level | as above | | |
+| 90 | cqDebugFlag | | **SC** | | continuous query module log level | as above | | |
+| 91 | tscEnableRecordSql | | **C** | | whether to record client SQL statements to a file | 0: no, 1: yes | 0 | generated files (tscnote-xxxx.0/tscnote-xxxx.1, xxxx being the pid) live in the same directory as the client logs |
+| 92 | enableCoreFile | | **SC** | | whether to generate a core file when the service crashes | 0: no, 1: yes | 1 | the core file location depends on the launch method: 1. started via systemctl start taosd, the core lands in the root directory; 2. started manually, in taosd's working directory |
+| 93 | gitinfo | YES | **SC** | | | 1 | | |
+| 94 | gitinfoofInternal | YES | **SC** | | | 2 | | |
+| 95 | Buildinfo | YES | **SC** | | | 3 | | |
+| 96 | version | YES | **SC** | | | 4 | | |
+| 97 | | | | | | | | |
+| 98 | maxBinaryDisplayWidth | | **C** | | upper limit on the display width of binary and nchar fields in the taos shell; anything beyond it is hidden | 5 - | 30 | the actual limit is computed as follows: if the field value is longer than maxBinaryDisplayWidth, the limit is the larger of the **field-name length** and **maxBinaryDisplayWidth**; otherwise, the larger of the **field-name length** and the **value length**; can be changed dynamically in the shell with set max_binary_display_width nn |
+| 99 | queryBufferSize | | **S** | MB | memory reserved for all concurrent queries | | | estimate by multiplying the application's likely maximum concurrency by the number of tables, then by 170 (before 2.0.15 the unit of this parameter was bytes) |
+| 100 | ratioOfQueryCores | | **S** | | maximum number of query threads | | | minimum 0: a single query thread; maximum 2: up to twice as many threads as CPU cores; default 1: as many as cores; may be fractional, 0.5 meaning up to half the cores |
+| 101 | update | | **S** | | allow updating existing data rows | 0 \| 1 | 0 | since version 2.0.8.0 |
+| 102 | cacheLast | | **S** | | whether to cache subtables' latest data in memory | 0: off; 1: cache each subtable's last row; 2: cache the latest non-NULL value of each column; 3: both | 0 | not supported in taos.cfg before 2.1.2.0 / 2.0.20.7 |
+| 103 | numOfCommitThreads | YES | **S** | | maximum number of write (commit) threads | | | |
+| 104 | maxWildCardsLength | | **C** | bytes | maximum length allowed for a wildcard string of the LIKE operator | 0-16384 | 100 | added in 2.1.6.1 |
 
 **Note:** TDengine uses 13 consecutive TCP and UDP ports starting from serverPort; be sure to open them in the firewall. With the default configuration that means opening ports 6030 through 6042, both TCP and UDP. (For details see [TDengine 2.0 port description](https://www.taosdata.com/cn/documentation/faq#port))
 
 Data in different scenarios often has different characteristics: retention days, replicas, collection frequency, record size, number of collection points, compression, and so on can all differ. For the highest storage efficiency, TDengine provides the following storage-related configuration parameters (usable both as parameters of the create database statement and in the taos.cfg configuration file as defaults for newly created databases):
 
-- days: time span of the data stored in one data file, in days; default 10.
-- keep: number of days the database retains data, in days; default 3650. (Modifiable via alter database)
-- minRows: minimum records per file block; default 100.
-- maxRows: maximum records per file block; default 4096.
-- comp: file compression flag. 0: off; 1: one-stage compression; 2: two-stage compression. Default: 2. (Modifiable via alter database)
-- wal: WAL level. 1: write wal without fsync; 2: write wal with fsync. Default: 1. (The parameter must be written walLevel in taos.cfg)
-- fsync: fsync period when wal is set to 2; 0 means fsync on every write. In milliseconds; default 3000.
-- cache: memory block size, in megabytes (MB); default 16.
-- blocks: number of cache-sized memory blocks per VNODE (TSDB); a VNODE's memory use is therefore roughly (cache * blocks). In blocks; default 4. (Modifiable via alter database)
-- replica: replica count; range 1-3; default 1. (Modifiable via alter database)
-- quorum: confirmations required for command execution in multi-replica environments; range 1, 2; default 1. (Modifiable via alter database)
-- precision: timestamp precision; ms for milliseconds, us for microseconds; default ms. (Not supported in taos.cfg before 2.1.2.0 / 2.0.20.7.)
-- cacheLast: whether to cache subtables' latest data in memory. 0: off; 1: cache each subtable's last row; 2: cache the latest non-NULL value of each column; 3: both. Default: 0. (Modifiable via alter database) (The 0~3 range is supported from 2.1.2.0, previously only [0, 1]; before 2.0.11.0 the SQL statement does not support this parameter.) (Not supported in taos.cfg before 2.1.2.0 / 2.0.20.7.)
-- update: whether updates are allowed. 0: no; 1: yes. Default: 0.
+| **#** | **Parameter** | **Unit** | **Meaning** | **Range** | **Default** |
+| ----- | ---------------- | -------- | ------------------------------------------------------------ | ------------------------------------------------ | ---------- |
+| 1 | days | days | time span of the data stored in one data file | | 10 |
+| 2 | keep | days | (modifiable via alter database<!-- REPLACE_OPEN_TO_ENTERPRISE__KEEP_PARAM_DESCRIPTION_IN_PARAM_LIST -->) number of days data is retained | | 3650 |
+| 3 | cache | MB | memory block size | | 16 |
+| 4 | blocks | | (modifiable via alter database) number of cache-sized memory blocks per VNODE (TSDB); a VNODE's memory use is therefore roughly (cache * blocks) | | 4 |
+| 5 | quorum | | (modifiable via alter database) confirmations required for command execution in multi-replica environments | 1-2 | 1 |
+| 6 | minRows | | minimum records per file block | | 100 |
+| 7 | maxRows | | maximum records per file block | | 4096 |
+| 8 | comp | | (modifiable via alter database) file compression flag | 0: off, 1: one-stage compression, 2: two-stage compression | 2 |
+| 9 | walLevel | | (named wal as a database parameter; must be written walLevel in taos.cfg) WAL level | 1: write wal without fsync; 2: write wal with fsync | 1 |
+| 10 | fsync | ms | fsync period when wal is set to 2; 0 means fsync on every write | | 3000 |
+| 11 | replica | | (modifiable via alter database) replica count | 1-3 | 1 |
+| 12 | precision | | timestamp precision (not supported in taos.cfg before 2.1.2.0 / 2.0.20.7) (nanosecond precision supported from 2.1.5.0) | ms for milliseconds, us for microseconds, ns for nanoseconds | ms |
+| 13 | update | | whether updates are allowed | 0: no; 1: yes | 0 |
+| 14 | cacheLast | | (modifiable via alter database) whether to cache subtables' latest data in memory (the 0~3 range is supported from 2.1.2.0, previously only [0, 1]; before 2.0.11.0 the SQL statement does not support this parameter) (not supported in taos.cfg before 2.1.2.0 / 2.0.20.7) | 0: off; 1: cache each subtable's last row; 2: cache the latest non-NULL value of each column; 3: both | 0 |
 
 For one application, data with several different characteristics may coexist. The best design is to put tables with the same characteristics in the same library; an application then has multiple libraries, each configured with its own storage parameters, giving the system optimal performance. TDengine lets an application specify the above parameters when creating a library; if specified, they override the corresponding system defaults. For example:
 
-```
-create database demo days 10 cache 32 blocks 8 replica 3 update 1;
+```mysql
+CREATE DATABASE demo DAYS 10 CACHE 32 BLOCKS 8 REPLICA 3 UPDATE 1;
 ```
 
 This SQL creates a library demo in which each data file stores 10 days of data, memory blocks are 32 MB, each VNODE uses 8 memory blocks, there are 3 replicas, updates are allowed, and all other parameters follow the system configuration.
 
 After a database is created, only some parameters can be modified, taking effect immediately; the rest cannot be changed:
 
 | **Parameter** | **Modifiable** | **Range** | **Example syntax** |
 | ----------- | ------------ | ------------------------------------------------------------ | ------------------------------------- |
 | name | | | |
 | create time | | | |
 | ntables | | | |
 | vgroups | | | |
 | replica | **YES** | 1-1 with 1 online dnode; 1-2 with 2; 1-3 with 3 or more | ALTER DATABASE <dbname> REPLICA *n* |
 | quorum | **YES** | 1-2 | ALTER DATABASE <dbname> QUORUM *n* |
 | days | | | |
 | keep | **YES** | days-365000 | ALTER DATABASE <dbname> KEEP *n* |
 | cache | | | |
 | blocks | **YES** | 3-1000 | ALTER DATABASE <dbname> BLOCKS *n* |
 | minrows | | | |
 | maxrows | | | |
 | wal | | | |
 | fsync | | | |
 | comp | **YES** | 0-2 | ALTER DATABASE <dbname> COMP *n* |
 | precision | | | |
 | status | | | |
 | update | | | |
 | cachelast | **YES** | 0 \| 1 \| 2 \| 3 | ALTER DATABASE <dbname> CACHELAST *n* |
 
 **Note:** before version 2.1.3.0, parameters changed through the ALTER DATABASE statement required a server restart to take effect.
 
 When a new dnode joins a TDengine cluster, several cluster-related parameters must match the existing cluster's configuration, or the node cannot join. The following parameters are checked:
 
 - numOfMnodes: the number of management nodes in the system. Default: 3. (From 2.0.20.11 in the 2.0 line and from 2.1.6.0 in 2.1 and later, the default numOfMnodes changed to 1.)
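As a concrete illustration of the modifiable parameters listed above, using the demo database from this section (the values are arbitrary but within the documented ranges):

```mysql
ALTER DATABASE demo KEEP 365;
ALTER DATABASE demo BLOCKS 100;
```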
@@ -172,7 +307,7 @@ ALTER DNODE <dnode_id> <config>
 alter dnode 1 debugFlag 135;
 ```
 
-## <a class="anchor" id="client"></a>Client configuration
+## <a class="anchor" id="client"></a>Client and application driver configuration
 
 TDengine's interactive front-end client application is taos, together with the application driver; it shares the configuration file taos.cfg with taosd. When running taos, use the -c parameter to specify the configuration directory, e.g. taos -c /home/cfg means using the parameters in the taos.cfg under /home/cfg/; the default directory is /etc/taos. For more on using taos see `taos --help`. This section covers the parameters used by the taos client application in the taos.cfg configuration file.
@@ -182,15 +317,15 @@ TDengine's interactive front-end client application is taos, together with the application driver...
 taos -C  or  taos --dump-config
 ```
 
-Client configuration parameters
+Client and application driver configuration parameters, with explanations:
 
 - firstEp: the end point of the first taosd instance in the cluster that taos actively connects to at startup; default localhost:6030.
 
 - secondEp: if firstEp is unreachable at startup, taos tries secondEp.
 
-- locale
+- locale: the system locale and encoding format.
 
-  Default: obtained dynamically from the system; if that fails, it must be set in the configuration file or via the API
+  Default: obtained dynamically from the system; if that fails, it must be set in the configuration file or via the API.
 
   To store wide characters in non-ASCII encodings such as Chinese, Japanese, and Korean, TDengine provides a dedicated field type, nchar. Data written to nchar fields is uniformly encoded as UCS4-LE and sent to the server. Note that encoding correctness is guaranteed by the client, so if users want to use nchar fields to store such non-ASCII characters, the client's encoding format must be set correctly.
|
|||
|
||||
在 Linux 中 locale 的命名规则为: <语言>\_<地区>.<字符集编码> 如:zh_CN.UTF-8,zh代表中文,CN代表大陆地区,UTF-8表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux系统与 Mac OSX 系统可以通过设置locale来确定系统的字符编码,由于Windows使用的locale中不是POSIX标准的locale格式,因此在Windows下需要采用另一个配置参数charset来指定字符编码。在Linux 系统中也可以使用charset来指定字符编码。
|
||||
|
||||
- charset
|
||||
- charset:字符集编码。
|
||||
|
||||
默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置
|
||||
默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。
|
||||
|
||||
如果配置文件中不设置charset,在Linux系统中,taos在启动时候,自动读取系统当前的locale信息,并从locale信息中解析提取charset编码格式。如果自动读取locale信息失败,则尝试读取charset配置,如果读取charset配置也失败,则中断启动过程。
|
||||
|
||||
|
@ -260,7 +395,7 @@ taos -C 或 taos --dump-config
|
|||
|
||||
- maxBinaryDisplayWidth
|
||||
|
||||
Shell中binary 和 nchar字段的显示宽度上限,超过此限制的部分将被隐藏。默认值:30。可在 shell 中通过命令 set max_binary_display_width nn 动态修改此选项。
|
||||
Shell中 binary 和 nchar 字段的显示宽度上限,超过此限制的部分将被隐藏。默认值:30。可在 taos shell 中通过命令 set max_binary_display_width nn 动态修改此选项。
|
||||
|
||||
## <a class="anchor" id="user"></a>用户管理
|
||||
|
||||
|
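Inside the taos shell that would be typed as, for instance (50 being an arbitrary width):

```mysql
set max_binary_display_width 50
```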
@@ -315,7 +450,7 @@ TDengine also supports importing data from CSV files into existing tables in the shell...
 ```mysql
 insert into tb1 file 'path/data.csv';
 ```
-Note: if the first row of the CSV file contains a description, delete it manually before importing
+**Note: if the first row of the CSV file contains a description, delete it manually before importing. If a column is empty, fill in NULL, without quotes.**
 
 For example, suppose there is a subtable d1001 with the following schema:
@@ -343,7 +478,7 @@ taos> DESCRIBE d1001
 '2018-10-11 06:38:05.000',17.30000,219,0.32000
 '2018-10-12 06:38:05.000',18.30000,219,0.31000
 ```
-The data can then be imported with the following command
+The data can then be imported with the following command:
 
 ```mysql
 taos> insert into d1001 file '~/data.csv';
@@ -360,7 +495,7 @@ TDengine provides the convenient database import/export tool taosdump...
 
 **Exporting CSV files per table**
 
-To export the data of one table or one STable, run in the shell
+To export the data of one table or one STable, run in the taos shell:
 
 ```mysql
 select * from <tb_name> >> data.csv;
@@ -370,7 +505,9 @@ select * from <tb_name> >> data.csv;
 
 **Exporting data with taosdump**
 
-TDengine provides the convenient database export tool taosdump. Users can choose to export all databases, one database, or one table in a database; all data or a time range; or even just the table definitions. For usage details see the blog post: [A user guide to the TDengine DUMP tool](https://www.taosdata.com/blog/2020/03/09/1334.html)
+With taosdump, users can choose to export all databases, one database, or one table in a database; all data or a time range; or even just the table definitions.
+
+For usage details see the blog post: [A user guide to the TDengine DUMP tool](https://www.taosdata.com/blog/2020/03/09/1334.html).
 
 ## <a class="anchor" id="status"></a>Managing system connections and query tasks
@ -427,7 +564,7 @@ TDengine启动后,会自动创建一个监测数据库log,并自动将服务
|
|||
COMPACT VNODES IN (vg_id1, vg_id2, ...)
|
||||
```
|
||||
|
||||
COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会通过任务队列尽快安排重整操作的具体执行。COMPACT 指令所需的 VGroup id,可以通过 `SHOW VGROUPS;` 指令的输出结果获取;而且在 `SHOW VGROUPS;` 中会有一个 compacting 列,值为 1 时表示对应的 VGroup 正在进行碎片重整,为 0 时则表示并没有处于重整状态。
|
||||
COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会通过任务队列尽快安排重整操作的具体执行。COMPACT 指令所需的 VGroup id,可以通过 `SHOW VGROUPS;` 指令的输出结果获取;而且在 `SHOW VGROUPS;` 中会有一个 compacting 列,值为 2 时表示对应的 VGroup 处于排队等待进行重整的状态,值为 1 时表示正在进行碎片重整,为 0 时则表示并没有处于重整状态(未要求进行重整或已经完成重整)。
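For instance (the vgroup ids below are illustrative), you would first read the ids and the compacting status from `SHOW VGROUPS;`, then request defragmentation:

```mysql
SHOW VGROUPS;
COMPACT VNODES IN (2, 3);
```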
Note that defragmentation consumes a large amount of disk I/O. While it is running it may therefore affect the node's write and query performance, and in extreme cases may even block writes for a short time.

@@ -435,46 +572,100 @@ The COMPACT command starts defragmentation on one or more specified VGroups; the system

After TDengine is installed, the following directories and files are created in the operating system by default:

| Directory/File | Description |
| ------------------------- | :----------------------------------------------------------- |
| **Directory/File** | **Description** |
| ------------------------- | ------------------------------------------------------------ |
| /usr/local/taos/bin | Directory of TDengine executables; they are symlinked into the /usr/bin directory. |
| /usr/local/taos/connector | Directory of the various TDengine connectors. |
| /usr/local/taos/driver | Directory of the TDengine dynamic libraries; symlinked into the /usr/lib directory. |
| /usr/local/taos/examples | Directory of TDengine application examples in various languages. |
| /usr/local/taos/include | Header files of the C API exposed by TDengine. |
| /etc/taos/taos.cfg | Default TDengine [configuration file] |
| /var/lib/taos | Default TDengine data file directory, location changeable via the [configuration file]. |
| /var/log/taos | Default TDengine log file directory, location changeable via the [configuration file] |
| /var/lib/taos | Default TDengine data file directory. The location can be changed via the [configuration file]. |
| /var/log/taos | Default TDengine log file directory. The location can be changed via the [configuration file]. |

**Executables**

All TDengine executables are stored in the _/usr/local/taos/bin_ directory by default, including:

- _taosd_: the TDengine server executable
- _taos_: the TDengine shell executable
- _taosdump_: the data import/export tool
- remove.sh: the script to uninstall TDengine; execute with caution. It is linked to the rmtaos command in the /usr/bin directory and deletes the installation directory /usr/local/taos, but preserves /etc/taos, /var/lib/taos, and /var/log/taos.
- *taosd*: the TDengine server executable
- *taos*: the TDengine shell executable
- *taosdump*: the data import/export tool
- *taosdemo*: the TDengine testing tool
- remove.sh: the script to uninstall TDengine; execute with caution. It is linked to the **rmtaos** command in the /usr/bin directory and deletes the installation directory /usr/local/taos, but preserves /etc/taos, /var/lib/taos, and /var/log/taos.

You can configure different data and log directories by modifying the system configuration file taos.cfg.
## Starting, Stopping and Uninstalling TDengine

TDengine uses the Linux systemd/systemctl/service facilities to manage starting, stopping, and restarting the service. The TDengine service process is taosd; by default, TDengine starts automatically after system boot. A DBA can stop, start, or restart the service manually via systemd/systemctl/service.

Taking systemctl as an example, the commands are:

- Start the service process: `systemctl start taosd`

- Stop the service process: `systemctl stop taosd`

- Restart the service process: `systemctl restart taosd`

- Check the service status: `systemctl status taosd`

If the service process is active, the status command shows information like the following:
```
......

Active: active (running)

......
```

If the background service process is stopped, the status command shows information like the following:
```
......

Active: inactive (dead)

......
```

To uninstall TDengine, just execute the following command
```
rmtaos
```

**Warning: after this command is executed, the TDengine program will be removed completely; use it with great caution.**
## <a class="anchor" id="keywords"></a>TDengine Parameter Limits and Reserved Keywords

**Naming rules**

1. Legal characters: English letters, digits, and underscore
2. Names may begin with an English letter or underscore, but not with a digit
3. Names are case-insensitive

**Legal character set for passwords**

`[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`

Excluding ```‘“`\``` (single and double quotes, apostrophe, backslash, and space)

- Database name: must not contain "." or special characters; at most 32 characters
- Table name: must not contain "." or special characters; together with its database name, at most 192 characters
- Table name: must not contain "." or special characters; together with its database name, at most 192 characters; each data row is at most 16k characters long
- Table column name: must not contain special characters; at most 64 characters
- Database, table, and column names must not begin with a digit; the legal character set is "English letters, digits, and underscore"
- Number of columns per table: at most 1024
- Number of columns per table: at most 1024; at least 2 are required, and the first column must be a timestamp
- Maximum record length: including the 8-byte timestamp, at most 16KB (each BINARY/NCHAR column additionally occupies 2 bytes of storage)
- Default maximum string length of a single SQL statement: 65480 bytes
- Default maximum string length of a single SQL statement: 65480 bytes; adjustable via the system configuration parameter maxSQLLength, up to 1048576 bytes
- Number of database replicas: at most 3
- User name: at most 23 bytes
- User password: at most 15 bytes
- Number of tags: at most 128
- Number of tags: at most 128; 0 is allowed
- Total tag length: at most 16K bytes
- Number of records: limited only by storage space
- Number of tables: limited only by the number of nodes
- Number of databases: limited only by the number of nodes
- Number of virtual nodes per database: at most 64
- The system imposes no limit on the number of databases, STables, or tables; they are limited only by system resources
- A SELECT statement may return at most 1024 columns (function calls in the statement may also occupy some column slots); when over the limit, explicitly specify fewer return columns to avoid execution errors.

TDengine currently has nearly 200 internal reserved keywords, which, regardless of case, cannot be used as database names, table names, STable names, data column names, or tag column names. These keywords are listed below:
@@ -519,3 +710,102 @@ All TDengine executables are stored in the _/usr/local/taos/bin_ directory by default

| CONNS | ID | NOTNULL | STABLE | WAL |
| COPY | IF | NOW | STABLES | WHERE |

## Diagnostics and Others

#### Network connection diagnosis

When a client application cannot reach the server, the connectivity of each port on the network between client and server needs to be checked so the fault can be isolated.

Network connection diagnosis currently supports tests between Linux and Linux, and between Linux and Windows.

Diagnostic steps:

1. If the port range to be diagnosed overlaps with the port range of the server's taosd instance, stop that taosd instance first
2. On the server, run `taos -n server -P <port>` to listen, in server mode, on the range of ports based at port
3. On the client, run `taos -n client -h <fqdn of server> -P <port>` to send test packets, in client mode, to the specified server and port

If the server is working normally, it prints output like the following

```bash
# taos -n server -P 6000
12/21 14:50:13.522509 0x7f536f455200 UTL work as server, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000

12/21 14:50:13.522659 0x7f5352242700 UTL TCP server at port:6000 is listening
12/21 14:50:13.522727 0x7f5351240700 UTL TCP server at port:6001 is listening
...
...
...
12/21 14:50:13.523954 0x7f5342fed700 UTL TCP server at port:6011 is listening
12/21 14:50:13.523989 0x7f53437ee700 UTL UDP server at port:6010 is listening
12/21 14:50:13.524019 0x7f53427ec700 UTL UDP server at port:6011 is listening
12/21 14:50:22.192849 0x7f5352242700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6000
12/21 14:50:22.192993 0x7f5352242700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6000
12/21 14:50:22.237082 0x7f5351a41700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6000
12/21 14:50:22.237203 0x7f5351a41700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6000
12/21 14:50:22.237450 0x7f5351240700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6001
12/21 14:50:22.237576 0x7f5351240700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6001
12/21 14:50:22.281038 0x7f5350a3f700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6001
12/21 14:50:22.281141 0x7f5350a3f700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6001
...
...
...
12/21 14:50:22.677443 0x7f5342fed700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6011
12/21 14:50:22.677576 0x7f5342fed700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6011
12/21 14:50:22.721144 0x7f53427ec700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6011
12/21 14:50:22.721261 0x7f53427ec700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6011
```
If the client is working normally, it prints output like the following:

```bash
# taos -n client -h 172.27.0.7 -P 6000
12/21 14:50:22.192434 0x7fc95d859200 UTL work as client, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000

12/21 14:50:22.192472 0x7fc95d859200 UTL server ip:172.27.0.7 is resolved from host:172.27.0.7
12/21 14:50:22.236869 0x7fc95d859200 UTL successed to test TCP port:6000
12/21 14:50:22.237215 0x7fc95d859200 UTL successed to test UDP port:6000
...
...
...
12/21 14:50:22.676891 0x7fc95d859200 UTL successed to test TCP port:6010
12/21 14:50:22.677240 0x7fc95d859200 UTL successed to test UDP port:6010
12/21 14:50:22.720893 0x7fc95d859200 UTL successed to test TCP port:6011
12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011
```
Reading the printed error messages carefully helps the administrator find the cause and fix the problem.

#### Startup status and RPC diagnosis

`taos -n startup -h <fqdn of server>`

Determining whether the taosd server has started successfully is a situation database administrators often face; in particular, when several servers form a cluster, checking whether each server instance started successfully becomes an important question. Besides searching the taosd server log files to locate and analyze problems, the startup status of a taosd process can also be diagnosed with `taos -n startup -h <fqdn of server>`.

For a cluster of multiple servers, when service startup takes a long time, this command line can be used to diagnose the startup status of each server's taosd instance and pinpoint the problem accurately.

`taos -n rpc -h <fqdn of server>`

This command diagnoses whether the port of an already started taosd instance can be reached normally. If the taosd program is abnormal or unresponsive, `taos -n rpc -h <fqdn of server>` initiates an RPC exchange with the specified fqdn and checks whether taosd receives it, thereby telling a network problem apart from a taosd failure.

#### sync and arbitrator diagnosis

```
taos -n sync -P 6040 -h <fqdn of server>
taos -n sync -P 6042 -h <fqdn of server>
```

These commands diagnose whether the sync port works normally, i.e. whether the server's sync module works. In addition, -P 6042 diagnoses whether the arbitrator is configured correctly and whether the arbitrator on the specified server can work normally.

#### Server logs

The taosd server log flag debugflag defaults to 131; for debugging it often needs to be raised to 135 or 143.

Once set to 135 or 143, the log files grow quickly, astonishingly so when the write and query load is heavy. If all logs were stored together, the key entries (such as configuration and error messages) could easily be flushed out. For this reason the server stores important logs separately from the rest:

- taosinfo stores the important information logs
- taosdlog stores the other logs

The maximum length of the taosinfo log file is configured with numOfLogLines; one taosd instance keeps at most two such files.

The taosd server logs are written to disk asynchronously. The advantage is that this avoids disk-write pressure that would badly hurt performance; the drawback is that, in extreme cases, a small number of log lines may be lost.


@@ -34,16 +34,16 @@ taos> DESCRIBE meters;

- The time format is ```YYYY-MM-DD HH:mm:ss.MS```, with a default resolution of milliseconds, e.g. ```2017-08-12 18:25:58.128```
- The internal function now returns the client's current time
- When inserting a record, if the timestamp is now, the current time of the client submitting the record is used
- Epoch Time: the timestamp can also be a long integer, the number of milliseconds since 1970-01-01 00:00:00.000 (UTC/GMT) (correspondingly, if the time precision of the Database is set to "microseconds", a long-integer timestamp means the number of microseconds since 1970-01-01 00:00:00.000 (UTC/GMT))
- Timestamps can be added and subtracted, e.g. now-2h means the query time shifted back 2 hours (the last 2 hours). The time unit after the number can be u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), or w (weeks). E.g. `select * from t1 where ts > now-2w and ts <= now-1w` queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a down-sampling operation, the units n (calendar months) and y (calendar years) can also be used.
- Epoch Time: the timestamp can also be a long integer, the number of milliseconds since 1970-01-01 00:00:00.000 (UTC/GMT) (correspondingly, if the time precision of the Database is set to "microseconds", a long-integer timestamp means the number of microseconds since 1970-01-01 00:00:00.000 (UTC/GMT); the logic for nanosecond precision is analogous.)
- Timestamps can be added and subtracted, e.g. now-2h means the query time shifted back 2 hours (the last 2 hours). The time unit after the number can be b (nanoseconds), u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), or w (weeks). E.g. `select * from t1 where ts > now-2w and ts <= now-1w` queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a down-sampling operation, the units n (calendar months) and y (calendar years) can also be used.
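For instance (a minimal sketch reusing the d1001 table from the import example above), a row can be stamped with the client's current time and then read back with relative time arithmetic:

```mysql
INSERT INTO d1001 VALUES (now, 10.2, 219, 0.31);
SELECT * FROM d1001 WHERE ts > now-2h;
```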
The default timestamp in TDengine has millisecond precision, but microseconds can be supported via the PRECISION parameter passed at CREATE DATABASE time.

The default timestamp in TDengine has millisecond precision, but microseconds and nanoseconds can be supported via the PRECISION parameter passed at CREATE DATABASE time. (Nanosecond precision is supported since version 2.1.5.0.)

In TDengine, the following 10 data types can be used in the data model of a regular table.

| # | **Type** | **Bytes** | **Description** |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
| 1 | TIMESTAMP | 8 | Timestamp. Default precision is milliseconds; microseconds are supported. Counted from 1970-01-01 00:00:00.000 (UTC/GMT); earlier times are not allowed. (Since version 2.0.18.0, this time range restriction has been removed) |
| 1 | TIMESTAMP | 8 | Timestamp. Default precision is milliseconds; microseconds and nanoseconds are supported. Counted from 1970-01-01 00:00:00.000 (UTC/GMT); earlier times are not allowed. (Since version 2.0.18.0, this time range restriction has been removed) (Nanosecond precision is supported since version 2.1.5.0) |
| 2 | INT | 4 | Integer, range [-2^31+1, 2^31-1]; -2^31 is used for NULL |
| 3 | BIGINT | 8 | Long integer, range [-2^63+1, 2^63-1]; -2^63 is used for NULL |
| 4 | FLOAT | 4 | Floating point, 6-7 significant digits, range [-3.4E38, 3.4E38] |

@@ -182,7 +182,7 @@ The default timestamp in TDengine has millisecond precision, but via the PRECISION parameter passed at CREATE DATABASE

- **Create tables in batches**

```mysql
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
```
Create a large number of data tables in batches at higher speed. (Server side 2.0.14 and above.)
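For instance (the tag values are illustrative, following the meters example used in this document), two sub-tables can be created in a single statement:

```mysql
CREATE TABLE IF NOT EXISTS d2001 USING meters TAGS ('Beijing.Chaoyang', 2) IF NOT EXISTS d2002 USING meters TAGS ('Beijing.Haidian', 3);
```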
@@ -206,7 +206,7 @@ The default timestamp in TDengine has millisecond precision, but via the PRECISION parameter passed at CREATE DATABASE

Shows the information of all data tables in the current database.

Note: wildcards can be used in like to match names; the wildcard string may be at most 24 bytes long.

Note: wildcards can be used in like to match names; the wildcard string may be at most 20 bytes long. (Since version 2.1.6.1, the wildcard string length limit has been relaxed to 100 bytes, and it can be configured via the maxWildCardsLength parameter in taos.cfg. Very long wildcard strings are discouraged, however, since they may severely degrade the performance of LIKE.)

Wildcard matching: 1) '%' (percent sign) matches 0 to any number of characters; 2) '\_' (underscore) matches exactly one character.
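For instance (the table names are illustrative):

```mysql
SHOW TABLES LIKE 'd100_';  -- '_' matches exactly one character: d1001, d1002, ...
SHOW TABLES LIKE 'd%';     -- '%' matches any run of characters
```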
@@ -389,7 +389,7 @@ INSERT INTO

INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (1626164208000, 10.15, 217, 0.33);
```
**Note:**
1) In the second example, the two rows' first-column timestamps are written in different notations. The string timestamp notation is not affected by the time precision setting of the containing DATABASE, whereas the long-integer notation is: the timestamp in the example is written as 1626164208000 under millisecond precision, but under microsecond precision it must be written as 1626164208000000.
1) In the second example, the two rows' first-column timestamps are written in different notations. The string timestamp notation is not affected by the time precision setting of the containing DATABASE, whereas the long-integer notation is: the timestamp in the example is written as 1626164208000 under millisecond precision, but under microsecond precision it must be written as 1626164208000000, and under nanosecond precision as 1626164208000000000.
2) When writing data with the "insert multiple records" form, do not set the first-column timestamp of every row to NOW; otherwise the records in the statement share the same timestamp and may overwrite one another, so that not all of the rows are saved correctly. The reason is that the NOW function resolves to the actual execution time of the SQL statement, so multiple NOW markers in the same statement are replaced with exactly the same timestamp value.
3) The oldest record timestamp allowed for insertion is the current server time minus the configured keep value (the number of days data is retained); the newest allowed is the current server time plus the configured days value (the time span, in days, of the data stored in one data file). keep and days can both be specified when creating the database; their defaults are 3650 days and 10 days respectively.
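For instance (a minimal sketch; the database name is illustrative), both windows can be set explicitly when the database is created:

```mysql
CREATE DATABASE power KEEP 3650 DAYS 10;
```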
@@ -414,13 +414,13 @@ INSERT INTO

```
When auto-creating tables, values can also be given for only some of the TAGS columns; the unspecified TAGS columns are set to NULL. For example:
```mysql
INSERT INTO d21001 USING meters (groupdId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33);
INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33);
```
The auto-create-table syntax also supports inserting records into multiple tables in one statement. For example:
```mysql
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
d21002 USING meters (groupdId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
d21003 USING meters (groupdId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
```
**Note:** before version 2.0.20.5, when using the auto-create-table syntax and specifying columns, the column names had to follow immediately after the sub-table name and could not, as in the example, be placed between TAGS and VALUES. From version 2.0.20.5 on, both notations are accepted, but they must not be mixed in one SQL statement, or a syntax error is reported.

@@ -435,6 +435,17 @@ INSERT INTO

INSERT INTO d1001 FILE '/tmp/csvfile.csv';
```

- **Insert data records from a file, auto-creating tables**
From version 2.1.5.0 on, when inserting data from a CSV file, tables that do not yet exist can be created automatically using a STable as template. For example:
```mysql
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile.csv';
```
Records can also be inserted into multiple tables, with auto-creation, in one statement. For example:
```mysql
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile_21001.csv'
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
```

**Writing historical records**: either the IMPORT or the INSERT command can be used; the syntax and function of IMPORT are exactly the same as those of INSERT.
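For instance, importing the CSV file from the example above with IMPORT is written exactly like the INSERT form:

```mysql
IMPORT INTO d1001 FILE '/tmp/csvfile.csv';
```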
**Note:** for insert-type SQL statements we use a streaming parse strategy, so the part of the SQL that is correct is executed before a later error is discovered. In the SQL below, the INSERT statement is invalid, but d1001 is still created.

@@ -942,6 +953,8 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions

### Selection Functions

With all selection functions, you can also select the ts column or tag columns (including tbname) for output, which makes it easy to tell which data row a selected value came from.
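For instance (a minimal sketch following the meters example used in this document), the timestamp and source table of the selected value can be returned alongside it:

```mysql
SELECT MIN(voltage), ts, tbname FROM meters;
```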
- **MIN**
```mysql
SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];

@@ -1215,6 +1228,37 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions

Query OK, 1 row(s) in set (0.001042s)
```

- **INTERP**
```mysql
SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR})];
```
Description: returns the record of the specified field of a table/STable at the specified time cross-section.

Return data type: same as the field it is applied to.

Applicable fields: all fields.

Applies to: **tables and STables**.

Note: (this function was added in version 2.0.15.0) INTERP must specify the time cross-section; if no data exists exactly at that cross-section, interpolation is performed according to the FILL parameter setting. The condition clause may carry additional filter conditions, e.g. on tags or tbname.

Limitation: INTERP does not currently support FILL(NEXT).

Example:
```mysql
taos> select interp(*) from meters where ts='2017-7-14 10:42:00.005' fill(prev);
       interp(ts)        |  interp(f1)  |  interp(f2)  |  interp(f3)  |
====================================================================
 2017-07-14 10:42:00.005 |            5 |            9 |            6 |
Query OK, 1 row(s) in set (0.002912s)

taos> select interp(*) from meters where tbname in ('t1') and ts='2017-7-14 10:42:00.005' fill(prev);
       interp(ts)        |  interp(f1)  |  interp(f2)  |  interp(f3)  |
====================================================================
 2017-07-14 10:42:00.005 |            5 |            6 |            7 |
Query OK, 1 row(s) in set (0.002005s)
```

### Computation Functions

- **DIFF**

@@ -32,7 +32,7 @@ Replace the database operating in the current connection with “power”, other

An IoT system often has many types of devices, such as smart meters, transformers, buses, and switches for power grids. To facilitate aggregation across multiple tables, TDengine requires a STable to be created for each type of data collection point. Taking the smart meter in Table 1 as an example, you can use the following SQL command to create a STable:

```mysql
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
```

**Note:** The STABLE keyword in this instruction needs to be written as TABLE in versions before 2.0.15.


@@ -17,7 +17,7 @@ The continuous query provided by TDengine differs from the time window calculati

The following example from the smart meter scenario introduces the specific use of continuous queries. Suppose we create a STable and sub-tables through the following SQL statements:

```sql
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupdId int);
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
create table D1001 using meters tags ("Beijing.Chaoyang", 2);
create table D1002 using meters tags ("Beijing.Haidian", 2);
...


@@ -357,4 +357,4 @@ This SQL statement will obtain the last recorded voltage value of all smart mete

In TDengine scenarios, alarm monitoring is a common requirement. Conceptually, it requires the program to filter out the data that meet certain conditions from the data of the latest period of time, compute a result from these data according to a defined formula, and, when the result meets certain conditions and lasts for a certain period of time, notify the user in some form.

In order to meet the needs of users for alarm monitoring, TDengine provides this function in the form of an independent module. For its installation and use, please refer to the blog [How to Use TDengine for Alarm Monitoring](https://www.taosdata.com/blog/2020/04/14/1438.html).


@@ -165,7 +165,7 @@ Note:

- **Create tables in batches**

```mysql
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
```
Create a large number of data tables in batches faster. (Server side 2.0.14 and above)

@@ -1,3 +1,18 @@

/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package main

import (


@@ -1,3 +1,18 @@

/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package dataimport

import (

@@ -144,6 +144,9 @@ keepColumnName 1

# max length of an SQL
# maxSQLLength 65480

# max length of WildCards
# maxWildCardsLength 100

# the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000


@@ -44,7 +44,8 @@ echo "version=${version}"

#docker manifest rm tdengine/tdengine
#docker manifest rm tdengine/tdengine:${version}
if [ "$verType" == "beta" ]; then
  docker manifest rm tdengine/tdengine:latest
  docker manifest inspect tdengine/tdengine-beta:latest
  docker manifest rm tdengine/tdengine-beta:latest
  docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
  docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
  docker login -u tdengine -p ${passWord}  #replace the docker registry username and password

@@ -52,6 +53,7 @@ if [ "$verType" == "beta" ]; then
  docker manifest push tdengine/tdengine-beta:${version}

elif [ "$verType" == "stable" ]; then
  docker manifest inspect tdengine/tdengine:latest
  docker manifest rm tdengine/tdengine:latest
  docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
  docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest


@@ -35,7 +35,7 @@ fi
if [ "$pagMode" == "lite" ]; then
  strip ${build_dir}/bin/taosd
  strip ${build_dir}/bin/taos
  bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
  bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh"
else
  bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\
    ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"


@@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.1.5.0'
version: '2.1.6.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |

@@ -72,7 +72,7 @@ parts:
  - usr/bin/taosd
  - usr/bin/taos
  - usr/bin/taosdemo
  - usr/lib/libtaos.so.2.1.5.0
  - usr/lib/libtaos.so.2.1.6.0
  - usr/lib/libtaos.so.1
  - usr/lib/libtaos.so

@@ -29,15 +29,16 @@ extern "C" {
#include "tsched.h"
#include "tsclient.h"

#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo)  \
  (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_SUPER_TABLE))

#define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \
  (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE))

#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
  (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))

#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \
  (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo) || UTIL_TABLE_IS_TMP_TABLE(metaInfo)))

#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
  (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE))

#pragma pack(push,1)

@@ -61,6 +62,7 @@ typedef struct SJoinSupporter {
  uint64_t uid;  // query table uid
  SArray*  colList;  // previous query information, no need to use this attribute, and the corresponding attribution
  SArray*  exprList;
  SArray*  colCond;
  SFieldInfo fieldsInfo;
  STagCond tagCond;
  SGroupbyExpr groupInfo;  // group by info

@@ -220,7 +222,7 @@ void tscExprDestroy(SArray* pExprInfo);

int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaInfo, SExprInfo*** pExpr, int32_t* num);

void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta);
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta, uint64_t id);

SColumn* tscColumnClone(const SColumn* src);
void tscColumnCopy(SColumn* pDest, const SColumn* pSrc);

@@ -244,8 +246,9 @@ SCond* tsGetSTableQueryCond(STagCond* pCond, uint64_t uid);
void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw);

int32_t tscTagCondCopy(STagCond* dest, const STagCond* src);
int32_t tscColCondCopy(SArray** dest, const SArray* src, uint64_t uid, int16_t tidx);
void tscTagCondRelease(STagCond* pCond);

void tscColCondRelease(SArray** pCond);
void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo);

bool tscShouldBeFreed(SSqlObj* pSql);

@@ -318,7 +321,7 @@ void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex);
bool hasMoreVnodesToTry(SSqlObj *pSql);
bool hasMoreClauseToTry(SSqlObj* pSql);

void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta);
void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeCachedMeta, uint64_t id);

void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp);
void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp);

@@ -340,7 +343,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
int32_t tscCreateTableMetaFromSTableMeta(STableMeta** pChild, const char* name, size_t *tableMetaCapacity);
int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity, STableMeta **ppStable);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);

@@ -355,8 +358,9 @@ char* strdup_throw(const char* str);

bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
STblCond* tsGetTableFilter(SArray* filters, uint64_t uid, int16_t idx);

void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id);
void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id);

#ifdef __cplusplus
}

@@ -84,9 +84,14 @@ typedef struct SParamInfo {
} SParamInfo;

typedef struct SBoundColumn {
  bool    hasVal;  // denote if current column has bound or not
  int32_t offset;  // all column offset value
  int32_t offset;   // all column offset value
  int32_t toffset;  // first part offset for SDataRow TODO: get offset from STSchema on future
  uint8_t valStat;  // denote if current column bound or not(0 means has val, 1 means no val)
} SBoundColumn;
typedef enum {
  VAL_STAT_HAS = 0x0,    // 0 means has val
  VAL_STAT_NONE = 0x01,  // 1 means no val
} EValStat;

typedef struct {
  uint16_t schemaColIdx;

@@ -99,32 +104,106 @@ typedef enum _COL_ORDER_STATUS {
  ORDER_STATUS_ORDERED = 1,
  ORDER_STATUS_DISORDERED = 2,
} EOrderStatus;

typedef struct SParsedDataColInfo {
  int16_t  numOfCols;
  int16_t  numOfBound;
  int32_t *boundedColumns;  // bounded column idx according to schema
  uint16_t flen;        // TODO: get from STSchema
  uint16_t allNullLen;  // TODO: get from STSchema
  uint16_t extendedVarLen;
  int32_t *boundedColumns;  // bound column idx according to schema
  SBoundColumn  *cols;
  SBoundIdxInfo *colIdxInfo;
  int8_t  orderStatus;  // bounded columns:
  int8_t  orderStatus;  // bound columns
} SParsedDataColInfo;

#define IS_DATA_COL_ORDERED(s) ((s) == (int8_t)ORDER_STATUS_ORDERED)
#define IS_DATA_COL_ORDERED(spd) ((spd->orderStatus) == (int8_t)ORDER_STATUS_ORDERED)

typedef struct {
  SSchema *   pSchema;
  int16_t     sversion;
  int32_t     flen;
  uint16_t    nCols;
  void *      buf;
  void *      pDataBlock;
  SSubmitBlk *pSubmitBlk;
  int32_t     dataLen;  // len of SDataRow
  int32_t     kvLen;    // len of SKVRow
} SMemRowInfo;
typedef struct {
  uint8_t      memRowType;
  uint8_t      compareStat;  // 0 unknown, 1 need compare, 2 no need
  TDRowTLenT   dataRowInitLen;
  TDRowTLenT   kvRowInitLen;
  SMemRowInfo *rowInfo;
} SMemRowBuilder;

typedef struct {
  TDRowLenT allNullLen;
} SMemRowHelper;
typedef enum {
  ROW_COMPARE_UNKNOWN = 0,
  ROW_COMPARE_NEED = 1,
  ROW_COMPARE_NO_NEED = 2,
} ERowCompareStat;

int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec);

int  initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols,
                       int32_t allNullLen);
void destroyMemRowBuilder(SMemRowBuilder *pBuilder);

/**
 * @brief
 *
 * @param memRowType
 * @param spd
 * @param idx the absolute bound index of columns
 * @return FORCE_INLINE
 */
static FORCE_INLINE void tscGetMemRowAppendInfo(SSchema *pSchema, uint8_t memRowType, SParsedDataColInfo *spd,
                                                int32_t idx, int32_t *toffset, int16_t *colId) {
  int32_t schemaIdx = 0;
  if (IS_DATA_COL_ORDERED(spd)) {
    schemaIdx = spd->boundedColumns[idx];
    if (isDataRowT(memRowType)) {
      *toffset = (spd->cols + schemaIdx)->toffset;  // the offset of firstPart
    } else {
      *toffset = idx * sizeof(SColIdx);  // the offset of SColIdx
    }
  } else {
    ASSERT(idx == (spd->colIdxInfo + idx)->boundIdx);
    schemaIdx = (spd->colIdxInfo + idx)->schemaColIdx;
    if (isDataRowT(memRowType)) {
      *toffset = (spd->cols + schemaIdx)->toffset;
    } else {
      *toffset = ((spd->colIdxInfo + idx)->finalIdx) * sizeof(SColIdx);
    }
  }
  *colId = pSchema[schemaIdx].colId;
}

/**
 * @brief Applicable to consume by multi-columns
 *
 * @param row
 * @param value
 * @param isCopyVarData In some scenario, the varVal is copied to row directly before calling tdAppend***ColVal()
 * @param colId
 * @param colType
 * @param idx index in SSchema
 * @param pBuilder
 * @param spd
 * @return FORCE_INLINE
 */
static FORCE_INLINE void tscAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId,
                                               int8_t colType, int32_t toffset, SMemRowBuilder *pBuilder,
                                               int32_t rowNum) {
  tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset);
  if (pBuilder->compareStat == ROW_COMPARE_NEED) {
    SMemRowInfo *pRowInfo = pBuilder->rowInfo + rowNum;
    tdGetColAppendDeltaLen(value, colType, &pRowInfo->dataLen, &pRowInfo->kvLen);
  }
}

// Applicable to consume by one row
static FORCE_INLINE void tscAppendMemRowColValEx(SMemRow row, const void *value, bool isCopyVarData, int16_t colId,
                                                 int8_t colType, int32_t toffset, int32_t *dataLen, int32_t *kvLen,
                                                 uint8_t compareStat) {
  tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset);
  if (compareStat == ROW_COMPARE_NEED) {
    tdGetColAppendDeltaLen(value, colType, dataLen, kvLen);
  }
}
typedef struct STableDataBlocks {
  SName    tableName;
  int8_t   tsSource;  // where does the UNIX timestamp come from, server or client

@@ -146,7 +225,7 @@ typedef struct STableDataBlocks {
  uint32_t    numOfAllocedParams;
  uint32_t    numOfParams;
  SParamInfo *params;
  SMemRowHelper rowHelper;
  SMemRowBuilder rowBuilder;
} STableDataBlocks;

typedef struct {

@@ -295,7 +374,7 @@ typedef struct SSqlObj {
  SSqlCmd  cmd;
  SSqlRes  res;
  bool     isBind;

  SSubqueryState subState;
  struct SSqlObj **pSubs;

@@ -368,7 +447,7 @@ void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBloc
void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pParent);
void destroyTableNameList(SInsertStatementParam* pInsertParam);

void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta, uint64_t id);

/**
 * free query result of the sql object

@@ -435,8 +514,398 @@ int16_t getNewResColId(SSqlCmd* pCmd);

int32_t schemaIdxCompar(const void *lhs, const void *rhs);
int32_t boundIdxCompar(const void *lhs, const void *rhs);
int  initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen);
int32_t getExtendedRowSize(STableComInfo *tinfo);
static FORCE_INLINE int32_t getExtendedRowSize(STableDataBlocks *pBlock) {
  ASSERT(pBlock->rowSize == pBlock->pTableMeta->tableInfo.rowSize);
  return pBlock->rowSize + TD_MEM_ROW_DATA_HEAD_SIZE + pBlock->boundColumnInfo.extendedVarLen;
}
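// Note on the check below (as read from the code): a row is built either in
// SDataRow or SKVRow layout; once the actual dataLen/kvLen of a row are known,
// it is flagged for conversion whenever the other layout would be smaller
// (KVRatioConvert being the threshold applied on the data-row side).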
static FORCE_INLINE void checkAndConvertMemRow(SMemRow row, int32_t dataLen, int32_t kvLen) {
  if (isDataRow(row)) {
    if (kvLen < (dataLen * KVRatioConvert)) {
      memRowSetConvert(row);
    }
  } else if (kvLen > dataLen) {
    memRowSetConvert(row);
  }
}

static FORCE_INLINE void initSMemRow(SMemRow row, uint8_t memRowType, STableDataBlocks *pBlock, int16_t nBoundCols) {
  memRowSetType(row, memRowType);
  if (isDataRowT(memRowType)) {
    dataRowSetVersion(memRowDataBody(row), pBlock->pTableMeta->sversion);
    dataRowSetLen(memRowDataBody(row), (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBlock->boundColumnInfo.flen));
  } else {
    ASSERT(nBoundCols > 0);
    memRowSetKvVersion(row, pBlock->pTableMeta->sversion);
    kvRowSetNCols(memRowKvBody(row), nBoundCols);
    kvRowSetLen(memRowKvBody(row), (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols));
  }
}
/**
 * TODO: Move to tdataformat.h and refactor when STSchema available.
 *  - fetch flen and toffset from STSChema and remove param spd
 */
static FORCE_INLINE void convertToSDataRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols,
                                           SParsedDataColInfo *spd) {
  ASSERT(isKvRow(src));
  SKVRow   kvRow = memRowKvBody(src);
  SDataRow dataRow = memRowDataBody(dest);

  memRowSetType(dest, SMEM_ROW_DATA);
  dataRowSetVersion(dataRow, memRowKvVersion(src));
  dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + spd->flen));

  int32_t kvIdx = 0;
  for (int i = 0; i < nCols; ++i) {
    SSchema *schema = pSchema + i;
    void *   val = tdGetKVRowValOfColEx(kvRow, schema->colId, &kvIdx);
    tdAppendDataColVal(dataRow, val != NULL ? val : getNullValue(schema->type), true, schema->type,
                       (spd->cols + i)->toffset);
  }
}

// TODO: Move to tdataformat.h and refactor when STSchema available.
static FORCE_INLINE void convertToSKVRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols, int nBoundCols,
                                         SParsedDataColInfo *spd) {
  ASSERT(isDataRow(src));

  SDataRow dataRow = memRowDataBody(src);
  SKVRow   kvRow = memRowKvBody(dest);

  memRowSetType(dest, SMEM_ROW_KV);
  memRowSetKvVersion(kvRow, dataRowVersion(dataRow));
  kvRowSetNCols(kvRow, nBoundCols);
  kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols));

  int32_t toffset = 0, kvOffset = 0;
  for (int i = 0; i < nCols; ++i) {
    if ((spd->cols + i)->valStat == VAL_STAT_HAS) {
      SSchema *schema = pSchema + i;
      toffset = (spd->cols + i)->toffset;
      void *val = tdGetRowDataOfCol(dataRow, schema->type, toffset + TD_DATA_ROW_HEAD_SIZE);
      tdAppendKvColVal(kvRow, val, true, schema->colId, schema->type, kvOffset);
      kvOffset += sizeof(SColIdx);
    }
  }
}

// TODO: Move to tdataformat.h and refactor when STSchema available.
static FORCE_INLINE void convertSMemRow(SMemRow dest, SMemRow src, STableDataBlocks *pBlock) {
  STableMeta *        pTableMeta = pBlock->pTableMeta;
  STableComInfo       tinfo = tscGetTableInfo(pTableMeta);
  SSchema *           pSchema = tscGetTableSchema(pTableMeta);
  SParsedDataColInfo *spd = &pBlock->boundColumnInfo;

  ASSERT(dest != src);

  if (isDataRow(src)) {
    // TODO: Can we use pBlock->numOfParam directly?
    ASSERT(spd->numOfBound > 0);
    convertToSKVRow(dest, src, pSchema, tinfo.numOfColumns, spd->numOfBound, spd);
  } else {
    convertToSDataRow(dest, src, pSchema, tinfo.numOfColumns, spd);
  }
}

static bool isNullStr(SStrToken *pToken) {
  return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) &&
                                       (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0));
}

static FORCE_INLINE int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
  errno = 0;
  *value = strtold(pToken->z, endPtr);

  // not a valid integer number, return error
  if ((*endPtr - pToken->z) != pToken->n) {
    return TK_ILLEGAL;
  }

  return pToken->type;
}

static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE;
static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE;

static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, SMemRow row, char *msg, char **str,
                                               bool primaryKey, int16_t timePrec, int32_t toffset, int16_t colId,
                                               int32_t *dataLen, int32_t *kvLen, uint8_t compareStat) {
  int64_t iv;
  int32_t ret;
  char *  endptr = NULL;

  if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
    return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z);
  }
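  // Each case below follows the same pattern: a NULL token appends the type's
  // null sentinel; otherwise the token is parsed and range-checked, then
  // appended with tscAppendMemRowColValEx(), which also tracks per-row
  // dataLen/kvLen while the two row layouts still need to be compared.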
  switch (pSchema->type) {
    case TSDB_DATA_TYPE_BOOL: {  // bool
      if (isNullStr(pToken)) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {
        if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) {
          if (strncmp(pToken->z, "true", pToken->n) == 0) {
            tscAppendMemRowColValEx(row, &TRUE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
          } else if (strncmp(pToken->z, "false", pToken->n) == 0) {
            tscAppendMemRowColValEx(row, &FALSE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen,
                                    compareStat);
          } else {
            return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z);
          }
        } else if (pToken->type == TK_INTEGER) {
          iv = strtoll(pToken->z, NULL, 10);
          tscAppendMemRowColValEx(row, ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset,
                                  dataLen, kvLen, compareStat);
        } else if (pToken->type == TK_FLOAT) {
          double dv = strtod(pToken->z, NULL);
          tscAppendMemRowColValEx(row, ((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset,
                                  dataLen, kvLen, compareStat);
        } else {
          return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z);
        }
      }
      break;
    }

    case TSDB_DATA_TYPE_TINYINT:
      if (isNullStr(pToken)) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z);
        } else if (!IS_VALID_TINYINT(iv)) {
          return tscInvalidOperationMsg(msg, "data overflow", pToken->z);
        }

        uint8_t tmpVal = (uint8_t)iv;
        tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }

      break;

    case TSDB_DATA_TYPE_UTINYINT:
      if (isNullStr(pToken)) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z);
        } else if (!IS_VALID_UTINYINT(iv)) {
          return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z);
        }

        uint8_t tmpVal = (uint8_t)iv;
        tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }

      break;

    case TSDB_DATA_TYPE_SMALLINT:
      if (isNullStr(pToken)) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z);
        } else if (!IS_VALID_SMALLINT(iv)) {
          return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z);
        }

        int16_t tmpVal = (int16_t)iv;
        tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }

      break;

    case TSDB_DATA_TYPE_USMALLINT:
      if (isNullStr(pToken)) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z);
        } else if (!IS_VALID_USMALLINT(iv)) {
          return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z);
        }

        uint16_t tmpVal = (uint16_t)iv;
        tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }

      break;

    case TSDB_DATA_TYPE_INT:
      if (isNullStr(pToken)) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid int data", pToken->z);
        } else if (!IS_VALID_INT(iv)) {
          return tscInvalidOperationMsg(msg, "int data overflow", pToken->z);
        }

        int32_t tmpVal = (int32_t)iv;
        tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }

      break;

    case TSDB_DATA_TYPE_UINT:
      if (isNullStr(pToken)) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z);
        } else if (!IS_VALID_UINT(iv)) {
          return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z);
        }

        uint32_t tmpVal = (uint32_t)iv;
        tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }

      break;

    case TSDB_DATA_TYPE_BIGINT:
      if (isNullStr(pToken)) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z);
        } else if (!IS_VALID_BIGINT(iv)) {
          return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z);
        }

        tscAppendMemRowColValEx(row, &iv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }
      break;

    case TSDB_DATA_TYPE_UBIGINT:
      if (isNullStr(pToken)) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z);
        } else if (!IS_VALID_UBIGINT((uint64_t)iv)) {
          return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z);
        }

        uint64_t tmpVal = (uint64_t)iv;
        tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }
      break;

    case TSDB_DATA_TYPE_FLOAT:
      if (isNullStr(pToken)) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {
        double dv;
        if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
          return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
        }

        if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) ||
            isnan(dv)) {
          return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
        }

        float tmpVal = (float)dv;
        tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }
      break;

    case TSDB_DATA_TYPE_DOUBLE:
      if (isNullStr(pToken)) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {
        double dv;
        if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
          return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
        }

        if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) {
          return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
        }

        tscAppendMemRowColValEx(row, &dv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }
      break;

    case TSDB_DATA_TYPE_BINARY:
      // binary data cannot be null-terminated char string, otherwise the last char of the string is lost
      if (pToken->type == TK_NULL) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {  // too long values will return invalid sql, not be truncated automatically
        if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) {  // todo refactor
          return tscInvalidOperationMsg(msg, "string data overflow", pToken->z);
        }
        // STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n);
        char *rowEnd = memRowEnd(row);
        STR_WITH_SIZE_TO_VARSTR(rowEnd, pToken->z, pToken->n);
        tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }
      break;

    case TSDB_DATA_TYPE_NCHAR:
      if (pToken->type == TK_NULL) {
        tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                compareStat);
      } else {
        // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long'
        int32_t output = 0;
        char *  rowEnd = memRowEnd(row);
        if (!taosMbsToUcs4(pToken->z, pToken->n, (char *)varDataVal(rowEnd), pSchema->bytes - VARSTR_HEADER_SIZE,
                           &output)) {
          char buf[512] = {0};
          snprintf(buf, tListLen(buf), "%s", strerror(errno));
          return tscInvalidOperationMsg(msg, buf, pToken->z);
        }
        varDataSetLen(rowEnd, output);
        tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }
      break;

    case TSDB_DATA_TYPE_TIMESTAMP: {
      if (pToken->type == TK_NULL) {
        if (primaryKey) {
          // When building SKVRow primaryKey, we should not skip even with NULL value.
          int64_t tmpVal = 0;
          tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
        } else {
          tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
                                  compareStat);
        }
      } else {
        int64_t tmpVal;
        if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z);
        }
        tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
      }

      break;
    }
  }

  return TSDB_CODE_SUCCESS;
}

#ifdef __cplusplus
}
@@ -339,6 +339,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
  const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"multi-tableMeta";
  if (code != TSDB_CODE_SUCCESS) {
    tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code));
    if (code == TSDB_CODE_RPC_FQDN_ERROR) {
      size_t sz = strlen(tscGetErrorMsgPayload(&sub->cmd));
      tscAllocPayload(&pSql->cmd, (int)sz + 1);
      memcpy(tscGetErrorMsgPayload(&pSql->cmd), tscGetErrorMsgPayload(&sub->cmd), sz);
    }
    goto _error;
  }

@@ -346,7 +351,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
  if (pSql->pStream == NULL) {
    SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);

    if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
    if (pQueryInfo != NULL && TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
      tscDebug("0x%" PRIx64 " continue parse sql after get table-meta", pSql->self);

      code = tsParseSql(pSql, false);

@@ -376,7 +381,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
    } else {
      if (pSql->retryReason != TSDB_CODE_SUCCESS) {
        tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", pSql->self);
        tscResetSqlCmd(pCmd, false);
        pSql->retryReason = TSDB_CODE_SUCCESS;
      } else {
        tscDebug("0x%" PRIx64 " cached table-meta, continue validate sql statement and send query", pSql->self);

@@ -38,43 +38,60 @@ enum {
  TSDB_USE_CLI_TS = 1,
};

static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE;
static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE;

static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows);
static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema,
                                 char *str, char **end);

int32_t getExtendedRowSize(STableComInfo *tinfo) {
  return tinfo->rowSize + PAYLOAD_HEADER_LEN + PAYLOAD_COL_HEAD_LEN * tinfo->numOfColumns;
}
int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen) {
  pHelper->allNullLen = allNullColsLen;  // TODO: get allNullColsLen when creating or altering table meta
  if (pHelper->allNullLen == 0) {
    for (uint16_t i = 0; i < nCols; ++i) {
      uint8_t type = pSSchema[i].type;
      int32_t typeLen = TYPE_BYTES[type];
      pHelper->allNullLen += typeLen;
      if (TSDB_DATA_TYPE_BINARY == type) {
        pHelper->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES);
      } else if (TSDB_DATA_TYPE_NCHAR == type) {
        int len = VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE;
        pHelper->allNullLen += len;
      }
int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols,
                      int32_t allNullLen) {
  ASSERT(nRows >= 0 && nCols > 0 && (nBoundCols <= nCols));
  if (nRows > 0) {
    // already init(bind multiple rows by single column)
    if (pBuilder->compareStat == ROW_COMPARE_NEED && (pBuilder->rowInfo != NULL)) {
      return TSDB_CODE_SUCCESS;
    }
  }
  return 0;
}
static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
  errno = 0;
  *value = strtold(pToken->z, endPtr);

  // not a valid integer number, return error
  if ((*endPtr - pToken->z) != pToken->n) {
    return TK_ILLEGAL;

  if (nBoundCols == 0) {  // file input
    pBuilder->memRowType = SMEM_ROW_DATA;
    pBuilder->compareStat = ROW_COMPARE_NO_NEED;
    return TSDB_CODE_SUCCESS;
  } else {
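    // Heuristic (as read from the thresholds below): a low bound-column ratio
    // favors the sparse SKVRow layout and a high ratio the dense SDataRow
    // layout, with no later comparison needed; in the middle band the builder
    // picks a predicted layout and keeps per-row length stats so rows can be
    // converted later if the other layout turns out smaller.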
|
||||
float boundRatio = ((float)nBoundCols / (float)nCols);
|
||||
|
||||
if (boundRatio < KVRatioKV) {
|
||||
pBuilder->memRowType = SMEM_ROW_KV;
|
||||
pBuilder->compareStat = ROW_COMPARE_NO_NEED;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else if (boundRatio > KVRatioData) {
|
||||
pBuilder->memRowType = SMEM_ROW_DATA;
|
||||
pBuilder->compareStat = ROW_COMPARE_NO_NEED;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
pBuilder->compareStat = ROW_COMPARE_NEED;
|
||||
|
||||
if (boundRatio < KVRatioPredict) {
|
||||
pBuilder->memRowType = SMEM_ROW_KV;
|
||||
} else {
|
||||
pBuilder->memRowType = SMEM_ROW_DATA;
|
||||
}
|
||||
}
|
||||
|
||||
return pToken->type;
|
||||
pBuilder->dataRowInitLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen;
|
||||
pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx);
|
||||
|
||||
if (nRows > 0) {
|
||||
pBuilder->rowInfo = tcalloc(nRows, sizeof(SMemRowInfo));
|
||||
if (pBuilder->rowInfo == NULL) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
for (int i = 0; i < nRows; ++i) {
|
||||
(pBuilder->rowInfo + i)->dataLen = pBuilder->dataRowInitLen;
|
||||
(pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen;
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}

int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {

@@ -146,10 +163,6 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
  return TSDB_CODE_SUCCESS;
}

static bool isNullStr(SStrToken* pToken) {
  return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) &&
                                       (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0));
}
int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
                         int16_t timePrec) {
  int64_t iv;

@@ -400,342 +413,6 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
  return TSDB_CODE_SUCCESS;
}

static FORCE_INLINE TDRowLenT tsSetPayloadColValue(char *payloadStart, char *payload, int16_t columnId,
                                                   uint8_t columnType, const void *value, uint16_t valueLen, TDRowTLenT tOffset) {
  payloadColSetId(payload, columnId);
  payloadColSetType(payload, columnType);
  memcpy(POINTER_SHIFT(payloadStart, tOffset), value, valueLen);
  return valueLen;
}
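The helper above writes a per-column head (id, type) and copies the raw value into the value area at a running offset. A standalone sketch of that "column heads up front, values appended behind" layout; the struct here is a hypothetical stand-in, not the library's real head format:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#pragma pack(push, 1)
typedef struct { int16_t colId; uint8_t type; uint16_t valOffset; } ColHead;  /* assumed layout */
#pragma pack(pop)

int main(void) {
  char payload[256] = {0};
  ColHead *head = (ColHead *)payload;        /* first column tuple */
  uint16_t valArea = sizeof(ColHead) * 2;    /* value area starts after two heads */

  int32_t v = 42;
  head->colId = 1; head->type = 4 /* e.g. INT */; head->valOffset = valArea;
  memcpy(payload + valArea, &v, sizeof(v));  /* the memcpy step of tsSetPayloadColValue() */

  printf("col %d stored at offset %u\n", head->colId, head->valOffset);
  return 0;
}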

static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *payloadStart, char *primaryKeyStart,
                                  char *payload, char *msg, char **str, bool primaryKey, int16_t timePrec,
                                  TDRowTLenT tOffset, TDRowLenT *sizeAppend, TDRowLenT *dataRowColDeltaLen,
                                  TDRowLenT *kvRowColLen) {
  int64_t iv;
  int32_t ret;
  char   *endptr = NULL;

  if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
    return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z);
  }

  switch (pSchema->type) {
    case TSDB_DATA_TYPE_BOOL: {  // bool
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                           getNullValue(TSDB_DATA_TYPE_BOOL), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
      } else {
        if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) {
          if (strncmp(pToken->z, "true", pToken->n) == 0) {
            *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &TRUE_VALUE,
                                               TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
            *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]);
          } else if (strncmp(pToken->z, "false", pToken->n) == 0) {
            *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &FALSE_VALUE,
                                               TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
            *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]);
          } else {
            return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z);
          }
        } else if (pToken->type == TK_INTEGER) {
          iv = strtoll(pToken->z, NULL, 10);
          *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                             ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
          *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]);
        } else if (pToken->type == TK_FLOAT) {
          double dv = strtod(pToken->z, NULL);
          *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                             ((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
          *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]);
        } else {
          return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z);
        }
      }
      break;
    }

    case TSDB_DATA_TYPE_TINYINT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                           getNullValue(TSDB_DATA_TYPE_TINYINT), TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z);
        } else if (!IS_VALID_TINYINT(iv)) {
          return tscInvalidOperationMsg(msg, "data overflow", pToken->z);
        }

        uint8_t tmpVal = (uint8_t)iv;
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
                                           TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset);
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TINYINT]);
      }

      break;

    case TSDB_DATA_TYPE_UTINYINT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                           getNullValue(TSDB_DATA_TYPE_UTINYINT), TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z);
        } else if (!IS_VALID_UTINYINT(iv)) {
          return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z);
        }

        uint8_t tmpVal = (uint8_t)iv;
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
                                           TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset);
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT]);
      }

      break;

    case TSDB_DATA_TYPE_SMALLINT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                           getNullValue(TSDB_DATA_TYPE_SMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z);
        } else if (!IS_VALID_SMALLINT(iv)) {
          return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z);
        }

        int16_t tmpVal = (int16_t)iv;
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
                                           TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset);
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT]);
      }

      break;

    case TSDB_DATA_TYPE_USMALLINT:
      if (isNullStr(pToken)) {
        *sizeAppend =
            tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                 getNullValue(TSDB_DATA_TYPE_USMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z);
        } else if (!IS_VALID_USMALLINT(iv)) {
          return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z);
        }

        uint16_t tmpVal = (uint16_t)iv;
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
                                           TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset);
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT]);
      }

      break;

    case TSDB_DATA_TYPE_INT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                           getNullValue(TSDB_DATA_TYPE_INT), TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid int data", pToken->z);
        } else if (!IS_VALID_INT(iv)) {
          return tscInvalidOperationMsg(msg, "int data overflow", pToken->z);
        }

        int32_t tmpVal = (int32_t)iv;
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
                                           TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset);
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_INT]);
      }

      break;

    case TSDB_DATA_TYPE_UINT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                           getNullValue(TSDB_DATA_TYPE_UINT), TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z);
        } else if (!IS_VALID_UINT(iv)) {
          return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z);
        }

        uint32_t tmpVal = (uint32_t)iv;
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
                                           TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset);
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UINT]);
      }

      break;

    case TSDB_DATA_TYPE_BIGINT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                           getNullValue(TSDB_DATA_TYPE_BIGINT), TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z);
        } else if (!IS_VALID_BIGINT(iv)) {
          return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z);
        }

        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &iv,
                                           TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset);
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BIGINT]);
      }
      break;

    case TSDB_DATA_TYPE_UBIGINT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                           getNullValue(TSDB_DATA_TYPE_UBIGINT), TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
        if (ret != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z);
        } else if (!IS_VALID_UBIGINT((uint64_t)iv)) {
          return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z);
        }

        uint64_t tmpVal = (uint64_t)iv;
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
                                           TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset);
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT]);
      }
      break;

    case TSDB_DATA_TYPE_FLOAT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                           getNullValue(TSDB_DATA_TYPE_FLOAT), TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset);
      } else {
        double dv;
        if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
          return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
        }

        if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) ||
            isnan(dv)) {
          return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
        }

        float tmpVal = (float)dv;
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal,
                                           TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset);
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_FLOAT]);
      }
      break;

    case TSDB_DATA_TYPE_DOUBLE:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                           getNullValue(TSDB_DATA_TYPE_DOUBLE), TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset);
      } else {
        double dv;
        if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
          return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
        }

        if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) {
          return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
        }

        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &dv,
                                           TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset);
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE]);
      }
      break;

    case TSDB_DATA_TYPE_BINARY:
      // binary data cannot be a null-terminated char string, otherwise the last char of the string is lost
      if (pToken->type == TK_NULL) {
        payloadColSetId(payload, pSchema->colId);
        payloadColSetType(payload, pSchema->type);
        memcpy(POINTER_SHIFT(payloadStart, tOffset), getNullValue(TSDB_DATA_TYPE_BINARY), VARSTR_HEADER_SIZE + CHAR_BYTES);
        *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + CHAR_BYTES);
      } else {  // too long values will return invalid sql, not be truncated automatically
        if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) {  // todo refactor
          return tscInvalidOperationMsg(msg, "string data overflow", pToken->z);
        }
        // STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n);

        payloadColSetId(payload, pSchema->colId);
        payloadColSetType(payload, pSchema->type);
        varDataSetLen(POINTER_SHIFT(payloadStart, tOffset), pToken->n);
        memcpy(varDataVal(POINTER_SHIFT(payloadStart, tOffset)), pToken->z, pToken->n);
        *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + pToken->n);
        *dataRowColDeltaLen += (TDRowLenT)(pToken->n - CHAR_BYTES);
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + pToken->n);
      }

      break;

    case TSDB_DATA_TYPE_NCHAR:
      if (pToken->type == TK_NULL) {
        payloadColSetId(payload, pSchema->colId);
        payloadColSetType(payload, pSchema->type);
        memcpy(POINTER_SHIFT(payloadStart, tOffset), getNullValue(TSDB_DATA_TYPE_NCHAR), VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
        *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
      } else {
        // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long'
        int32_t output = 0;
        payloadColSetId(payload, pSchema->colId);
        payloadColSetType(payload, pSchema->type);
        if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(POINTER_SHIFT(payloadStart, tOffset)),
                           pSchema->bytes - VARSTR_HEADER_SIZE, &output)) {
          char buf[512] = {0};
          snprintf(buf, tListLen(buf), "%s", strerror(errno));
          return tscInvalidOperationMsg(msg, buf, pToken->z);
        }

        varDataSetLen(POINTER_SHIFT(payloadStart, tOffset), output);

        *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + output);
        *dataRowColDeltaLen += (TDRowLenT)(output - sizeof(uint32_t));
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + output);
      }
      break;

    case TSDB_DATA_TYPE_TIMESTAMP: {
      if (pToken->type == TK_NULL) {
        if (primaryKey) {
          // When building SKVRow primaryKey, we should not skip even with NULL value.
          int64_t tmpVal = 0;
          *sizeAppend = tsSetPayloadColValue(payloadStart, primaryKeyStart, pSchema->colId, pSchema->type, &tmpVal,
                                             TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset);
          *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]);
        } else {
          *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
                                             getNullValue(TSDB_DATA_TYPE_TIMESTAMP),
                                             TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset);
        }
      } else {
        int64_t tmpVal;
        if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) {
          return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z);
        }

        *sizeAppend = tsSetPayloadColValue(payloadStart, primaryKey ? primaryKeyStart : payload, pSchema->colId,
                                           pSchema->type, &tmpVal, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset);
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]);
      }

      break;
    }
  }

  return TSDB_CODE_SUCCESS;
}
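The bool case accepts the literals "true"/"false" plus integer and float tokens with non-zero meaning true. A self-contained re-implementation of just that token logic (error handling simplified; this is not the library's API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

static int parseBoolToken(const char *z, size_t n, uint8_t *out) {
  if (n == 4 && strncmp(z, "true", n) == 0)  { *out = 1; return 0; }
  if (n == 5 && strncmp(z, "false", n) == 0) { *out = 0; return 0; }
  char *end = NULL;
  double d = strtod(z, &end);   /* covers both integer and float tokens */
  if (end != z + n) return -1;  /* trailing garbage: reject */
  *out = (d != 0);
  return 0;
}

int main(void) {
  uint8_t b;
  const char *tok = "0.0";
  if (parseBoolToken(tok, strlen(tok), &b) == 0) printf("%s -> %u\n", tok, b);  /* 0.0 -> 0 */
  return 0;
}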

/*
 * The server time and client time should not be mixed up in one sql string.
 * Do not employ the sort operation if server time is used.
@@ -777,31 +454,24 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
  int32_t   index = 0;
  SStrToken sToken = {0};

  SMemRowHelper *pHelper = &pDataBlocks->rowHelper;
  char          *payload = pDataBlocks->pData + pDataBlocks->size;
  char *row = pDataBlocks->pData + pDataBlocks->size;  // skip the SSubmitBlk header

  SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo;
  SSchema            *schema = tscGetTableSchema(pDataBlocks->pTableMeta);
  STableMeta         *pTableMeta = pDataBlocks->pTableMeta;
  SSchema            *schema = tscGetTableSchema(pTableMeta);
  SMemRowBuilder     *pBuilder = &pDataBlocks->rowBuilder;
  int32_t             dataLen = pBuilder->dataRowInitLen;
  int32_t             kvLen = pBuilder->kvRowInitLen;
  bool                isParseBindParam = false;

  TDRowTLenT dataRowLen = pHelper->allNullLen;
  TDRowTLenT kvRowLen = TD_MEM_ROW_KV_VER_SIZE;
  TDRowTLenT payloadValOffset = 0;
  TDRowLenT  colValOffset = 0;
  ASSERT(dataRowLen > 0);

  payloadSetNCols(payload, spd->numOfBound);
  payloadValOffset = payloadValuesOffset(payload);  // rely on payloadNCols
  // payloadSetTLen(payload, payloadValOffset);

  char *kvPrimaryKeyStart = payload + PAYLOAD_HEADER_LEN;    // primaryKey in 1st column tuple
  char *kvStart = kvPrimaryKeyStart + PAYLOAD_COL_HEAD_LEN;  // the column tuple behind the primaryKey
  initSMemRow(row, pBuilder->memRowType, pDataBlocks, spd->numOfBound);

  // 1. set the parsed value from sql string
  for (int i = 0; i < spd->numOfBound; ++i) {
    // the start position in data block buffer of current value in sql
    int32_t colIndex = spd->boundedColumns[i];

    char *start = payload + spd->cols[colIndex].offset;
    char *start = row + spd->cols[colIndex].offset;

    SSchema *pSchema = &schema[colIndex];  // get colId here

@@ -810,6 +480,9 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
    *str += index;

    if (sToken.type == TK_QUESTION) {
      if (!isParseBindParam) {
        isParseBindParam = true;
      }
      if (pInsertParam->insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
        return tscSQLSyntaxErrMsg(pInsertParam->msg, "? only allowed in binding insertion", *str);
      }

@@ -860,54 +533,45 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
      sToken.n -= 2 + cnt;
    }

    bool      isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
    TDRowLenT dataRowDeltaColLen = 0;  // When combining the data as SDataRow, the delta len between all NULL columns.
    TDRowLenT kvRowColLen = 0;
    TDRowLenT colValAppended = 0;
    bool    isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
    int32_t toffset = -1;
    int16_t colId = -1;
    tscGetMemRowAppendInfo(schema, pBuilder->memRowType, spd, i, &toffset, &colId);

    if (!IS_DATA_COL_ORDERED(spd->orderStatus)) {
      ASSERT(spd->colIdxInfo != NULL);
      if (!isPrimaryKey) {
        kvStart = POINTER_SHIFT(kvPrimaryKeyStart, spd->colIdxInfo[i].finalIdx * PAYLOAD_COL_HEAD_LEN);
      } else {
        ASSERT(spd->colIdxInfo[i].finalIdx == 0);
      }
    }
    // the primary key locates in 1st column
    int32_t ret = tsParseOneColumnKV(pSchema, &sToken, payload, kvPrimaryKeyStart, kvStart, pInsertParam->msg, str,
                                     isPrimaryKey, timePrec, payloadValOffset + colValOffset, &colValAppended,
                                     &dataRowDeltaColLen, &kvRowColLen);
    int32_t ret = tsParseOneColumnKV(pSchema, &sToken, row, pInsertParam->msg, str, isPrimaryKey, timePrec, toffset,
                                     colId, &dataLen, &kvLen, pBuilder->compareStat);
    if (ret != TSDB_CODE_SUCCESS) {
      return ret;
    }

    if (isPrimaryKey) {
      if (tsCheckTimestamp(pDataBlocks, payloadValues(payload)) != TSDB_CODE_SUCCESS) {
      TSKEY tsKey = memRowKey(row);
      if (tsCheckTimestamp(pDataBlocks, (const char *)&tsKey) != TSDB_CODE_SUCCESS) {
        tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z);
        return TSDB_CODE_TSC_INVALID_TIME_STAMP;
      }
      payloadColSetOffset(kvPrimaryKeyStart, colValOffset);
    } else {
      payloadColSetOffset(kvStart, colValOffset);
      if (IS_DATA_COL_ORDERED(spd->orderStatus)) {
        kvStart += PAYLOAD_COL_HEAD_LEN;  // move to next column
      }
    }

  if (!isParseBindParam) {
    // 2. check and set convert flag
    if (pBuilder->compareStat == ROW_COMPARE_NEED) {
      checkAndConvertMemRow(row, dataLen, kvLen);
    }

    // 3. set the null value for the columns that do not assign values
    if ((spd->numOfBound < spd->numOfCols) && isDataRow(row) && !isNeedConvertRow(row)) {
      SDataRow dataRow = memRowDataBody(row);
      for (int32_t i = 0; i < spd->numOfCols; ++i) {
        if (spd->cols[i].valStat == VAL_STAT_NONE) {
          tdAppendDataColVal(dataRow, getNullValue(schema[i].type), true, schema[i].type, spd->cols[i].toffset);
        }
      }
    }

    colValOffset += colValAppended;
    kvRowLen += kvRowColLen;
    dataRowLen += dataRowDeltaColLen;
  }

  if (kvRowLen < dataRowLen) {
    payloadSetType(payload, SMEM_ROW_KV);
  } else {
    payloadSetType(payload, SMEM_ROW_DATA);
  }
  *len = getExtendedRowSize(pDataBlocks);

  *len = (int32_t)(payloadValOffset + colValOffset);
  payloadSetTLen(payload, *len);

  return TSDB_CODE_SUCCESS;
}
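The final kvRowLen-versus-dataRowLen comparison is just size accounting: a data row pays for every column, a KV row pays an extra index entry per bound column only. A toy calculation with invented sizes:

#include <stdio.h>

int main(void) {
  int nCols = 100, nBound = 5;
  int fixedColLen = 8, colIdxLen = 6, dataHead = 12, kvHead = 10;  /* assumed sizes */

  int dataRowLen = dataHead + nCols  * fixedColLen;                /* 812 */
  int kvRowLen   = kvHead   + nBound * (colIdxLen + fixedColLen);  /* 80  */

  printf("%s row wins\n", kvRowLen < dataRowLen ? "KV" : "DATA");  /* KV */
  return 0;
}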

@@ -957,11 +621,13 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn

  int32_t precision = tinfo.precision;

  int32_t extendedRowSize = getExtendedRowSize(&tinfo);

  initSMemRowHelper(&pDataBlock->rowHelper, tscGetTableSchema(pDataBlock->pTableMeta),
                    tscGetNumOfColumns(pDataBlock->pTableMeta), 0);
  int32_t extendedRowSize = getExtendedRowSize(pDataBlock);

  if (TSDB_CODE_SUCCESS !=
      (code = initMemRowBuilder(&pDataBlock->rowBuilder, 0, tinfo.numOfColumns, pDataBlock->boundColumnInfo.numOfBound,
                                pDataBlock->boundColumnInfo.allNullLen))) {
    return code;
  }
  while (1) {
    index = 0;
    sToken = tStrGetToken(*str, &index, false);

@@ -991,9 +657,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
    index = 0;
    sToken = tStrGetToken(*str, &index, false);
    if (sToken.n == 0 || sToken.type != TK_RP) {
      tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str);
      code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
      return code;
      return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str);
    }

    *str += index;
@@ -1012,19 +676,37 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) {
  pColInfo->numOfCols = numOfCols;
  pColInfo->numOfBound = numOfCols;
  pColInfo->orderStatus = ORDER_STATUS_ORDERED;
  pColInfo->orderStatus = ORDER_STATUS_ORDERED;  // default is ORDERED for non-bound mode
  pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t));
  pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn));
  pColInfo->colIdxInfo = NULL;
  pColInfo->flen = 0;
  pColInfo->allNullLen = 0;

  int32_t nVar = 0;
  for (int32_t i = 0; i < pColInfo->numOfCols; ++i) {
    uint8_t type = pSchema[i].type;
    if (i > 0) {
      pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset;
      pColInfo->cols[i].toffset = pColInfo->flen;
    }
    pColInfo->flen += TYPE_BYTES[type];
    switch (type) {
      case TSDB_DATA_TYPE_BINARY:
        pColInfo->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES);
        ++nVar;
        break;
      case TSDB_DATA_TYPE_NCHAR:
        pColInfo->allNullLen += (VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
        ++nVar;
        break;
      default:
        break;
    }

    pColInfo->cols[i].hasVal = true;
    pColInfo->boundedColumns[i] = i;
  }
  pColInfo->allNullLen += pColInfo->flen;
  pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT));
}
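A worked example of the allNullLen accounting above for a hypothetical schema (ts TIMESTAMP, v INT, s BINARY, w NCHAR); every byte width here is an assumption standing in for TYPE_BYTES/VARSTR_HEADER_SIZE/etc., so only the shape of the sum matters:

#include <stdio.h>

int main(void) {
  int VARSTR_HEADER_SIZE = 2, CHAR_BYTES = 1, TSDB_NCHAR_SIZE = 4;  /* assumed */
  int flen = 8 + 4 + 2 + 2;  /* fixed part: ts(8) + int(4) + two var-column offsets of 2 bytes each (assumed) */
  int allNullLen = flen
                 + (VARSTR_HEADER_SIZE + CHAR_BYTES)        /* a NULL BINARY still stores header + 1 char      */
                 + (VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);  /* a NULL NCHAR stores header + 1 wide char        */
  printf("allNullLen = %d\n", allNullLen);  /* 16 + 3 + 6 = 25 */
  return 0;
}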

int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows) {

@@ -1124,35 +806,29 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk
  if (dataBuf->tsSource == TSDB_USE_SERVER_TS) {
    assert(dataBuf->ordered);
  }
    // allocate memory
  // allocate memory
  size_t nAlloc = nRows * sizeof(SBlockKeyTuple);
  if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) {
    size_t nRealAlloc = nAlloc + 10 * sizeof(SBlockKeyTuple);
    char  *tmp = trealloc(pBlkKeyInfo->pKeyTuple, nRealAlloc);
    if (tmp == NULL) {
        return TSDB_CODE_TSC_OUT_OF_MEMORY;
      return TSDB_CODE_TSC_OUT_OF_MEMORY;
    }
    pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp;
    pBlkKeyInfo->maxBytesAlloc = (int32_t)nRealAlloc;
  }
  memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc);

  int32_t extendedRowSize = getExtendedRowSize(dataBuf);
  SBlockKeyTuple *pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
  char           *pBlockData = pBlocks->data;
  TDRowTLenT      totolPayloadTLen = 0;
  TDRowTLenT      payloadTLen = 0;
  int n = 0;
  while (n < nRows) {
    pBlkKeyTuple->skey = payloadTSKey(pBlockData);
    pBlkKeyTuple->skey = memRowKey(pBlockData);
    pBlkKeyTuple->payloadAddr = pBlockData;
    payloadTLen = payloadTLen(pBlockData);
#if 0
    ASSERT(payloadNCols(pBlockData) <= 4096);
    ASSERT(payloadTLen(pBlockData) < 65536);
#endif
    totolPayloadTLen += payloadTLen;

    // next loop
    pBlockData += payloadTLen;
    pBlockData += extendedRowSize;
    ++pBlkKeyTuple;
    ++n;
  }

@@ -1169,7 +845,6 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk
    TSKEY tj = (pBlkKeyTuple + j)->skey;

    if (ti == tj) {
      totolPayloadTLen -= payloadTLen(pBlkKeyTuple + j);
      ++j;
      continue;
    }

@@ -1185,17 +860,15 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk
    pBlocks->numOfRows = i + 1;
  }

  dataBuf->size = sizeof(SSubmitBlk) + totolPayloadTLen;
  dataBuf->size = sizeof(SSubmitBlk) + pBlocks->numOfRows * extendedRowSize;
  dataBuf->prevTS = INT64_MIN;

  return 0;
}
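The sort-and-dedupe pass above walks key tuples ordered by timestamp and collapses equal keys. A generic standalone version of the same pattern with qsort (not the tsc code path; here a later duplicate overwrites the earlier one):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef struct { int64_t skey; int payload; } KeyTuple;

static int cmpKey(const void *a, const void *b) {
  int64_t x = ((const KeyTuple *)a)->skey, y = ((const KeyTuple *)b)->skey;
  return (x > y) - (x < y);
}

int main(void) {
  KeyTuple t[] = {{30, 1}, {10, 2}, {30, 3}, {20, 4}};
  int n = 4;
  qsort(t, n, sizeof(KeyTuple), cmpKey);

  int i = 0;
  for (int j = 1; j < n; ++j) {
    if (t[j].skey == t[i].skey) { t[i] = t[j]; continue; }  /* duplicate key: keep the later row */
    t[++i] = t[j];
  }
  n = i + 1;
  for (int k = 0; k < n; ++k) printf("%lld ", (long long)t[k].skey);  /* 10 20 30 */
  printf("\n");
  return 0;
}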

static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
  STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta);

static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
  int32_t maxNumOfRows;
  int32_t code = tscAllocateMemIfNeed(dataBuf, getExtendedRowSize(&tinfo), &maxNumOfRows);
  int32_t code = tscAllocateMemIfNeed(dataBuf, getExtendedRowSize(dataBuf), &maxNumOfRows);
  if (TSDB_CODE_SUCCESS != code) {
    return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1533,7 +1206,7 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
  pColInfo->numOfBound = 0;
  memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * nCols);
  for (int32_t i = 0; i < nCols; ++i) {
    pColInfo->cols[i].hasVal = false;
    pColInfo->cols[i].valStat = VAL_STAT_NONE;
  }

  int32_t code = TSDB_CODE_SUCCESS;

@@ -1572,12 +1245,12 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
  int32_t nScanned = 0, t = lastColIdx + 1;
  while (t < nCols) {
    if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
      if (pColInfo->cols[t].hasVal == true) {
      if (pColInfo->cols[t].valStat == VAL_STAT_HAS) {
        code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z);
        goto _clean;
      }

      pColInfo->cols[t].hasVal = true;
      pColInfo->cols[t].valStat = VAL_STAT_HAS;
      pColInfo->boundedColumns[pColInfo->numOfBound] = t;
      ++pColInfo->numOfBound;
      findColumnIndex = true;

@@ -1595,12 +1268,12 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
  int32_t nRemain = nCols - nScanned;
  while (t < nRemain) {
    if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
      if (pColInfo->cols[t].hasVal == true) {
      if (pColInfo->cols[t].valStat == VAL_STAT_HAS) {
        code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z);
        goto _clean;
      }

      pColInfo->cols[t].hasVal = true;
      pColInfo->cols[t].valStat = VAL_STAT_HAS;
      pColInfo->boundedColumns[pColInfo->numOfBound] = t;
      ++pColInfo->numOfBound;
      findColumnIndex = true;

@@ -1835,7 +1508,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
    goto _clean;
  }

  if (dataBuf->boundColumnInfo.cols[0].hasVal == false) {
  if (dataBuf->boundColumnInfo.cols[0].valStat == VAL_STAT_NONE) {
    code = tscInvalidOperationMsg(pInsertParam->msg, "primary timestamp column can not be null", NULL);
    goto _clean;
  }
@@ -1922,7 +1595,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
  if (pSql->parseRetry < 1 && (ret == TSDB_CODE_TSC_SQL_SYNTAX_ERROR || ret == TSDB_CODE_TSC_INVALID_OPERATION)) {
    tscDebug("0x%"PRIx64" parse insert sql statement failed, code:%s, clear meta cache and retry ", pSql->self, tstrerror(ret));

    tscResetSqlCmd(pCmd, true);
    tscResetSqlCmd(pCmd, true, pSql->self);
    pSql->parseRetry++;

    if ((ret = tsInsertInitialCheck(pSql)) == TSDB_CODE_SUCCESS) {

@@ -1939,7 +1612,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
  if (ret == TSDB_CODE_TSC_INVALID_OPERATION && pSql->parseRetry < 1 && sqlInfo.type == TSDB_SQL_SELECT) {
    tscDebug("0x%"PRIx64" parse query sql statement failed, code:%s, clear meta cache and retry ", pSql->self, tstrerror(ret));

    tscResetSqlCmd(pCmd, true);
    tscResetSqlCmd(pCmd, true, pSql->self);
    pSql->parseRetry++;

    ret = tscValidateSqlInfo(pSql, &sqlInfo);

@@ -2046,15 +1719,18 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
    goto _error;
  }

  tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(&tinfo), &maxRows);
  tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(pTableDataBlock), &maxRows);
  tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW);
  if (tokenBuf == NULL) {
    code = TSDB_CODE_TSC_OUT_OF_MEMORY;
    goto _error;
  }

  initSMemRowHelper(&pTableDataBlock->rowHelper, tscGetTableSchema(pTableDataBlock->pTableMeta),
                    tscGetNumOfColumns(pTableDataBlock->pTableMeta), 0);
  if (TSDB_CODE_SUCCESS !=
      (ret = initMemRowBuilder(&pTableDataBlock->rowBuilder, 0, tinfo.numOfColumns, pTableDataBlock->numOfParams,
                               pTableDataBlock->boundColumnInfo.allNullLen))) {
    goto _error;
  }

  while ((readLen = tgetline(&line, &n, fp)) != -1) {
    if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
@@ -299,7 +299,7 @@ static int fillColumnsNull(STableDataBlocks* pBlock, int32_t rowNum) {
  SSchema *schema = (SSchema*)pBlock->pTableMeta->schema;

  for (int32_t i = 0; i < spd->numOfCols; ++i) {
    if (!spd->cols[i].hasVal) {  // current column does not have any value to insert, set it to null
    if (spd->cols[i].valStat == VAL_STAT_NONE) {  // current column does not have any value to insert, set it to null
      for (int32_t n = 0; n < rowNum; ++n) {
        char *ptr = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * n + offset;

@@ -1694,7 +1694,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
  if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) {
    SHashObj* hashList = pCmd->insertParam.pTableBlockHashList;
    pCmd->insertParam.pTableBlockHashList = NULL;
    tscResetSqlCmd(pCmd, false);
    tscResetSqlCmd(pCmd, false, pSql->self);
    pCmd->insertParam.pTableBlockHashList = hashList;
  }
@@ -18,11 +18,11 @@
#include "tsclient.h"
#include "tsocket.h"
#include "ttimer.h"
#include "tutil.h"
#include "taosmsg.h"
#include "tcq.h"

#include "taos.h"
#include "tscUtil.h"

void tscSaveSlowQueryFp(void *handle, void *tmrId);
TAOS *tscSlowQueryConn = NULL;

@@ -227,16 +227,16 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {

int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
  SHeartBeatMsg *pHeartbeat = pMsg;

  int allocedQueriesNum = pHeartbeat->numOfQueries;
  int allocedStreamsNum = pHeartbeat->numOfStreams;

  pHeartbeat->numOfQueries = 0;
  SQueryDesc *pQdesc = (SQueryDesc *)pHeartbeat->pData;

  // We extract the lock to the tscBuildHeartBeatMsg function.

  int64_t now = taosGetTimestampMs();
  SSqlObj *pSql = pObj->sqlList;

  while (pSql) {
    /*
     * avoid sqlobj may not be correctly removed from sql list

@@ -248,26 +248,55 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
    }

    tstrncpy(pQdesc->sql, pSql->sqlstr, sizeof(pQdesc->sql));
    pQdesc->stime = htobe64(pSql->stime);
    pQdesc->queryId = htonl(pSql->queryId);
    //pQdesc->useconds = htobe64(pSql->res.useconds);
    pQdesc->stime = htobe64(pSql->stime);
    pQdesc->queryId = htonl(pSql->queryId);
    pQdesc->useconds = htobe64(now - pSql->stime);
    pQdesc->qId = htobe64(pSql->res.qId);
    pQdesc->qId = htobe64(pSql->res.qId);
    pQdesc->sqlObjId = htobe64(pSql->self);
    pQdesc->pid = pHeartbeat->pid;
    if (pSql->cmd.pQueryInfo->stableQuery == true) {
      pQdesc->numOfSub = pSql->subState.numOfSub;
    } else {
      pQdesc->numOfSub = 1;
    }
    pQdesc->numOfSub = htonl(pQdesc->numOfSub);
    pQdesc->pid = pHeartbeat->pid;
    pQdesc->numOfSub = pSql->subState.numOfSub;

    // todo race condition
    pQdesc->stableQuery = 0;

    char   *p = pQdesc->subSqlInfo;
    int32_t remainLen = sizeof(pQdesc->subSqlInfo);
    if (pQdesc->numOfSub == 0) {
      snprintf(p, remainLen, "N/A");
    } else {
      // SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
      // if (pQueryInfo != NULL) {
      //   pQdesc->stableQuery = (pQueryInfo->stableQuery)?1:0;
      // } else {
      //   pQdesc->stableQuery = 0;
      // }

      if (pSql->pSubs != NULL && pSql->subState.states != NULL) {
        for (int32_t i = 0; i < pQdesc->numOfSub; ++i) {
          SSqlObj *psub = pSql->pSubs[i];
          int64_t  self = (psub != NULL) ? psub->self : 0;

          int32_t len = snprintf(p, remainLen, "[%d]0x%" PRIx64 "(%c) ", i, self, pSql->subState.states[i] ? 'C' : 'I');
          if (len > remainLen) {
            break;
          }

          remainLen -= len;
          p += len;
        }
      }
    }

    pQdesc->numOfSub = htonl(pQdesc->numOfSub);
    taosGetFqdn(pQdesc->fqdn);

    pHeartbeat->numOfQueries++;
    pQdesc++;

    pSql = pSql->next;
    if (pHeartbeat->numOfQueries >= allocedQueriesNum) break;
    if (pHeartbeat->numOfQueries >= allocedQueriesNum) {
      break;
    }
  }

  pHeartbeat->numOfStreams = 0;
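The sub-query status string is built with the classic "remaining length" snprintf pattern. An isolated demo of that pattern (ids and states are made up); note it uses >= where the diff uses >, since snprintf's return value counts what would have been written and a result equal to the remaining space already means the NUL terminator truncated the output:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void) {
  char    buf[64];
  char   *p = buf;
  int32_t remain = (int32_t)sizeof(buf);

  uint64_t subIds[3] = {0xa1, 0xa2, 0xa3};
  int      states[3] = {1, 0, 1};

  for (int i = 0; i < 3; ++i) {
    int32_t len = snprintf(p, remain, "[%d]0x%" PRIx64 "(%c) ", i, subIds[i], states[i] ? 'C' : 'I');
    if (len >= remain) break;  /* output truncated: stop appending */
    remain -= len;
    p += len;
  }
  printf("%s\n", buf);  /* [0]0xa1(C) [1]0xa2(I) [2]0xa3(C) */
  return 0;
}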

File diff suppressed because it is too large
@@ -501,6 +501,15 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
    pRes->code = rpcMsg->code;
  }
  rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? (int32_t)pRes->numOfRows : pRes->code;
  if (pRes->code == TSDB_CODE_RPC_FQDN_ERROR) {
    if (pEpSet) {
      char buf[TSDB_FQDN_LEN + 64] = {0};
      tscAllocPayload(pCmd, sizeof(buf));
      sprintf(tscGetErrorMsgPayload(pCmd), "%s\"%s\"", tstrerror(pRes->code), pEpSet->fqdn[(pEpSet->inUse) % (pEpSet->numOfEps)]);
    } else {
      sprintf(tscGetErrorMsgPayload(pCmd), "%s", tstrerror(pRes->code));
    }
  }
  (*pSql->fp)(pSql->param, pSql, rpcMsg->code);
}
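The new branch above enriches an FQDN-resolution error with the offending endpoint name before handing it to the caller. A minimal sketch of that formatting step, using snprintf for bounds safety (the strings here are stand-ins for tstrerror() and pEpSet->fqdn[inUse]):

#include <stdio.h>

int main(void) {
  const char *baseErr = "Unable to resolve FQDN";      /* stand-in for tstrerror(code) */
  const char *fqdn = "tdengine-node1.example.com";     /* hypothetical endpoint */
  char payload[256];
  snprintf(payload, sizeof(payload), "%s\"%s\"", baseErr, fqdn);
  printf("%s\n", payload);
  return 0;
}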
@@ -516,6 +525,7 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
}

void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
  int64_t st = taosGetTimestampUs();
  SSchedMsg schedMsg = {0};

  schedMsg.fp = doProcessMsgFromServer;

@@ -534,6 +544,11 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
  schedMsg.msg = NULL;

  taosScheduleTask(tscQhandle, &schedMsg);

  int64_t et = taosGetTimestampUs();
  if (et - st > 100) {
    tscDebug("add message to task queue, elapsed time:%"PRId64, et - st);
  }
}

int doBuildAndSendMsg(SSqlObj *pSql) {
@@ -675,7 +690,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
  SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);

  int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo));
  int32_t srcColFilterSize = tscGetColFilterSerializeLen(pQueryInfo);
  int32_t srcColFilterSize = 0;
  int32_t srcTagFilterSize = tscGetTagFilterSerializeLen(pQueryInfo);

  size_t numOfExprs = tscNumOfExprs(pQueryInfo);

@@ -686,6 +701,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {

  int32_t tableSerialize = 0;
  STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
  STableMeta     *pTableMeta = pTableMetaInfo->pTableMeta;
  if (pTableMetaInfo->pVgroupTables != NULL) {
    size_t numOfGroups = taosArrayGetSize(pTableMetaInfo->pVgroupTables);

@@ -698,8 +714,15 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
    tableSerialize = totalTables * sizeof(STableIdInfo);
  }

  return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + srcTagFilterSize +
         exprSize + tsBufSize + tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
  if (pQueryInfo->colCond && taosArrayGetSize(pQueryInfo->colCond) > 0) {
    STblCond *pCond = tsGetTableFilter(pQueryInfo->colCond, pTableMeta->id.uid, 0);
    if (pCond != NULL && pCond->cond != NULL) {
      srcColFilterSize = pCond->len;
    }
  }

  return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + srcTagFilterSize + exprSize + tsBufSize +
         tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
}
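The estimate is simply the fixed header plus every variable-length section plus slack, so the buffer is never undersized. A toy version of that sum with invented section sizes:

#include <stdio.h>

int main(void) {
  int fixedHdr = 1024, colList = 10 * 24, colFilter = 128, tagFilter = 256;
  int exprs = 8 * 80, tsBuf = 0, tableIds = 4 * 16, sqlLen = 512, slack = 4096;
  int total = fixedHdr + colList + colFilter + tagFilter + exprs + tsBuf + tableIds + sqlLen + slack;
  printf("estimated msg size = %d bytes\n", total);  /* 6960 for these assumed inputs */
  return 0;
}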

static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, char *pMsg,
@@ -880,16 +903,16 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  }

  SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
  STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
  STableMeta     *pTableMeta = pTableMetaInfo->pTableMeta;

  SQueryAttr query = {{0}};
  tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql);
  query.vgId = pTableMeta->vgId;

  SArray* tableScanOperator = createTableScanPlan(&query);
  SArray* queryOperator = createExecOperatorPlan(&query);

  STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
  STableMeta     *pTableMeta = pTableMetaInfo->pTableMeta;

  SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload;
  tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version));

@@ -957,10 +980,21 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
    pQueryMsg->tableCols[i].colId = htons(pCol->colId);
    pQueryMsg->tableCols[i].bytes = htons(pCol->bytes);
    pQueryMsg->tableCols[i].type = htons(pCol->type);
    pQueryMsg->tableCols[i].flist.numOfFilters = htons(pCol->flist.numOfFilters);
    //pQueryMsg->tableCols[i].flist.numOfFilters = htons(pCol->flist.numOfFilters);
    pQueryMsg->tableCols[i].flist.numOfFilters = 0;

    // append the filter information after the basic column information
    serializeColFilterInfo(pCol->flist.filterInfo, pCol->flist.numOfFilters, &pMsg);
    //serializeColFilterInfo(pCol->flist.filterInfo, pCol->flist.numOfFilters, &pMsg);
  }

  if (pQueryInfo->colCond && taosArrayGetSize(pQueryInfo->colCond) > 0 && !onlyQueryTags(&query)) {
    STblCond *pCond = tsGetTableFilter(pQueryInfo->colCond, pTableMeta->id.uid, 0);
    if (pCond != NULL && pCond->cond != NULL) {
      pQueryMsg->colCondLen = htons(pCond->len);
      memcpy(pMsg, pCond->cond, pCond->len);

      pMsg += pCond->len;
    }
  }

  for (int32_t i = 0; i < query.numOfOutput; ++i) {

@@ -1035,7 +1069,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {

  SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->id.uid);
  if (pCond != NULL && pCond->cond != NULL) {
    pQueryMsg->tagCondLen = htonl(pCond->len);
    pQueryMsg->tagCondLen = htons(pCond->len);
    memcpy(pMsg, pCond->cond, pCond->len);

    pMsg += pCond->len;
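The htons/htonl/htobe64 calls above serialize multi-byte fields in network byte order, and the tagCondLen change from htonl to htons matches the converter to the field width. A minimal demo of why that matters (htonl on a 16-bit length would produce a 4-byte value for a 2-byte slot):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>  /* htons/htonl (POSIX) */

int main(void) {
  uint16_t len16 = 300;  /* 0x012c */
  uint32_t len32 = 300;
  printf("htons(%u) = 0x%04x\n", len16, htons(len16));
  printf("htonl(%u) = 0x%08x\n", len32, htonl(len32));
  /* storing htonl()'s 32-bit result into a 16-bit field truncates or corrupts
   * the length - the mismatch the htons(pCond->len) fix repairs */
  return 0;
}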
@@ -2241,6 +2275,10 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
    pMsg = buf;
  }

  if (pParentCmd->pTableMetaMap == NULL) {
    pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
  }

  for (int32_t i = 0; i < pMultiMeta->numOfTables; i++) {
    STableMetaMsg *pMetaMsg = (STableMetaMsg *)pMsg;
    int32_t code = tableMetaMsgConvert(pMetaMsg);

@@ -2577,7 +2615,7 @@ int tscProcessDropDbRsp(SSqlObj *pSql) {

int tscProcessDropTableRsp(SSqlObj *pSql) {
  STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
  tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self);
  tscRemoveCachedTableMeta(pTableMetaInfo, pSql->self);
  tfree(pTableMetaInfo->pTableMeta);
  return 0;
}
@@ -2844,18 +2882,19 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
  tNameExtractFullName(&pTableMetaInfo->name, name);

  size_t len = strlen(name);
  if (pTableMetaInfo->tableMetaCapacity != 0) {
    if (pTableMetaInfo->pTableMeta != NULL) {
      memset(pTableMetaInfo->pTableMeta, 0, pTableMetaInfo->tableMetaCapacity);
    }
  // just make runtime happy
  if (pTableMetaInfo->tableMetaCapacity != 0 && pTableMetaInfo->pTableMeta != NULL) {
    memset(pTableMetaInfo->pTableMeta, 0, pTableMetaInfo->tableMetaCapacity);
  }
  taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&(pTableMetaInfo->pTableMeta), &pTableMetaInfo->tableMetaCapacity);

  STableMeta* pMeta = pTableMetaInfo->pTableMeta;

  STableMeta* pMeta = pTableMetaInfo->pTableMeta;
  STableMeta* pSTMeta = (STableMeta *)(pSql->pBuf);
  if (pMeta && pMeta->id.uid > 0) {
    // in case of child table, here only get the
    if (pMeta->tableType == TSDB_CHILD_TABLE) {
      int32_t code = tscCreateTableMetaFromSTableMeta(&pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity);
      int32_t code = tscCreateTableMetaFromSTableMeta(&pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity, (STableMeta **)(&pSTMeta));
      pSql->pBuf = (void *)(pSTMeta);
      if (code != TSDB_CODE_SUCCESS) {
        return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
      }

@@ -2961,13 +3000,11 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
           tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
  }

  // remove stored tableMeta info in hash table
  tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self);
  tscResetSqlCmd(pCmd, true, pSql->self);

  pCmd->pTableMetaMap = tscCleanupTableMetaMap(pCmd->pTableMetaMap);
  pCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);

  SArray* pNameList = taosArrayInit(1, POINTER_BYTES);
  SArray* pNameList = taosArrayInit(1, POINTER_BYTES);
  SArray* vgroupList = taosArrayInit(1, POINTER_BYTES);

  char* n = strdup(name);
@@ -196,6 +196,11 @@ TAOS *taos_connect_internal(const char *ip, const char *user, const char *pass,

  if (pSql->res.code != TSDB_CODE_SUCCESS) {
    terrno = pSql->res.code;
    if (terrno == TSDB_CODE_RPC_FQDN_ERROR) {
      printf("taos connect failed, reason: %s\n\n", taos_errstr(pSql));
    } else {
      printf("taos connect failed, reason: %s.\n\n", tstrerror(terrno));
    }
    taos_free_result(pSql);
    taos_close(pObj);
    return NULL;

@@ -643,7 +648,7 @@ char *taos_errstr(TAOS_RES *tres) {
    return (char*) tstrerror(terrno);
  }

  if (hasAdditionalErrorInfo(pSql->res.code, &pSql->cmd)) {
  if (hasAdditionalErrorInfo(pSql->res.code, &pSql->cmd) || pSql->res.code == TSDB_CODE_RPC_FQDN_ERROR) {
    return pSql->cmd.payload;
  } else {
    return (char*)tstrerror(pSql->res.code);
@@ -113,7 +113,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {

  pQueryInfo->command = TSDB_SQL_SELECT;

  pSql->fp = tscProcessStreamQueryCallback;
  pSql->fp      = tscProcessStreamQueryCallback;
  pSql->fetchFp = tscProcessStreamQueryCallback;
  executeQuery(pSql, pQueryInfo);
  tscIncStreamExecutionCount(pStream);

@@ -142,6 +142,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
  if (pSql == NULL) {
    return;
  }

  SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
  tscDebug("0x%"PRIx64" add into timer", pSql->self);

@@ -186,14 +187,16 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
  }

  // launch stream computing in a new thread
  SSchedMsg schedMsg = { 0 };
  schedMsg.fp = tscProcessStreamLaunchQuery;
  SSchedMsg schedMsg = {0};
  schedMsg.fp      = tscProcessStreamLaunchQuery;
  schedMsg.ahandle = pStream;
  schedMsg.thandle = (void *)1;
  schedMsg.msg = NULL;
  schedMsg.msg     = NULL;
  taosScheduleTask(tscQhandle, &schedMsg);
}

static void cbParseSql(void* param, TAOS_RES* res, int code);

static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) {
  SSqlStream *pStream = (SSqlStream *)param;
  if (tres == NULL || numOfRows < 0) {

@@ -201,24 +204,26 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
    tscError("0x%"PRIx64" stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql->self,
             pStream, numOfRows, retryDelay);

    STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0);
    SSqlObj* pSql = pStream->pSql;

    char name[TSDB_TABLE_FNAME_LEN] = {0};
    tNameExtractFullName(&pTableMetaInfo->name, name);
    tscFreeSqlResult(pSql);
    tscFreeSubobj(pSql);
    tfree(pSql->pSubs);
    pSql->subState.numOfSub = 0;

    taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
    int32_t code = tsParseSql(pSql, true);
    if (code == TSDB_CODE_SUCCESS) {
      cbParseSql(pStream, pSql, code);
    } else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
      tscDebug("0x%"PRIx64" CQ taso_open_stream IN Process", pSql->self);
    } else {
      tscError("0x%"PRIx64" open stream failed, code:%s", pSql->self, tstrerror(code));
      taosReleaseRef(tscObjRef, pSql->self);
      free(pStream);
    }

    tfree(pTableMetaInfo->pTableMeta);

    tscFreeSqlResult(pStream->pSql);
    tscFreeSubobj(pStream->pSql);
    tfree(pStream->pSql->pSubs);
    pStream->pSql->subState.numOfSub = 0;

    pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);

    tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
    return;
    // tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
    // return;
  }

  taos_fetch_rows_a(tres, tscProcessStreamRetrieveResult, param);

@@ -555,7 +560,6 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
  if (code != TSDB_CODE_SUCCESS) {
    pSql->res.code = code;
    tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:%s", pSql->self, pSql->sqlstr, pCmd->payload, tstrerror(code));

    pStream->fp(pStream->param, NULL, NULL);
    return;
  }

@@ -582,9 +586,10 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {

  // set stime with ltime if ltime > stime
  const char* dstTable = pStream->dstTable? pStream->dstTable: "";
  tscDebug(" CQ table=%s ltime is %"PRId64, dstTable, pStream->ltime);
  tscDebug("0x%"PRIx64" CQ table %s ltime is %"PRId64, pSql->self, dstTable, pStream->ltime);

  if (pStream->ltime != INT64_MIN && pStream->ltime > pStream->stime) {
    tscWarn(" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" if ltime>0 ", dstTable, pStream->stime, pStream->ltime);
    tscWarn("0x%"PRIx64" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" if ltime > 0", pSql->self, dstTable, pStream->stime, pStream->ltime);
    pStream->stime = pStream->ltime;
  }

@@ -592,7 +597,6 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
  pCmd->command = TSDB_SQL_SELECT;

  tscAddIntoStreamList(pStream);

  taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);

  tscDebug("0x%"PRIx64" stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql->self,

@@ -659,10 +663,9 @@ void cbParseSql(void* param, TAOS_RES* res, int code) {
  char sql[128] = "";
  sprintf(sql, "select last_row(*) from %s;", pStream->dstTable);
  taos_query_a(pSql->pTscObj, sql, fpStreamLastRow, param);
  return;
}

TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
                                       int64_t stime, void *param, void (*callback)(void *), void* cqhandle) {
  STscObj *pObj = (STscObj *)taos;
  if (pObj == NULL || pObj->signature != pObj) return NULL;

@@ -697,14 +700,12 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
  pStream->param = param;
  pStream->pSql = pSql;
  pStream->cqhandle = cqhandle;
  pSql->pStream = pStream;
  pSql->param = pStream;
  pSql->maxRetry = TSDB_MAX_REPLICA;
  tscSetStreamDestTable(pStream, dstTable);

  pSql->pStream = pStream;
  pSql->param = pStream;
  pSql->maxRetry = TSDB_MAX_REPLICA;

  pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
  if (pSql->sqlstr == NULL) {
    tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);

@@ -725,14 +726,13 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c

  pSql->fp = cbParseSql;
  pSql->fetchFp = cbParseSql;

  registerSqlObj(pSql);

  int32_t code = tsParseSql(pSql, true);
  if (code == TSDB_CODE_SUCCESS) {
    cbParseSql(pStream, pSql, code);
  } else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
    tscDebug(" CQ taso_open_stream IN Process. sql=%s", sqlstr);
    tscDebug("0x%"PRIx64" CQ taso_open_stream IN Process", pSql->self);
  } else {
    tscError("0x%"PRIx64" open stream failed, sql:%s, code:%s", pSql->self, sqlstr, tstrerror(code));
    taosReleaseRef(tscObjRef, pSql->self);

@@ -743,7 +743,7 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
  return pStream;
}

TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
                              int64_t stime, void *param, void (*callback)(void *)) {
  return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback, NULL);
}
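For context, a hedged usage sketch of the public stream API whose signature is touched above; the connection parameters, database, and query are placeholders:

#include <stdio.h>
#include <taos.h>

static void streamCb(void *param, TAOS_RES *res, TAOS_ROW row) {
  (void)param; (void)res;
  if (row != NULL) printf("stream produced a row\n");
}

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) return 1;

  TAOS_STREAM *st = taos_open_stream(conn, "select count(*) from db.tb interval(10s)",
                                     streamCb, 0 /* stime: start from now */, NULL, NULL);
  if (st != NULL) {
    /* ... let the continuous query run ... */
    taos_close_stream(st);
  }
  taos_close(conn);
  return 0;
}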
|
||||
|
|
|
@ -796,6 +796,7 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
  STimeWindow window = pQueryInfo->window;
  tscInitQueryInfo(pQueryInfo);

  pQueryInfo->colCond = pSupporter->colCond;
  pQueryInfo->window = window;
  TSDB_QUERY_CLEAR_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY);
  TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
@ -1883,6 +1884,9 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
  if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // return the tableId & tag
    SColumnIndex colIndex = {0};

    pSupporter->colCond = pNewQueryInfo->colCond;
    pNewQueryInfo->colCond = NULL;

    STagCond* pTagCond = &pSupporter->tagCond;
    assert(pTagCond->joinInfo.hasJoin);
@ -2319,6 +2323,11 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
    goto _error;
  }

  if (tscColCondCopy(&pNewQueryInfo->colCond, pQueryInfo->colCond, pTableMetaInfo->pTableMeta->id.uid, 0) != 0) {
    terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
    goto _error;
  }

  pNewQueryInfo->window = pQueryInfo->window;
  pNewQueryInfo->interval = pQueryInfo->interval;
  pNewQueryInfo->sessionWindow = pQueryInfo->sessionWindow;
@ -2395,8 +2404,8 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
      SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1);
      tscColumnCopy(x, pCol);
    } else {
      SColumn *p = tscColumnClone(pCol);
      taosArrayPush(pNewQueryInfo->colList, &p);
      SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex};
      tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss);
    }
  }
}
@ -2718,16 +2727,10 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
  int32_t code = pParentSql->res.code;
  if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) {
    // remove the cached tableMeta and vgroup id list, and then parse the sql again
    SSqlCmd* pParentCmd = &pParentSql->cmd;
    STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0);
    tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self);
    tscResetSqlCmd(&pParentSql->cmd, true, pParentSql->self);

    pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap);
    pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);

    pParentSql->res.code = TSDB_CODE_SUCCESS;
    pParentSql->retry++;

    pParentSql->res.code = TSDB_CODE_SUCCESS;
    tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
        tstrerror(code), pParentSql->retry);

@ -3031,7 +3034,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
  if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
    assert(code == taos_errno(pSql));

    if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && (code != TSDB_CODE_TDB_INVALID_TABLE_ID)) {
    if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && (code != TSDB_CODE_TDB_INVALID_TABLE_ID && code != TSDB_CODE_VND_INVALID_VGROUP_ID)) {
      tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(code), trsupport->numOfRetry);

      int32_t sent = 0;
@ -3142,7 +3145,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
      numOfFailed += 1;

      // clean up tableMeta in cache
      tscFreeQueryInfo(&pSql->cmd, false);
      tscFreeQueryInfo(&pSql->cmd, false, pSql->self);
      SQueryInfo* pQueryInfo = tscGetQueryInfoS(&pSql->cmd);
      STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, 0);
      tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
@ -3164,7 +3167,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
  }

  pParentObj->res.code = TSDB_CODE_SUCCESS;
  tscResetSqlCmd(&pParentObj->cmd, false);
  tscResetSqlCmd(&pParentObj->cmd, false, pParentObj->self);

  // in case of insert, redo parsing the sql string and build a new submit data block, for two reasons:
  // 1. the table Id (tid & uid) may have been updated, and the submit block needs to be updated accordingly.
@ -62,11 +62,11 @@ int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *le
      break;

    case TSDB_DATA_TYPE_FLOAT:
      n = sprintf(str, "%f", GET_FLOAT_VAL(buf));
      n = sprintf(str, "%e", GET_FLOAT_VAL(buf));
      break;

    case TSDB_DATA_TYPE_DOUBLE:
      n = sprintf(str, "%f", GET_DOUBLE_VAL(buf));
      n = sprintf(str, "%e", GET_DOUBLE_VAL(buf));
      break;

    case TSDB_DATA_TYPE_BINARY:
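
Note: the switch from "%f" to "%e" matters because "%f" always prints six digits after the decimal point, so very small or very large float values are silently truncated, while "%e" keeps a normalized mantissa and exponent. A standalone sketch (not part of this commit):

#include <stdio.h>

int main(void) {
  float tiny = 1.234e-7f;
  printf("%%f -> %f\n", tiny);  /* prints 0.000000: the value is lost   */
  printf("%%e -> %e\n", tiny);  /* prints 1.234000e-07: the value stays */
  return 0;
}
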
@ -82,6 +82,22 @@ int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *le
      n = bufSize + 2;
      break;

    case TSDB_DATA_TYPE_UTINYINT:
      n = sprintf(str, "%d", *(uint8_t*)buf);
      break;

    case TSDB_DATA_TYPE_USMALLINT:
      n = sprintf(str, "%d", *(uint16_t*)buf);
      break;

    case TSDB_DATA_TYPE_UINT:
      n = sprintf(str, "%u", *(uint32_t*)buf);
      break;

    case TSDB_DATA_TYPE_UBIGINT:
      n = sprintf(str, "%" PRIu64, *(uint64_t*)buf);
      break;

    default:
      tscError("unsupported type:%d", type);
      return TSDB_CODE_TSC_INVALID_VALUE;
@ -118,6 +134,24 @@ SCond* tsGetSTableQueryCond(STagCond* pTagCond, uint64_t uid) {
  return NULL;
}

STblCond* tsGetTableFilter(SArray* filters, uint64_t uid, int16_t idx) {
  if (filters == NULL) {
    return NULL;
  }

  size_t size = taosArrayGetSize(filters);
  for (int32_t i = 0; i < size; ++i) {
    STblCond* cond = taosArrayGet(filters, i);

    if (uid == cond->uid && (idx >= 0 && cond->idx == idx)) {
      return cond;
    }
  }

  return NULL;
}


void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw) {
  if (tbufTell(bw) == 0) {
    return;
@ -753,8 +787,7 @@ typedef struct SDummyInputInfo {
  SSDataBlock *block;
  STableQueryInfo *pTableQueryInfo;
  SSqlObj *pSql; // refactor: remove it
  int32_t numOfFilterCols;
  SSingleColumnFilterInfo *pFilterInfo;
  SFilterInfo *pFilterInfo;
} SDummyInputInfo;

typedef struct SJoinStatus {
@ -770,38 +803,7 @@ typedef struct SJoinOperatorInfo {
  SRspResultInfo resultInfo; // todo refactor, add this info for each operator
} SJoinOperatorInfo;

static void converNcharFilterColumn(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, int32_t rows, bool *gotNchar) {
  for (int32_t i = 0; i < numOfFilterCols; ++i) {
    if (pFilterInfo[i].info.type == TSDB_DATA_TYPE_NCHAR) {
      pFilterInfo[i].pData2 = pFilterInfo[i].pData;
      pFilterInfo[i].pData = malloc(rows * pFilterInfo[i].info.bytes);
      int32_t bufSize = pFilterInfo[i].info.bytes - VARSTR_HEADER_SIZE;
      for (int32_t j = 0; j < rows; ++j) {
        char* dst = (char *)pFilterInfo[i].pData + j * pFilterInfo[i].info.bytes;
        char* src = (char *)pFilterInfo[i].pData2 + j * pFilterInfo[i].info.bytes;
        int32_t len = 0;
        taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len);
        varDataLen(dst) = len;
      }
      *gotNchar = true;
    }
  }
}

static void freeNcharFilterColumn(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
  for (int32_t i = 0; i < numOfFilterCols; ++i) {
    if (pFilterInfo[i].info.type == TSDB_DATA_TYPE_NCHAR) {
      if (pFilterInfo[i].pData2) {
        tfree(pFilterInfo[i].pData);
        pFilterInfo[i].pData = pFilterInfo[i].pData2;
        pFilterInfo[i].pData2 = NULL;
      }
    }
  }
}


static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SFilterInfo* pFilterInfo) {
  int32_t offset = 0;
  char* pData = pRes->data;
@ -817,14 +819,16 @@ static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SSingleColumnF
  }

  // filter data if needed
  if (numOfFilterCols > 0) {
    doSetFilterColumnInfo(pFilterInfo, numOfFilterCols, pBlock);
  if (pFilterInfo) {
    //doSetFilterColumnInfo(pFilterInfo, numOfFilterCols, pBlock);
    doSetFilterColInfo(pFilterInfo, pBlock);
    bool gotNchar = false;
    converNcharFilterColumn(pFilterInfo, numOfFilterCols, pBlock->info.rows, &gotNchar);
    filterConverNcharColumns(pFilterInfo, pBlock->info.rows, &gotNchar);
    int8_t* p = calloc(pBlock->info.rows, sizeof(int8_t));
    bool all = doFilterDataBlock(pFilterInfo, numOfFilterCols, pBlock->info.rows, p);
    //bool all = doFilterDataBlock(pFilterInfo, numOfFilterCols, pBlock->info.rows, p);
    bool all = filterExecute(pFilterInfo, pBlock->info.rows, p);
    if (gotNchar) {
      freeNcharFilterColumn(pFilterInfo, numOfFilterCols);
      filterFreeNcharColumns(pFilterInfo);
    }
    if (!all) {
      doCompactSDataBlock(pBlock, pBlock->info.rows, p);
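
Note: the rewritten filter path above first builds a selection vector (one int8_t per row) via filterExecute(), then compacts the block only when at least one row was rejected. A minimal standalone sketch of that selection-vector pattern, with hypothetical plain-C helpers (not the TDengine API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* keep rows[i] where sel[i] != 0; returns the new row count */
static int compactRows(int64_t *rows, int n, const int8_t *sel) {
  int k = 0;
  for (int i = 0; i < n; ++i) {
    if (sel[i]) rows[k++] = rows[i];
  }
  return k;
}

int main(void) {
  int64_t rows[] = {10, 20, 30, 40};
  int     n = 4;
  int8_t *sel = calloc(n, sizeof(int8_t));
  bool    all = true;
  for (int i = 0; i < n; ++i) {  /* stand-in for filterExecute() */
    sel[i] = (int8_t)(rows[i] > 15);
    if (!sel[i]) all = false;
  }
  if (!all) n = compactRows(rows, n, sel);  /* cf. doCompactSDataBlock() */
  for (int i = 0; i < n; ++i) printf("%lld\n", (long long)rows[i]);
  free(sel);
  return 0;
}
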
@ -862,7 +866,7 @@ SSDataBlock* doGetDataBlock(void* param, bool* newgroup) {

  pBlock->info.rows = pRes->numOfRows;
  if (pRes->numOfRows != 0) {
    doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo, pInput->numOfFilterCols);
    doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo);
    *newgroup = false;
    return pBlock;
  }
@ -877,7 +881,7 @@ SSDataBlock* doGetDataBlock(void* param, bool* newgroup) {
  }

  pBlock->info.rows = pRes->numOfRows;
  doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo, pInput->numOfFilterCols);
  doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo);
  *newgroup = false;
  return pBlock;
}
@ -920,25 +924,40 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
      if (pOperator->status == OP_EXEC_DONE) {
        return pJoinInfo->pRes;
      }


      SJoinStatus* st0 = &pJoinInfo->status[0];
      SColumnInfoData* p0 = taosArrayGet(st0->pBlock->pDataBlock, 0);
      int64_t* ts0 = (int64_t*) p0->pData;

      if (st0->index >= st0->pBlock->info.rows) {
        continue;
      }

      bool prefixEqual = true;

      while(1) {
        prefixEqual = true;
        for (int32_t i = 1; i < pJoinInfo->numOfUpstream; ++i) {
          SJoinStatus* st = &pJoinInfo->status[i];
          ts0 = (int64_t*) p0->pData;

          SColumnInfoData* p = taosArrayGet(st->pBlock->pDataBlock, 0);
          int64_t* ts = (int64_t*)p->pData;

          if (st->index >= st->pBlock->info.rows || st0->index >= st0->pBlock->info.rows) {
            fetchNextBlockIfCompleted(pOperator, newgroup);
            if (pOperator->status == OP_EXEC_DONE) {
              return pJoinInfo->pRes;
            }

            prefixEqual = false;
            break;
          }

          if (ts[st->index] < ts0[st0->index]) { // less than the first
            prefixEqual = false;

            if ((++(st->index)) >= st->pBlock->info.rows) {
            if ((++(st->index)) >= st->pBlock->info.rows) {
              fetchNextBlockIfCompleted(pOperator, newgroup);
              if (pOperator->status == OP_EXEC_DONE) {
                return pJoinInfo->pRes;
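
Note: doDataBlockJoin implements an n-way sorted-merge intersection on the first column (the timestamp): each upstream keeps a cursor, and whichever cursor is behind the first stream's current timestamp is advanced until all of them agree. A standalone two-way sketch of that cursor logic (hypothetical arrays, not the operator API):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int64_t a[] = {1, 3, 5, 7};
  int64_t b[] = {2, 3, 5, 8};
  int ia = 0, ib = 0, na = 4, nb = 4;
  while (ia < na && ib < nb) {
    if (a[ia] < b[ib]) {
      ++ia;                 /* behind: advance this cursor only */
    } else if (a[ia] > b[ib]) {
      ++ib;
    } else {
      printf("joined ts=%lld\n", (long long)a[ia]);  /* timestamps match */
      ++ia;
      ++ib;
    }
  }
  return 0;
}
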
@ -1053,22 +1072,21 @@ static void destroyDummyInputOperator(void* param, int32_t numOfOutput) {
  pInfo->block = destroyOutputBuf(pInfo->block);
  pInfo->pSql = NULL;

  doDestroyFilterInfo(pInfo->pFilterInfo, pInfo->numOfFilterCols);
  filterFreeInfo(pInfo->pFilterInfo);

  cleanupResultRowInfo(&pInfo->pTableQueryInfo->resInfo);
  tfree(pInfo->pTableQueryInfo);
}

// todo: this operator serves as the adapter between the operator tree and the SqlRes result; remove it later
SOperatorInfo* createDummyInputOperator(SSqlObj* pSql, SSchema* pSchema, int32_t numOfCols, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
SOperatorInfo* createDummyInputOperator(SSqlObj* pSql, SSchema* pSchema, int32_t numOfCols, SFilterInfo* pFilters) {
  assert(numOfCols > 0);
  STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};

  SDummyInputInfo* pInfo = calloc(1, sizeof(SDummyInputInfo));

  pInfo->pSql = pSql;
  pInfo->pFilterInfo = pFilterInfo;
  pInfo->numOfFilterCols = numOfFilterCols;
  pInfo->pFilterInfo = pFilters;
  pInfo->pTableQueryInfo = createTmpTableQueryInfo(win);

  pInfo->block = calloc(numOfCols, sizeof(SSDataBlock));
@ -1156,6 +1174,7 @@ void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, b
  pRes->completed = (pRes->numOfRows == 0);
}

/*
static void createInputDataFilterInfo(SQueryInfo* px, int32_t numOfCol1, int32_t* numOfFilterCols, SSingleColumnFilterInfo** pFilterInfo) {
  SColumnInfo* tableCols = calloc(numOfCol1, sizeof(SColumnInfo));
  for(int32_t i = 0; i < numOfCol1; ++i) {
@ -1173,6 +1192,7 @@ static void createInputDataFilterInfo(SQueryInfo* px, int32_t numOfCol1, int32_t

  tfree(tableCols);
}
*/

void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pSql) {
  SSqlRes* pOutput = &pSql->res;
@ -1201,12 +1221,16 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
    // if it is a join query, create join operator here
    int32_t numOfCol1 = pTableMeta->tableInfo.numOfColumns;

    int32_t numOfFilterCols = 0;
    SSingleColumnFilterInfo* pFilterInfo = NULL;
    createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo);

    SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols);
    SFilterInfo *pFilters = NULL;
    STblCond *pCond = NULL;
    if (px->colCond) {
      pCond = tsGetTableFilter(px->colCond, pTableMeta->id.uid, 0);
      if (pCond && pCond->cond) {
        createQueryFilter(pCond->cond, pCond->len, &pFilters);
      }
    }

    SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilters);
    pOutput->precision = pSqlObjList[0]->res.precision;

    SSchema* schema = NULL;
@ -1222,15 +1246,21 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue

      for(int32_t i = 1; i < px->numOfTables; ++i) {
        STableMeta* pTableMeta1 = tscGetMetaInfo(px, i)->pTableMeta;
        numOfCol1 = pTableMeta1->tableInfo.numOfColumns;
        SFilterInfo *pFilters1 = NULL;

        SSchema* pSchema1 = tscGetTableSchema(pTableMeta1);
        int32_t n = pTableMeta1->tableInfo.numOfColumns;

        int32_t numOfFilterCols1 = 0;
        SSingleColumnFilterInfo* pFilterInfo1 = NULL;
        createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols1, &pFilterInfo1);
        if (px->colCond) {
          pCond = tsGetTableFilter(px->colCond, pTableMeta1->id.uid, i);
          if (pCond && pCond->cond) {
            createQueryFilter(pCond->cond, pCond->len, &pFilters1);
          }
          //createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols1, &pFilterInfo1);
        }

        p[i] = createDummyInputOperator(pSqlObjList[i], pSchema1, n, pFilterInfo1, numOfFilterCols1);
        p[i] = createDummyInputOperator(pSqlObjList[i], pSchema1, n, pFilters1);
        memcpy(&schema[offset], pSchema1, n * sizeof(SSchema));
        offset += n;
      }
@ -1300,12 +1330,13 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) {
    pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free
  }

void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeCachedMeta, uint64_t id) {
  if (pCmd == NULL) {
    return;
  }

  SQueryInfo* pQueryInfo = pCmd->pQueryInfo;

  while(pQueryInfo != NULL) {
    SQueryInfo* p = pQueryInfo->sibling;

@ -1314,7 +1345,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
      SQueryInfo* pUpQueryInfo = taosArrayGetP(pQueryInfo->pUpstream, i);
      freeQueryInfoImpl(pUpQueryInfo);

      clearAllTableMetaInfo(pUpQueryInfo, removeMeta);
      clearAllTableMetaInfo(pUpQueryInfo, removeCachedMeta, id);
      if (pUpQueryInfo->pQInfo != NULL) {
        qDestroyQueryInfo(pUpQueryInfo->pQInfo);
        pUpQueryInfo->pQInfo = NULL;
@ -1330,7 +1361,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
    }

    freeQueryInfoImpl(pQueryInfo);
    clearAllTableMetaInfo(pQueryInfo, removeMeta);
    clearAllTableMetaInfo(pQueryInfo, removeCachedMeta, id);

    if (pQueryInfo->pQInfo != NULL) {
      qDestroyQueryInfo(pQueryInfo->pQInfo);
@ -1359,7 +1390,7 @@ void destroyTableNameList(SInsertStatementParam* pInsertParam) {
  tfree(pInsertParam->pTableNameList);
}

void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta, uint64_t id) {
  pCmd->command = 0;
  pCmd->numOfCols = 0;
  pCmd->count = 0;
@ -1373,19 +1404,8 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
  tfree(pCmd->insertParam.tagData.data);
  pCmd->insertParam.tagData.dataLen = 0;

  tscFreeQueryInfo(pCmd, clearCachedMeta);

  if (pCmd->pTableMetaMap != NULL) {
    STableMetaVgroupInfo* p = taosHashIterate(pCmd->pTableMetaMap, NULL);
    while (p) {
      taosArrayDestroy(p->vgroupIdList);
      tfree(p->pTableMeta);
      p = taosHashIterate(pCmd->pTableMetaMap, p);
    }

    taosHashCleanup(pCmd->pTableMetaMap);
    pCmd->pTableMetaMap = NULL;
  }
  tscFreeQueryInfo(pCmd, clearCachedMeta, id);
  pCmd->pTableMetaMap = tscCleanupTableMetaMap(pCmd->pTableMetaMap);
}

void* tscCleanupTableMetaMap(SHashObj* pTableMetaMap) {
@ -1481,8 +1501,6 @@ void tscFreeSqlObj(SSqlObj* pSql) {
  tscFreeMetaSqlObj(&pSql->metaRid);
  tscFreeMetaSqlObj(&pSql->svgroupRid);

  tscFreeSubobj(pSql);

  SSqlCmd* pCmd = &pSql->cmd;
  int32_t cmd = pCmd->command;
  if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_GLOBALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
@ -1490,6 +1508,8 @@ void tscFreeSqlObj(SSqlObj* pSql) {
    tscRemoveFromSqlList(pSql);
  }

  tscFreeSubobj(pSql);

  pSql->signature = NULL;
  pSql->fp = NULL;
  tfree(pSql->sqlstr);
@ -1500,7 +1520,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
  pSql->self = 0;

  tscFreeSqlResult(pSql);
  tscResetSqlCmd(pCmd, false);
  tscResetSqlCmd(pCmd, false, pSql->self);

  tfree(pCmd->payload);
  pCmd->allocSize = 0;
@ -1775,101 +1795,6 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
  return TSDB_CODE_SUCCESS;
}

static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) {
  SSchema* pSchema = pBuilder->pSchema;
  char* p = (char*)pBuilder->buf;
  int toffset = 0;
  uint16_t nCols = pBuilder->nCols;

  uint8_t memRowType = payloadType(p);
  uint16_t nColsBound = payloadNCols(p);
  if (pBuilder->nCols <= 0 || nColsBound <= 0) {
    return NULL;
  }
  char* pVals = POINTER_SHIFT(p, payloadValuesOffset(p));
  SMemRow* memRow = (SMemRow)pBuilder->pDataBlock;
  memRowSetType(memRow, memRowType);

  // ----------------- Raw payload structure for row:
  /* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
   * |                                 |<----------------- flen ------------->|<--- value part --->|
   * |SMemRowType| dataTLen |  nCols   | colId  | colType | offset   | ...    | value |...|...|... |
   * +-----------+----------+----------+--------------------------------------|--------------------|
   * | uint8_t   | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ...   |.......|...|...|... |
   * +-----------+----------+----------+--------------------------------------+--------------------|
   * 1. offset in column data tuple starts from the value part in case of uint16_t overflow.
   * 2. dataTLen: total length including the header and body.
   */

  if (memRowType == SMEM_ROW_DATA) {
    SDataRow trow = (SDataRow)memRowDataBody(memRow);
    dataRowSetLen(trow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBuilder->flen));
    dataRowSetVersion(trow, pBuilder->sversion);

    p = (char*)payloadBody(pBuilder->buf);
    uint16_t i = 0, j = 0;
    while (j < nCols) {
      if (i >= nColsBound) {
        break;
      }
      int16_t colId = payloadColId(p);
      if (colId == pSchema[j].colId) {
        // ASSERT(payloadColType(p) == pSchema[j].type);
        tdAppendColVal(trow, POINTER_SHIFT(pVals, payloadColOffset(p)), pSchema[j].type, toffset);
        toffset += TYPE_BYTES[pSchema[j].type];
        p = payloadNextCol(p);
        ++i;
        ++j;
      } else if (colId < pSchema[j].colId) {
        p = payloadNextCol(p);
        ++i;
      } else {
        tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset);
        toffset += TYPE_BYTES[pSchema[j].type];
        ++j;
      }
    }

    while (j < nCols) {
      tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset);
      toffset += TYPE_BYTES[pSchema[j].type];
      ++j;
    }

#if 0 // no need anymore
    while (i < nColsBound) {
      p = payloadNextCol(p);
      ++i;
    }
#endif

  } else if (memRowType == SMEM_ROW_KV) {
    SKVRow kvRow = (SKVRow)memRowKvBody(memRow);
    kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nColsBound));
    kvRowSetNCols(kvRow, nColsBound);
    memRowSetKvVersion(memRow, pBuilder->sversion);

    p = (char*)payloadBody(pBuilder->buf);
    int i = 0;
    while (i < nColsBound) {
      int16_t colId = payloadColId(p);
      uint8_t colType = payloadColType(p);
      tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals, payloadColOffset(p)), colId, colType, &toffset);
      //toffset += sizeof(SColIdx);
      p = payloadNextCol(p);
      ++i;
    }

  } else {
    ASSERT(0);
  }
  int32_t rowTLen = memRowTLen(memRow);
  pBuilder->pDataBlock = (char*)pBuilder->pDataBlock + rowTLen; // next row
  pBuilder->pSubmitBlk->dataLen += rowTLen;

  return memRow;
}

// Erase the empty space reserved for binary data
static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SInsertStatementParam* insertParam,
                         SBlockKeyTuple* blkKeyTuple) {
@ -1901,10 +1826,11 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI
    int32_t schemaSize = sizeof(STColumn) * numOfCols;
    pBlock->schemaLen = schemaSize;
  } else {
    for (int32_t j = 0; j < tinfo.numOfColumns; ++j) {
      flen += TYPE_BYTES[pSchema[j].type];
    if (IS_RAW_PAYLOAD(insertParam->payloadType)) {
      for (int32_t j = 0; j < tinfo.numOfColumns; ++j) {
        flen += TYPE_BYTES[pSchema[j].type];
      }
    }

    pBlock->schemaLen = 0;
  }

@ -1931,18 +1857,19 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI
      pBlock->dataLen += memRowTLen(memRow);
    }
  } else {
    SMemRowBuilder rowBuilder;
    rowBuilder.pSchema = pSchema;
    rowBuilder.sversion = pTableMeta->sversion;
    rowBuilder.flen = flen;
    rowBuilder.nCols = tinfo.numOfColumns;
    rowBuilder.pDataBlock = pDataBlock;
    rowBuilder.pSubmitBlk = pBlock;
    rowBuilder.buf = p;

    for (int32_t i = 0; i < numOfRows; ++i) {
      rowBuilder.buf = (blkKeyTuple + i)->payloadAddr;
      tdGenMemRowFromBuilder(&rowBuilder);
      char* payload = (blkKeyTuple + i)->payloadAddr;
      if (isNeedConvertRow(payload)) {
        convertSMemRow(pDataBlock, payload, pTableDataBlock);
        TDRowTLenT rowTLen = memRowTLen(pDataBlock);
        pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen);
        pBlock->dataLen += rowTLen;
      } else {
        TDRowTLenT rowTLen = memRowTLen(payload);
        memcpy(pDataBlock, payload, rowTLen);
        pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen);
        pBlock->dataLen += rowTLen;
      }
    }
  }

@ -1955,9 +1882,9 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI

static int32_t getRowExpandSize(STableMeta* pTableMeta) {
  int32_t result = TD_MEM_ROW_DATA_HEAD_SIZE;
  int32_t columns = tscGetNumOfColumns(pTableMeta);
  int32_t  columns = tscGetNumOfColumns(pTableMeta);
  SSchema* pSchema = tscGetTableSchema(pTableMeta);
  for(int32_t i = 0; i < columns; i++) {
  for (int32_t i = 0; i < columns; i++) {
    if (IS_VAR_DATA_TYPE((pSchema + i)->type)) {
      result += TYPE_BYTES[TSDB_DATA_TYPE_BINARY];
    }
@ -2003,7 +1930,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
    SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
    if (pBlocks->numOfRows > 0) {
      // the maximum expanded size in byte when a row-wise data is converted to SDataRow format
      int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
      int32_t expandSize = isRawPayload ? getRowExpandSize(pOneTableBlock->pTableMeta) : 0;
      STableDataBlocks* dataBuf = NULL;

      int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
@ -2016,7 +1943,8 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
        return ret;
      }

      int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
      int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize +
                         sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);

      if (dataBuf->nAllocSize < destSize) {
        dataBuf->nAllocSize = (uint32_t)(destSize * 1.5);
@ -2060,7 +1988,9 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
                 pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey);
      }

      int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
      int32_t len = pBlocks->numOfRows *
                        (isRawPayload ? (pOneTableBlock->rowSize + expandSize) : getExtendedRowSize(pOneTableBlock)) +
                    sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);

      pBlocks->tid = htonl(pBlocks->tid);
      pBlocks->uid = htobe64(pBlocks->uid);
@ -2267,6 +2197,11 @@ int32_t tscGetResRowLength(SArray* pExprList) {
}

static void destroyFilterInfo(SColumnFilterList* pFilterList) {
  if (pFilterList->filterInfo == NULL) {
    pFilterList->numOfFilters = 0;
    return;
  }

  for(int32_t i = 0; i < pFilterList->numOfFilters; ++i) {
    if (pFilterList->filterInfo[i].filterstr) {
      tfree(pFilterList->filterInfo[i].pz);
@ -2969,6 +2904,64 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
  return 0;
}

int32_t tscColCondCopy(SArray** dest, const SArray* src, uint64_t uid, int16_t tidx) {
  if (src == NULL) {
    return 0;
  }

  size_t s = taosArrayGetSize(src);
  *dest = taosArrayInit(s, sizeof(SCond));

  for (int32_t i = 0; i < s; ++i) {
    STblCond* pCond = taosArrayGet(src, i);
    STblCond c = {0};

    if (tidx > 0) {
      if (!(pCond->uid == uid && pCond->idx == tidx)) {
        continue;
      }

      c.idx = 0;
    } else {
      c.idx = pCond->idx;
    }

    c.len = pCond->len;
    c.uid = pCond->uid;

    if (pCond->len > 0) {
      assert(pCond->cond != NULL);
      c.cond = malloc(c.len);
      if (c.cond == NULL) {
        return -1;
      }

      memcpy(c.cond, pCond->cond, c.len);
    }

    taosArrayPush(*dest, &c);
  }

  return 0;
}

void tscColCondRelease(SArray** pCond) {
  if (*pCond == NULL) {
    return;
  }

  size_t s = taosArrayGetSize(*pCond);
  for (int32_t i = 0; i < s; ++i) {
    STblCond* p = taosArrayGet(*pCond, i);
    tfree(p->cond);
  }

  taosArrayDestroy(*pCond);

  *pCond = NULL;
}


void tscTagCondRelease(STagCond* pTagCond) {
  free(pTagCond->tbnameCond.cond);
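
Note: tscColCondCopy deep-copies each condition, including a fresh malloc of the serialized cond buffer, so the source and the copy can later be released independently by tscColCondRelease without double frees. A standalone sketch of that ownership rule on a hypothetical struct (not the TDengine types):

#include <stdlib.h>
#include <string.h>

typedef struct {
  int   len;
  char *cond;  /* owned, heap-allocated */
} Cond;

/* deep copy: dst gets its own buffer, src stays untouched */
static int condCopy(Cond *dst, const Cond *src) {
  dst->len = src->len;
  dst->cond = NULL;
  if (src->len > 0) {
    dst->cond = malloc(src->len);
    if (dst->cond == NULL) return -1;
    memcpy(dst->cond, src->cond, src->len);
  }
  return 0;
}

static void condRelease(Cond *c) {
  free(c->cond);
  c->cond = NULL;
  c->len = 0;
}

int main(void) {
  Cond a = {0, NULL}, b;
  a.cond = malloc(5);
  if (a.cond != NULL) {
    memcpy(a.cond, "12345", 5);
    a.len = 5;
  }
  if (condCopy(&b, &a) == 0) condRelease(&b);  /* safe: b owns its buffer */
  condRelease(&a);
  return 0;
}
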
@ -3161,6 +3154,7 @@ int32_t tscAddQueryInfo(SSqlCmd* pCmd) {

static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) {
  tscTagCondRelease(&pQueryInfo->tagCond);
  tscColCondRelease(&pQueryInfo->colCond);
  tscFieldInfoClear(&pQueryInfo->fieldsInfo);

  tscExprDestroy(pQueryInfo->exprList);
@ -3251,6 +3245,11 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
    goto _error;
  }

  if (tscColCondCopy(&pQueryInfo->colCond, pSrc->colCond, 0, -1) != 0) {
    code = TSDB_CODE_TSC_OUT_OF_MEMORY;
    goto _error;
  }

  if (pSrc->fillType != TSDB_FILL_NONE) {
    pQueryInfo->fillVal = calloc(1, pSrc->fieldsInfo.numOfOutput * sizeof(int64_t));
    if (pQueryInfo->fillVal == NULL) {
@ -3367,20 +3366,15 @@ SArray* tscVgroupTableInfoDup(SArray* pVgroupTables) {
  return pa;
}

void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta) {
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta, uint64_t id) {
  for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
    STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);

    if (removeMeta) {
      char name[TSDB_TABLE_FNAME_LEN] = {0};
      tNameExtractFullName(&pTableMetaInfo->name, name);
      taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
      tscRemoveCachedTableMeta(pTableMetaInfo, id);
    }

    tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables);
    tscClearTableMetaInfo(pTableMetaInfo);

    free(pTableMetaInfo);
  }

  tfree(pQueryInfo->pTableMetaInfo);
@ -3447,10 +3441,12 @@ void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo) {
  }

  tfree(pTableMetaInfo->pTableMeta);

  pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);

  tscColumnListDestroy(pTableMetaInfo->tagColList);
  pTableMetaInfo->tagColList = NULL;

  free(pTableMetaInfo);
}

void tscResetForNextRetrieve(SSqlRes* pRes) {
@ -3647,6 +3643,11 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
    goto _error;
  }

  if (tscColCondCopy(&pNewQueryInfo->colCond, pQueryInfo->colCond, pTableMetaInfo->pTableMeta->id.uid, tableIndex) != 0) {
    terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
    goto _error;
  }

  if (pQueryInfo->fillType != TSDB_FILL_NONE) {
    // just to keep the memory sanitizer happy
    // refactor later
|
@ -3844,13 +3845,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
|
|||
|
||||
// todo refactor
|
||||
tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self);
|
||||
|
||||
SSqlCmd* pParentCmd = &pParentSql->cmd;
|
||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0);
|
||||
tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self);
|
||||
|
||||
pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap);
|
||||
pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
|
||||
tscResetSqlCmd(&pParentSql->cmd, true, pParentSql->self);
|
||||
|
||||
pParentSql->res.code = TSDB_CODE_SUCCESS;
|
||||
pParentSql->retry++;
|
||||
|
@ -3869,7 +3864,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
|
|||
return;
|
||||
}
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfo(pParentCmd);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
|
||||
executeQuery(pParentSql, pQueryInfo);
|
||||
return;
|
||||
}
|
||||
|
@ -4448,14 +4443,16 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
  return cMeta;
}

int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity) {
int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity, STableMeta**ppSTable) {
  assert(*ppChild != NULL);

  STableMeta* p = NULL;
  size_t sz = 0;
  STableMeta* p = *ppSTable;
  STableMeta* pChild = *ppChild;

  size_t sz = (p != NULL) ? tscGetTableMetaSize(p) : 0;  // the actual capacity of ppSTable's buffer may be larger than sz; that is fine
  if (p != NULL && sz != 0) {
    memset((char *)p, 0, sz);
  }
  taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz);
  *ppSTable = p;

  // tableMeta exists, build the child table meta according to the super table meta
  // the uid needs to be checked in addition to the general name of the super table.
@ -4474,10 +4471,8 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
    memcpy(pChild->schema, p->schema, totalBytes);

    *ppChild = pChild;
    tfree(p);
    return TSDB_CODE_SUCCESS;
  } else { // super table has been removed, current tableMeta is also expired. remove it here
    tfree(p);
    taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
    return -1;
  }
@ -4993,7 +4988,7 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
  return info;
}

void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) {
void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id) {
  char fname[TSDB_TABLE_FNAME_LEN] = {0};
  tNameExtractFullName(&pTableMetaInfo->name, fname);

@ -186,6 +186,7 @@ typedef void *SDataRow;
#define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t))

#define dataRowLen(r) (*(TDRowLenT *)(r))  // 0~65535
#define dataRowEnd(r) POINTER_SHIFT(r, dataRowLen(r))
#define dataRowVersion(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)))
#define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE)
#define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r)))
|
@ -201,14 +202,18 @@ void tdFreeDataRow(SDataRow row);
|
|||
void tdInitDataRow(SDataRow row, STSchema *pSchema);
|
||||
SDataRow tdDataRowDup(SDataRow row);
|
||||
|
||||
|
||||
// offset here not include dataRow header length
|
||||
static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) {
|
||||
static FORCE_INLINE int tdAppendDataColVal(SDataRow row, const void *value, bool isCopyVarData, int8_t type,
|
||||
int32_t offset) {
|
||||
ASSERT(value != NULL);
|
||||
int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE;
|
||||
|
||||
if (IS_VAR_DATA_TYPE(type)) {
|
||||
*(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row);
|
||||
memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value));
|
||||
if (isCopyVarData) {
|
||||
memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value));
|
||||
}
|
||||
dataRowLen(row) += varDataTLen(value);
|
||||
} else {
|
||||
if (offset == 0) {
|
||||
|
@ -223,6 +228,12 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t t
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
// offset here not include dataRow header length
|
||||
static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) {
|
||||
return tdAppendDataColVal(row, value, true, type, offset);
|
||||
}
|
||||
|
||||
// NOTE: offset here including the header size
|
||||
static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t offset) {
|
||||
if (IS_VAR_DATA_TYPE(type)) {
|
||||
|
@ -328,11 +339,10 @@ static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
|
|||
int tdAllocMemForCol(SDataCol *pCol, int maxPoints);
|
||||
|
||||
void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints);
|
||||
void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints);
|
||||
int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints);
|
||||
void dataColSetOffset(SDataCol *pCol, int nEle);
|
||||
|
||||
bool isNEleNull(SDataCol *pCol, int nEle);
|
||||
void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints);
|
||||
|
||||
// Get the data pointer from a column-wised data
|
||||
static FORCE_INLINE const void *tdGetColDataOfRow(SDataCol *pCol, int row) {
|
||||
|
@ -357,13 +367,11 @@ static FORCE_INLINE int32_t dataColGetNEleLen(SDataCol *pDataCol, int rows) {
}

typedef struct {
  int maxRowSize;
  int maxCols;    // max number of columns
  int maxPoints;  // max number of points

  int numOfRows;
  int numOfCols;  // Total number of cols
  int sversion;   // TODO: set sversion
  int maxCols;    // max number of columns
  int maxPoints;  // max number of points
  int numOfRows;
  int numOfCols;  // Total number of cols
  int sversion;   // TODO: set sversion
  SDataCol *cols;
} SDataCols;

@ -407,7 +415,7 @@ static FORCE_INLINE TSKEY dataColsKeyLast(SDataCols *pCols) {
  }
}

SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows);
SDataCols *tdNewDataCols(int maxCols, int maxRows);
void tdResetDataCols(SDataCols *pCols);
int tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
@ -475,9 +483,10 @@ static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) {
}

// offset here does not include the kvRow header length
static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t *offset) {
static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, bool isCopyValData, int16_t colId, int8_t type,
                                         int32_t offset) {
  ASSERT(value != NULL);
  int32_t toffset = *offset + TD_KV_ROW_HEAD_SIZE;
  int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE;
  SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset);
  char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row));

@ -485,10 +494,12 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t
  pColIdx->offset = kvRowLen(row);  // offset of pColIdx including the TD_KV_ROW_HEAD_SIZE

  if (IS_VAR_DATA_TYPE(type)) {
    memcpy(ptr, value, varDataTLen(value));
    if (isCopyValData) {
      memcpy(ptr, value, varDataTLen(value));
    }
    kvRowLen(row) += varDataTLen(value);
  } else {
    if (*offset == 0) {
    if (offset == 0) {
      ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP);
      TKEY tvalue = tdGetTKEY(*(TSKEY *)value);
      memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]);
@ -497,7 +508,6 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t
    }
    kvRowLen(row) += TYPE_BYTES[type];
  }
  *offset += sizeof(SColIdx);

  return 0;
}
@ -592,12 +602,24 @@ typedef void *SMemRow;
#define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE)
#define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE)

#define SMEM_ROW_DATA 0U  // SDataRow
#define SMEM_ROW_KV 1U    // SKVRow
#define SMEM_ROW_DATA 0x0U      // SDataRow
#define SMEM_ROW_KV 0x01U       // SKVRow
#define SMEM_ROW_CONVERT 0x80U  // SMemRow convert flag

#define memRowType(r) (*(uint8_t *)(r))
#define KVRatioKV (0.2f)  // all bool
#define KVRatioPredict (0.4f)
#define KVRatioData (0.75f)  // all bigint
#define KVRatioConvert (0.9f)

#define memRowType(r) ((*(uint8_t *)(r)) & 0x01)

#define memRowSetType(r, t) ((*(uint8_t *)(r)) = (t))  // set the total byte in case of dirty memory
#define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT))  // highest bit
#define isDataRowT(t) (SMEM_ROW_DATA == (((uint8_t)(t)) & 0x01))
#define isDataRow(r) (SMEM_ROW_DATA == memRowType(r))
#define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01))
#define isKvRow(r) (SMEM_ROW_KV == memRowType(r))
#define isNeedConvertRow(r) (((*(uint8_t *)(r)) & 0x80) == SMEM_ROW_CONVERT)

#define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)  // section after flag
#define memRowKvBody(r) \
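
Note: after this change the single row-type byte packs two independent fields: bit 0 selects SDataRow vs SKVRow, and bit 7 marks a row that still needs conversion, which is why memRowType() now masks with 0x01. A standalone sketch of the same encoding:

#include <stdint.h>
#include <stdio.h>

#define ROW_KV      0x01U  /* bit 0: row kind         */
#define ROW_CONVERT 0x80U  /* bit 7: needs conversion */

int main(void) {
  uint8_t flags = ROW_KV | ROW_CONVERT;  /* a KV row that needs converting */
  printf("kind=%u needsConvert=%d\n",
         flags & 0x01U,                    /* cf. memRowType(r)      */
         (flags & 0x80U) == ROW_CONVERT);  /* cf. isNeedConvertRow() */
  return 0;
}
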
|
@ -614,6 +636,14 @@ typedef void *SMemRow;
|
|||
#define memRowLen(r) (isDataRow(r) ? memRowDataLen(r) : memRowKvLen(r))
|
||||
#define memRowTLen(r) (isDataRow(r) ? memRowDataTLen(r) : memRowKvTLen(r)) // using uint32_t/int32_t to store the TLen
|
||||
|
||||
static FORCE_INLINE char *memRowEnd(SMemRow row) {
|
||||
if (isDataRow(row)) {
|
||||
return (char *)dataRowEnd(memRowDataBody(row));
|
||||
} else {
|
||||
return (char *)kvRowEnd(memRowKvBody(row));
|
||||
}
|
||||
}
|
||||
|
||||
#define memRowDataVersion(r) dataRowVersion(memRowDataBody(r))
|
||||
#define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE))
|
||||
#define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version
|
||||
|
@ -631,7 +661,6 @@ typedef void *SMemRow;
|
|||
} \
|
||||
} while (0)
|
||||
|
||||
#define memRowSetType(r, t) (memRowType(r) = (t))
|
||||
#define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l))
|
||||
#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowSetKvVersion(r, v))
|
||||
#define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r))
|
||||
|
@ -664,12 +693,12 @@ static FORCE_INLINE void *tdGetMemRowDataOfColEx(void *row, int16_t colId, int8_
|
|||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE int tdAppendMemColVal(SMemRow row, const void *value, int16_t colId, int8_t type, int32_t offset,
|
||||
int32_t *kvOffset) {
|
||||
static FORCE_INLINE int tdAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId,
|
||||
int8_t type, int32_t offset) {
|
||||
if (isDataRow(row)) {
|
||||
tdAppendColVal(memRowDataBody(row), value, type, offset);
|
||||
tdAppendDataColVal(memRowDataBody(row), value, isCopyVarData, type, offset);
|
||||
} else {
|
||||
tdAppendKvColVal(memRowKvBody(row), value, colId, type, kvOffset);
|
||||
tdAppendKvColVal(memRowKvBody(row), value, isCopyVarData, colId, type, offset);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -691,6 +720,30 @@ static FORCE_INLINE int32_t tdGetColAppendLen(uint8_t rowType, const void *value
|
|||
return len;
|
||||
}
|
||||
|
||||
/**
|
||||
* 1. calculate the delta of AllNullLen for SDataRow.
|
||||
* 2. calculate the real len for SKVRow.
|
||||
*/
|
||||
static FORCE_INLINE void tdGetColAppendDeltaLen(const void *value, int8_t colType, int32_t *dataLen, int32_t *kvLen) {
|
||||
switch (colType) {
|
||||
case TSDB_DATA_TYPE_BINARY: {
|
||||
int32_t varLen = varDataLen(value);
|
||||
*dataLen += (varLen - CHAR_BYTES);
|
||||
*kvLen += (varLen + sizeof(SColIdx));
|
||||
break;
|
||||
}
|
||||
case TSDB_DATA_TYPE_NCHAR: {
|
||||
int32_t varLen = varDataLen(value);
|
||||
*dataLen += (varLen - TSDB_NCHAR_SIZE);
|
||||
*kvLen += (varLen + sizeof(SColIdx));
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
*kvLen += (TYPE_BYTES[colType] + sizeof(SColIdx));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
int16_t colId;
|
||||
|
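
Note: a worked example of the delta bookkeeping above, under the assumption that CHAR_BYTES == 1 and sizeof(SColIdx) == 4: a BINARY value with payload length 10 grows the data-row estimate by 10 - 1 = 9 bytes and the kv-row estimate by 10 + 4 = 14 bytes. A standalone check with those assumed sizes:

#include <stdio.h>

int main(void) {
  int dataLen = 0, kvLen = 0;
  int varLen = 10, charBytes = 1, colIdxSize = 4;  /* assumed sizes */
  dataLen += varLen - charBytes;   /* data-row delta */
  kvLen   += varLen + colIdxSize;  /* kv-row delta   */
  printf("dataLen delta=%d, kvLen delta=%d\n", dataLen, kvLen);  /* 9, 14 */
  return 0;
}
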
@ -706,7 +759,7 @@ static FORCE_INLINE void setSColInfo(SColInfo* colInfo, int16_t colId, uint8_t c

SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2);


#if 0
// ----------------- Raw payload structure for row:
/* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
 * |                                 |<----------------- flen ------------->|<--- value part --->|
@ -752,6 +805,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch

static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SHIFT(pCol, PAYLOAD_COL_HEAD_LEN); }

#endif

#ifdef __cplusplus
}
#endif
@ -41,6 +41,7 @@ extern char tsArbitrator[];
extern int8_t tsArbOnline;
extern int64_t tsArbOnlineTimestamp;
extern int32_t tsDnodeId;
extern int64_t tsDnodeStartTime;

// common
extern int tsRpcTimer;
@ -71,6 +72,7 @@ extern int8_t tsKeepOriginalColumnName;

// client
extern int32_t tsMaxSQLStringLen;
extern int32_t tsMaxWildCardsLen;
extern int8_t tsTscEnableRecordSql;
extern int32_t tsMaxNumOfOrderedResults;
extern int32_t tsMinSlidingTime;
@ -53,6 +53,8 @@ int32_t tVariantToString(tVariant *pVar, char *dst);

int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool includeLengthPrefix);

int32_t tVariantDumpEx(tVariant *pVariant, char *payload, int16_t type, bool includeLengthPrefix, bool *converted, char *extInfo);

int32_t tVariantTypeSetType(tVariant *pVariant, char type);

#ifdef __cplusplus
@ -19,10 +19,10 @@
#include "wchar.h"
#include "tarray.h"

static void dataColSetNEleNull(SDataCol *pCol, int nEle);
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
                               int limit2, int tRows, bool forceSetNull);

// TODO: change caller to use return val
int tdAllocMemForCol(SDataCol *pCol, int maxPoints) {
  int spaceNeeded = pCol->bytes * maxPoints;
  if(IS_VAR_DATA_TYPE(pCol->type)) {
@ -31,7 +31,7 @@ int tdAllocMemForCol(SDataCol *pCol, int maxPoints) {
  if(pCol->spaceSize < spaceNeeded) {
    void* ptr = realloc(pCol->pData, spaceNeeded);
    if(ptr == NULL) {
      uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)pCol->spaceSize,
      uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)spaceNeeded,
             strerror(errno));
      return -1;
    } else {
@ -239,20 +239,19 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) {
  pDataCol->len = 0;
}
// value from timestamp should be TKEY here instead of TSKEY
void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) {
int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) {
  ASSERT(pCol != NULL && value != NULL);

  if (isAllRowsNull(pCol)) {
    if (isNull(value, pCol->type)) {
      // all null values so far, just return
      return;
      return 0;
    }

    if (tdAllocMemForCol(pCol, maxPoints) < 0) return -1;
    if (numOfRows > 0) {
      // find the first non-null value and fill all previous values as NULL
      dataColSetNEleNull(pCol, numOfRows, maxPoints);
    } else {
      tdAllocMemForCol(pCol, maxPoints);
      dataColSetNEleNull(pCol, numOfRows);
    }
  }
@ -268,12 +267,21 @@ void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxP
    memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes);
    pCol->len += pCol->bytes;
  }
  return 0;
}

static FORCE_INLINE const void *tdGetColDataOfRowUnsafe(SDataCol *pCol, int row) {
  if (IS_VAR_DATA_TYPE(pCol->type)) {
    return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
  } else {
    return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row);
  }
}

bool isNEleNull(SDataCol *pCol, int nEle) {
  if(isAllRowsNull(pCol)) return true;
  for (int i = 0; i < nEle; i++) {
    if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
    if (!isNull(tdGetColDataOfRowUnsafe(pCol, i), pCol->type)) return false;
  }
  return true;
}
@ -290,9 +298,7 @@ static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) {
  }
}

void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) {
  tdAllocMemForCol(pCol, maxPoints);

static void dataColSetNEleNull(SDataCol *pCol, int nEle) {
  if (IS_VAR_DATA_TYPE(pCol->type)) {
    pCol->len = 0;
    for (int i = 0; i < nEle; i++) {
@ -318,7 +324,7 @@ void dataColSetOffset(SDataCol *pCol, int nEle) {
  }
}

SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
SDataCols *tdNewDataCols(int maxCols, int maxRows) {
  SDataCols *pCols = (SDataCols *)calloc(1, sizeof(SDataCols));
  if (pCols == NULL) {
    uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCols), strerror(errno));
@ -326,6 +332,9 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
  }

  pCols->maxPoints = maxRows;
  pCols->maxCols = maxCols;
  pCols->numOfRows = 0;
  pCols->numOfCols = 0;

  if (maxCols > 0) {
    pCols->cols = (SDataCol *)calloc(maxCols, sizeof(SDataCol));
@ -342,13 +351,8 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
      pCols->cols[i].pData = NULL;
      pCols->cols[i].dataOff = NULL;
    }

    pCols->maxCols = maxCols;
  }

  pCols->maxRowSize = maxRowSize;


  return pCols;
}

@ -357,8 +361,9 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) {
  int oldMaxCols = pCols->maxCols;
  if (schemaNCols(pSchema) > oldMaxCols) {
    pCols->maxCols = schemaNCols(pSchema);
    pCols->cols = (SDataCol *)realloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols);
    if (pCols->cols == NULL) return -1;
    void* ptr = (SDataCol *)realloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols);
    if (ptr == NULL) return -1;
    pCols->cols = ptr;
    for(i = oldMaxCols; i < pCols->maxCols; i++) {
      pCols->cols[i].pData = NULL;
      pCols->cols[i].dataOff = NULL;
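
Note: the realloc change above is the classic safe-realloc pattern: assigning realloc() straight back to the pointer being grown leaks the old block when realloc fails, so the result goes into a temporary first. A standalone illustration:

#include <stdlib.h>

static int growInts(int **arr, size_t newCount) {
  void *ptr = realloc(*arr, newCount * sizeof(int));
  if (ptr == NULL) return -1;  /* *arr is still valid and can be freed */
  *arr = ptr;
  return 0;
}

int main(void) {
  int *a = NULL;
  if (growInts(&a, 16) == 0) {
    a[0] = 42;
    free(a);
  }
  return 0;
}
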
@ -366,10 +371,6 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) {
    }
  }

  if (schemaTLen(pSchema) > pCols->maxRowSize) {
    pCols->maxRowSize = schemaTLen(pSchema);
  }

  tdResetDataCols(pCols);
  pCols->numOfCols = schemaNCols(pSchema);

@ -398,7 +399,7 @@ SDataCols *tdFreeDataCols(SDataCols *pCols) {
}

SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
  SDataCols *pRet = tdNewDataCols(pDataCols->maxRowSize, pDataCols->maxCols, pDataCols->maxPoints);
  SDataCols *pRet = tdNewDataCols(pDataCols->maxCols, pDataCols->maxPoints);
  if (pRet == NULL) return NULL;

  pRet->numOfCols = pDataCols->numOfCols;
@ -413,7 +414,10 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {

    if (keepData) {
      if (pDataCols->cols[i].len > 0) {
        tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints);
        if (tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints) < 0) {
          tdFreeDataCols(pRet);
          return NULL;
        }
        pRet->cols[i].len = pDataCols->cols[i].len;
        memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len);
        if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
@ -584,9 +588,12 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
    if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) {
      for (int i = 0; i < src2->numOfCols; i++) {
        ASSERT(target->cols[i].type == src2->cols[i].type);
        if (src2->cols[i].len > 0 && (forceSetNull || (!forceSetNull && !isNull(src2->cols[i].pData, src2->cols[i].type)))) {
        if (src2->cols[i].len > 0 && !isNull(src2->cols[i].pData, src2->cols[i].type)) {
          dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
                           target->maxPoints);
        } else if(!forceSetNull && key1 == key2 && src1->cols[i].len > 0) {
          dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
                           target->maxPoints);
        }
      }
      target->numOfRows++;
@ -844,7 +851,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch
      int16_t k;
      for (k = 0; k < nKvNCols; ++k) {
        SColInfo *pColInfo = taosArrayGet(stashRow, k);
        tdAppendKvColVal(kvRow, pColInfo->colVal, pColInfo->colId, pColInfo->colType, &toffset);
        tdAppendKvColVal(kvRow, pColInfo->colVal, true, pColInfo->colId, pColInfo->colType, toffset);
        toffset += sizeof(SColIdx);
      }
      ASSERT(kvLen == memRowTLen(tRow));
    }
@ -118,7 +118,7 @@ void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)) {
  } else if (pNode->nodeType == TSQL_NODE_VALUE) {
    tVariantDestroy(pNode->pVal);
  } else if (pNode->nodeType == TSQL_NODE_COL) {
    free(pNode->pSchema);
    tfree(pNode->pSchema);
  }

  free(pNode);
@ -435,7 +435,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
  expr->_node.optr = TSDB_RELATION_IN;
  tVariant* pVal = exception_calloc(1, sizeof(tVariant));
  right->pVal = pVal;
  pVal->nType = TSDB_DATA_TYPE_ARRAY;
  pVal->nType = TSDB_DATA_TYPE_POINTER_ARRAY;
  pVal->arr = taosArrayInit(2, POINTER_BYTES);

  const char* cond = tbnameCond + QUERY_COND_REL_PREFIX_IN_LEN;
@ -502,6 +502,183 @@ void buildFilterSetFromBinary(void **q, const char *buf, int32_t len) {
  *q = (void *)pObj;
}

void convertFilterSetFromBinary(void **q, const char *buf, int32_t len, uint32_t tType) {
  SBufferReader br = tbufInitReader(buf, len, false);
  uint32_t sType = tbufReadUint32(&br);
  SHashObj *pObj = taosHashInit(256, taosGetDefaultHashFunction(tType), true, false);

  taosHashSetEqualFp(pObj, taosGetDefaultEqualFunction(tType));

  int dummy = -1;
  tVariant tmpVar = {0};
  size_t t = 0;
  int32_t sz = tbufReadInt32(&br);
  void *pvar = NULL;
  int64_t val = 0;
  int32_t bufLen = 0;
  if (IS_NUMERIC_TYPE(sType)) {
    bufLen = 60;  // The maximum length of string that a number is converted to.
  } else {
    bufLen = 128;
  }

  char *tmp = calloc(1, bufLen * TSDB_NCHAR_SIZE);

  for (int32_t i = 0; i < sz; i++) {
    switch (sType) {
      case TSDB_DATA_TYPE_BOOL:
      case TSDB_DATA_TYPE_UTINYINT:
      case TSDB_DATA_TYPE_TINYINT: {
        *(uint8_t *)&val = (uint8_t)tbufReadInt64(&br);
        t = sizeof(val);
        pvar = &val;
        break;
      }
      case TSDB_DATA_TYPE_USMALLINT:
      case TSDB_DATA_TYPE_SMALLINT: {
        *(uint16_t *)&val = (uint16_t)tbufReadInt64(&br);
        t = sizeof(val);
        pvar = &val;
        break;
      }
      case TSDB_DATA_TYPE_UINT:
      case TSDB_DATA_TYPE_INT: {
        *(uint32_t *)&val = (uint32_t)tbufReadInt64(&br);
        t = sizeof(val);
        pvar = &val;
        break;
      }
      case TSDB_DATA_TYPE_TIMESTAMP:
      case TSDB_DATA_TYPE_UBIGINT:
      case TSDB_DATA_TYPE_BIGINT: {
        *(uint64_t *)&val = (uint64_t)tbufReadInt64(&br);
        t = sizeof(val);
        pvar = &val;
        break;
      }
      case TSDB_DATA_TYPE_DOUBLE: {
        *(double *)&val = tbufReadDouble(&br);
        t = sizeof(val);
        pvar = &val;
        break;
      }
      case TSDB_DATA_TYPE_FLOAT: {
        *(float *)&val = (float)tbufReadDouble(&br);
        t = sizeof(val);
        pvar = &val;
        break;
      }
      case TSDB_DATA_TYPE_BINARY: {
        pvar = (char *)tbufReadBinary(&br, &t);
        break;
      }
      case TSDB_DATA_TYPE_NCHAR: {
        pvar = (char *)tbufReadBinary(&br, &t);
        break;
      }
      default:
        taosHashCleanup(pObj);
        *q = NULL;
        return;
    }

    tVariantCreateFromBinary(&tmpVar, (char *)pvar, t, sType);

    if (bufLen < t) {
      tmp = realloc(tmp, t * TSDB_NCHAR_SIZE);
      bufLen = (int32_t)t;
    }

    switch (tType) {
      case TSDB_DATA_TYPE_BOOL:
      case TSDB_DATA_TYPE_UTINYINT:
      case TSDB_DATA_TYPE_TINYINT: {
        if (tVariantDump(&tmpVar, (char *)&val, tType, false)) {
          goto err_ret;
        }
        pvar = &val;
        t = sizeof(val);
        break;
      }
      case TSDB_DATA_TYPE_USMALLINT:
      case TSDB_DATA_TYPE_SMALLINT: {
        if (tVariantDump(&tmpVar, (char *)&val, tType, false)) {
          goto err_ret;
        }
        pvar = &val;
        t = sizeof(val);
        break;
      }
      case TSDB_DATA_TYPE_UINT:
      case TSDB_DATA_TYPE_INT: {
        if (tVariantDump(&tmpVar, (char *)&val, tType, false)) {
          goto err_ret;
        }
        pvar = &val;
        t = sizeof(val);
        break;
      }
      case TSDB_DATA_TYPE_TIMESTAMP:
      case TSDB_DATA_TYPE_UBIGINT:
      case TSDB_DATA_TYPE_BIGINT: {
        if (tVariantDump(&tmpVar, (char *)&val, tType, false)) {
          goto err_ret;
        }
        pvar = &val;
        t = sizeof(val);
        break;
      }
      case TSDB_DATA_TYPE_DOUBLE: {
        if (tVariantDump(&tmpVar, (char *)&val, tType, false)) {
          goto err_ret;
        }
        pvar = &val;
        t = sizeof(val);
        break;
      }
      case TSDB_DATA_TYPE_FLOAT: {
        if (tVariantDump(&tmpVar, (char *)&val, tType, false)) {
          goto err_ret;
        }
        pvar = &val;
        t = sizeof(val);
        break;
      }
      case TSDB_DATA_TYPE_BINARY: {
        if (tVariantDump(&tmpVar, tmp, tType, true)) {
          goto err_ret;
        }
        t = varDataLen(tmp);
        pvar = varDataVal(tmp);
        break;
      }
      case TSDB_DATA_TYPE_NCHAR: {
        if (tVariantDump(&tmpVar, tmp, tType, true)) {
          goto err_ret;
        }
        t = varDataLen(tmp);
        pvar = varDataVal(tmp);
        break;
      }
      default:
        goto err_ret;
    }

    taosHashPut(pObj, (char *)pvar, t, &dummy, sizeof(dummy));
    tVariantDestroy(&tmpVar);
    memset(&tmpVar, 0, sizeof(tmpVar));
  }

  *q = (void *)pObj;
  pObj = NULL;

err_ret:
  tVariantDestroy(&tmpVar);
  taosHashCleanup(pObj);
  tfree(tmp);
}
tExprNode* exprdup(tExprNode* pNode) {
|
||||
if (pNode == NULL) {
|
||||
return NULL;
|
||||
|
|
|
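Note on the new helper above: convertFilterSetFromBinary() re-encodes every serialized
IN-list value into the column type (tType) via tVariantDump() before hashing it, so a later
membership probe can hand the hash table raw column bytes of that same type. A minimal
sketch of such a probe, assuming the TDengine internal headers that declare SHashObj and
taosHashGet() are on the include path (the helper name below is illustrative, not part of
this commit):

    // Returns true when the column value belongs to the IN-list set built by
    // convertFilterSetFromBinary(). Values were normalized to the column type
    // when the set was built, so a byte-wise hash lookup is sufficient here.
    static bool columnValueInSet(SHashObj *pSet, const void *colVal, size_t bytes) {
      return taosHashGet(pSet, (const char *)colVal, bytes) != NULL;
    }
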
@ -25,6 +25,7 @@
#include "tutil.h"
#include "tlocale.h"
#include "ttimezone.h"
#include "tcompare.h"

// cluster
char tsFirst[TSDB_EP_LEN] = {0};

@ -45,6 +46,7 @@ int8_t tsArbOnline = 0;
int64_t tsArbOnlineTimestamp = TSDB_ARB_DUMMY_TIME;
char tsEmail[TSDB_FQDN_LEN] = {0};
int32_t tsDnodeId = 0;
int64_t tsDnodeStartTime = 0;

// common
int32_t tsRpcTimer = 300;

@ -75,6 +77,7 @@ int32_t tsCompressMsgSize = -1;

// client
int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN;
int8_t tsTscEnableRecordSql = 0;

// the maximum number of results for projection query on super table that are returned from

@ -998,6 +1001,16 @@ static void doInitGlobalConfig(void) {
  cfg.unitType = TAOS_CFG_UTYPE_BYTE;
  taosInitConfigOption(cfg);

  cfg.option = "maxWildCardsLength";
  cfg.ptr = &tsMaxWildCardsLen;
  cfg.valType = TAOS_CFG_VTYPE_INT32;
  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
  cfg.minValue = 0;
  cfg.maxValue = TSDB_MAX_FIELD_LEN;
  cfg.ptrLength = 0;
  cfg.unitType = TAOS_CFG_UTYPE_BYTE;
  taosInitConfigOption(cfg);

  cfg.option = "maxNumOfOrderedRes";
  cfg.ptr = &tsMaxNumOfOrderedResults;
  cfg.valType = TAOS_CFG_VTYPE_INT32;

@ -1545,6 +1558,7 @@ static void doInitGlobalConfig(void) {
  cfg.unitType = TAOS_CFG_UTYPE_NONE;
  taosInitConfigOption(cfg);

  assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM);
#ifdef TD_TSZ
  // lossy compress
  cfg.option = "lossyColumns";

@ -61,7 +61,7 @@ bool tscValidateTableNameLength(size_t len) {

// TODO refactor
SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters) {
  if (numOfFilters == 0) {
  if (numOfFilters == 0 || src == NULL) {
    assert(src == NULL);
    return NULL;
  }

@ -70,12 +70,11 @@ SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFil

  memcpy(pFilter, src, sizeof(SColumnFilterInfo) * numOfFilters);
  for (int32_t j = 0; j < numOfFilters; ++j) {

    if (pFilter[j].filterstr) {
      size_t len = (size_t) pFilter[j].len + 1 * TSDB_NCHAR_SIZE;
      pFilter[j].pz = (int64_t) calloc(1, len);

      memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t)len);
      memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t) pFilter[j].len);
    }
  }

@ -372,21 +372,21 @@ static void getStatics_nchr(const void *pData, int32_t numOfRow, int64_t *min, i
}

tDataTypeDescriptor tDataTypes[15] = {
  {TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", NULL, NULL, NULL},
  {TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", tsCompressBool, tsDecompressBool, getStatics_bool},
  {TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", tsCompressTinyint, tsDecompressTinyint, getStatics_i8},
  {TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT", tsCompressSmallint, tsDecompressSmallint, getStatics_i16},
  {TSDB_DATA_TYPE_INT, 3, INT_BYTES, "INT", tsCompressInt, tsDecompressInt, getStatics_i32},
  {TSDB_DATA_TYPE_BIGINT, 6, LONG_BYTES, "BIGINT", tsCompressBigint, tsDecompressBigint, getStatics_i64},
  {TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", tsCompressFloat, tsDecompressFloat, getStatics_f},
  {TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", tsCompressDouble, tsDecompressDouble, getStatics_d},
  {TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", tsCompressString, tsDecompressString, getStatics_bin},
  {TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", tsCompressTimestamp, tsDecompressTimestamp, getStatics_i64},
  {TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", tsCompressString, tsDecompressString, getStatics_nchr},
  {TSDB_DATA_TYPE_UTINYINT, 16, CHAR_BYTES, "TINYINT UNSIGNED", tsCompressTinyint, tsDecompressTinyint, getStatics_u8},
  {TSDB_DATA_TYPE_USMALLINT, 17, SHORT_BYTES, "SMALLINT UNSIGNED", tsCompressSmallint, tsDecompressSmallint, getStatics_u16},
  {TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", tsCompressInt, tsDecompressInt, getStatics_u32},
  {TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", tsCompressBigint, tsDecompressBigint, getStatics_u64},
  {TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", 0, 0, NULL, NULL, NULL},
  {TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", false, true, tsCompressBool, tsDecompressBool, getStatics_bool},
  {TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", INT8_MIN, INT8_MAX, tsCompressTinyint, tsDecompressTinyint, getStatics_i8},
  {TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT", INT16_MIN, INT16_MAX, tsCompressSmallint, tsDecompressSmallint, getStatics_i16},
  {TSDB_DATA_TYPE_INT, 3, INT_BYTES, "INT", INT32_MIN, INT32_MAX, tsCompressInt, tsDecompressInt, getStatics_i32},
  {TSDB_DATA_TYPE_BIGINT, 6, LONG_BYTES, "BIGINT", INT64_MIN, INT64_MAX, tsCompressBigint, tsDecompressBigint, getStatics_i64},
  {TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", 0, 0, tsCompressFloat, tsDecompressFloat, getStatics_f},
  {TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", 0, 0, tsCompressDouble, tsDecompressDouble, getStatics_d},
  {TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", 0, 0, tsCompressString, tsDecompressString, getStatics_bin},
  {TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", INT64_MIN, INT64_MAX, tsCompressTimestamp, tsDecompressTimestamp, getStatics_i64},
  {TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr},
  {TSDB_DATA_TYPE_UTINYINT, 16, CHAR_BYTES, "TINYINT UNSIGNED", 0, UINT8_MAX, tsCompressTinyint, tsDecompressTinyint, getStatics_u8},
  {TSDB_DATA_TYPE_USMALLINT, 17, SHORT_BYTES, "SMALLINT UNSIGNED", 0, UINT16_MAX, tsCompressSmallint, tsDecompressSmallint, getStatics_u16},
  {TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt, getStatics_u32},
  {TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint, getStatics_u64},
};

char tTokenTypeSwitcher[13] = {

@ -405,6 +405,32 @@ char tTokenTypeSwitcher[13] = {
  TSDB_DATA_TYPE_NCHAR,  // TK_NCHAR
};

float floatMin = -FLT_MAX, floatMax = FLT_MAX;
double doubleMin = -DBL_MAX, doubleMax = DBL_MAX;

FORCE_INLINE void* getDataMin(int32_t type) {
  switch (type) {
    case TSDB_DATA_TYPE_FLOAT:
      return &floatMin;
    case TSDB_DATA_TYPE_DOUBLE:
      return &doubleMin;
    default:
      return &tDataTypes[type].minValue;
  }
}

FORCE_INLINE void* getDataMax(int32_t type) {
  switch (type) {
    case TSDB_DATA_TYPE_FLOAT:
      return &floatMax;
    case TSDB_DATA_TYPE_DOUBLE:
      return &doubleMax;
    default:
      return &tDataTypes[type].maxValue;
  }
}


bool isValidDataType(int32_t type) {
  return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_UBIGINT;
}
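The tDataTypes change above adds per-type minValue/maxValue columns, and getDataMin()/getDataMax()
expose them uniformly: FLOAT and DOUBLE route to the FLT_MAX/DBL_MAX globals, everything else to
the table entry. A short sketch of the intended call pattern (sketch only; the caller must cast
the returned pointer according to the type tag, since it aliases either an int64_t table field
or a float/double global):

    int64_t bigintMin = *(int64_t *)getDataMin(TSDB_DATA_TYPE_BIGINT);  // INT64_MIN
    float   floatCeil = *(float *)getDataMax(TSDB_DATA_TYPE_FLOAT);     // FLT_MAX
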
@ -566,6 +592,53 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
  }
}

void operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type) {
  if (optr == TSDB_BINARY_OP_ADD) {
    switch (type) {
      case TSDB_DATA_TYPE_TINYINT:
        *((int8_t *)dst) = GET_INT8_VAL(s1) + GET_INT8_VAL(s2);
        break;
      case TSDB_DATA_TYPE_UTINYINT:
        *((uint8_t *)dst) = GET_UINT8_VAL(s1) + GET_UINT8_VAL(s2);
        break;
      case TSDB_DATA_TYPE_SMALLINT:
        *((int16_t *)dst) = GET_INT16_VAL(s1) + GET_INT16_VAL(s2);
        break;
      case TSDB_DATA_TYPE_USMALLINT:
        *((uint16_t *)dst) = GET_UINT16_VAL(s1) + GET_UINT16_VAL(s2);
        break;
      case TSDB_DATA_TYPE_INT:
        *((int32_t *)dst) = GET_INT32_VAL(s1) + GET_INT32_VAL(s2);
        break;
      case TSDB_DATA_TYPE_UINT:
        *((uint32_t *)dst) = GET_UINT32_VAL(s1) + GET_UINT32_VAL(s2);
        break;
      case TSDB_DATA_TYPE_BIGINT:
        *((int64_t *)dst) = GET_INT64_VAL(s1) + GET_INT64_VAL(s2);
        break;
      case TSDB_DATA_TYPE_UBIGINT:
        *((uint64_t *)dst) = GET_UINT64_VAL(s1) + GET_UINT64_VAL(s2);
        break;
      case TSDB_DATA_TYPE_TIMESTAMP:
        *((int64_t *)dst) = GET_INT64_VAL(s1) + GET_INT64_VAL(s2);
        break;
      case TSDB_DATA_TYPE_FLOAT:
        SET_FLOAT_VAL(dst, GET_FLOAT_VAL(s1) + GET_FLOAT_VAL(s2));
        break;
      case TSDB_DATA_TYPE_DOUBLE:
        SET_DOUBLE_VAL(dst, GET_DOUBLE_VAL(s1) + GET_DOUBLE_VAL(s2));
        break;
      default: {
        assert(0);
        break;
      }
    }
  } else {
    assert(0);
  }
}


void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf) {
  switch (type) {
    case TSDB_DATA_TYPE_INT:
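operateVal() above only implements TSDB_BINARY_OP_ADD; any other operator (or an unsupported
type) trips the assert, so callers are expected to validate the operator first. A minimal
usage sketch (sketch only, assuming the ttype/tutil headers are included; dst, s1 and s2 must
all point at values of the stated type):

    int32_t a = 3, b = 4, sum = 0;
    operateVal(&sum, &a, &b, TSDB_BINARY_OP_ADD, TSDB_DATA_TYPE_INT);
    // sum is now 7
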
@ -23,6 +23,13 @@
#include "tutil.h"
#include "tvariant.h"

#define SET_EXT_INFO(converted, res, minv, maxv, exti) do {                 \
  if (converted == NULL || exti == NULL || *converted == false) { break; }  \
  if ((res) < (minv)) { *exti = -1; break; }                                \
  if ((res) > (maxv)) { *exti = 1; break; }                                 \
  assert(0);                                                                \
} while (0)

void tVariantCreate(tVariant *pVar, SStrToken *token) {
  int32_t ret = 0;
  int32_t type = token->type;
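The SET_EXT_INFO macro only acts when a conversion actually ran (*converted == true) and both
pointers are non-NULL; it records the overflow direction in *exti (-1 below minv, +1 above maxv)
and asserts if invoked for an in-range result. A hedged illustration of the call pattern used by
tVariantDumpEx() further down (the variable names here are hypothetical):

    bool converted = true;   // a numeric conversion was attempted
    char extInfo = 0;
    int64_t res = 300;       // does not fit a signed tinyint
    SET_EXT_INFO(&converted, res, INT8_MIN + 1, INT8_MAX, &extInfo);
    // extInfo is now 1: the result overflowed the upper bound
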
@ -184,7 +191,7 @@ void tVariantDestroy(tVariant *pVar) {
  }

  // NOTE: this is only for string array
  if (pVar->nType == TSDB_DATA_TYPE_ARRAY) {
  if (pVar->nType == TSDB_DATA_TYPE_POINTER_ARRAY) {
    size_t num = taosArrayGetSize(pVar->arr);
    for(size_t i = 0; i < num; i++) {
      void* p = taosArrayGetP(pVar->arr, i);

@ -192,6 +199,9 @@ void tVariantDestroy(tVariant *pVar) {
    }
    taosArrayDestroy(pVar->arr);
    pVar->arr = NULL;
  } else if (pVar->nType == TSDB_DATA_TYPE_VALUE_ARRAY) {
    taosArrayDestroy(pVar->arr);
    pVar->arr = NULL;
  }
}


@ -220,7 +230,7 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {

  if (IS_NUMERIC_TYPE(pSrc->nType) || (pSrc->nType == TSDB_DATA_TYPE_BOOL)) {
    pDst->i64 = pSrc->i64;
  } else if (pSrc->nType == TSDB_DATA_TYPE_ARRAY) {          // this is only for string array
  } else if (pSrc->nType == TSDB_DATA_TYPE_POINTER_ARRAY) {  // this is only for string array
    size_t num = taosArrayGetSize(pSrc->arr);
    pDst->arr = taosArrayInit(num, sizeof(char*));
    for(size_t i = 0; i < num; i++) {

@ -228,9 +238,18 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
      char* n = strdup(p);
      taosArrayPush(pDst->arr, &n);
    }
  } else if (pSrc->nType == TSDB_DATA_TYPE_VALUE_ARRAY) {
    size_t num = taosArrayGetSize(pSrc->arr);
    pDst->arr = taosArrayInit(num, sizeof(int64_t));
    pDst->nLen = pSrc->nLen;
    assert(pSrc->nLen == num);
    for(size_t i = 0; i < num; i++) {
      int64_t *p = taosArrayGet(pSrc->arr, i);
      taosArrayPush(pDst->arr, p);
    }
  }

  if (pDst->nType != TSDB_DATA_TYPE_ARRAY) {
  if (pDst->nType != TSDB_DATA_TYPE_POINTER_ARRAY && pDst->nType != TSDB_DATA_TYPE_VALUE_ARRAY) {
    pDst->nLen = tDataTypes[pDst->nType].bytes;
  }
}

@ -450,7 +469,7 @@ static FORCE_INLINE int32_t convertToDouble(char *pStr, int32_t len, double *val
  return 0;
}

static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result, int32_t type, bool issigned, bool releaseVariantPtr) {
static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result, int32_t type, bool issigned, bool releaseVariantPtr, bool *converted) {
  if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
    setNull((char *)result, type, tDataTypes[type].bytes);
    return 0;

@ -540,6 +559,10 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
    }
  }

  if (converted) {
    *converted = true;
  }

  bool code = false;

  uint64_t ui = 0;
@ -602,6 +625,18 @@ static int32_t convertToBool(tVariant *pVariant, int64_t *pDest) {
 * to column type defined in schema
 */
int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool includeLengthPrefix) {
  return tVariantDumpEx(pVariant, payload, type, includeLengthPrefix, NULL, NULL);
}

/*
 * transfer data from variant; serves as the implicit data conversion from the input
 * SQL string type (pVariant->nType) to the column type defined in the schema
 */
int32_t tVariantDumpEx(tVariant *pVariant, char *payload, int16_t type, bool includeLengthPrefix, bool *converted, char *extInfo) {
  if (converted) {
    *converted = false;
  }

  if (pVariant == NULL || (pVariant->nType != 0 && !isValidDataType(pVariant->nType))) {
    return -1;
  }
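With the extended entry point, a caller can distinguish "bad input" from "numeric overflow":
when tVariantDumpEx() fails after setting *converted, SET_EXT_INFO has stored the overflow
direction in *extInfo. A sketch of that pattern (sketch only; var stands for a previously
populated tVariant):

    bool converted = false;
    char extInfo = 0;
    int8_t payload = 0;
    if (tVariantDumpEx(&var, (char *)&payload, TSDB_DATA_TYPE_TINYINT,
                       false, &converted, &extInfo) < 0) {
      if (converted) {
        // extInfo == -1: value below INT8_MIN + 1; extInfo == 1: above INT8_MAX
      }
    }
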
@ -620,7 +655,8 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
      }

      case TSDB_DATA_TYPE_TINYINT: {
        if (convertToInteger(pVariant, &result, type, true, false) < 0) {
        if (convertToInteger(pVariant, &result, type, true, false, converted) < 0) {
          SET_EXT_INFO(converted, result, INT8_MIN + 1, INT8_MAX, extInfo);
          return -1;
        }
        *((int8_t *)payload) = (int8_t) result;

@ -628,7 +664,8 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
      }

      case TSDB_DATA_TYPE_UTINYINT: {
        if (convertToInteger(pVariant, &result, type, false, false) < 0) {
        if (convertToInteger(pVariant, &result, type, false, false, converted) < 0) {
          SET_EXT_INFO(converted, result, 0, UINT8_MAX - 1, extInfo);
          return -1;
        }
        *((uint8_t *)payload) = (uint8_t) result;

@ -636,7 +673,8 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
      }

      case TSDB_DATA_TYPE_SMALLINT: {
        if (convertToInteger(pVariant, &result, type, true, false) < 0) {
        if (convertToInteger(pVariant, &result, type, true, false, converted) < 0) {
          SET_EXT_INFO(converted, result, INT16_MIN + 1, INT16_MAX, extInfo);
          return -1;
        }
        *((int16_t *)payload) = (int16_t)result;

@ -644,7 +682,8 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
      }

      case TSDB_DATA_TYPE_USMALLINT: {
        if (convertToInteger(pVariant, &result, type, false, false) < 0) {
        if (convertToInteger(pVariant, &result, type, false, false, converted) < 0) {
          SET_EXT_INFO(converted, result, 0, UINT16_MAX - 1, extInfo);
          return -1;
        }
        *((uint16_t *)payload) = (uint16_t)result;

@ -652,7 +691,8 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
      }

      case TSDB_DATA_TYPE_INT: {
        if (convertToInteger(pVariant, &result, type, true, false) < 0) {
        if (convertToInteger(pVariant, &result, type, true, false, converted) < 0) {
          SET_EXT_INFO(converted, result, INT32_MIN + 1, INT32_MAX, extInfo);
          return -1;
        }
        *((int32_t *)payload) = (int32_t)result;

@ -660,7 +700,8 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
      }

      case TSDB_DATA_TYPE_UINT: {
        if (convertToInteger(pVariant, &result, type, false, false) < 0) {
        if (convertToInteger(pVariant, &result, type, false, false, converted) < 0) {
          SET_EXT_INFO(converted, result, 0, UINT32_MAX - 1, extInfo);
          return -1;
        }
        *((uint32_t *)payload) = (uint32_t)result;

@ -668,7 +709,8 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
      }

      case TSDB_DATA_TYPE_BIGINT: {
        if (convertToInteger(pVariant, &result, type, true, false) < 0) {
        if (convertToInteger(pVariant, &result, type, true, false, converted) < 0) {
          SET_EXT_INFO(converted, (int64_t)result, INT64_MIN + 1, INT64_MAX, extInfo);
          return -1;
        }
        *((int64_t *)payload) = (int64_t)result;

@ -676,7 +718,8 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
      }

      case TSDB_DATA_TYPE_UBIGINT: {
        if (convertToInteger(pVariant, &result, type, false, false) < 0) {
        if (convertToInteger(pVariant, &result, type, false, false, converted) < 0) {
          SET_EXT_INFO(converted, (uint64_t)result, 0, UINT64_MAX - 1, extInfo);
          return -1;
        }
        *((uint64_t *)payload) = (uint64_t)result;

@ -696,11 +739,37 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
          return -1;
        }

        if (converted) {
          *converted = true;
        }

        if (value > FLT_MAX || value < -FLT_MAX) {
          SET_EXT_INFO(converted, value, -FLT_MAX, FLT_MAX, extInfo);
          return -1;
        }
        SET_FLOAT_VAL(payload, value);
      }
    } else if (pVariant->nType == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(pVariant->nType) || IS_UNSIGNED_NUMERIC_TYPE(pVariant->nType)) {
      if (converted) {
        *converted = true;
      }

      if (pVariant->i64 > FLT_MAX || pVariant->i64 < -FLT_MAX) {
        SET_EXT_INFO(converted, pVariant->i64, -FLT_MAX, FLT_MAX, extInfo);
        return -1;
      }

      SET_FLOAT_VAL(payload, pVariant->i64);
    } else if (IS_FLOAT_TYPE(pVariant->nType)) {
      if (converted) {
        *converted = true;
      }

      if (pVariant->dKey > FLT_MAX || pVariant->dKey < -FLT_MAX) {
        SET_EXT_INFO(converted, pVariant->dKey, -FLT_MAX, FLT_MAX, extInfo);
        return -1;
      }

      SET_FLOAT_VAL(payload, pVariant->dKey);
    } else if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
      *((uint32_t *)payload) = TSDB_DATA_FLOAT_NULL;
@ -824,6 +893,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
  return 0;
}


/*
 * In variant, bool/smallint/tinyint/int/bigint share the same attribution of
 * structure, so the type conversion otherwise required can be skipped

@ -848,7 +918,7 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) {
    case TSDB_DATA_TYPE_BIGINT:
    case TSDB_DATA_TYPE_TINYINT:
    case TSDB_DATA_TYPE_SMALLINT: {
      convertToInteger(pVariant, &(pVariant->i64), type, true, true);
      convertToInteger(pVariant, &(pVariant->i64), type, true, true, NULL);
      pVariant->nType = TSDB_DATA_TYPE_BIGINT;
      break;
    }

@ -113,7 +113,6 @@
        </includes>
        <excludes>
          <exclude>**/AppMemoryLeakTest.java</exclude>
          <exclude>**/AuthenticationTest.java</exclude>
          <exclude>**/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java</exclude>
          <exclude>**/DatetimeBefore1970Test.java</exclude>
          <exclude>**/FailOverTest.java</exclude>

@ -77,8 +77,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
    }

    public boolean supportsMixedCaseIdentifiers() throws SQLException {
        // whether identifiers of objects such as databases and tables are stored in mixed case
        return false;
        return false; // whether identifiers of objects such as databases and tables are stored in mixed case
    }

    public boolean storesUpperCaseIdentifiers() throws SQLException {

@ -514,7 +513,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col6 = new ColumnMetaData();
        col6.setColIndex(colIndex);
        col6.setColName("TYPE_CAT");
        col6.setColType(Types.NCHAR);
        col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col6;
    }

@ -522,7 +521,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col7 = new ColumnMetaData();
        col7.setColIndex(colIndex);
        col7.setColName("TYPE_SCHEM");
        col7.setColType(Types.NCHAR);
        col7.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col7;
    }

@ -530,7 +529,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col8 = new ColumnMetaData();
        col8.setColIndex(colIndex);
        col8.setColName("TYPE_NAME");
        col8.setColType(Types.NCHAR);
        col8.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col8;
    }

@ -538,7 +537,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col9 = new ColumnMetaData();
        col9.setColIndex(colIndex);
        col9.setColName("SELF_REFERENCING_COL_NAME");
        col9.setColType(Types.NCHAR);
        col9.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col9;
    }

@ -546,7 +545,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col10 = new ColumnMetaData();
        col10.setColIndex(colIndex);
        col10.setColName("REF_GENERATION");
        col10.setColType(Types.NCHAR);
        col10.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col10;
    }

@ -592,7 +591,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col4 = new ColumnMetaData();
        col4.setColIndex(colIndex);
        col4.setColName("TABLE_TYPE");
        col4.setColType(Types.NCHAR);
        col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col4;
    }

@ -734,7 +733,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col1 = new ColumnMetaData();
        col1.setColIndex(colIndex);
        col1.setColName("TABLE_CAT");
        col1.setColType(Types.NCHAR);
        col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col1;
    }

@ -742,7 +741,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col2 = new ColumnMetaData();
        col2.setColIndex(colIndex);
        col2.setColName("TABLE_SCHEM");
        col2.setColType(Types.NCHAR);
        col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col2;
    }

@ -751,7 +750,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        col3.setColIndex(colIndex);
        col3.setColName("TABLE_NAME");
        col3.setColSize(193);
        col3.setColType(Types.NCHAR);
        col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col3;
    }

@ -760,7 +759,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        col4.setColIndex(colIndex);
        col4.setColName("COLUMN_NAME");
        col4.setColSize(65);
        col4.setColType(Types.NCHAR);
        col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col4;
    }

@ -768,7 +767,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col5 = new ColumnMetaData();
        col5.setColIndex(colIndex);
        col5.setColName("DATA_TYPE");
        col5.setColType(Types.INTEGER);
        col5.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col5;
    }

@ -776,7 +775,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col7 = new ColumnMetaData();
        col7.setColIndex(7);
        col7.setColName("COLUMN_SIZE");
        col7.setColType(Types.INTEGER);
        col7.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col7;
    }

@ -791,7 +790,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col9 = new ColumnMetaData();
        col9.setColIndex(9);
        col9.setColName("DECIMAL_DIGITS");
        col9.setColType(Types.INTEGER);
        col9.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col9;
    }

@ -799,7 +798,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col10 = new ColumnMetaData();
        col10.setColIndex(10);
        col10.setColName("NUM_PREC_RADIX");
        col10.setColType(Types.INTEGER);
        col10.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col10;
    }

@ -807,7 +806,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col11 = new ColumnMetaData();
        col11.setColIndex(11);
        col11.setColName("NULLABLE");
        col11.setColType(Types.INTEGER);
        col11.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col11;
    }

@ -815,7 +814,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col12 = new ColumnMetaData();
        col12.setColIndex(colIndex);
        col12.setColName("REMARKS");
        col12.setColType(Types.NCHAR);
        col12.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col12;
    }

@ -823,7 +822,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col13 = new ColumnMetaData();
        col13.setColIndex(13);
        col13.setColName("COLUMN_DEF");
        col13.setColType(Types.NCHAR);
        col13.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col13;
    }

@ -831,7 +830,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col14 = new ColumnMetaData();
        col14.setColIndex(14);
        col14.setColName("SQL_DATA_TYPE");
        col14.setColType(Types.INTEGER);
        col14.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col14;
    }

@ -839,7 +838,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col15 = new ColumnMetaData();
        col15.setColIndex(15);
        col15.setColName("SQL_DATETIME_SUB");
        col15.setColType(Types.INTEGER);
        col15.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col15;
    }

@ -847,7 +846,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col16 = new ColumnMetaData();
        col16.setColIndex(16);
        col16.setColName("CHAR_OCTET_LENGTH");
        col16.setColType(Types.INTEGER);
        col16.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col16;
    }

@ -855,7 +854,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col17 = new ColumnMetaData();
        col17.setColIndex(17);
        col17.setColName("ORDINAL_POSITION");
        col17.setColType(Types.INTEGER);
        col17.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col17;
    }

@ -863,7 +862,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col18 = new ColumnMetaData();
        col18.setColIndex(18);
        col18.setColName("IS_NULLABLE");
        col18.setColType(Types.NCHAR);
        col18.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col18;
    }

@ -871,7 +870,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col19 = new ColumnMetaData();
        col19.setColIndex(19);
        col19.setColName("SCOPE_CATALOG");
        col19.setColType(Types.NCHAR);
        col19.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col19;
    }

@ -879,7 +878,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col20 = new ColumnMetaData();
        col20.setColIndex(20);
        col20.setColName("SCOPE_SCHEMA");
        col20.setColType(Types.NCHAR);
        col20.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col20;
    }

@ -887,7 +886,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col21 = new ColumnMetaData();
        col21.setColIndex(21);
        col21.setColName("SCOPE_TABLE");
        col21.setColType(Types.NCHAR);
        col21.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col21;
    }

@ -903,7 +902,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col23 = new ColumnMetaData();
        col23.setColIndex(23);
        col23.setColName("IS_AUTOINCREMENT");
        col23.setColType(Types.NCHAR);
        col23.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col23;
    }

@ -911,7 +910,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col24 = new ColumnMetaData();
        col24.setColIndex(24);
        col24.setColName("IS_GENERATEDCOLUMN");
        col24.setColType(Types.NCHAR);
        col24.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col24;
    }

@ -1205,7 +1204,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col5 = new ColumnMetaData();
        col5.setColIndex(colIndex);
        col5.setColName("KEY_SEQ");
        col5.setColType(Types.SMALLINT);
        col5.setColType(TSDBConstants.TSDB_DATA_TYPE_SMALLINT);
        return col5;
    }

@ -1213,7 +1212,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col6 = new ColumnMetaData();
        col6.setColIndex(colIndex);
        col6.setColName("PK_NAME");
        col6.setColType(Types.NCHAR);
        col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col6;
    }

@ -1275,7 +1274,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col4 = new ColumnMetaData();
        col4.setColIndex(colIndex);
        col4.setColName("SUPERTABLE_NAME");
        col4.setColType(Types.NCHAR);
        col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col4;
    }

@ -16,7 +16,7 @@ package com.taosdata.jdbc;

public class ColumnMetaData {

    private int colType = 0;
    private int colType = 0; //taosType
    private String colName = null;
    private int colSize = -1;
    private int colIndex = 0;

@ -68,71 +68,61 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet {
    @Override
    public String getString(int columnIndex) throws SQLException {
        int colType = columnMetaDataList.get(columnIndex - 1).getColType();
        int nativeType = TSDBConstants.jdbcType2TaosType(colType);
        return rowCursor.getString(columnIndex, nativeType);
        return rowCursor.getString(columnIndex, colType);
    }

    @Override
    public boolean getBoolean(int columnIndex) throws SQLException {
        int colType = columnMetaDataList.get(columnIndex - 1).getColType();
        int nativeType = TSDBConstants.jdbcType2TaosType(colType);
        return rowCursor.getBoolean(columnIndex, nativeType);
        return rowCursor.getBoolean(columnIndex, colType);
    }

    @Override
    public byte getByte(int columnIndex) throws SQLException {
        int colType = columnMetaDataList.get(columnIndex - 1).getColType();
        int nativeType = TSDBConstants.jdbcType2TaosType(colType);
        return (byte) rowCursor.getInt(columnIndex, nativeType);
        return (byte) rowCursor.getInt(columnIndex, colType);
    }

    @Override
    public short getShort(int columnIndex) throws SQLException {
        int colType = columnMetaDataList.get(columnIndex - 1).getColType();
        int nativeType = TSDBConstants.jdbcType2TaosType(colType);
        return (short) rowCursor.getInt(columnIndex, nativeType);
        return (short) rowCursor.getInt(columnIndex, colType);
    }

    @Override
    public int getInt(int columnIndex) throws SQLException {
        int colType = columnMetaDataList.get(columnIndex - 1).getColType();
        int nativeType = TSDBConstants.jdbcType2TaosType(colType);
        return rowCursor.getInt(columnIndex, nativeType);
        return rowCursor.getInt(columnIndex, colType);
    }

    @Override
    public long getLong(int columnIndex) throws SQLException {
        int colType = columnMetaDataList.get(columnIndex - 1).getColType();
        int nativeType = TSDBConstants.jdbcType2TaosType(colType);
        return rowCursor.getLong(columnIndex, nativeType);
        return rowCursor.getLong(columnIndex, colType);
    }

    @Override
    public float getFloat(int columnIndex) throws SQLException {
        int colType = columnMetaDataList.get(columnIndex - 1).getColType();
        int nativeType = TSDBConstants.jdbcType2TaosType(colType);
        return rowCursor.getFloat(columnIndex, nativeType);
        return rowCursor.getFloat(columnIndex, colType);
    }

    @Override
    public double getDouble(int columnIndex) throws SQLException {
        int colType = columnMetaDataList.get(columnIndex - 1).getColType();
        int nativeType = TSDBConstants.jdbcType2TaosType(colType);
        return rowCursor.getDouble(columnIndex, nativeType);
        return rowCursor.getDouble(columnIndex, colType);
    }

    @Override
    public byte[] getBytes(int columnIndex) throws SQLException {
        int colType = columnMetaDataList.get(columnIndex - 1).getColType();
        int nativeType = TSDBConstants.jdbcType2TaosType(colType);
        return (rowCursor.getString(columnIndex, nativeType)).getBytes();
        return (rowCursor.getString(columnIndex, colType)).getBytes();
    }

    @Override
    public Timestamp getTimestamp(int columnIndex) throws SQLException {
        int colType = columnMetaDataList.get(columnIndex - 1).getColType();
        int nativeType = TSDBConstants.jdbcType2TaosType(colType);
        return rowCursor.getTimestamp(columnIndex, nativeType);
        return rowCursor.getTimestamp(columnIndex, colType);
    }

    @Override

@ -158,8 +148,7 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet {
    @Override
    public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
        int colType = columnMetaDataList.get(columnIndex - 1).getColType();
        int nativeType = TSDBConstants.jdbcType2TaosType(colType);
        double value = rowCursor.getDouble(columnIndex, nativeType);
        double value = rowCursor.getDouble(columnIndex, colType);
        return new BigDecimal(value);
    }

@ -129,8 +129,9 @@ public abstract class TSDBConstants {
                return Types.TIMESTAMP;
            case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
                return Types.NCHAR;
            default:
                throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type: " + taosType + " in tdengine");
        }
        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
    }

    public static String taosType2JdbcTypeName(int taosType) throws SQLException {

@ -160,7 +161,7 @@ public abstract class TSDBConstants {
            case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
                return "NCHAR";
            default:
                throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
                throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type: " + taosType + " in tdengine");
        }
    }

@ -187,7 +188,7 @@ public abstract class TSDBConstants {
            case Types.NCHAR:
                return TSDBConstants.TSDB_DATA_TYPE_NCHAR;
        }
        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE);
        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE, "unknown sql type: " + jdbcType + " in tdengine");
    }

    public static String jdbcType2TaosTypeName(int jdbcType) throws SQLException {

@ -213,7 +214,7 @@ public abstract class TSDBConstants {
            case Types.NCHAR:
                return "NCHAR";
            default:
                throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE);
                throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE, "unknown sql type: " + jdbcType + " in tdengine");
        }
    }

@ -14,6 +14,8 @@
 *****************************************************************************/
package com.taosdata.jdbc;

import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.sql.*;
import java.util.*;
import java.util.logging.Logger;

@ -127,6 +129,11 @@ public class TSDBDriver extends AbstractDriver {
            return null;
        }

        if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER))
            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED);
        if (!props.containsKey(TSDBDriver.PROPERTY_KEY_PASSWORD))
            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED);

        try {
            TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE),
                    (String) props.get(PROPERTY_KEY_CHARSET), (String) props.get(PROPERTY_KEY_TIME_ZONE));

@ -33,6 +33,8 @@ public class TSDBError {
        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE, "numeric value out of range");
        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type in tdengine");
        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision");
        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, "user is required");
        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, "password is required");

        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error");

@ -29,6 +29,9 @@ public class TSDBErrorNumbers {
    public static final int ERROR_UNKNOWN_TIMESTAMP_PRECISION = 0x2316; // unknown timestamp precision
    public static final int ERROR_RESTFul_Client_Protocol_Exception = 0x2317;
    public static final int ERROR_RESTFul_Client_IOException = 0x2318;
    public static final int ERROR_USER_IS_REQUIRED = 0x2319;     // user is required
    public static final int ERROR_PASSWORD_IS_REQUIRED = 0x231a; // password is required


    public static final int ERROR_UNKNOWN = 0x2350; // unknown error

@ -67,6 +70,8 @@ public class TSDBErrorNumbers {
        errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE);
        errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PRECISION);
        errorNumbers.add(ERROR_RESTFul_Client_IOException);
        errorNumbers.add(ERROR_USER_IS_REQUIRED);
        errorNumbers.add(ERROR_PASSWORD_IS_REQUIRED);

        errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception);

@ -36,7 +36,6 @@ public class TSDBJNIConnector {

    static {
        System.loadLibrary("taos");
        System.out.println("java.library.path:" + System.getProperty("java.library.path"));
    }

    public boolean isClosed() {

@ -110,7 +110,7 @@ public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaD

        ColumnMetaData columnMetaData = this.colMetaDataList.get(column - 1);
        switch (columnMetaData.getColType()) {


            case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
                return 5;
            case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:

@ -7,6 +7,7 @@ import com.taosdata.jdbc.utils.HttpClientPoolUtil;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.sql.*;
import java.util.Properties;
import java.util.logging.Logger;

@ -40,8 +41,13 @@ public class RestfulDriver extends AbstractDriver {

        String loginUrl = "http://" + host + ":" + port + "/rest/login/" + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/" + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + "";
        try {
            String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), "UTF-8");
            String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), "UTF-8");
            if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER))
                throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED);
            if (!props.containsKey(TSDBDriver.PROPERTY_KEY_PASSWORD))
                throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED);

            String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), StandardCharsets.UTF_8.displayName());
            String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), StandardCharsets.UTF_8.displayName());
            loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + "";
        } catch (UnsupportedEncodingException e) {
            e.printStackTrace();

@ -7,6 +7,7 @@ import com.taosdata.jdbc.AbstractStatement;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;
import com.taosdata.jdbc.enums.TimestampFormat;
import com.taosdata.jdbc.utils.HttpClientPoolUtil;
import com.taosdata.jdbc.utils.SqlSyntaxValidator;

@ -45,9 +46,7 @@ public class RestfulStatement extends AbstractStatement {
        if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql);

        final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";

        return executeOneUpdate(url, sql);
        return executeOneUpdate(sql);
    }

    @Override

@ -62,34 +61,25 @@ public class RestfulStatement extends AbstractStatement {
    public boolean execute(String sql) throws SQLException {
        if (isClosed())
            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
        if (!SqlSyntaxValidator.isValidForExecute(sql))
            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE, "not a valid sql for execute: " + sql);

        // if a "use" statement was executed, the catalog of the current Statement should be switched to the new database
        boolean result = true;
        String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
        if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("TIMESTAMP")) {
            url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
        }
        if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("UTC")) {
            url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
        }

        if (SqlSyntaxValidator.isUseSql(sql)) {
            HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
            HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken());
            this.database = sql.trim().replace("use", "").trim();
            this.conn.setCatalog(this.database);
            result = false;
        } else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
            executeOneQuery(sql);
        } else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
            executeOneUpdate(url, sql);
            executeOneUpdate(sql);
            result = false;
        } else {
            if (SqlSyntaxValidator.isValidForExecuteQuery(sql)) {
                executeQuery(sql);
                executeOneQuery(sql);
            } else {
                executeUpdate(sql);
                executeOneUpdate(sql);
                result = false;
            }
        }

@ -97,19 +87,25 @@ public class RestfulStatement extends AbstractStatement {
        return result;
    }

    private String getUrl() throws SQLException {
        TimestampFormat timestampFormat = TimestampFormat.valueOf(conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).trim().toUpperCase());
        String url;
        switch (timestampFormat) {
            case TIMESTAMP:
                url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
                break;
            case UTC:
                url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
                break;
            default:
                url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
        }
        return url;
    }

    private ResultSet executeOneQuery(String sql) throws SQLException {
        if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql);

        // row data
        String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
        String timestampFormat = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT);
        if ("TIMESTAMP".equalsIgnoreCase(timestampFormat))
            url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
        if ("UTC".equalsIgnoreCase(timestampFormat))
            url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";

        String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
        String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken());
        JSONObject resultJson = JSON.parseObject(result);
        if (resultJson.getString("status").equals("error")) {
            throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc"));

@ -119,11 +115,8 @@ public class RestfulStatement extends AbstractStatement {
        return resultSet;
    }

    private int executeOneUpdate(String url, String sql) throws SQLException {
        if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql);

        String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
    private int executeOneUpdate(String sql) throws SQLException {
        String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken());
        JSONObject jsonObject = JSON.parseObject(result);
        if (jsonObject.getString("status").equals("error")) {
            throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc"));

@ -134,7 +127,7 @@ public class RestfulStatement extends AbstractStatement {
    }

    private int getAffectedRows(JSONObject jsonObject) throws SQLException {
        // create ... SQLs should return 0, and the Restful result is this:
        // create ... SQLs should return 0, and the Restful result looks like this:
        // {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1}
        JSONArray head = jsonObject.getJSONArray("head");
        if (head.size() != 1 || !"affected_rows".equals(head.getString(0)))

@ -16,8 +16,7 @@ package com.taosdata.jdbc.utils;

public class SqlSyntaxValidator {

    private static final String[] SQL = {"select", "insert", "import", "create", "use", "alter", "drop", "set", "show", "describe", "reset"};
    private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set"};
    private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set", "reset"};
    private static final String[] querySQL = {"select", "show", "describe"};

    private static final String[] databaseUnspecifiedShow = {"databases", "dnodes", "mnodes", "variables"};

@ -38,14 +37,6 @@ public class SqlSyntaxValidator {
        return false;
    }

    public static boolean isValidForExecute(String sql) {
        for (String prefix : SQL) {
            if (sql.trim().toLowerCase().startsWith(prefix))
                return true;
        }
        return false;
    }

    public static boolean isDatabaseUnspecifiedQuery(String sql) {
        for (String databaseObj : databaseUnspecifiedShow) {
            if (sql.trim().toLowerCase().matches("show\\s+" + databaseObj + ".*"))

@ -63,9 +54,5 @@ public class SqlSyntaxValidator {
        return sql.trim().toLowerCase().startsWith("use");
    }

    public static boolean isSelectSql(String sql) {
        return sql.trim().toLowerCase().startsWith("select");
    }

}

@ -69,6 +69,8 @@ public class SubscribeTest {
    @Before
    public void createDatabase() throws SQLException {
        Properties properties = new Properties();
        properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");

@ -1,6 +1,9 @@
package com.taosdata.jdbc.cases;

import com.taosdata.jdbc.TSDBErrorNumbers;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;

import java.sql.*;

@ -12,6 +15,47 @@ public class AuthenticationTest {
    private static final String password = "taos?data";
    private Connection conn;

    @Test
    public void connectWithoutUserByJni() {
        try {
            DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?");
        } catch (SQLException e) {
            Assert.assertEquals(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, e.getErrorCode());
            Assert.assertEquals("ERROR (2319): user is required", e.getMessage());
        }
    }

    @Test
    public void connectWithoutUserByRestful() {
        try {
            DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?");
        } catch (SQLException e) {
            Assert.assertEquals(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, e.getErrorCode());
            Assert.assertEquals("ERROR (2319): user is required", e.getMessage());
        }
    }

    @Test
    public void connectWithoutPasswordByJni() {
        try {
            DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root");
        } catch (SQLException e) {
            Assert.assertEquals(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, e.getErrorCode());
            Assert.assertEquals("ERROR (231a): password is required", e.getMessage());
        }
    }

    @Test
    public void connectWithoutPasswordByRestful() {
        try {
            DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root");
        } catch (SQLException e) {
            Assert.assertEquals(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, e.getErrorCode());
            Assert.assertEquals("ERROR (231a): password is required", e.getMessage());
        }
    }

    @Ignore
    @Test
    public void test() {
        // change password

@ -29,6 +29,8 @@ public class BatchInsertTest {
    public void before() {
        try {
            Properties properties = new Properties();
            properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");

@ -21,6 +21,8 @@ public class ImportTest {
    public static void before() {
        try {
            Properties properties = new Properties();
            properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");

@ -270,6 +270,41 @@ public class InsertSpecialCharacterJniTest {
|
|||
}
|
||||
}
|
||||
|
||||
@Ignore
|
||||
@Test
|
||||
public void testSingleQuotaEscape() throws SQLException {
|
||||
final long now = System.currentTimeMillis();
|
||||
final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
|
||||
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||
// t1
|
||||
pstmt.setInt(1, 1);
|
||||
pstmt.setString(2, tbname2);
|
||||
pstmt.setString(3, special_character_str_5);
|
||||
pstmt.setTimestamp(4, new Timestamp(now));
|
||||
pstmt.setBytes(5, special_character_str_5.getBytes());
|
||||
// t2
|
||||
pstmt.setInt(7, 2);
|
||||
pstmt.setString(8, special_character_str_5);
|
||||
pstmt.setTimestamp(9, new Timestamp(now));
|
||||
pstmt.setString(11, special_character_str_5);
|
||||
|
||||
int ret = pstmt.executeUpdate();
|
||||
Assert.assertEquals(2, ret);
|
||||
}
|
||||
|
||||
String query = "select * from ?.t? where ? < ? and ts >= ? and f1 is not null";
|
||||
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
|
||||
pstmt.setString(1, dbName);
|
||||
pstmt.setInt(2, 1);
|
||||
pstmt.setString(3, "ts");
|
||||
pstmt.setTimestamp(4, new Timestamp(System.currentTimeMillis()));
|
||||
pstmt.setTimestamp(5, new Timestamp(0));
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
Assert.assertNotNull(rs);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCase10() throws SQLException {
|
||||
final long now = System.currentTimeMillis();
|
||||
|
@ -293,13 +328,12 @@ public class InsertSpecialCharacterJniTest {
            Assert.assertEquals(2, ret);
        }
        //query t1
        String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
        String query = "select * from ?.t? where ts < ? and ts >= ? and f1 is not null";
        try (PreparedStatement pstmt = conn.prepareStatement(query)) {
            pstmt.setString(1, dbName);
            pstmt.setInt(2, 1);
            pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
            pstmt.setTimestamp(4, new Timestamp(0));
            pstmt.setString(5, "f1");

            ResultSet rs = pstmt.executeQuery();
            rs.next();

@ -311,12 +345,11 @@ public class InsertSpecialCharacterJniTest {
            Assert.assertNull(f2);
        }
        // query t2
        query = "select * from t? where ts < ? and ts >= ? and ? is not null";
        query = "select * from t? where ts < ? and ts >= ? and f2 is not null";
        try (PreparedStatement pstmt = conn.prepareStatement(query)) {
            pstmt.setInt(1, 2);
            pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
            pstmt.setTimestamp(3, new Timestamp(0));
            pstmt.setString(4, "f2");

            ResultSet rs = pstmt.executeQuery();
            rs.next();

@ -293,13 +293,12 @@ public class InsertSpecialCharacterRestfulTest {
            Assert.assertEquals(2, ret);
        }
        //query t1
        String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
        String query = "select * from ?.t? where ts < ? and ts >= ? and f1 is not null";
        try (PreparedStatement pstmt = conn.prepareStatement(query)) {
            pstmt.setString(1, dbName);
            pstmt.setInt(2, 1);
            pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
            pstmt.setTimestamp(4, new Timestamp(0));
            pstmt.setString(5, "f1");

            ResultSet rs = pstmt.executeQuery();
            rs.next();

@ -311,12 +310,11 @@ public class InsertSpecialCharacterRestfulTest {
            Assert.assertNull(f2);
        }
        // query t2
        query = "select * from t? where ts < ? and ts >= ? and ? is not null";
        query = "select * from t? where ts < ? and ts >= ? and f2 is not null";
        try (PreparedStatement pstmt = conn.prepareStatement(query)) {
            pstmt.setInt(1, 2);
            pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
            pstmt.setTimestamp(3, new Timestamp(0));
            pstmt.setString(4, "f2");

            ResultSet rs = pstmt.executeQuery();
            rs.next();

@ -22,6 +22,8 @@ public class QueryDataTest {
    public void createDatabase() {
        try {
            Properties properties = new Properties();
            properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");

@ -1,51 +1,49 @@
package com.taosdata.jdbc.cases;

import com.taosdata.jdbc.TSDBDriver;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.sql.*;
import java.util.Properties;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;

public class ResetQueryCacheTest {

    static Connection connection;
    static Statement statement;
    static String host = "127.0.0.1";
    @Test
    public void jni() throws SQLException {
        // given
        Connection connection = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:0/?user=root&password=taosdata&timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8");
        Statement statement = connection.createStatement();

    @Before
    public void init() {
        try {
            Properties properties = new Properties();
            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
            connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
            statement = connection.createStatement();
        } catch (SQLException e) {
            return;
        }
        // when
        boolean execute = statement.execute("reset query cache");

        // then
        assertFalse(execute);
        assertEquals(0, statement.getUpdateCount());

        statement.close();
        connection.close();
    }

    @Test
    public void testResetQueryCache() throws SQLException {
        String resetSql = "reset query cache";
        statement.execute(resetSql);
    }
    public void restful() throws SQLException {
        // given
        Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata&timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8");
        Statement statement = connection.createStatement();

    @After
    public void close() {
        try {
            if (statement != null)
                statement.close();
            if (connection != null)
                connection.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
        // when
        boolean execute = statement.execute("reset query cache");

        // then
        assertFalse(execute);
        assertEquals(0, statement.getUpdateCount());

        statement.close();
        connection.close();
    }

}

@ -20,6 +20,8 @@ public class SelectTest {
    public void createDatabaseAndTable() {
        try {
            Properties properties = new Properties();
            properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");

@ -24,6 +24,8 @@ public class StableTest {
    public static void createDatabase() {
        try {
            Properties properties = new Properties();
            properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");

@ -0,0 +1,570 @@
package com.taosdata.jdbc.cases;


import com.taosdata.jdbc.TSDBDriver;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.After;
import org.junit.Test;

import java.sql.*;
import java.util.Properties;
import java.text.Format;
import java.text.SimpleDateFormat;

public class TimestampPrecisonInNanoRestTest {

    private static final String host = "127.0.0.1";
    private static final String ns_timestamp_db = "ns_precision_test";
    private static final long timestamp1 = System.currentTimeMillis();
    private static final long timestamp2 = timestamp1 * 1000_000 + 123455;
    private static final long timestamp3 = (timestamp1 + 10) * 1000_000 + 123456;
    private static final Format format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
    private static final String date1 = format.format(new Date(timestamp1));
    private static final String date4 = format.format(new Date(timestamp1 + 10L));
    private static final String date2 = date1 + "123455";
    private static final String date3 = date4 + "123456";


    private static Connection conn;

    @BeforeClass
    public static void beforeClass() throws SQLException {
        Properties properties = new Properties();
        properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");

        String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
        conn = DriverManager.getConnection(url, properties);

        Statement stmt = conn.createStatement();
        stmt.execute("drop database if exists " + ns_timestamp_db);
        stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'");
        stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)");
        stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)");
        stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)");
        stmt.close();
    }

    @After
    public void afterEach() throws SQLException {
        Statement stmt = conn.createStatement();
        stmt.execute("drop database if exists " + ns_timestamp_db);
        stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'");
        stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)");
        stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)");
        stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)");
        stmt.close();
    }

    @AfterClass
    public static void afterClass() {
        try {
            if (conn != null)
                conn.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    private void checkCount(long count, ResultSet rs) throws SQLException {
        if (count == 0) {
            Assert.fail();
        }
        rs.next();
        long test_count = rs.getLong(1);
        Assert.assertEquals(count, test_count);
    }

    private void checkTime(long ts, ResultSet rs) throws SQLException {
        rs.next();
        int nanos = rs.getTimestamp(1).getNanos();
        Assert.assertEquals(ts % 1000_000_000l, nanos);
        long test_ts = rs.getLong(1);
        Assert.assertEquals(ts / 1000_000l, test_ts);
    }

    @Test
    public void canInsertTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'");
            checkTime(timestamp3, rs);
            rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'");
            checkTime(timestamp3, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canImportTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date1 + "123123\", \"" + date1 + "123123\", 127)");
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'");
            checkTime(timestamp1 * 1000_000l + 123123l, rs);
            rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'");
            checkTime(timestamp1 * 1000_000l + 123123l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canInsertTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'");
            checkTime(timestamp2, rs);
            rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'");
            checkTime(timestamp2, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canImportTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            long timestamp4 = timestamp1 * 1000_000 + 123123;
            stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp4 + ", " + timestamp4 + ", 127)");
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'");
            checkTime(timestamp4, rs);
            rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'");
            checkTime(timestamp4, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canSelectLastRowFromWeatherForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select last(ts) from " + ns_timestamp_db + ".weather");
            checkTime(timestamp3, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canSelectLastRowFromWeatherForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select last(ts2) from " + ns_timestamp_db + ".weather");
            checkTime(timestamp3, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canSelectFirstRowFromWeatherForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select first(ts) from " + ns_timestamp_db + ".weather");
            checkTime(timestamp2, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canSelectFirstRowFromWeatherForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select first(ts2) from " + ns_timestamp_db + ".weather");
            checkTime(timestamp2, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLargerThanInDateTypeForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'");
            checkTime(timestamp3, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLargerThanInDateTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'");
            checkTime(timestamp3, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLargerThanInNumberTypeForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'");
            checkTime(timestamp3, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLargerThanInNumberTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'");
            checkTime(timestamp3, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLargerThanOrEqualToInDateTypeForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "'");
            checkCount(2l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLargerThanOrEqualToInDateTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "'");
            checkCount(2l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLargerThanOrEqualToInNumberTypeForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + timestamp2 + "'");
            checkCount(2l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLargerThanOrEqualToInNumberTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + timestamp2 + "'");
            checkCount(2l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLessThanInDateTypeForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'");
            checkTime(timestamp2, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLessThanInDateTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'");
            checkTime(timestamp2, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLessThanInNumberTypeForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'");
            checkTime(timestamp2, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLessThanInNumberTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'");
            checkTime(timestamp2, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLessThanOrEqualToInDateTypeForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "'");
            checkCount(2l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLessThanOrEqualToInDateTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "'");
            checkCount(2l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLessThanOrEqualToInNumberTypeForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "'");
            checkCount(2l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLessThanOrEqualToInNumberTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "'");
            checkCount(2l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryBetweenAndInDateTypeForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'");
            checkTime(timestamp3, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryBetweenAndInDateTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'");
            checkTime(timestamp3, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryBetweenAndInNumberTypeForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'");
            checkTime(timestamp3, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryBetweenAndInNumberTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'");
            checkTime(timestamp3, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryNotEqualToInDateTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'");
            checkTime(timestamp2, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryNotEqualToInNumberTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'");
            checkTime(timestamp2, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryNotEqualInDateTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'");
            checkTime(timestamp2, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryNotEqualInNumberTypeForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'");
            checkTime(timestamp2, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(now + 1000b, now - 1000b, 128)");
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather");
            checkCount(3l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canIntervalAndSlidingAcceptNsUnitForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)");
            rs.next();
            long sum = rs.getLong(2);
            Assert.assertEquals(127l, sum);
            rs.next();
            sum = rs.getLong(2);
            Assert.assertEquals(128l, sum);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canIntervalAndSlidingAcceptNsUnitForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)");
            rs.next();
            long sum = rs.getLong(2);
            Assert.assertEquals(127l, sum);
            rs.next();
            sum = rs.getLong(2);
            Assert.assertEquals(128l, sum);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testDataOutOfRangeExceptionForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(123456789012345678, 1234567890123456789, 127)");
        } catch (SQLException e) {
            Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage());
        }
    }

    @Test
    public void testDataOutOfRangeExceptionForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(1234567890123456789, 123456789012345678, 127)");
        } catch (SQLException e) {
            Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage());
        }
    }

    @Test
    public void willAutomaticallyFillToNsUnitWithZerosForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)");
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "000000'");
            checkCount(1l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void willAutomaticallyFillToNsUnitWithZerosForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)");
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "000000'");
            checkCount(1l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void willAutomaticallyDropDigitExceedNsDigitNumberForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)");
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "999999'");
            checkCount(1l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void willAutomaticallyDropDigitExceedNsDigitNumberForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)");
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "999999'");
            checkCount(1l, rs);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}

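The tests above lean on one piece of unit arithmetic: a millisecond epoch times 1,000,000 plus a sub-microsecond remainder gives the nanosecond epoch, and `checkTime` reverses that split. A minimal sketch of that arithmetic (Python, purely illustrative; the constant is made up in the style of `timestamp1`/`timestamp2`):

```python
# Illustrative only, not part of the commit.
ms = 1626861392589            # a millisecond epoch, like timestamp1
ns = ms * 1000000 + 123455    # the matching nanosecond epoch, like timestamp2

# checkTime() splits a nanosecond epoch the same way:
assert ns % 1000000000 == 589123455   # fractional second in ns, what getNanos() returns
assert ns // 1000000 == ms            # whole milliseconds, what getLong() returns
```
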
@ -1,21 +0,0 @@
package com.taosdata.jdbc.utils;

import org.junit.Assert;
import org.junit.Test;

public class SqlSyntaxValidatorTest {

    @Test
    public void isSelectSQL() {
        Assert.assertTrue(SqlSyntaxValidator.isSelectSql("select * from test.weather"));
        Assert.assertTrue(SqlSyntaxValidator.isSelectSql(" select * from test.weather"));
        Assert.assertTrue(SqlSyntaxValidator.isSelectSql(" select * from test.weather "));
        Assert.assertFalse(SqlSyntaxValidator.isSelectSql("insert into test.weather values(now, 1.1, 2)"));
    }

    @Test
    public void isUseSQL() {
        Assert.assertTrue(SqlSyntaxValidator.isUseSql("use database test"));
    }

}

@ -47,7 +47,8 @@ class TaosTimestamp extends Date {
      super(Math.floor(date / 1000));
      this.precisionExtras = date % 1000;
    } else if (precision === 2) {
      super(parseInt(date / 1000000));
      // use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000, which is not expected
      super(parseInt(BigInt(date) / 1000000n));
      // use BigInt to fix: 1625801548423914405 % 1000000 = 914496, which is not expected (expected 914405)
      this.precisionExtras = parseInt(BigInt(date) % 1000000n);
    } else {

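The rounding these comments describe is inherent to IEEE-754 doubles (JavaScript's only numeric type before `BigInt`), so it can be reproduced in any language whose division routes through doubles. A small check, illustrative and not part of the commit:

```python
ns = 1623254400999999999  # the nanosecond value from the comment above

print(int(ns / 1000000))  # 1623254401000 -- float division rounds the 19-digit value, as in JS
print(ns // 1000000)      # 1623254400999 -- exact integer division, the BigInt equivalent
```
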
@ -1,6 +1,6 @@
{
  "name": "td2.0-connector",
  "version": "2.0.9",
  "version": "2.0.10",
  "description": "A Node.js connector for TDengine.",
  "main": "tdengine.js",
  "directories": {

@ -1,3 +1,18 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package main

import (

@ -1,6 +1,7 @@
# TDengine Connector for Python

[TDengine] connector for Python enables python programs to access TDengine, using an API which is compliant with the Python DB API 2.0 (PEP-249). It uses TDengine C client library for client server communications.
[TDengine](https://github.com/taosdata/TDengine) connector for Python enables python programs to access TDengine,
using an API which is compliant with the Python DB API 2.0 (PEP-249). It uses TDengine C client library for client server communications.

## Install

@ -11,8 +12,417 @@ pip install ./TDengine/src/connector/python

## Source Code

[TDengine] connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python).
[TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python).

## License - AGPL
## Examples

### Query with PEP-249 API

```python
import taos

conn = taos.connect()
cursor = conn.cursor()

cursor.execute("show databases")
results = cursor.fetchall()
for row in results:
    print(row)
cursor.close()
conn.close()
```

### Query with objective API

```python
import taos

conn = taos.connect()
conn.exec("create database if not exists pytest")

result = conn.query("show databases")
num_of_fields = result.field_count
for field in result.fields:
    print(field)
for row in result:
    print(row)
result.close()
conn.exec("drop database pytest")
conn.close()
```

### Query with async API

```python
from taos import *
from ctypes import *
import time

def fetch_callback(p_param, p_result, num_of_rows):
    print("fetched ", num_of_rows, "rows")
    p = cast(p_param, POINTER(Counter))
    result = TaosResult(p_result)

    if num_of_rows == 0:
        print("fetching completed")
        p.contents.done = True
        result.close()
        return
    if num_of_rows < 0:
        p.contents.done = True
        result.check_error(num_of_rows)
        result.close()
        return None

    for row in result.rows_iter(num_of_rows):
        # print(row)
        None
    p.contents.count += result.row_count
    result.fetch_rows_a(fetch_callback, p_param)


def query_callback(p_param, p_result, code):
    # type: (c_void_p, c_void_p, c_int) -> None
    if p_result == None:
        return
    result = TaosResult(p_result)
    if code == 0:
        result.fetch_rows_a(fetch_callback, p_param)
    result.check_error(code)


class Counter(Structure):
    _fields_ = [("count", c_int), ("done", c_bool)]

    def __str__(self):
        return "{ count: %d, done: %s }" % (self.count, self.done)


def test_query(conn):
    # type: (TaosConnection) -> None
    counter = Counter(count=0)
    conn.query_a("select * from log.log", query_callback, byref(counter))

    while not counter.done:
        print("wait query callback")
        time.sleep(1)
    print(counter)
    conn.close()


if __name__ == "__main__":
    test_query(connect())
```

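In this flow, `query_a` only registers `query_callback`; when the query completes, that callback hands the result to `fetch_rows_a`, and `fetch_callback` keeps re-arming itself until `num_of_rows` reaches zero (or goes negative on error) and the result is closed. The `Counter` threaded through `p_param` is how the polling loop in `test_query` learns that the chain has finished.
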
### Statement API - Bind row after row

```python
from taos import *

conn = connect()

dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)

conn.exec(
    "create table if not exists log(ts timestamp, bo bool, nil tinyint, \
    ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
    su smallint unsigned, iu int unsigned, bu bigint unsigned, \
    ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)

stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")

params = new_bind_params(16)
params[0].timestamp(1626861392589)
params[1].bool(True)
params[2].null()
params[3].tinyint(2)
params[4].smallint(3)
params[5].int(4)
params[6].bigint(5)
params[7].tinyint_unsigned(6)
params[8].smallint_unsigned(7)
params[9].int_unsigned(8)
params[10].bigint_unsigned(9)
params[11].float(10.1)
params[12].double(10.11)
params[13].binary("hello")
params[14].nchar("stmt")
params[15].timestamp(1626861392589)
stmt.bind_param(params)

params[0].timestamp(1626861392590)
params[15].null()
stmt.bind_param(params)
stmt.execute()


result = stmt.use_result()
assert result.affected_rows == 2
result.close()

result = conn.query("select * from log")

for row in result:
    print(row)
result.close()
stmt.close()
conn.close()
```

### Statement API - Bind multi rows

```python
from taos import *

conn = connect()

dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)

conn.exec(
    "create table if not exists log(ts timestamp, bo bool, nil tinyint, \
    ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
    su smallint unsigned, iu int unsigned, bu bigint unsigned, \
    ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)

stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")

params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None])  # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
stmt.bind_param_batch(params)
stmt.execute()


result = stmt.use_result()
assert result.affected_rows == 3
result.close()

result = conn.query("select * from log")
for row in result:
    print(row)
result.close()
stmt.close()
conn.close()
```

### Statement API - Subscribe

```python
import taos
from random import random

conn = taos.connect()
dbname = "pytest_taos_subscribe_callback"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)")
for i in range(10):
    conn.exec("insert into log values(now, %d)" % i)

sub = conn.subscribe(True, "test", "select * from log", 1000)
print("# consume from begin")
for ts, n in sub.consume():
    print(ts, n)

print("# consume new data")
for i in range(5):
    conn.exec("insert into log values(now, %d)(now+1s, %d)" % (i, i))
    result = sub.consume()
    for ts, n in result:
        print(ts, n)

print("# consume with a stop condition")
for i in range(10):
    conn.exec("insert into log values(now, %d)" % int(random() * 10))
    result = sub.consume()
    try:
        ts, n = next(result)
        print(ts, n)
        if n > 5:
            result.stop_query()
            print("## stopped")
            break
    except StopIteration:
        continue

sub.close()

conn.exec("drop database if exists %s" % dbname)
conn.close()
```

### Statement API - Subscribe asynchronously with callback

```python
from taos import *
from ctypes import *

import time


def subscribe_callback(p_sub, p_result, p_param, errno):
    # type: (c_void_p, c_void_p, c_void_p, c_int) -> None
    print("# fetch in callback")
    result = TaosResult(p_result)
    result.check_error(errno)
    for row in result.rows_iter():
        ts, n = row()
        print(ts, n)


def test_subscribe_callback(conn):
    # type: (TaosConnection) -> None
    dbname = "pytest_taos_subscribe_callback"
    try:
        conn.exec("drop database if exists %s" % dbname)
        conn.exec("create database if not exists %s" % dbname)
        conn.select_db(dbname)
        conn.exec("create table if not exists log(ts timestamp, n int)")

        print("# subscribe with callback")
        sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)

        for i in range(10):
            conn.exec("insert into log values(now, %d)" % i)
            time.sleep(0.7)
        sub.close()

        conn.exec("drop database if exists %s" % dbname)
        conn.close()
    except Exception as err:
        conn.exec("drop database if exists %s" % dbname)
        conn.close()
        raise err


if __name__ == "__main__":
    test_subscribe_callback(connect())
```

### Statement API - Stream

```python
from taos import *
from ctypes import *
import time

def stream_callback(p_param, p_result, p_row):
    # type: (c_void_p, c_void_p, c_void_p) -> None

    if p_result == None or p_row == None:
        return
    result = TaosResult(p_result)
    row = TaosRow(result, p_row)
    try:
        ts, count = row()
        p = cast(p_param, POINTER(Counter))
        p.contents.count += count
        print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count))

    except Exception as err:
        print(err)
        raise err


class Counter(Structure):
    _fields_ = [
        ("count", c_int),
    ]

    def __str__(self):
        return "%d" % self.count


def test_stream(conn):
    # type: (TaosConnection) -> None
    dbname = "pytest_taos_stream"
    try:
        conn.exec("drop database if exists %s" % dbname)
        conn.exec("create database if not exists %s" % dbname)
        conn.select_db(dbname)
        conn.exec("create table if not exists log(ts timestamp, n int)")

        result = conn.query("select count(*) from log interval(5s)")
        assert result.field_count == 2
        counter = Counter()
        counter.count = 0
        stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))

        for _ in range(0, 20):
            conn.exec("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
            time.sleep(2)
        stream.close()
        conn.exec("drop database if exists %s" % dbname)
        conn.close()
    except Exception as err:
        conn.exec("drop database if exists %s" % dbname)
        conn.close()
        raise err


if __name__ == "__main__":
    test_stream(connect())
```

### Insert with line protocol

```python
import taos

conn = taos.connect()
dbname = "pytest_line"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)

lines = [
    'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000ns',
    'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns',
    'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)
print("inserted")

lines = [
    'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)

result = conn.query("show tables")
for row in result:
    print(row)
result.close()


conn.exec("drop database if exists %s" % dbname)
conn.close()
```

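Each line in these examples follows the InfluxDB-style schema `measurement,tag1=v1,... field1=v1,... timestamp`: values carry type suffixes such as `i64`, `f64`, and `u64`, `L"..."` marks an nchar string while plain `"..."` is binary, and the trailing `ns` gives the timestamp unit. A hypothetical helper (not part of the connector API) that assembles one such line:

```python
# Hypothetical helper, not part of the connector API: builds one line in the
# same format as the examples above.
def make_line(measurement, tags, fields, ts_ns):
    tag_str = ",".join("%s=%s" % (k, v) for k, v in tags)
    field_str = ",".join("%s=%s" % (k, v) for k, v in fields)
    return "%s,%s %s %dns" % (measurement, tag_str, field_str, ts_ns)

line = make_line("st", [("t1", "3i64"), ("t2", "4f64"), ("t3", '"t3"')],
                 [("c1", "3i64"), ("c3", 'L"pass"'), ("c2", "false"), ("c4", "4f64")],
                 1626006833639000000)
# -> 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000ns'
```
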
## License - AGPL-3.0

The license is the same as [TDengine](https://github.com/taosdata/TDengine).

@ -0,0 +1,50 @@
# encoding:UTF-8
from taos import *

conn = connect()

dbname = "pytest_taos_stmt_multi"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)

conn.execute(
    "create table if not exists log(ts timestamp, bo bool, nil tinyint, \
    ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
    su smallint unsigned, iu int unsigned, bu bigint unsigned, \
    ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)

stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")

params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None])  # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
stmt.bind_param_batch(params)
stmt.execute()


result = stmt.use_result()
assert result.affected_rows == 3
result.close()

result = conn.query("select * from log")
for row in result:
    print(row)
result.close()
stmt.close()
conn.close()

@ -0,0 +1,57 @@
from taos import *

conn = connect()

dbname = "pytest_taos_stmt"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)

conn.execute(
    "create table if not exists log(ts timestamp, bo bool, nil tinyint, \
    ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
    su smallint unsigned, iu int unsigned, bu bigint unsigned, \
    ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)

stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")

params = new_bind_params(16)
params[0].timestamp(1626861392589)
params[1].bool(True)
params[2].null()
params[3].tinyint(2)
params[4].smallint(3)
params[5].int(4)
params[6].bigint(5)
params[7].tinyint_unsigned(6)
params[8].smallint_unsigned(7)
params[9].int_unsigned(8)
params[10].bigint_unsigned(9)
params[11].float(10.1)
params[12].double(10.11)
params[13].binary("hello")
params[14].nchar("stmt")
params[15].timestamp(1626861392589)
stmt.bind_param(params)

params[0].timestamp(1626861392590)
params[15].null()
stmt.bind_param(params)
stmt.execute()


result = stmt.use_result()
assert result.affected_rows == 2
# No need to close explicitly, but it is fine if you do
# result.close()

result = conn.query("select * from log")

for row in result:
    print(row)

# No need to close explicitly, but it is fine if you do
# result.close()
# stmt.close()
# conn.close()

@ -0,0 +1,22 @@
import taos

conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)

lines = [
    'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000ns',
]
conn.insert_lines(lines)
print("inserted")

conn.insert_lines(lines)

result = conn.query("show tables")
for row in result:
    print(row)


conn.execute("drop database if exists %s" % dbname)

@ -0,0 +1,9 @@
import taos

conn = taos.connect()
cursor = conn.cursor()

cursor.execute("show databases")
results = cursor.fetchall()
for row in results:
    print(row)

@ -0,0 +1,62 @@
from taos import *
from ctypes import *
import time

def fetch_callback(p_param, p_result, num_of_rows):
    print("fetched ", num_of_rows, "rows")
    p = cast(p_param, POINTER(Counter))
    result = TaosResult(p_result)

    if num_of_rows == 0:
        print("fetching completed")
        p.contents.done = True
        # the result must be closed explicitly once fetching completes, or an error occurs
        result.close()
        return
    if num_of_rows < 0:
        p.contents.done = True
        result.check_error(num_of_rows)
        result.close()
        return None

    for row in result.rows_iter(num_of_rows):
        # print(row)
        None
    p.contents.count += result.row_count
    result.fetch_rows_a(fetch_callback, p_param)


def query_callback(p_param, p_result, code):
    # type: (c_void_p, c_void_p, c_int) -> None
    if p_result == None:
        return
    result = TaosResult(p_result)
    if code == 0:
        result.fetch_rows_a(fetch_callback, p_param)
    result.check_error(code)
    # explicitly close the result when the query failed
    result.close()


class Counter(Structure):
    _fields_ = [("count", c_int), ("done", c_bool)]

    def __str__(self):
        return "{ count: %d, done: %s }" % (self.count, self.done)


def test_query(conn):
    # type: (TaosConnection) -> None
    counter = Counter(count=0)
    conn.query_a("select * from log.log", query_callback, byref(counter))

    while not counter.done:
        print("wait query callback")
        time.sleep(1)
    print(counter)
    # conn.close()


if __name__ == "__main__":
    test_query(connect())

@ -0,0 +1,12 @@
import taos

conn = taos.connect()
conn.execute("create database if not exists pytest")

result = conn.query("show databases")
num_of_fields = result.field_count
for field in result.fields:
    print(field)
for row in result:
    print(row)
conn.execute("drop database pytest")

@ -0,0 +1,43 @@
from taos import *
from ctypes import *

import time


def subscribe_callback(p_sub, p_result, p_param, errno):
    # type: (c_void_p, c_void_p, c_void_p, c_int) -> None
    print("# fetch in callback")
    result = TaosResult(p_result)
    result.check_error(errno)
    for row in result.rows_iter():
        ts, n = row()
        print(ts, n)


def test_subscribe_callback(conn):
    # type: (TaosConnection) -> None
    dbname = "pytest_taos_subscribe_callback"
    try:
        conn.execute("drop database if exists %s" % dbname)
        conn.execute("create database if not exists %s" % dbname)
        conn.select_db(dbname)
        conn.execute("create table if not exists log(ts timestamp, n int)")

        print("# subscribe with callback")
        sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)

        for i in range(10):
            conn.execute("insert into log values(now, %d)" % i)
            time.sleep(0.7)
        # sub.close()

        conn.execute("drop database if exists %s" % dbname)
        # conn.close()
    except Exception as err:
        conn.execute("drop database if exists %s" % dbname)
        # conn.close()
        raise err


if __name__ == "__main__":
    test_subscribe_callback(connect())

@ -0,0 +1,53 @@
import taos
import random

conn = taos.connect()
dbname = "pytest_taos_subscribe"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute("create table if not exists log(ts timestamp, n int)")
for i in range(10):
    conn.execute("insert into log values(now, %d)" % i)

sub = conn.subscribe(False, "test", "select * from log", 1000)
print("# consume from begin")
for ts, n in sub.consume():
    print(ts, n)

print("# consume new data")
for i in range(5):
    conn.execute("insert into log values(now, %d)(now+1s, %d)" % (i, i))
    result = sub.consume()
    for ts, n in result:
        print(ts, n)

sub.close(True)
print("# keep progress consume")
sub = conn.subscribe(False, "test", "select * from log", 1000)
result = sub.consume()
rows = result.fetch_all()
# consuming from the latest subscription point needs root privilege (for /var/lib/taos)
assert result.row_count == 0
print("## consumed ", len(rows), "rows")

print("# consume with a stop condition")
for i in range(10):
    conn.execute("insert into log values(now, %d)" % random.randint(0, 10))
    result = sub.consume()
    try:
        ts, n = next(result)
        print(ts, n)
        if n > 5:
            result.stop_query()
            print("## stopped")
            break
    except StopIteration:
        continue

sub.close()

# sub.close()

conn.execute("drop database if exists %s" % dbname)
# conn.close()

@ -0,0 +1,27 @@
[tool.poetry]
name = "taos"
version = "2.1.0"
description = "TDengine connector for python"
authors = ["Taosdata Inc. <support@taosdata.com>"]
license = "AGPL-3.0"
readme = "README.md"

[tool.poetry.dependencies]
python = "^2.7 || ^3.4"
typing = "*"

[tool.poetry.dev-dependencies]
pytest = [
    { version = "^4.6", python = "^2.7" },
    { version = "^6.2", python = "^3.7" }
]
pdoc = { version = "^7.1.1", python = "^3.7" }
mypy = { version = "^0.910", python = "^3.6" }
black = { version = "^21.7b0", python = "^3.6" }

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

[tool.black]
line-length = 119
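For context, a typical Poetry workflow against this manifest might look like the sketch below; the commands are standard Poetry, while the wheel filename is illustrative only.

```sh
# Hypothetical workflow; assumes Poetry >= 1.0 is installed.
poetry install   # dev dependencies resolve per interpreter: pytest ^4.6 on 2.7, ^6.2 on 3.7+
poetry build     # poetry-core produces an sdist and a wheel in dist/
pip install dist/taos-2.1.0-*.whl   # illustrative filename
```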
@ -5,7 +5,7 @@ with open("README.md", "r") as fh:

setuptools.setup(
    name="taos",
    version="2.0.11",
    version="2.1.0",
    author="Taosdata Inc.",
    author_email="support@taosdata.com",
    description="TDengine python client package",
@ -1,20 +1,478 @@
# encoding:UTF-8
"""
# TDengine Connector for Python

from .connection import TDengineConnection
from .cursor import TDengineCursor
[TDengine](https://github.com/taosdata/TDengine) connector for Python enables Python programs to access TDengine through an API compliant with the Python DB API 2.0 (PEP-249). It uses the TDengine C client library for client-server communication.

# For some reason, the following is needed for VS Code (through PyLance) to
## Install

```sh
git clone --depth 1 https://github.com/taosdata/TDengine.git
pip install ./TDengine/src/connector/python
```

## Source Code

[TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python).

## Examples

### Query with PEP-249 API

```python
import taos

conn = taos.connect()
cursor = conn.cursor()

cursor.execute("show databases")
results = cursor.fetchall()
for row in results:
    print(row)
cursor.close()
conn.close()
```

### Query with objective API

```python
import taos

conn = taos.connect()
conn.exec("create database if not exists pytest")

result = conn.query("show databases")
num_of_fields = result.field_count
for field in result.fields:
    print(field)
for row in result:
    print(row)
result.close()
conn.exec("drop database pytest")
conn.close()
```
### Query with async API

```python
from taos import *
from ctypes import *
import time


def fetch_callback(p_param, p_result, num_of_rows):
    print("fetched ", num_of_rows, "rows")
    p = cast(p_param, POINTER(Counter))
    result = TaosResult(p_result)

    if num_of_rows == 0:
        print("fetching completed")
        p.contents.done = True
        result.close()
        return
    if num_of_rows < 0:
        p.contents.done = True
        result.check_error(num_of_rows)
        result.close()
        return None

    for row in result.rows_iter(num_of_rows):
        # print(row)
        pass
    p.contents.count += result.row_count
    result.fetch_rows_a(fetch_callback, p_param)


def query_callback(p_param, p_result, code):
    # type: (c_void_p, c_void_p, c_int) -> None
    if p_result is None:
        return
    result = TaosResult(p_result)
    if code == 0:
        result.fetch_rows_a(fetch_callback, p_param)
    result.check_error(code)


class Counter(Structure):
    _fields_ = [("count", c_int), ("done", c_bool)]

    def __str__(self):
        return "{ count: %d, done: %s }" % (self.count, self.done)


def test_query(conn):
    # type: (TaosConnection) -> None
    counter = Counter(count=0)
    conn.query_a("select * from log.log", query_callback, byref(counter))

    while not counter.done:
        print("wait query callback")
        time.sleep(1)
    print(counter)
    conn.close()


if __name__ == "__main__":
    test_query(connect())
```
### Statement API - Bind row after row

```python
from taos import *

conn = connect()

dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)

conn.exec(
    "create table if not exists log(ts timestamp, bo bool, nil tinyint, \\
    ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \\
    su smallint unsigned, iu int unsigned, bu bigint unsigned, \\
    ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)

stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")

params = new_bind_params(16)
params[0].timestamp(1626861392589)
params[1].bool(True)
params[2].null()
params[3].tinyint(2)
params[4].smallint(3)
params[5].int(4)
params[6].bigint(5)
params[7].tinyint_unsigned(6)
params[8].smallint_unsigned(7)
params[9].int_unsigned(8)
params[10].bigint_unsigned(9)
params[11].float(10.1)
params[12].double(10.11)
params[13].binary("hello")
params[14].nchar("stmt")
params[15].timestamp(1626861392589)
stmt.bind_param(params)

params[0].timestamp(1626861392590)
params[15].null()
stmt.bind_param(params)
stmt.execute()

result = stmt.use_result()
assert result.affected_rows == 2
result.close()

result = conn.query("select * from log")

for row in result:
    print(row)
result.close()
stmt.close()
conn.close()
```
### Statement API - Bind multi rows

```python
from taos import *

conn = connect()

dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)

conn.exec(
    "create table if not exists log(ts timestamp, bo bool, nil tinyint, \\
    ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \\
    su smallint unsigned, iu int unsigned, bu bigint unsigned, \\
    ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)

stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")

params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None])  # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
stmt.bind_param_batch(params)
stmt.execute()

result = stmt.use_result()
assert result.affected_rows == 3
result.close()

result = conn.query("select * from log")
for row in result:
    print(row)
result.close()
stmt.close()
conn.close()
```
### Statement API - Subscribe

```python
import taos
from random import random

conn = taos.connect()
dbname = "pytest_taos_subscribe_callback"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)")
for i in range(10):
    conn.exec("insert into log values(now, %d)" % i)

sub = conn.subscribe(True, "test", "select * from log", 1000)
print("# consume from begin")
for ts, n in sub.consume():
    print(ts, n)

print("# consume new data")
for i in range(5):
    conn.exec("insert into log values(now, %d)(now+1s, %d)" % (i, i))
    result = sub.consume()
    for ts, n in result:
        print(ts, n)

print("# consume with a stop condition")
for i in range(10):
    conn.exec("insert into log values(now, %d)" % int(random() * 10))
    result = sub.consume()
    try:
        ts, n = next(result)
        print(ts, n)
        if n > 5:
            result.stop_query()
            print("## stopped")
            break
    except StopIteration:
        continue

sub.close()

conn.exec("drop database if exists %s" % dbname)
conn.close()
```
### Statement API - Subscribe asynchronously with callback

```python
from taos import *
from ctypes import *

import time


def subscribe_callback(p_sub, p_result, p_param, errno):
    # type: (c_void_p, c_void_p, c_void_p, c_int) -> None
    print("# fetch in callback")
    result = TaosResult(p_result)
    result.check_error(errno)
    for row in result.rows_iter():
        ts, n = row()
        print(ts, n)


def test_subscribe_callback(conn):
    # type: (TaosConnection) -> None
    dbname = "pytest_taos_subscribe_callback"
    try:
        conn.exec("drop database if exists %s" % dbname)
        conn.exec("create database if not exists %s" % dbname)
        conn.select_db(dbname)
        conn.exec("create table if not exists log(ts timestamp, n int)")

        print("# subscribe with callback")
        sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)

        for i in range(10):
            conn.exec("insert into log values(now, %d)" % i)
            time.sleep(0.7)
        sub.close()

        conn.exec("drop database if exists %s" % dbname)
        conn.close()
    except Exception as err:
        conn.exec("drop database if exists %s" % dbname)
        conn.close()
        raise err


if __name__ == "__main__":
    test_subscribe_callback(connect())
```
### Statement API - Stream

```python
from taos import *
from ctypes import *

import time


def stream_callback(p_param, p_result, p_row):
    # type: (c_void_p, c_void_p, c_void_p) -> None

    if p_result is None or p_row is None:
        return
    result = TaosResult(p_result)
    row = TaosRow(result, p_row)
    try:
        ts, count = row()
        p = cast(p_param, POINTER(Counter))
        p.contents.count += count
        print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count))

    except Exception as err:
        print(err)
        raise err


class Counter(Structure):
    _fields_ = [
        ("count", c_int),
    ]

    def __str__(self):
        return "%d" % self.count


def test_stream(conn):
    # type: (TaosConnection) -> None
    dbname = "pytest_taos_stream"
    try:
        conn.exec("drop database if exists %s" % dbname)
        conn.exec("create database if not exists %s" % dbname)
        conn.select_db(dbname)
        conn.exec("create table if not exists log(ts timestamp, n int)")

        result = conn.query("select count(*) from log interval(5s)")
        assert result.field_count == 2
        counter = Counter()
        counter.count = 0
        stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))

        for _ in range(0, 20):
            conn.exec("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
            time.sleep(2)
        stream.close()
        conn.exec("drop database if exists %s" % dbname)
        conn.close()
    except Exception as err:
        conn.exec("drop database if exists %s" % dbname)
        conn.close()
        raise err


if __name__ == "__main__":
    test_stream(connect())
```
### Insert with line protocol

```python
import taos

conn = taos.connect()
dbname = "pytest_line"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)

lines = [
    'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns',
    'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns',
    'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)
print("inserted")

lines = [
    'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)

result = conn.query("show tables")
for row in result:
    print(row)
result.close()

conn.exec("drop database if exists %s" % dbname)
conn.close()
```

## License - AGPL-3.0

The license is the same as [TDengine](https://github.com/taosdata/TDengine).
"""
from .connection import TaosConnection

# For some reason, the following is needed for VS Code (through PyLance) to
# recognize that "error" is a valid module of the "taos" package.
from .error import ProgrammingError
from .error import *
from .bind import *
from .field import *
from .cursor import *
from .result import *
from .statement import *
from .subscription import *

try:
    import importlib.metadata

    __version__ = importlib.metadata.version("taos")
except:
    pass

# Globals
threadsafety = 0
paramstyle = 'pyformat'

__all__ = ['connection', 'cursor']
paramstyle = "pyformat"

__all__ = [
    # functions
    "connect",
    "new_bind_param",
    "new_bind_params",
    "new_multi_binds",
    "new_multi_bind",
    # objects
    "TaosBind",
    "TaosConnection",
    "TaosCursor",
    "TaosResult",
    "TaosRows",
    "TaosRow",
    "TaosStmt",
    "PrecisionEnum",
]


def connect(*args, **kwargs):
    """ Function to return a TDengine connector object
    # type: (..., ...) -> TaosConnection
    """Function to return a TDengine connector object

    Current supporting keyword parameters:
    @dsn: Data source name as string

@ -25,4 +483,4 @@ def connect(*args, **kwargs):

    @rtype: TDengineConnector
    """
    return TDengineConnection(*args, **kwargs)
    return TaosConnection(*args, **kwargs)
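Put together, a minimal connect-and-query sketch using the keyword parameters the connection recognizes (host, user, password, database, port); all values below are illustrative placeholders:

```python
import taos

# Placeholder credentials; omitted keywords fall back to the client config.
conn = taos.connect(host="localhost", user="root", password="taosdata", database="log", port=6030)
result = conn.query("select * from log.log limit 10")
for row in result:
    print(row)
result.close()
conn.close()
```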
@ -0,0 +1,432 @@
# encoding:UTF-8
import ctypes
from .constants import FieldType
from .error import *
from .precision import *
from datetime import datetime
from ctypes import *
import sys

_datetime_epoch = datetime.utcfromtimestamp(0)


def _is_not_none(obj):
    return obj is not None


class TaosBind(ctypes.Structure):
    _fields_ = [
        ("buffer_type", c_int),
        ("buffer", c_void_p),
        ("buffer_length", c_size_t),
        ("length", POINTER(c_size_t)),
        ("is_null", POINTER(c_int)),
        ("is_unsigned", c_int),
        ("error", POINTER(c_int)),
        ("u", c_int64),
        ("allocated", c_int),
    ]

    def null(self):
        self.buffer_type = FieldType.C_NULL
        self.is_null = pointer(c_int(1))

    def bool(self, value):
        self.buffer_type = FieldType.C_BOOL
        self.buffer = cast(pointer(c_bool(value)), c_void_p)
        self.buffer_length = sizeof(c_bool)

    def tinyint(self, value):
        self.buffer_type = FieldType.C_TINYINT
        self.buffer = cast(pointer(c_int8(value)), c_void_p)
        self.buffer_length = sizeof(c_int8)

    def smallint(self, value):
        self.buffer_type = FieldType.C_SMALLINT
        self.buffer = cast(pointer(c_int16(value)), c_void_p)
        self.buffer_length = sizeof(c_int16)

    def int(self, value):
        self.buffer_type = FieldType.C_INT
        self.buffer = cast(pointer(c_int32(value)), c_void_p)
        self.buffer_length = sizeof(c_int32)

    def bigint(self, value):
        self.buffer_type = FieldType.C_BIGINT
        self.buffer = cast(pointer(c_int64(value)), c_void_p)
        self.buffer_length = sizeof(c_int64)

    def float(self, value):
        self.buffer_type = FieldType.C_FLOAT
        self.buffer = cast(pointer(c_float(value)), c_void_p)
        self.buffer_length = sizeof(c_float)

    def double(self, value):
        self.buffer_type = FieldType.C_DOUBLE
        self.buffer = cast(pointer(c_double(value)), c_void_p)
        self.buffer_length = sizeof(c_double)

    def binary(self, value):
        buffer = None
        length = 0
        if isinstance(value, str):
            bytes = value.encode("utf-8")
            buffer = create_string_buffer(bytes)
            length = len(bytes)
        else:
            buffer = value
            length = len(value)
        self.buffer_type = FieldType.C_BINARY
        self.buffer = cast(buffer, c_void_p)
        self.buffer_length = length
        self.length = pointer(c_size_t(self.buffer_length))

    def timestamp(self, value, precision=PrecisionEnum.Milliseconds):
        if type(value) is datetime:
            if precision == PrecisionEnum.Milliseconds:
                ts = int(round((value - _datetime_epoch).total_seconds() * 1000))
            elif precision == PrecisionEnum.Microseconds:
                ts = int(round((value - _datetime_epoch).total_seconds() * 1000000))
            else:
                raise PrecisionError("datetime does not support nanosecond precision")
        elif type(value) is float:
            if precision == PrecisionEnum.Milliseconds:
                ts = int(round(value * 1000))
            elif precision == PrecisionEnum.Microseconds:
                ts = int(round(value * 1000000))
            else:
                raise PrecisionError("time float does not support nanosecond precision")
        elif isinstance(value, int) and not isinstance(value, bool):
            ts = value
        elif isinstance(value, str):
            value = datetime.fromisoformat(value)
            if precision == PrecisionEnum.Milliseconds:
                ts = int(round((value - _datetime_epoch).total_seconds() * 1000))
            elif precision == PrecisionEnum.Microseconds:
                ts = int(round((value - _datetime_epoch).total_seconds() * 1000000))
            else:
                raise PrecisionError("datetime does not support nanosecond precision")

        self.buffer_type = FieldType.C_TIMESTAMP
        self.buffer = cast(pointer(c_int64(ts)), c_void_p)
        self.buffer_length = sizeof(c_int64)
    def nchar(self, value):
        buffer = None
        length = 0
        if isinstance(value, str):
            bytes = value.encode("utf-8")
            buffer = create_string_buffer(bytes)
            length = len(bytes)
        else:
            buffer = value
            length = len(value)
        self.buffer_type = FieldType.C_NCHAR
        self.buffer = cast(buffer, c_void_p)
        self.buffer_length = length
        self.length = pointer(c_size_t(self.buffer_length))

    def tinyint_unsigned(self, value):
        self.buffer_type = FieldType.C_TINYINT_UNSIGNED
        self.buffer = cast(pointer(c_uint8(value)), c_void_p)
        self.buffer_length = sizeof(c_uint8)

    def smallint_unsigned(self, value):
        self.buffer_type = FieldType.C_SMALLINT_UNSIGNED
        self.buffer = cast(pointer(c_uint16(value)), c_void_p)
        self.buffer_length = sizeof(c_uint16)

    def int_unsigned(self, value):
        self.buffer_type = FieldType.C_INT_UNSIGNED
        self.buffer = cast(pointer(c_uint32(value)), c_void_p)
        self.buffer_length = sizeof(c_uint32)

    def bigint_unsigned(self, value):
        self.buffer_type = FieldType.C_BIGINT_UNSIGNED
        self.buffer = cast(pointer(c_uint64(value)), c_void_p)
        self.buffer_length = sizeof(c_uint64)


def _datetime_to_timestamp(value, precision):
    # type: (datetime | float | int | str | c_int64, PrecisionEnum) -> c_int64
    if value is None:
        return FieldType.C_BIGINT_NULL
    if type(value) is datetime:
        if precision == PrecisionEnum.Milliseconds:
            return int(round((value - _datetime_epoch).total_seconds() * 1000))
        elif precision == PrecisionEnum.Microseconds:
            return int(round((value - _datetime_epoch).total_seconds() * 1000000))
        else:
            raise PrecisionError("datetime does not support nanosecond precision")
    elif type(value) is float:
        if precision == PrecisionEnum.Milliseconds:
            return int(round(value * 1000))
        elif precision == PrecisionEnum.Microseconds:
            return int(round(value * 1000000))
        else:
            raise PrecisionError("time float does not support nanosecond precision")
    elif isinstance(value, int) and not isinstance(value, bool):
        return c_int64(value)
    elif isinstance(value, str):
        value = datetime.fromisoformat(value)
        if precision == PrecisionEnum.Milliseconds:
            return int(round((value - _datetime_epoch).total_seconds() * 1000))
        elif precision == PrecisionEnum.Microseconds:
            return int(round((value - _datetime_epoch).total_seconds() * 1000000))
        else:
            raise PrecisionError("datetime does not support nanosecond precision")
    elif isinstance(value, c_int64):
        return value
    return FieldType.C_BIGINT_NULL
class TaosMultiBind(ctypes.Structure):
    _fields_ = [
        ("buffer_type", c_int),
        ("buffer", c_void_p),
        ("buffer_length", c_size_t),
        ("length", POINTER(c_int32)),
        ("is_null", c_char_p),
        ("num", c_int),
    ]

    def null(self, num):
        self.buffer_type = FieldType.C_NULL
        self.is_null = cast((c_char * num)(*[1 for _ in range(num)]), c_char_p)
        self.buffer = c_void_p(None)
        self.num = num

    def bool(self, values):
        try:
            # a raw ctypes buffer passes through unchanged
            buffer = cast(values, c_void_p)
        except:
            buffer_type = c_int8 * len(values)
            try:
                buffer = buffer_type(*values)
            except:
                # replace None with the per-type null sentinel
                buffer = buffer_type(*[v if v is not None else FieldType.C_BOOL_NULL for v in values])

        self.buffer = cast(buffer, c_void_p)
        self.num = len(values)
        self.buffer_type = FieldType.C_BOOL
        self.buffer_length = sizeof(c_bool)

    def tinyint(self, values):
        self.buffer_type = FieldType.C_TINYINT
        self.buffer_length = sizeof(c_int8)

        try:
            buffer = cast(values, c_void_p)
        except:
            buffer_type = c_int8 * len(values)
            try:
                buffer = buffer_type(*values)
            except:
                buffer = buffer_type(*[v if v is not None else FieldType.C_TINYINT_NULL for v in values])

        self.buffer = cast(buffer, c_void_p)
        self.num = len(values)

    def smallint(self, values):
        self.buffer_type = FieldType.C_SMALLINT
        self.buffer_length = sizeof(c_int16)

        try:
            buffer = cast(values, c_void_p)
        except:
            buffer_type = c_int16 * len(values)
            try:
                buffer = buffer_type(*values)
            except:
                buffer = buffer_type(*[v if v is not None else FieldType.C_SMALLINT_NULL for v in values])
        self.buffer = cast(buffer, c_void_p)
        self.num = len(values)

    def int(self, values):
        self.buffer_type = FieldType.C_INT
        self.buffer_length = sizeof(c_int32)

        try:
            buffer = cast(values, c_void_p)
        except:
            buffer_type = c_int32 * len(values)
            try:
                buffer = buffer_type(*values)
            except:
                buffer = buffer_type(*[v if v is not None else FieldType.C_INT_NULL for v in values])
        self.buffer = cast(buffer, c_void_p)
        self.num = len(values)

    def bigint(self, values):
        self.buffer_type = FieldType.C_BIGINT
        self.buffer_length = sizeof(c_int64)

        try:
            buffer = cast(values, c_void_p)
        except:
            buffer_type = c_int64 * len(values)
            try:
                buffer = buffer_type(*values)
            except:
                buffer = buffer_type(*[v if v is not None else FieldType.C_BIGINT_NULL for v in values])
        self.buffer = cast(buffer, c_void_p)
        self.num = len(values)

    def float(self, values):
        self.buffer_type = FieldType.C_FLOAT
        self.buffer_length = sizeof(c_float)

        try:
            buffer = cast(values, c_void_p)
        except:
            buffer_type = c_float * len(values)
            try:
                buffer = buffer_type(*values)
            except:
                buffer = buffer_type(*[v if v is not None else FieldType.C_FLOAT_NULL for v in values])
        self.buffer = cast(buffer, c_void_p)
        self.num = len(values)

    def double(self, values):
        self.buffer_type = FieldType.C_DOUBLE
        self.buffer_length = sizeof(c_double)

        try:
            buffer = cast(values, c_void_p)
        except:
            buffer_type = c_double * len(values)
            try:
                buffer = buffer_type(*values)
            except:
                buffer = buffer_type(*[v if v is not None else FieldType.C_DOUBLE_NULL for v in values])
        self.buffer = cast(buffer, c_void_p)
        self.num = len(values)

    def binary(self, values):
        self.num = len(values)
        self.buffer = cast(c_char_p("".join(filter(_is_not_none, values)).encode("utf-8")), c_void_p)
        self.length = (c_int * len(values))(*[len(value) if value is not None else 0 for value in values])
        self.buffer_type = FieldType.C_BINARY
        self.is_null = cast((c_byte * self.num)(*[1 if v is None else 0 for v in values]), c_char_p)

    def timestamp(self, values, precision=PrecisionEnum.Milliseconds):
        try:
            buffer = cast(values, c_void_p)
        except:
            buffer_type = c_int64 * len(values)
            buffer = buffer_type(*[_datetime_to_timestamp(value, precision) for value in values])

        self.buffer_type = FieldType.C_TIMESTAMP
        self.buffer = cast(buffer, c_void_p)
        self.buffer_length = sizeof(c_int64)
        self.num = len(values)

    def nchar(self, values):
        # type: (list[str]) -> None
        if sys.version_info < (3, 0):
            _bytes = [bytes(value) if value is not None else None for value in values]
            buffer_length = max(len(b) + 1 for b in _bytes if b is not None)
            buffers = [
                create_string_buffer(b, buffer_length) if b is not None else create_string_buffer(buffer_length)
                for b in _bytes
            ]
            buffer_all = b''.join(v[:] for v in buffers)
            self.buffer = cast(c_char_p(buffer_all), c_void_p)
        else:
            _bytes = [value.encode("utf-8") if value is not None else None for value in values]
            buffer_length = max(len(b) for b in _bytes if b is not None)
            self.buffer = cast(
                c_char_p(
                    b"".join(
                        [
                            create_string_buffer(b, buffer_length)
                            if b is not None
                            else create_string_buffer(buffer_length)
                            for b in _bytes
                        ]
                    )
                ),
                c_void_p,
            )
        self.length = (c_int32 * len(values))(*[len(b) if b is not None else 0 for b in _bytes])
        self.buffer_length = buffer_length
        self.num = len(values)
        self.is_null = cast((c_byte * self.num)(*[1 if v is None else 0 for v in values]), c_char_p)
        self.buffer_type = FieldType.C_NCHAR

    def tinyint_unsigned(self, values):
        self.buffer_type = FieldType.C_TINYINT_UNSIGNED
        self.buffer_length = sizeof(c_uint8)

        try:
            buffer = cast(values, c_void_p)
        except:
            buffer_type = c_uint8 * len(values)
            try:
                buffer = buffer_type(*values)
            except:
                buffer = buffer_type(*[v if v is not None else FieldType.C_TINYINT_UNSIGNED_NULL for v in values])
        self.buffer = cast(buffer, c_void_p)
        self.num = len(values)

    def smallint_unsigned(self, values):
        self.buffer_type = FieldType.C_SMALLINT_UNSIGNED
        self.buffer_length = sizeof(c_uint16)

        try:
            buffer = cast(values, c_void_p)
        except:
            buffer_type = c_uint16 * len(values)
            try:
                buffer = buffer_type(*values)
            except:
                buffer = buffer_type(*[v if v is not None else FieldType.C_SMALLINT_UNSIGNED_NULL for v in values])
        self.buffer = cast(buffer, c_void_p)
        self.num = len(values)

    def int_unsigned(self, values):
        self.buffer_type = FieldType.C_INT_UNSIGNED
        self.buffer_length = sizeof(c_uint32)

        try:
            buffer = cast(values, c_void_p)
        except:
            buffer_type = c_uint32 * len(values)
            try:
                buffer = buffer_type(*values)
            except:
                buffer = buffer_type(*[v if v is not None else FieldType.C_INT_UNSIGNED_NULL for v in values])
        self.buffer = cast(buffer, c_void_p)
        self.num = len(values)

    def bigint_unsigned(self, values):
        self.buffer_type = FieldType.C_BIGINT_UNSIGNED
        self.buffer_length = sizeof(c_uint64)

        try:
            buffer = cast(values, c_void_p)
        except:
            buffer_type = c_uint64 * len(values)
            try:
                buffer = buffer_type(*values)
            except:
                buffer = buffer_type(*[v if v is not None else FieldType.C_BIGINT_UNSIGNED_NULL for v in values])
        self.buffer = cast(buffer, c_void_p)
        self.num = len(values)


def new_bind_param():
    # type: () -> TaosBind
    return TaosBind()


def new_bind_params(size):
    # type: (int) -> Array[TaosBind]
    return (TaosBind * size)()


def new_multi_bind():
    # type: () -> TaosMultiBind
    return TaosMultiBind()


def new_multi_binds(size):
    # type: (int) -> Array[TaosMultiBind]
    return (TaosMultiBind * size)()
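As a usage sketch of the multi-bind null handling (names as exported from the `taos` package; the -128 tinyint sentinel follows the README note above):

```python
from taos import new_multi_binds

# Bind three rows for two columns; None becomes the per-type null sentinel.
params = new_multi_binds(2)
params[0].timestamp([1626861392589, 1626861392590, None])  # None -> C_BIGINT_NULL
params[1].tinyint([0, 127, None])                          # None -> C_TINYINT_NULL (-128)
```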
@ -1,11 +1,15 @@
from .cursor import TDengineCursor
from .subscription import TDengineSubscription
from .cinterface import CTaosInterface
# encoding:UTF-8
from types import FunctionType
from .cinterface import *
from .cursor import TaosCursor
from .subscription import TaosSubscription
from .statement import TaosStmt
from .stream import TaosStream
from .result import *


class TDengineConnection(object):
    """ TDengine connection object
    """
class TaosConnection(object):
    """TDengine connection object"""

    def __init__(self, *args, **kwargs):
        self._conn = None

@ -21,63 +25,130 @@ class TDengineConnection(object):

    def config(self, **kwargs):
        # host
        if 'host' in kwargs:
            self._host = kwargs['host']
        if "host" in kwargs:
            self._host = kwargs["host"]

        # user
        if 'user' in kwargs:
            self._user = kwargs['user']
        if "user" in kwargs:
            self._user = kwargs["user"]

        # password
        if 'password' in kwargs:
            self._password = kwargs['password']
        if "password" in kwargs:
            self._password = kwargs["password"]

        # database
        if 'database' in kwargs:
            self._database = kwargs['database']
        if "database" in kwargs:
            self._database = kwargs["database"]

        # port
        if 'port' in kwargs:
            self._port = kwargs['port']
        if "port" in kwargs:
            self._port = kwargs["port"]

        # config
        if 'config' in kwargs:
            self._config = kwargs['config']
        if "config" in kwargs:
            self._config = kwargs["config"]

        self._chandle = CTaosInterface(self._config)
        self._conn = self._chandle.connect(
            self._host,
            self._user,
            self._password,
            self._database,
            self._port)
        self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port)

    def close(self):
        """Close current connection.
        """
        return CTaosInterface.close(self._conn)
        """Close current connection."""
        if self._conn:
            taos_close(self._conn)
            self._conn = None

    def subscribe(self, restart, topic, sql, interval):
        """Create a subscription.
        """
    @property
    def client_info(self):
        # type: () -> str
        return taos_get_client_info()

    @property
    def server_info(self):
        # type: () -> str
        return taos_get_server_info(self._conn)

    def select_db(self, database):
        # type: (str) -> None
        taos_select_db(self._conn, database)

    def execute(self, sql):
        # type: (str) -> None
        """Simply execute sql, ignoring the results"""
        res = taos_query(self._conn, sql)
        taos_free_result(res)

    def query(self, sql):
        # type: (str) -> TaosResult
        result = taos_query(self._conn, sql)
        return TaosResult(result, True, self)

    def query_a(self, sql, callback, param):
        # type: (str, async_query_callback_type, c_void_p) -> None
        """Asynchronously query a sql with callback function"""
        taos_query_a(self._conn, sql, callback, param)

    def subscribe(self, restart, topic, sql, interval, callback=None, param=None):
        # type: (bool, str, str, int, subscribe_callback_type, c_void_p) -> TaosSubscription
        """Create a subscription."""
        if self._conn is None:
            return None
        sub = CTaosInterface.subscribe(
            self._conn, restart, topic, sql, interval)
        return TDengineSubscription(sub)
        sub = taos_subscribe(self._conn, restart, topic, sql, interval, callback, param)
        return TaosSubscription(sub, callback is not None)

    def insertLines(self, lines):
        """
        insert lines through line protocol
        """
    def statement(self, sql=None):
        # type: (str | None) -> TaosStmt
        if self._conn is None:
            return None
        return CTaosInterface.insertLines(self._conn, lines)

        stmt = taos_stmt_init(self._conn)
        if sql is not None:
            taos_stmt_prepare(stmt, sql)

        return TaosStmt(stmt)

    def load_table_info(self, tables):
        # type: (str) -> None
        taos_load_table_info(self._conn, tables)

    def stream(self, sql, callback, stime=0, param=None, callback2=None):
        # type: (str, Callable[[Any, TaosResult, TaosRows], None], int, Any, c_void_p) -> TaosStream
        # cb = cast(callback, stream_callback_type)
        # ref = byref(cb)

        stream = taos_open_stream(self._conn, sql, callback, stime, param, callback2)
        return TaosStream(stream)

    def insert_lines(self, lines):
        # type: (list[str]) -> None
        """Line protocol and schemaless support

        ## Example

        ```python
        import taos
        conn = taos.connect()
        conn.exec("drop database if exists test")
        conn.select_db("test")
        lines = [
            'ste,t2=5,t3=L"ste" c1=true,c2=4,c3="string" 1626056811855516532',
        ]
        conn.insert_lines(lines)
        ```

        ## Exception

        ```python
        try:
            conn.insert_lines(lines)
        except SchemalessError as err:
            print(err)
        ```
        """
        return taos_insert_lines(self._conn, lines)

    def cursor(self):
        """Return a new Cursor object using the connection.
        """
        return TDengineCursor(self)
        # type: () -> TaosCursor
        """Return a new Cursor object using the connection."""
        return TaosCursor(self)

    def commit(self):
        """Commit any pending transaction to the database.

@ -87,17 +158,18 @@ class TDengineConnection(object):
        pass

    def rollback(self):
        """Void functionality
        """
        """Void functionality"""
        pass

    def clear_result_set(self):
        """Clear unused result set on this connection.
        """
        """Clear unused result set on this connection."""
        pass

    def __del__(self):
        self.close()


if __name__ == "__main__":
    conn = TDengineConnection(host='192.168.1.107')
    conn = TaosConnection()
    conn.close()
    print("Hello world")
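A quick connectivity sketch against the new object API (property names as defined above; the exact version strings depend on the installed client and server):

```python
import taos

conn = taos.connect()
print(conn.client_info)  # version string from taos_get_client_info()
print(conn.server_info)  # version string from taos_get_server_info()
conn.close()
```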
@ -1,12 +1,11 @@
# encoding:UTF-8

"""Constants in TDengine python
"""

from .dbapi import *


class FieldType(object):
    """TDengine Field Types
    """
    """TDengine Field Types"""

    # type_code
    C_NULL = 0
    C_BOOL = 1

@ -34,9 +33,9 @@ class FieldType(object):
    C_INT_UNSIGNED_NULL = 4294967295
    C_BIGINT_NULL = -9223372036854775808
    C_BIGINT_UNSIGNED_NULL = 18446744073709551615
    C_FLOAT_NULL = float('nan')
    C_DOUBLE_NULL = float('nan')
    C_BINARY_NULL = bytearray([int('0xff', 16)])
    C_FLOAT_NULL = float("nan")
    C_DOUBLE_NULL = float("nan")
    C_BINARY_NULL = bytearray([int("0xff", 16)])
    # Timestamp precision definition
    C_TIMESTAMP_MILLI = 0
    C_TIMESTAMP_MICRO = 1
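Because the float and double null sentinels are NaN, equality checks against them never succeed; a small sketch of how one might test them (module path taken from the import above):

```python
import math

from taos.constants import FieldType

# NaN never compares equal to itself, so use math.isnan for float/double nulls.
assert math.isnan(FieldType.C_FLOAT_NULL)
assert FieldType.C_BIGINT_UNSIGNED_NULL == 18446744073709551615
```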
@ -1,18 +1,18 @@
from .cinterface import CTaosInterface
# encoding:UTF-8
from .cinterface import *
from .error import *
from .constants import FieldType

# querySeqNum = 0
from .result import *


class TDengineCursor(object):
class TaosCursor(object):
    """Database cursor which is used to manage the context of a fetch operation.

    Attributes:
        .description: Read-only attribute consists of 7-item sequences:

            > name (mondatory)
            > type_code (mondatory)
            > name (mandatory)
            > type_code (mandatory)
            > display_size
            > internal_size
            > precision

@ -55,8 +55,7 @@ class TDengineCursor(object):
            raise OperationalError("Invalid use of fetch iterator")

        if self._block_rows <= self._block_iter:
            block, self._block_rows = CTaosInterface.fetchRow(
                self._result, self._fields)
            block, self._block_rows = taos_fetch_row(self._result, self._fields)
            if self._block_rows == 0:
                raise StopIteration
            self._block = list(map(tuple, zip(*block)))

@ -69,20 +68,17 @@ class TDengineCursor(object):

    @property
    def description(self):
        """Return the description of the object.
        """
        """Return the description of the object."""
        return self._description

    @property
    def rowcount(self):
        """Return the rowcount of the object
        """
        """Return the rowcount of the object"""
        return self._rowcount

    @property
    def affected_rows(self):
        """Return the rowcount of insertion
        """
        """Return the rowcount of insertion"""
        return self._affected_rows

    def callproc(self, procname, *args):

@ -96,8 +92,7 @@ class TDengineCursor(object):
        self._logfile = logfile

    def close(self):
        """Close the cursor.
        """
        """Close the cursor."""
        if self._connection is None:
            return False

@ -107,8 +102,7 @@ class TDengineCursor(object):
        return True

    def execute(self, operation, params=None):
        """Prepare and execute a database operation (query or command).
        """
        """Prepare and execute a database operation (query or command)."""
        if not operation:
            return None

@ -124,104 +118,91 @@ class TDengineCursor(object):

        # global querySeqNum
        # querySeqNum += 1
        # localSeqNum = querySeqNum  # avoid raice condition
        # localSeqNum = querySeqNum  # avoid race condition
        # print("   >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
        self._result = CTaosInterface.query(self._connection._conn, stmt)
        self._result = taos_query(self._connection._conn, stmt)
        # print("   << Query ({}) Exec Done".format(localSeqNum))
        if (self._logfile):
        if self._logfile:
            with open(self._logfile, "a") as logfile:
                logfile.write("%s;\n" % operation)

        errno = CTaosInterface.libtaos.taos_errno(self._result)
        if errno == 0:
            if CTaosInterface.fieldsCount(self._result) == 0:
                self._affected_rows += CTaosInterface.affectedRows(
                    self._result)
                return CTaosInterface.affectedRows(self._result)
            else:
                self._fields = CTaosInterface.useResult(
                    self._result)
                return self._handle_result()
            if taos_field_count(self._result) == 0:
                affected_rows = taos_affected_rows(self._result)
                self._affected_rows += affected_rows
                return affected_rows
        else:
            raise ProgrammingError(
                CTaosInterface.errStr(
                    self._result), errno)
                self._fields = taos_fetch_fields(self._result)
                return self._handle_result()

    def executemany(self, operation, seq_of_parameters):
        """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
        """
        """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters."""
        pass

    def fetchone(self):
        """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
        """
        """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available."""
        pass

    def fetchmany(self):
        pass

    def istype(self, col, dataType):
        if (dataType.upper() == "BOOL"):
            if (self._description[col][1] == FieldType.C_BOOL):
        if dataType.upper() == "BOOL":
            if self._description[col][1] == FieldType.C_BOOL:
                return True
        if (dataType.upper() == "TINYINT"):
            if (self._description[col][1] == FieldType.C_TINYINT):
        if dataType.upper() == "TINYINT":
            if self._description[col][1] == FieldType.C_TINYINT:
                return True
        if (dataType.upper() == "TINYINT UNSIGNED"):
            if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
        if dataType.upper() == "TINYINT UNSIGNED":
            if self._description[col][1] == FieldType.C_TINYINT_UNSIGNED:
                return True
        if (dataType.upper() == "SMALLINT"):
            if (self._description[col][1] == FieldType.C_SMALLINT):
        if dataType.upper() == "SMALLINT":
            if self._description[col][1] == FieldType.C_SMALLINT:
                return True
        if (dataType.upper() == "SMALLINT UNSIGNED"):
            if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
        if dataType.upper() == "SMALLINT UNSIGNED":
            if self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED:
                return True
        if (dataType.upper() == "INT"):
            if (self._description[col][1] == FieldType.C_INT):
        if dataType.upper() == "INT":
            if self._description[col][1] == FieldType.C_INT:
                return True
        if (dataType.upper() == "INT UNSIGNED"):
            if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
        if dataType.upper() == "INT UNSIGNED":
            if self._description[col][1] == FieldType.C_INT_UNSIGNED:
                return True
        if (dataType.upper() == "BIGINT"):
            if (self._description[col][1] == FieldType.C_BIGINT):
        if dataType.upper() == "BIGINT":
            if self._description[col][1] == FieldType.C_BIGINT:
                return True
        if (dataType.upper() == "BIGINT UNSIGNED"):
            if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
        if dataType.upper() == "BIGINT UNSIGNED":
            if self._description[col][1] == FieldType.C_BIGINT_UNSIGNED:
                return True
        if (dataType.upper() == "FLOAT"):
            if (self._description[col][1] == FieldType.C_FLOAT):
        if dataType.upper() == "FLOAT":
            if self._description[col][1] == FieldType.C_FLOAT:
                return True
        if (dataType.upper() == "DOUBLE"):
            if (self._description[col][1] == FieldType.C_DOUBLE):
        if dataType.upper() == "DOUBLE":
            if self._description[col][1] == FieldType.C_DOUBLE:
                return True
        if (dataType.upper() == "BINARY"):
            if (self._description[col][1] == FieldType.C_BINARY):
        if dataType.upper() == "BINARY":
            if self._description[col][1] == FieldType.C_BINARY:
                return True
        if (dataType.upper() == "TIMESTAMP"):
            if (self._description[col][1] == FieldType.C_TIMESTAMP):
        if dataType.upper() == "TIMESTAMP":
            if self._description[col][1] == FieldType.C_TIMESTAMP:
                return True
        if (dataType.upper() == "NCHAR"):
            if (self._description[col][1] == FieldType.C_NCHAR):
        if dataType.upper() == "NCHAR":
            if self._description[col][1] == FieldType.C_NCHAR:
                return True

        return False

    def fetchall_row(self):
        """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
        """
        """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation."""
        if self._result is None or self._fields is None:
            raise OperationalError("Invalid use of fetchall")

        buffer = [[] for i in range(len(self._fields))]
        self._rowcount = 0
        while True:
            block, num_of_fields = CTaosInterface.fetchRow(
                self._result, self._fields)
            errno = CTaosInterface.libtaos.taos_errno(self._result)
            block, num_of_fields = taos_fetch_row(self._result, self._fields)
            errno = taos_errno(self._result)
            if errno != 0:
                raise ProgrammingError(
                    CTaosInterface.errStr(
                        self._result), errno)
                raise ProgrammingError(taos_errstr(self._result), errno)
            if num_of_fields == 0:
                break
            self._rowcount += num_of_fields

@ -230,19 +211,16 @@ class TDengineCursor(object):
        return list(map(tuple, zip(*buffer)))

    def fetchall(self):
        if self._result is None or self._fields is None:
        if self._result is None:
            raise OperationalError("Invalid use of fetchall")

        buffer = [[] for i in range(len(self._fields))]
        fields = self._fields if self._fields is not None else taos_fetch_fields(self._result)
        buffer = [[] for i in range(len(fields))]
        self._rowcount = 0
        while True:
            block, num_of_fields = CTaosInterface.fetchBlock(
                self._result, self._fields)
            errno = CTaosInterface.libtaos.taos_errno(self._result)
            block, num_of_fields = taos_fetch_block(self._result, self._fields)
            errno = taos_errno(self._result)
            if errno != 0:
                raise ProgrammingError(
                    CTaosInterface.errStr(
                        self._result), errno)
                raise ProgrammingError(taos_errstr(self._result), errno)
            if num_of_fields == 0:
                break
            self._rowcount += num_of_fields

@ -250,9 +228,12 @@ class TDengineCursor(object):
            buffer[i].extend(block[i])
        return list(map(tuple, zip(*buffer)))

    def stop_query(self):
        if self._result is not None:
            taos_stop_query(self._result)

    def nextset(self):
        """
        """
        """ """
        pass

    def setinputsize(self, sizes):

@ -262,12 +243,11 @@ class TDengineCursor(object):
        pass

    def _reset_result(self):
        """Reset the result to unused version.
        """
        """Reset the result to unused version."""
        self._description = []
        self._rowcount = -1
        if self._result is not None:
            CTaosInterface.freeResult(self._result)
            taos_free_result(self._result)
        self._result = None
        self._fields = None
        self._block = None

@ -276,11 +256,12 @@ class TDengineCursor(object):
        self._affected_rows = 0

    def _handle_result(self):
        """Handle the return result from query.
        """
        """Handle the return result from query."""
        self._description = []
        for ele in self._fields:
            self._description.append(
                (ele['name'], ele['type'], None, None, None, None, False))
            self._description.append((ele["name"], ele["type"], None, None, None, None, False))

        return self._result

    def __del__(self):
        self.close()
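To tie the cursor attributes together, a small PEP-249-style sketch (illustrative only; each `description` entry is the 7-item sequence listed in the class docstring, with the last five items unset here):

```python
import taos

conn = taos.connect()
cursor = conn.cursor()
cursor.execute("show databases")
for desc in cursor.description:
    name, type_code = desc[0], desc[1]  # remaining items: display_size, internal_size, precision, ...
    print(name, type_code)
print(cursor.rowcount)
cursor.close()
conn.close()
```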
@ -1,44 +0,0 @@
"""Type Objects and Constructors.
"""

import time
import datetime


class DBAPITypeObject(object):
    def __init__(self, *values):
        self.values = values

    def __com__(self, other):
        if other in self.values:
            return 0
        if other < self.values:
            return 1
        else:
            return -1


Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime


def DataFromTicks(ticks):
    return Date(*time.localtime(ticks)[:3])


def TimeFromTicks(ticks):
    return Time(*time.localtime(ticks)[3:6])


def TimestampFromTicks(ticks):
    return Timestamp(*time.localtime(ticks)[:6])


Binary = bytes

# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
# ROWID = DBAPITypeObject()