From ae1b233db3ce7463c4cbf89e75273f370c964cb4 Mon Sep 17 00:00:00 2001 From: wu champion Date: Tue, 27 Apr 2021 09:30:40 +0800 Subject: [PATCH 001/140] [TD-2039] verify the bug of TD-2039 --- tests/pytest/fulltest.sh | 1 + tests/pytest/query/computeErrorinWhere.py | 136 +++++++++++++++++++++ tests/pytest/stream/cqSupportBefore1970.py | 2 +- 3 files changed, 138 insertions(+), 1 deletion(-) create mode 100644 tests/pytest/query/computeErrorinWhere.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 3528b01dbd..3a6e0d5ba2 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -225,6 +225,7 @@ python3 ./test.py -f query/queryStddevWithGroupby.py python3 ./test.py -f query/querySecondtscolumnTowherenow.py python3 ./test.py -f query/queryFilterTswithDateUnit.py python3 ./test.py -f query/queryTscomputWithNow.py +python3 ./test.py -f query/computeErrorinWhere.py diff --git a/tests/pytest/query/computeErrorinWhere.py b/tests/pytest/query/computeErrorinWhere.py new file mode 100644 index 0000000000..4ceb1ab89f --- /dev/null +++ b/tests/pytest/query/computeErrorinWhere.py @@ -0,0 +1,136 @@ +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def insertnow(self): + tdSql.execute("drop database if exists dbcom") + tdSql.execute("create database if not exists dbcom keep 36500") + tdSql.execute("use dbcom") + + tdSql.execute( + "create table stbcom (ts timestamp, c1 int, c2 tinyint, c3 smallint, c4 bigint, c5 float, c6 double) TAGS(t1 int)" + ) + tdSql.execute("create table tcom1 using stbcom tags(1)") + + # timestamp list: + # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00" + # -631180800000 -> "1950-01-01 00:00:00" + + tdSql.execute("insert into tcom1 values (now-1d, 1, 11, 21, 31, 41.0, 51.1)") + tdSql.execute("insert into tcom1 values (now-2d, 2, 12, 22, 32, 42.0, 52.1)") + tdSql.execute("insert into tcom1 values (now-3d, 3, 13, 23, 33, 43.0, 53.1)") + tdSql.execute("insert into tcom1 values (now-4d, 4, 14, 24, 34, 44.0, 54.1)") + + def querycom(self): + tdSql.query("select * from tcom1 where c1=2-1") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c1=-1+2") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c1=1.0*1.0") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c1=1.0/1.0") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c1>1.0/1.0") + tdSql.checkRows(3) + tdSql.query("select * from tcom1 where c1<1.0/1.0") + tdSql.checkRows(0) + + tdSql.query("select * from tcom1 where c2=12-1") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c2=-1+12") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c2=11.0*1.0") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c2=11.0/1.0") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c2>11.0/1.0") + tdSql.checkRows(3) + tdSql.query("select * from tcom1 where c2<11.0/1.0") + tdSql.checkRows(0) + + tdSql.query("select * from tcom1 where c3=22-1") + tdSql.checkRows(1) + 
tdSql.query("select * from tcom1 where c3=-1+22") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c3=21.0*1.0") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c3=21.0/1.0") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c3>21.0/1.0") + tdSql.checkRows(3) + tdSql.query("select * from tcom1 where c3<21.0/1.0") + tdSql.checkRows(0) + + tdSql.query("select * from tcom1 where c4=32-1") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c4=-1+32") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c4=31.0*1.0") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c4=31.0/1.0") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c4>31.0/1.0") + tdSql.checkRows(3) + tdSql.query("select * from tcom1 where c4<31.0/1.0") + tdSql.checkRows(0) + + tdSql.query("select * from tcom1 where c5=42-1") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c5=-1+42") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c5=41*1") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c5=41/1") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c5>41/1") + tdSql.checkRows(3) + tdSql.query("select * from tcom1 where c5<41/1") + tdSql.checkRows(0) + tdSql.query("select * from tcom1 where c5=42.000000008-1.0000000099999999999999") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c5=42.0008-1.0000099999999999999") + tdSql.checkRows(0) + + tdSql.query("select * from tcom1 where c6=52-0.9") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c6=-0.9+52") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c6=51.1*1") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c6=51.1/1") + tdSql.checkRows(1) + tdSql.query("select * from tcom1 where c6>51.1/1") + tdSql.checkRows(3) + tdSql.query("select * from tcom1 where c6<51.1/1") + tdSql.checkRows(0) + tdSql.query("select * from tcom1 where c6=52.100000000000008-1.000000000000009") + tdSql.checkRows(1) + + + def run(self): + self.insertnow() + self.querycom() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/stream/cqSupportBefore1970.py b/tests/pytest/stream/cqSupportBefore1970.py index 75587d1743..01ba5234fc 100644 --- a/tests/pytest/stream/cqSupportBefore1970.py +++ b/tests/pytest/stream/cqSupportBefore1970.py @@ -75,7 +75,7 @@ class TDTestCase: self.insertnow() self.cq() self.querycq() - + # after wal and sync, check again tdSql.query("show dnodes") index = tdSql.getData(0, 0) From 6a0d83e8983938c0028dcbc8f20813fb4ac2a7ea Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 27 Apr 2021 17:44:59 +0800 Subject: [PATCH 002/140] [TD-3988]add mac test on Appveyor --- .appveyor.yml | 47 +++++++++++++++++++++++++++++++++-------------- 1 file changed, 33 insertions(+), 14 deletions(-) diff --git a/.appveyor.yml b/.appveyor.yml index fe4816688b..00d585ea9b 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -1,30 +1,49 @@ version: 1.0.{build} -os: Visual Studio 2015 +image: + - Visual Studio 2015 + - macos-mojave environment: matrix: - ARCH: amd64 - ARCH: x86 +matrix: + exclude: + - image: macos-mojave + ARCH: x86 +for: + - + matrix: + only: + - image: Visual Studio 2015 + clone_folder: c:\dev\TDengine + clone_depth: 1 -clone_folder: c:\dev\TDengine -clone_depth: 1 + init: + - call "C:\Program Files (x86)\Microsoft Visual Studio 
14.0\VC\vcvarsall.bat" %ARCH% -init: - - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH% + before_build: + - cd c:\dev\TDengine + - md build -before_build: - - cd c:\dev\TDengine - - md build - -build_script: - - cd build - - cmake -G "NMake Makefiles" .. - - nmake install + build_script: + - cd build + - cmake -G "NMake Makefiles" .. + - nmake install + - + matrix: + only: + - image: macos-mojave + clone_depth: 1 + build_script: + - mkdir debug + - cd debug + - cmake .. > /dev/null + - make > /dev/null notifications: - provider: Email to: - sangshuduo@gmail.com - on_build_success: true on_build_failure: true on_build_status_changed: true From 5caee8ad177d016141d0ad0b2c97c269549c8dd3 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 27 Apr 2021 18:20:48 +0800 Subject: [PATCH 003/140] test for macos --- .appveyor.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.appveyor.yml b/.appveyor.yml index 00d585ea9b..40210a6cdc 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -32,7 +32,7 @@ for: - matrix: only: - - image: macos-mojave + - image: macos clone_depth: 1 build_script: From 25f5281687d8dadf8ae64d0f17db4ce3068528c2 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 27 Apr 2021 18:23:06 +0800 Subject: [PATCH 004/140] [TD-3987]add drone CI --- .drone.yml | 188 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 188 insertions(+) create mode 100644 .drone.yml diff --git a/.drone.yml b/.drone.yml new file mode 100644 index 0000000000..bafc8ef8b0 --- /dev/null +++ b/.drone.yml @@ -0,0 +1,188 @@ +--- +kind: pipeline +name: test_amd64 + +platform: + os: linux + arch: amd64 + +steps: +- name: build + image: gcc + commands: + - apt-get update + - apt-get install -y cmake build-essential git + - mkdir debug + - cd debug + - cmake .. + - make + when: + branch: + - develop + - master + +- name: smoke_test + image: python:3.8 + commands: + - pip3 install psutil + - pip3 install guppy3 + - pip3 install src/connector/python/linux/python3/ + - cd tests + - ./test-all.sh smoke + when: + branch: + - develop + - master + + +- name: crash_gen + image: python:3.8 + commands: + - pip3 install requests + - pip3 install src/connector/python/linux/python3/ + - pip3 install psutil + - pip3 install guppy3 + - cd tests/pytest + - ./crash_gen.sh -a -p -t 4 -s 200 + when: + branch: + - develop + - master + + +--- +kind: pipeline +name: test_arm64 + +platform: + os: linux + arch: arm64 + +steps: +- name: build + image: gcc + commands: + - apt-get update + - apt-get install -y cmake build-essential + - mkdir debug + - cd debug + - cmake .. -DCPUTYPE=aarch64 > /dev/null + - make + when: + branch: + - develop + - master +--- +kind: pipeline +name: test_arm + +platform: + os: linux + arch: arm + +steps: +- name: build + image: gcc + commands: + - apt-get update + - apt-get install -y cmake build-essential + - mkdir debug + - cd debug + - cmake .. -DCPUTYPE=aarch32 > /dev/null + - make + when: + branch: + - develop + - master + +--- +kind: pipeline +name: build_trusty + +platform: + os: linux + arch: amd64 + +steps: +- name: build + image: ubuntu:trusty + commands: + - apt-get update + - apt-get install -y gcc cmake3 build-essential git binutils-2.26 + + - mkdir debug + - cd debug + - cmake .. 
+ - make + when: + branch: + - develop + - master + +--- +kind: pipeline +name: build_xenial + +platform: + os: linux + arch: amd64 + +steps: +- name: build + image: ubuntu:xenial + commands: + - apt-get update + - apt-get install -y gcc cmake build-essential + - mkdir debug + - cd debug + - cmake .. + - make + when: + branch: + - develop + - master + +--- +kind: pipeline +name: build_bionic +platform: + os: linux + arch: amd64 + +steps: +- name: build + image: ubuntu:bionic + commands: + - apt-get update + - apt-get install -y gcc cmake build-essential + - mkdir debug + - cd debug + - cmake .. + - make + when: + branch: + - develop + - master + +--- +kind: pipeline +name: goodbye + +platform: + os: linux + arch: amd64 + +steps: +- name: 64-bit + image: alpine + commands: + - echo 64-bit is good + when: + branch: + - develop + - master + + +depends_on: +- test_arm64 +- test_amd64 \ No newline at end of file From 4d21403d3cbc3b7e85ead8c010994f506b95760d Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 27 Apr 2021 18:52:24 +0800 Subject: [PATCH 005/140] modify for mac --- .appveyor.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.appveyor.yml b/.appveyor.yml index 40210a6cdc..ee1dc91767 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -1,14 +1,14 @@ version: 1.0.{build} image: - Visual Studio 2015 - - macos-mojave + - macos environment: matrix: - ARCH: amd64 - ARCH: x86 matrix: exclude: - - image: macos-mojave + - image: macos ARCH: x86 for: - From 2677b8867d31b9de560583a1029888da0c402358 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 27 Apr 2021 19:11:09 +0800 Subject: [PATCH 006/140] test for drone --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index bafc8ef8b0..2bf88a9022 100644 --- a/.drone.yml +++ b/.drone.yml @@ -176,7 +176,7 @@ steps: - name: 64-bit image: alpine commands: - - echo 64-bit is good + - echo 64-bit is good. when: branch: - develop From 240e28ae66f10c6a2ed6cd24f26554f179b704a1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 27 Apr 2021 20:00:03 +0800 Subject: [PATCH 007/140] [td-225]refactor. 
--- src/client/inc/tscUtil.h | 2 +- src/client/src/tscParseInsert.c | 10 +++++----- src/client/src/tscServer.c | 13 +++++++++---- src/client/src/tscUtil.c | 12 ++++++------ 4 files changed, 21 insertions(+), 16 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 0eda49b1f4..e53a8a7abd 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -297,7 +297,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild); uint32_t tscGetTableMetaSize(STableMeta* pTableMeta); CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta); uint32_t tscGetTableMetaMaxSize(); -int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name); +int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf); STableMeta* tscTableMetaDup(STableMeta* pTableMeta); diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 920937928f..6c53074426 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -1317,15 +1317,15 @@ int tsParseSql(SSqlObj *pSql, bool initial) { } // make a backup as tsParseInsertSql may modify the string - char* sqlstr = strdup(pSql->sqlstr); +// char* sqlstr = strdup(pSql->sqlstr); ret = tsParseInsertSql(pSql); - if ((sqlstr == NULL) || (pSql->parseRetry >= 1) || + if (/*(sqlstr == NULL) || */(pSql->parseRetry >= 1) || (ret != TSDB_CODE_TSC_SQL_SYNTAX_ERROR && ret != TSDB_CODE_TSC_INVALID_SQL)) { - free(sqlstr); +// free(sqlstr); } else { tscResetSqlCmd(pCmd, true); - free(pSql->sqlstr); - pSql->sqlstr = sqlstr; +// free(pSql->sqlstr); +// pSql->sqlstr = sqlstr; pSql->parseRetry++; if ((ret = tsInsertInitialCheck(pSql)) == TSDB_CODE_SUCCESS) { ret = tsParseInsertSql(pSql); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 8889e25177..021aba171b 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2529,10 +2529,12 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { assert(tIsValidName(&pTableMetaInfo->name)); - tfree(pTableMetaInfo->pTableMeta); - uint32_t size = tscGetTableMetaMaxSize(); - pTableMetaInfo->pTableMeta = calloc(1, size); + if (pTableMetaInfo->pTableMeta == NULL) { + pTableMetaInfo->pTableMeta = calloc(1, size); + } else { + memset(pTableMetaInfo->pTableMeta, 0, size); + } pTableMetaInfo->pTableMeta->tableType = -1; pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1; @@ -2544,10 +2546,13 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1); // TODO resize the tableMeta + char buf[80*1024] = {0}; + assert(size < 80*1024); + STableMeta* pMeta = pTableMetaInfo->pTableMeta; if (pMeta->id.uid > 0) { if (pMeta->tableType == TSDB_CHILD_TABLE) { - int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name); + int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name, buf); if (code != TSDB_CODE_SUCCESS) { return getTableMetaFromMnode(pSql, pTableMetaInfo); } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 4e8bfdf064..c9c6c130d6 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2915,11 +2915,11 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) { return cMeta; } -int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) { - assert(pChild != NULL); +int32_t 
tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf) { + assert(pChild != NULL && buf != NULL); - uint32_t size = tscGetTableMetaMaxSize(); - STableMeta* p = calloc(1, size); +// uint32_t size = tscGetTableMetaMaxSize(); + STableMeta* p = buf;//calloc(1, size); taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1); if (p->id.uid > 0) { // tableMeta exists, build child table meta and return @@ -2931,12 +2931,12 @@ int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) { memcpy(pChild->schema, p->schema, sizeof(SSchema) *total); - tfree(p); +// tfree(p); return TSDB_CODE_SUCCESS; } else { // super table has been removed, current tableMeta is also expired. remove it here taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); - tfree(p); +// tfree(p); return -1; } } From 1b0e7c70fc635bc358bd36923654a4367b167fc0 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 27 Apr 2021 20:06:33 +0800 Subject: [PATCH 008/140] [td-225] refactor. --- src/client/src/tscServer.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 021aba171b..680a39c7cf 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2533,7 +2533,8 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { if (pTableMetaInfo->pTableMeta == NULL) { pTableMetaInfo->pTableMeta = calloc(1, size); } else { - memset(pTableMetaInfo->pTableMeta, 0, size); + uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta); + memset(pTableMetaInfo->pTableMeta, 0, s); } pTableMetaInfo->pTableMeta->tableType = -1; From aa2e6abbe70e8f824519170feb35ff03173016d2 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 28 Apr 2021 09:34:03 +0800 Subject: [PATCH 009/140] modify case for crash_gen --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 2bf88a9022..4009b563b4 100644 --- a/.drone.yml +++ b/.drone.yml @@ -43,7 +43,7 @@ steps: - pip3 install psutil - pip3 install guppy3 - cd tests/pytest - - ./crash_gen.sh -a -p -t 4 -s 200 + - ./crash_gen.sh -a -p -t 4 -s 2000 when: branch: - develop From 8a59bbfcf292359b4be9ffcc9b02925b76bb1d60 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 28 Apr 2021 09:48:34 +0800 Subject: [PATCH 010/140] add drone badges --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 45a955f458..cc413c400f 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ [![Build Status](https://travis-ci.org/taosdata/TDengine.svg?branch=master)](https://travis-ci.org/taosdata/TDengine) +[![Build Status](https://cloud.drone.io/api/badges/taosdata/TDengine/status.svg)](https://cloud.drone.io/taosdata/TDengine) [![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master) [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201) From 46fcd89aa3d02df78a00c1516c2fe40c23e285b7 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 28 Apr 2021 09:59:33 +0800 Subject: [PATCH 011/140] update badges --- README.md | 3 +-- 1 file changed, 1 
insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index cc413c400f..78f902babe 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ -[![Build Status](https://travis-ci.org/taosdata/TDengine.svg?branch=master)](https://travis-ci.org/taosdata/TDengine) -[![Build Status](https://cloud.drone.io/api/badges/taosdata/TDengine/status.svg)](https://cloud.drone.io/taosdata/TDengine) +[![Build Status](https://cloud.drone.io/api/badges/taosdata/TDengine/status.svg?ref=refs/heads/master)](https://cloud.drone.io/taosdata/TDengine) [![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master) [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201) From b6e41d48e054ae7fe3875109616c4287ad153528 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 28 Apr 2021 10:44:10 +0800 Subject: [PATCH 012/140] Hotfix/sangshuduo/td 3985 taosdemo timestamp step overflow (#5951) * [TD-3985]: taosdemo timestamp step overflow. * fix specified subscribe test. * replace potential overflow of int32 to int64 * replace potential int32 overflow variables to int64. * replace potential int32 overflow variables to int64. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 390 +++++++++++++++++++----------------- 1 file changed, 205 insertions(+), 185 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 8ab4918779..e6d7fb6a09 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -210,13 +210,13 @@ typedef struct SArguments_S { int len_of_binary; int num_of_CPR; int num_of_threads; - int insert_interval; - int query_times; - int interlace_rows; - int num_of_RPR; - int max_sql_len; - int num_of_tables; - int num_of_DPT; + int64_t insert_interval; + int64_t query_times; + int64_t interlace_rows; + int64_t num_of_RPR; // num_of_records_per_req + int64_t max_sql_len; + int64_t num_of_tables; + int64_t num_of_DPT; int abort; int disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms or us by database precision @@ -235,23 +235,23 @@ typedef struct SColumn_S { typedef struct SSuperTable_S { char sTblName[MAX_TB_NAME_SIZE+1]; - int childTblCount; + int64_t childTblCount; bool childTblExists; // 0: no, 1: yes - int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql + int64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table char childTblPrefix[MAX_TB_NAME_SIZE]; char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful - int childTblLimit; - int childTblOffset; + int64_t childTblLimit; + int64_t childTblOffset; - int multiThreadWriteOneTbl; // 0: no, 1: yes - int interlaceRows; // +// int multiThreadWriteOneTbl; // 0: no, 1: yes + int64_t interlaceRows; // int disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms or us by database precision - int maxSqlLen; // + int64_t maxSqlLen; // - int insertInterval; // insert interval, will override global insert interval + int64_t insertInterval; // insert interval, will override global insert interval int64_t insertRows; int64_t timeStampStep; char 
startTimestamp[MAX_TB_NAME_SIZE]; @@ -266,8 +266,8 @@ typedef struct SSuperTable_S { char* childTblName; char* colsOfCreateChildTable; - int lenOfOneRow; - int lenOfTagOfOneRow; + int64_t lenOfOneRow; + int64_t lenOfTagOfOneRow; char* sampleDataBuf; //int sampleRowCount; @@ -279,8 +279,8 @@ typedef struct SSuperTable_S { int tagUsePos; // statistics - int64_t totalInsertRows; - int64_t totalAffectedRows; + int64_t totalInsertRows; + int64_t totalAffectedRows; } SSuperTable; typedef struct { @@ -327,7 +327,7 @@ typedef struct SDataBase_S { char dbName[MAX_DB_NAME_SIZE]; bool drop; // 0: use exists, 1: if exists, drop then new create SDbCfg dbCfg; - int superTblCount; + int64_t superTblCount; SSuperTable superTbls[MAX_SUPER_TABLE_COUNT]; } SDataBase; @@ -349,44 +349,44 @@ typedef struct SDbs_S { SDataBase db[MAX_DB_COUNT]; // statistics - int64_t totalInsertRows; - int64_t totalAffectedRows; + int64_t totalInsertRows; + int64_t totalAffectedRows; } SDbs; typedef struct SpecifiedQueryInfo_S { - int queryInterval; // 0: unlimit > 0 loop/s - int concurrent; - int sqlCount; + int64_t queryInterval; // 0: unlimit > 0 loop/s + int64_t concurrent; + int64_t sqlCount; int mode; // 0: sync, 1: async - int subscribeInterval; // ms - int queryTimes; + int64_t subscribeInterval; // ms + int64_t queryTimes; int subscribeRestart; int subscribeKeepProgress; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; - int totalQueried; + int64_t totalQueried; } SpecifiedQueryInfo; typedef struct SuperQueryInfo_S { char sTblName[MAX_TB_NAME_SIZE+1]; - int queryInterval; // 0: unlimit > 0 loop/s + int64_t queryInterval; // 0: unlimit > 0 loop/s int threadCnt; int mode; // 0: sync, 1: async - int subscribeInterval; // ms + int64_t subscribeInterval; // ms int subscribeRestart; int subscribeKeepProgress; - int queryTimes; - int childTblCount; + int64_t queryTimes; + int64_t childTblCount; char childTblPrefix[MAX_TB_NAME_SIZE]; - int sqlCount; + int64_t sqlCount; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; char* childTblName; - int totalQueried; + int64_t totalQueried; } SuperQueryInfo; typedef struct SQueryMetaInfo_S { @@ -400,7 +400,7 @@ typedef struct SQueryMetaInfo_S { SpecifiedQueryInfo specifiedQueryInfo; SuperQueryInfo superQueryInfo; - int totalQueried; + int64_t totalQueried; } SQueryMetaInfo; typedef struct SThreadInfo_S { @@ -410,11 +410,11 @@ typedef struct SThreadInfo_S { uint32_t time_precision; char fp[4096]; char tb_prefix[MAX_TB_NAME_SIZE]; - int start_table_from; - int end_table_to; - int ntables; - int data_of_rate; - uint64_t start_time; + int64_t start_table_from; + int64_t end_table_to; + int64_t ntables; + int64_t data_of_rate; + int64_t start_time; char* cols; bool use_metric; SSuperTable* superTblInfo; @@ -427,7 +427,7 @@ typedef struct SThreadInfo_S { int64_t lastTs; // sample data - int samplePos; + int64_t samplePos; // statistics int64_t totalInsertRows; int64_t totalAffectedRows; @@ -440,7 +440,7 @@ typedef struct SThreadInfo_S { int64_t minDelay; // query - int querySeq; // sequence number of sql command + int64_t querySeq; // sequence number of sql command } threadInfo; #ifdef WINDOWS @@ -1001,13 +1001,18 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { break; printf("\n"); } - printf("# Insertion interval: %d\n", arguments->insert_interval); - printf("# Number of 
records per req: %d\n", arguments->num_of_RPR); - printf("# Max SQL length: %d\n", arguments->max_sql_len); + printf("# Insertion interval: %"PRId64"\n", + arguments->insert_interval); + printf("# Number of records per req: %"PRId64"\n", + arguments->num_of_RPR); + printf("# Max SQL length: %"PRId64"\n", + arguments->max_sql_len); printf("# Length of Binary: %d\n", arguments->len_of_binary); printf("# Number of Threads: %d\n", arguments->num_of_threads); - printf("# Number of Tables: %d\n", arguments->num_of_tables); - printf("# Number of Data per Table: %d\n", arguments->num_of_DPT); + printf("# Number of Tables: %"PRId64"\n", + arguments->num_of_tables); + printf("# Number of Data per Table: %"PRId64"\n", + arguments->num_of_DPT); printf("# Database name: %s\n", arguments->database); printf("# Table prefix: %s\n", arguments->tb_prefix); if (arguments->disorderRatio) { @@ -1244,9 +1249,12 @@ static int printfInsertMeta() { printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl); - printf("top insert interval: \033[33m%d\033[0m\n", g_args.insert_interval); - printf("number of records per req: \033[33m%d\033[0m\n", g_args.num_of_RPR); - printf("max sql length: \033[33m%d\033[0m\n", g_args.max_sql_len); + printf("top insert interval: \033[33m%"PRId64"\033[0m\n", + g_args.insert_interval); + printf("number of records per req: \033[33m%"PRId64"\033[0m\n", + g_args.num_of_RPR); + printf("max sql length: \033[33m%"PRId64"\033[0m\n", + g_args.max_sql_len); printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); @@ -1307,10 +1315,10 @@ static int printfInsertMeta() { } } - printf(" super table count: \033[33m%d\033[0m\n", + printf(" super table count: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTblCount); - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - printf(" super table[\033[33m%d\033[0m]:\n", j); + for (int64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { + printf(" super table[\033[33m%"PRId64"\033[0m]:\n", j); printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName); @@ -1331,7 +1339,7 @@ static int printfInsertMeta() { printf(" childTblExists: \033[33m%s\033[0m\n", "error"); } - printf(" childTblCount: \033[33m%d\033[0m\n", + printf(" childTblCount: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].childTblCount); printf(" childTblPrefix: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].childTblPrefix); @@ -1340,26 +1348,27 @@ static int printfInsertMeta() { printf(" insertMode: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].insertMode); if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) { - printf(" childTblLimit: \033[33m%d\033[0m\n", + printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].childTblLimit); } if (g_Dbs.db[i].superTbls[j].childTblOffset >= 0) { - printf(" childTblOffset: \033[33m%d\033[0m\n", + printf(" childTblOffset: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].childTblOffset); } printf(" insertRows: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertRows); - +/* if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n"); }else { printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); } - printf(" interlaceRows: \033[33m%d\033[0m\n", + */ + printf(" interlaceRows: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].interlaceRows); if (g_Dbs.db[i].superTbls[j].interlaceRows 
> 0) { - printf(" stable insert interval: \033[33m%d\033[0m\n", + printf(" stable insert interval: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertInterval); } @@ -1367,7 +1376,7 @@ static int printfInsertMeta() { g_Dbs.db[i].superTbls[j].disorderRange); printf(" disorderRatio: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRatio); - printf(" maxSqlLen: \033[33m%d\033[0m\n", + printf(" maxSqlLen: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].maxSqlLen); printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].timeStampStep); @@ -1433,8 +1442,8 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl); - fprintf(fp, "number of records per req: %d\n", g_args.num_of_RPR); - fprintf(fp, "max sql length: %d\n", g_args.max_sql_len); + fprintf(fp, "number of records per req: %"PRId64"\n", g_args.num_of_RPR); + fprintf(fp, "max sql length: %"PRId64"\n", g_args.max_sql_len); fprintf(fp, "database count: %d\n", g_Dbs.dbCount); for (int i = 0; i < g_Dbs.dbCount; i++) { @@ -1491,7 +1500,7 @@ static void printfInsertMetaToFile(FILE* fp) { } } - fprintf(fp, " super table count: %d\n", g_Dbs.db[i].superTblCount); + fprintf(fp, " super table count: %"PRId64"\n", g_Dbs.db[i].superTblCount); for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { fprintf(fp, " super table[%d]:\n", j); @@ -1513,7 +1522,7 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, " childTblExists: %s\n", "error"); } - fprintf(fp, " childTblCount: %d\n", + fprintf(fp, " childTblCount: %"PRId64"\n", g_Dbs.db[i].superTbls[j].childTblCount); fprintf(fp, " childTblPrefix: %s\n", g_Dbs.db[i].superTbls[j].childTblPrefix); @@ -1523,26 +1532,30 @@ static void printfInsertMetaToFile(FILE* fp) { g_Dbs.db[i].superTbls[j].insertMode); fprintf(fp, " insertRows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].insertRows); - fprintf(fp, " interlace rows: %d\n", + fprintf(fp, " interlace rows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].interlaceRows); if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - fprintf(fp, " stable insert interval: %d\n", + fprintf(fp, " stable insert interval: %"PRId64"\n", g_Dbs.db[i].superTbls[j].insertInterval); } - +/* if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { fprintf(fp, " multiThreadWriteOneTbl: no\n"); }else { fprintf(fp, " multiThreadWriteOneTbl: yes\n"); } - fprintf(fp, " interlaceRows: %d\n", + */ + fprintf(fp, " interlaceRows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].interlaceRows); fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange); fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio); - fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen); + fprintf(fp, " maxSqlLen: %"PRId64"\n", + g_Dbs.db[i].superTbls[j].maxSqlLen); - fprintf(fp, " timeStampStep: %"PRId64"\n", g_Dbs.db[i].superTbls[j].timeStampStep); - fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp); + fprintf(fp, " timeStampStep: %"PRId64"\n", + g_Dbs.db[i].superTbls[j].timeStampStep); + fprintf(fp, " startTimestamp: %s\n", + g_Dbs.db[i].superTbls[j].startTimestamp); fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat); fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile); fprintf(fp, " tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile); @@ -1597,21 +1610,21 @@ static void 
printfQueryMeta() { printf("\n"); printf("specified table query info: \n"); - printf("query interval: \033[33m%d ms\033[0m\n", + printf("query interval: \033[33m%"PRId64" ms\033[0m\n", g_queryInfo.specifiedQueryInfo.queryInterval); - printf("top query times:\033[33m%d\033[0m\n", g_args.query_times); - printf("concurrent: \033[33m%d\033[0m\n", + printf("top query times:\033[33m%"PRId64"\033[0m\n", g_args.query_times); + printf("concurrent: \033[33m%"PRId64"\033[0m\n", g_queryInfo.specifiedQueryInfo.concurrent); - printf("sqlCount: \033[33m%d\033[0m\n", + printf("sqlCount: \033[33m%"PRId64"\033[0m\n", g_queryInfo.specifiedQueryInfo.sqlCount); printf("specified tbl query times:\n"); - printf(" \033[33m%d\033[0m\n", + printf(" \033[33m%"PRId64"\033[0m\n", g_queryInfo.specifiedQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { printf("mod: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.mode); - printf("interval: \033[33m%d\033[0m\n", + printf("interval: \033[33m%"PRId64"\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeInterval); printf("restart: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeRestart); @@ -1619,27 +1632,27 @@ static void printfQueryMeta() { g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", + for (int64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + printf(" sql[%"PRId64"]: \033[33m%s\033[0m\n", i, g_queryInfo.specifiedQueryInfo.sql[i]); } printf("\n"); printf("super table query info:\n"); - printf("query interval: \033[33m%d\033[0m\n", + printf("query interval: \033[33m%"PRId64"\033[0m\n", g_queryInfo.superQueryInfo.queryInterval); printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.threadCnt); - printf("childTblCount: \033[33m%d\033[0m\n", + printf("childTblCount: \033[33m%"PRId64"\033[0m\n", g_queryInfo.superQueryInfo.childTblCount); printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.superQueryInfo.sTblName); - printf("stb query times:\033[33m%d\033[0m\n", + printf("stb query times:\033[33m%"PRId64"\033[0m\n", g_queryInfo.superQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.mode); - printf("interval: \033[33m%d\033[0m\n", + printf("interval: \033[33m%"PRId64"\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval); printf("restart: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeRestart); @@ -1647,7 +1660,7 @@ static void printfQueryMeta() { g_queryInfo.superQueryInfo.subscribeKeepProgress); } - printf("sqlCount: \033[33m%d\033[0m\n", + printf("sqlCount: \033[33m%"PRId64"\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { printf(" sql[%d]: \033[33m%s\033[0m\n", @@ -2278,7 +2291,7 @@ static int calcRowLen(SSuperTable* superTbls) { static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, - int* childTblCountOfSuperTbl, int limit, int offset) { + int64_t* childTblCountOfSuperTbl, int64_t limit, int64_t offset) { char command[BUFFER_SIZE] = "\0"; char limitBuf[100] = "\0"; @@ -2289,7 +2302,8 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* childTblName = *childTblNameOfSuperTbl; if (offset >= 0) { - snprintf(limitBuf, 100, " limit %d offset %d", limit, offset); + snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRId64"", + limit, offset); } 
//get all child table name use cmd: select tbname from superTblName; @@ -2354,7 +2368,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, - int* childTblCountOfSuperTbl) { + int64_t* childTblCountOfSuperTbl) { return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName, childTblNameOfSuperTbl, childTblCountOfSuperTbl, @@ -2694,7 +2708,7 @@ static int createDatabasesAndStables() { printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); } - debugPrint("%s() %d supertbl count:%d\n", + debugPrint("%s() LN%d supertbl count:%"PRId64"\n", __func__, __LINE__, g_Dbs.db[i].superTblCount); int validStbCount = 0; @@ -2753,14 +2767,15 @@ static void* createTable(void *sarg) int len = 0; int batchNum = 0; - verbosePrint("%s() LN%d: Creating table from %d to %d\n", + verbosePrint("%s() LN%d: Creating table from %"PRId64" to %"PRId64"\n", __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->end_table_to); - for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { + for (int64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { if (0 == g_Dbs.use_metric) { snprintf(buffer, buff_len, - "create table if not exists %s.%s%d %s;", + "create table if not exists %s.%s%"PRId64" %s;", pThreadInfo->db_name, g_args.tb_prefix, i, pThreadInfo->cols); @@ -2791,7 +2806,7 @@ static void* createTable(void *sarg) } len += snprintf(buffer + len, buff_len - len, - "if not exists %s.%s%d using %s.%s tags %s ", + "if not exists %s.%s%"PRId64" using %s.%s tags %s ", pThreadInfo->db_name, superTblInfo->childTblPrefix, i, pThreadInfo->db_name, superTblInfo->sTblName, tagsValBuf); @@ -2815,7 +2830,7 @@ static void* createTable(void *sarg) int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] already create %d - %d tables\n", + printf("thread[%d] already create %"PRId64" - %"PRId64" tables\n", pThreadInfo->threadID, pThreadInfo->start_table_from, i); lastPrintTime = currentPrintTime; } @@ -2833,7 +2848,7 @@ static void* createTable(void *sarg) } static int startMultiThreadCreateChildTable( - char* cols, int threads, int startFrom, int ntables, + char* cols, int threads, int64_t startFrom, int64_t ntables, char* db_name, SSuperTable* superTblInfo) { pthread_t *pids = malloc(threads * sizeof(pthread_t)); @@ -2848,16 +2863,16 @@ static int startMultiThreadCreateChildTable( threads = 1; } - int a = ntables / threads; + int64_t a = ntables / threads; if (a < 1) { threads = ntables; a = 1; } - int b = 0; + int64_t b = 0; b = ntables % threads; - for (int i = 0; i < threads; i++) { + for (int64_t i = 0; i < threads; i++) { threadInfo *t_info = infos + i; t_info->threadID = i; tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE); @@ -2949,7 +2964,7 @@ static void createChildTables() { snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); - verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n", + verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n", __func__, __LINE__, g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); startMultiThreadCreateChildTable( @@ -3077,7 +3092,7 @@ static int readSampleFromCsvFileToMem( } if (readLen > superTblInfo->lenOfOneRow) { - printf("sample row len[%d] overflow define schema len[%d], so discard this row\n", + printf("sample row len[%d] overflow define schema len[%"PRId64"], so discard this row\n", 
(int32_t)readLen, superTblInfo->lenOfOneRow); continue; } @@ -3344,9 +3359,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { // rows per table need be less than insert batch if (g_args.interlace_rows > g_args.num_of_RPR) { - printf("NOTICE: interlace rows value %d > num_of_records_per_req %d\n\n", + printf("NOTICE: interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n", g_args.interlace_rows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_req %d\n\n", + printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n", g_args.num_of_RPR); printf(" press Enter key to continue or Ctrl-C to stop."); (void)getchar(); @@ -3375,7 +3390,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (numRecPerReq && numRecPerReq->type == cJSON_Number) { g_args.num_of_RPR = numRecPerReq->valueint; } else if (!numRecPerReq) { - g_args.num_of_RPR = INT32_MAX; + g_args.num_of_RPR = INT64_MAX; } else { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); @@ -3847,9 +3862,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint; // rows per table need be less than insert batch if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) { - printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %d > num_of_records_per_req %d\n\n", + printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n", i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_req %d\n\n", + printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n", g_args.num_of_RPR); printf(" press Enter key to continue or Ctrl-C to stop."); (void)getchar(); @@ -3905,7 +3920,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (insertInterval && insertInterval->type == cJSON_Number) { g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint; } else if (!insertInterval) { - verbosePrint("%s() LN%d: stable insert interval be overrided by global %d.\n", + verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRId64".\n", __func__, __LINE__, g_args.insert_interval); g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; } else { @@ -4045,7 +4060,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (concurrent && concurrent->type == cJSON_Number) { g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint("%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", + errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.concurrent); goto PARSE_OVER; @@ -4410,8 +4425,9 @@ static void postFreeResource() { } } -static int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, - SSuperTable* superTblInfo, int* sampleUsePos) { +static int getRowDataFromSample( + char* dataBuf, int64_t maxLen, int64_t timestamp, + SSuperTable* superTblInfo, int64_t* sampleUsePos) { if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { /* int ret = readSampleFromCsvFileToMem(superTblInfo); if (0 != ret) { @@ -4436,10 +4452,10 @@ static int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, return dataLen; } -static int generateRowData(char* 
recBuf, int64_t timestamp, SSuperTable* stbInfo) { - int dataLen = 0; +static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo) { + int64_t dataLen = 0; char *pstr = recBuf; - int maxLen = MAX_DATA_SIZE; + int64_t maxLen = MAX_DATA_SIZE; dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); @@ -4506,7 +4522,7 @@ static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo return strlen(recBuf); } -static int32_t generateData(char *recBuf, char **data_type, +static int64_t generateData(char *recBuf, char **data_type, int num_of_cols, int64_t timestamp, int lenOfBinary) { memset(recBuf, 0, MAX_DATA_SIZE); char *pstr = recBuf; @@ -4572,7 +4588,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { sampleDataBuf = calloc( superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { - errorPrint("%s() LN%d, Failed to calloc %d Bytes, reason:%s\n", + errorPrint("%s() LN%d, Failed to calloc %"PRId64" Bytes, reason:%s\n", __func__, __LINE__, superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); @@ -4593,7 +4609,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { return 0; } -static int execInsert(threadInfo *pThreadInfo, char *buffer, int k) +static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, int k) { int affectedRows; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; @@ -4619,7 +4635,7 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k) return affectedRows; } -static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) +static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableSeq) { SSuperTable* superTblInfo = pThreadInfo->superTblInfo; if (superTblInfo) { @@ -4630,7 +4646,7 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); } else { - verbosePrint("[%d] %s() LN%d: from=%d count=%d seq=%d\n", + verbosePrint("[%d] %s() LN%d: from=%"PRId64" count=%"PRId64" seq=%"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->ntables, tableSeq); @@ -4638,16 +4654,16 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); } } else { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%d", + snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRId64"", g_args.tb_prefix, tableSeq); } } -static int generateDataTail( +static int64_t generateDataTail( SSuperTable* superTblInfo, - int batch, char* buffer, int remainderBufLen, int64_t insertRows, - int64_t startFrom, int64_t startTime, int *pSamplePos, int *dataLen) { - int len = 0; + int64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows, + int64_t startFrom, int64_t startTime, int64_t *pSamplePos, int64_t *dataLen) { + int64_t len = 0; int ncols_per_record = 1; // count first col ts char *pstr = buffer; @@ -4660,14 +4676,14 @@ static int generateDataTail( } } - verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch); + verbosePrint("%s() LN%d batch=%"PRId64"\n", __func__, __LINE__, batch); - int k = 0; + int64_t k = 0; for (k = 0; k < batch;) { char data[MAX_DATA_SIZE]; memset(data, 0, MAX_DATA_SIZE); - int retLen = 0; + int64_t retLen = 0; if (superTblInfo) { if (0 == strncasecmp(superTblInfo->dataSource, @@ -4681,16 +4697,16 @@ static int generateDataTail( } else if (0 == 
strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { - int randTail = superTblInfo->timeStampStep * k; + int64_t randTail = superTblInfo->timeStampStep * k; if (superTblInfo->disorderRatio > 0) { int rand_num = taosRandom() % 100; if(rand_num < superTblInfo->disorderRatio) { randTail = (randTail + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1); - debugPrint("rand data generated, back %d\n", randTail); + debugPrint("rand data generated, back %"PRId64"\n", randTail); } } - uint64_t d = startTime + int64_t d = startTime + randTail; retLen = generateRowData( data, @@ -4710,14 +4726,15 @@ static int generateDataTail( char **data_type = g_args.datatype; int lenOfBinary = g_args.len_of_binary; - int rand_num = taosRandom() % 100; - int randTail; + int64_t randTail = DEFAULT_TIMESTAMP_STEP * k; - if ((g_args.disorderRatio != 0) - && (rand_num < g_args.disorderRatio)) { - randTail = (DEFAULT_TIMESTAMP_STEP * k - + (taosRandom() % g_args.disorderRange + 1)) * (-1); - debugPrint("rand data generated, back %d\n", randTail); + if (g_args.disorderRatio != 0) { + int rand_num = taosRandom() % 100; + if (rand_num < g_args.disorderRatio) { + randTail = (randTail + (taosRandom() % g_args.disorderRange + 1)) * (-1); + + debugPrint("rand data generated, back %"PRId64"\n", randTail); + } } else { randTail = DEFAULT_TIMESTAMP_STEP * k; } @@ -4736,7 +4753,7 @@ static int generateDataTail( remainderBufLen -= retLen; } - verbosePrint("%s() LN%d len=%d k=%d \nbuffer=%s\n", + verbosePrint("%s() LN%d len=%"PRId64" k=%"PRId64" \nbuffer=%s\n", __func__, __LINE__, len, k, buffer); startFrom ++; @@ -4817,13 +4834,13 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, return len; } -static int generateInterlaceDataBuffer( - char *tableName, int batchPerTbl, int i, int batchPerTblTimes, - int32_t tableSeq, +static int64_t generateInterlaceDataBuffer( + char *tableName, int64_t batchPerTbl, int64_t i, int64_t batchPerTblTimes, + int64_t tableSeq, threadInfo *pThreadInfo, char *buffer, int64_t insertRows, int64_t startTime, - int *pRemainderBufLen) + int64_t *pRemainderBufLen) { assert(buffer); char *pstr = buffer; @@ -4836,15 +4853,15 @@ static int generateInterlaceDataBuffer( return 0; } // generate data buffer - verbosePrint("[%d] %s() LN%d i=%d buffer:\n%s\n", + verbosePrint("[%d] %s() LN%d i=%"PRId64" buffer:\n%s\n", pThreadInfo->threadID, __func__, __LINE__, i, buffer); pstr += headLen; *pRemainderBufLen -= headLen; - int dataLen = 0; + int64_t dataLen = 0; - verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n", + verbosePrint("[%d] %s() LN%d i=%"PRId64" batchPerTblTimes=%"PRId64" batchPerTbl = %"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, batchPerTbl); @@ -4856,7 +4873,7 @@ static int generateInterlaceDataBuffer( startTime = 1500000000000; } - int k = generateDataTail( + int64_t k = generateDataTail( superTblInfo, batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, startTime, @@ -4866,7 +4883,7 @@ static int generateInterlaceDataBuffer( pstr += dataLen; *pRemainderBufLen -= dataLen; } else { - debugPrint("%s() LN%d, generated data tail: %d, not equal batch per table: %d\n", + debugPrint("%s() LN%d, generated data tail: %"PRId64", not equal batch per table: %"PRId64"\n", __func__, __LINE__, k, batchPerTbl); pstr -= headLen; pstr[0] = '\0'; @@ -4878,11 +4895,11 @@ static int generateInterlaceDataBuffer( static int generateProgressiveDataBuffer( char *tableName, - int32_t tableSeq, + int64_t tableSeq, threadInfo *pThreadInfo, char 
*buffer, int64_t insertRows, - int64_t startFrom, int64_t startTime, int *pSamplePos, - int *pRemainderBufLen) + int64_t startFrom, int64_t startTime, int64_t *pSamplePos, + int64_t *pRemainderBufLen) { SSuperTable* superTblInfo = pThreadInfo->superTblInfo; @@ -4899,11 +4916,11 @@ static int generateProgressiveDataBuffer( assert(buffer != NULL); char *pstr = buffer; - int k = 0; + int64_t k = 0; memset(buffer, 0, *pRemainderBufLen); - int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo, + int64_t headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo, buffer, *pRemainderBufLen); if (headLen <= 0) { @@ -4912,7 +4929,7 @@ static int generateProgressiveDataBuffer( pstr += headLen; *pRemainderBufLen -= headLen; - int dataLen; + int64_t dataLen; k = generateDataTail(superTblInfo, g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom, startTime, @@ -4926,7 +4943,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__); int64_t insertRows; - int interlaceRows; + int64_t interlaceRows; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; @@ -4961,10 +4978,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { // TODO: prompt tbl count multple interlace rows and batch // - int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + int64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; char* buffer = calloc(maxSqlLen, 1); if (NULL == buffer) { - errorPrint( "%s() LN%d, Failed to alloc %d Bytes, reason:%s\n", + errorPrint( "%s() LN%d, Failed to alloc %"PRId64" Bytes, reason:%s\n", __func__, __LINE__, maxSqlLen, strerror(errno)); return NULL; } @@ -4978,16 +4995,16 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; - uint64_t st = 0; - uint64_t et = 0xffffffff; + int64_t st = 0; + int64_t et = 0xffffffff; int64_t lastPrintTime = taosGetTimestampMs(); int64_t startTs = taosGetTimestampMs(); int64_t endTs; - int tableSeq = pThreadInfo->start_table_from; + int64_t tableSeq = pThreadInfo->start_table_from; - debugPrint("[%d] %s() LN%d: start_table_from=%d ntables=%d insertRows=%"PRId64"\n", + debugPrint("[%d] %s() LN%d: start_table_from=%"PRId64" ntables=%"PRId64" insertRows=%"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); @@ -4995,9 +5012,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { assert(pThreadInfo->ntables > 0); - int batchPerTbl = interlaceRows; + int64_t batchPerTbl = interlaceRows; - int batchPerTblTimes; + int64_t batchPerTblTimes; if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { batchPerTblTimes = g_args.num_of_RPR / interlaceRows; @@ -5005,9 +5022,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { batchPerTblTimes = 1; } - int generatedRecPerTbl = 0; + int64_t generatedRecPerTbl = 0; bool flagSleep = true; - int sleepTimeTotal = 0; + int64_t sleepTimeTotal = 0; char *strInsertInto = "insert into "; int nInsertBufLen = strlen(strInsertInto); @@ -5019,7 +5036,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } // generate data memset(buffer, 0, maxSqlLen); - int remainderBufLen = maxSqlLen; + int64_t remainderBufLen = maxSqlLen; char *pstr = buffer; @@ -5027,9 +5044,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pstr += len; remainderBufLen -= len; - int recOfBatch = 0; + int64_t recOfBatch = 0; - for 
(int i = 0; i < batchPerTblTimes; i ++) { + for (int64_t i = 0; i < batchPerTblTimes; i ++) { getTableName(tableName, pThreadInfo, tableSeq); if (0 == strlen(tableName)) { errorPrint("[%d] %s() LN%d, getTableName return null\n", @@ -5038,8 +5055,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { return NULL; } - int oldRemainderLen = remainderBufLen; - int generated = generateInterlaceDataBuffer( + int64_t oldRemainderLen = remainderBufLen; + int64_t generated = generateInterlaceDataBuffer( tableName, batchPerTbl, i, batchPerTblTimes, tableSeq, pThreadInfo, pstr, @@ -5048,7 +5065,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { &remainderBufLen); if (generated < 0) { - debugPrint("[%d] %s() LN%d, generated data is %d\n", + debugPrint("[%d] %s() LN%d, generated data is %"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, generated); goto free_and_statistics_interlace; } else if (generated == 0) { @@ -5060,7 +5077,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pstr += (oldRemainderLen - remainderBufLen); // startTime += batchPerTbl * superTblInfo->timeStampStep; pThreadInfo->totalInsertRows += batchPerTbl; - verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", + verbosePrint("[%d] %s() LN%d batchPerTbl=%"PRId64" recOfBatch=%"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, batchPerTbl, recOfBatch); @@ -5086,7 +5103,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } } - verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%d insertRows=%"PRId64"\n", + verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, generatedRecPerTbl, insertRows); @@ -5094,7 +5111,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { break; } - verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRId64"\n", + verbosePrint("[%d] %s() LN%d recOfBatch=%"PRId64" totalInsertRows=%"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, recOfBatch, pThreadInfo->totalInsertRows); verbosePrint("[%d] %s() LN%d, buffer=%s\n", @@ -5102,7 +5119,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { startTs = taosGetTimestampMs(); - int affectedRows = execInsert(pThreadInfo, buffer, recOfBatch); + int64_t affectedRows = execInsert(pThreadInfo, buffer, recOfBatch); endTs = taosGetTimestampMs(); int64_t delay = endTs - startTs; @@ -5114,10 +5131,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->cntDelay++; pThreadInfo->totalDelay += delay; - verbosePrint("[%d] %s() LN%d affectedRows=%d\n", pThreadInfo->threadID, + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, affectedRows); if ((affectedRows < 0) || (recOfBatch != affectedRows)) { - errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %d\n%s\n", + errorPrint("[%d] %s() LN%d execInsert insert %"PRId64", affected rows: %"PRId64"\n%s\n", pThreadInfo->threadID, __func__, __LINE__, recOfBatch, affectedRows, buffer); goto free_and_statistics_interlace; @@ -5196,7 +5214,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->samplePos = 0; - for (uint32_t tableSeq = + for (int64_t tableSeq = pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to; tableSeq ++) { int64_t start_time = pThreadInfo->start_time; @@ -5213,11 +5231,11 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { char tableName[TSDB_TABLE_NAME_LEN]; getTableName(tableName, pThreadInfo, tableSeq); - verbosePrint("%s() 
LN%d: tid=%d seq=%d tableName=%s\n", + verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", __func__, __LINE__, pThreadInfo->threadID, tableSeq, tableName); - int remainderBufLen = maxSqlLen; + int64_t remainderBufLen = maxSqlLen; char *pstr = buffer; int nInsertBufLen = strlen("insert into "); @@ -5241,7 +5259,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { startTs = taosGetTimestampMs(); - int affectedRows = execInsert(pThreadInfo, buffer, generated); + int64_t affectedRows = execInsert(pThreadInfo, buffer, generated); endTs = taosGetTimestampMs(); int64_t delay = endTs - startTs; @@ -5287,7 +5305,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo && (0 == strncasecmp( superTblInfo->dataSource, "sample", strlen("sample")))) { - verbosePrint("%s() LN%d samplePos=%d\n", + verbosePrint("%s() LN%d samplePos=%"PRId64"\n", __func__, __LINE__, pThreadInfo->samplePos); } } @@ -5346,7 +5364,8 @@ static void callBack(void *param, TAOS_RES *res, int code) { char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen); char data[MAX_DATA_SIZE]; char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s.%s%d values", pThreadInfo->db_name, pThreadInfo->tb_prefix, + pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values", + pThreadInfo->db_name, pThreadInfo->tb_prefix, pThreadInfo->start_table_from); // if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { if (pThreadInfo->counter >= g_args.num_of_RPR) { @@ -5544,7 +5563,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(-1); } - int childTblCount; + int64_t childTblCount; getChildNameOfSuperTableWithLimitAndOffset( taos, db_name, superTblInfo->sTblName, @@ -5595,18 +5614,19 @@ static void startMultiThreadInsertData(int threads, char* db_name, t_info->taos = NULL; } - if ((NULL == superTblInfo) +/* if ((NULL == superTblInfo) || (0 == superTblInfo->multiThreadWriteOneTbl)) { + */ t_info->start_table_from = startFrom; t_info->ntables = iend_table_to = i < b ? 
startFrom + a : startFrom + a - 1; startFrom = t_info->end_table_to + 1; - } else { +/* } else { t_info->start_table_from = 0; t_info->ntables = superTblInfo->childTblCount; t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint(); } - +*/ tsem_init(&(t_info->lock_sem), 0, 0); if (SYNC == g_Dbs.queryMode) { pthread_create(pids + i, NULL, syncWrite, t_info); @@ -6108,7 +6128,7 @@ static void *superTableQuery(void *sarg) { } } et = taosGetTimestampMs(); - printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", + printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRId64" - %"PRId64"] once queries duration:%.4fs\n\n", taosGetSelfPthreadId(), pThreadInfo->start_table_from, pThreadInfo->end_table_to, @@ -6524,7 +6544,7 @@ static int subscribeTestProcess() { //==== create sub threads for query from super table if ((g_queryInfo.specifiedQueryInfo.sqlCount <= 0) || (g_queryInfo.specifiedQueryInfo.concurrent <= 0)) { - errorPrint("%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", + errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.concurrent); exit(-1); From 6e985b443c69d8468e522fa5f2a1dd67253b7471 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 28 Apr 2021 10:53:10 +0800 Subject: [PATCH 013/140] fix bug --- src/client/inc/tscUtil.h | 3 ++- src/client/src/tscSQLParser.c | 12 +++++++++-- src/client/src/tscUtil.c | 31 +++++++++++++++++++++++++-- tests/script/general/parser/union.sim | 30 +++++++++++++++++++++++++- 4 files changed, 70 insertions(+), 6 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 0eda49b1f4..481a0a4d22 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -168,7 +168,8 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo); static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; } -int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2); +int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize); +int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2); void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 751c4bf39f..624a4996cd 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -642,17 +642,25 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { // set the command/global limit parameters from the first subclause to the sqlcmd object SQueryInfo* pQueryInfo1 = tscGetQueryInfoDetail(pCmd, 0); pCmd->command = pQueryInfo1->command; - + int32_t diffSize = 0; + // if there is only one element, the limit of clause is the limit of global result. 
for (int32_t i = 1; i < pCmd->numOfClause; ++i) { SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pCmd, i); - int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo); + int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo, &diffSize); if (ret != 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } } + if (diffSize) { + for (int32_t i = 1; i < pCmd->numOfClause; ++i) { + SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pCmd, i); + tscFieldInfoSetSize(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo); + } + } + pCmd->parseFinished = 1; return TSDB_CODE_SUCCESS; // do not build query message here } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index fa9e53b0a6..e8d7d5d03c 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1098,7 +1098,7 @@ int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) { return pInfo->pSqlExpr->offset; } -int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) { +int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize) { assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL); if (pFieldInfo1->numOfOutput != pFieldInfo2->numOfOutput) { @@ -1110,15 +1110,36 @@ int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFi TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i); if (pField1->type != pField2->type || - pField1->bytes != pField2->bytes || strcasecmp(pField1->name, pField2->name) != 0) { return 1; } + + if (pField1->bytes != pField2->bytes) { + *diffSize = 1; + + if (pField2->bytes > pField1->bytes) { + pField1->bytes = pField2->bytes; + } + } } return 0; } +int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) { + assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL); + + for (int32_t i = 0; i < pFieldInfo1->numOfOutput; ++i) { + TAOS_FIELD* pField1 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo1, i); + TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i); + + pField2->bytes = pField1->bytes; + } + + return 0; +} + + int32_t tscGetResRowLength(SArray* pExprList) { size_t num = taosArrayGetSize(pExprList); if (num == 0) { @@ -2682,7 +2703,13 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) { //backup the total number of result first int64_t num = pRes->numOfTotal + pRes->numOfClauseTotal; + + + // DON't free final since it may be recoreded and used later in APP + TAOS_FIELD* finalBk = pRes->final; + pRes->final = NULL; tscFreeSqlResult(pSql); + pRes->final = finalBk; pRes->numOfTotal = num; diff --git a/tests/script/general/parser/union.sim b/tests/script/general/parser/union.sim index d50daea656..a02c626b7d 100644 --- a/tests/script/general/parser/union.sim +++ b/tests/script/general/parser/union.sim @@ -139,6 +139,34 @@ if $data10 != 1 then return -1 endi +sql select 'ab' as options from union_tb1 limit 1 union all select 'dd' as options from union_tb0 limit 1; +if $rows != 2 then + return -1 +endi + +if $data00 != @ab@ then + return -1 +endi + +if $data10 != @dd@ then + return -1 +endi + + +sql select 'ab' as options from union_tb1 limit 1 union all select '1234567' as options from union_tb0 limit 1; +if $rows != 2 then + return -1 +endi + +if $data00 != @ab@ then + return -1 +endi + +if $data10 != @1234567@ then + return -1 +endi + + # mixed order sql select ts, c1 from union_tb1 order by ts asc limit 10 union all select ts, c1 from 
union_tb0 order by ts desc limit 2 union all select ts, c1 from union_tb2 order by ts asc limit 10 if $rows != 22 then @@ -425,4 +453,4 @@ sql_error show tables union all show tables sql_error show stables union all show stables sql_error show databases union all show databases -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 5f2f20cf1a1c76be386def7aeafe5621a74a5563 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 27 Apr 2021 14:49:41 +0800 Subject: [PATCH 014/140] [TD-3948]: insert syntax - specify columns after supertable tags --- src/client/src/tscParseInsert.c | 36 ++++++++++++++++++++++++ tests/script/general/parser/function.sim | 13 +++++++++ 2 files changed, 49 insertions(+) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index ae6e2430b7..79ceb81cbc 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -928,6 +928,42 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z); } + /* parse columns after super table tags values. + * insert into table_name using super_table(tag_name1, tag_name2) tags(tag_val1, tag_val2) + * (normal_col1, normal_col2) values(normal_col1_val, normal_col2_val); + * */ + index = 0; + sToken = tStrGetToken(sql, &index, false); + sql += index; + int numOfColsAfterTags = 0; + if (sToken.type == TK_LP) { + if (*boundColumn != NULL) { + return tscSQLSyntaxErrMsg(pCmd->payload, "bind columns again", sToken.z); + } else { + *boundColumn = &sToken.z[0]; + } + + while (1) { + index = 0; + sToken = tStrGetToken(sql, &index, false); + + if (sToken.type == TK_RP) { + break; + } + + sql += index; + ++numOfColsAfterTags; + } + + if (numOfColsAfterTags == 0 && (*boundColumn) != NULL) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + sToken = tStrGetToken(sql, &index, false); + } + + sql = sToken.z; + if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) { return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr); } diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index 9165b7e98e..65058333fb 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -393,6 +393,19 @@ if $rows != 24 then return -1 endi +print ========================> TD-3948 +sql drop table if exists meters +sql create stable meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int); +sql_error insert into td3948Err1(phase) using meters tags ("Beijng.Chaoyang", 2) (ts, current) values (now, 10.2); +sql_error insert into td3948Err2(phase, voltage) using meters tags ("Beijng.Chaoyang", 2) (ts, current) values (now, 10.2); +sql_error insert into td3948Err3(phase, current) using meters tags ("Beijng.Chaoyang", 2) (ts, current) values (now, 10.2); +sql insert into td3948 using meters tags ("Beijng.Chaoyang", 2) (ts, current) values (now, 10.2); +sql select count(ts) from td3948; +if $rows != 1 then + print expect 1, actual:$rows + return -1 +endi + print ========================> TD-2740 sql drop table if exists m1; sql create table m1(ts timestamp, k int) tags(a int); From b13e65bd93f3190b8ffeb1a3e3b033b51dad27a9 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 28 Apr 2021 14:27:15 +0800 Subject: [PATCH 015/140] fix bug --- src/client/src/tscSQLParser.c | 18 +++++++++++++++++- 
tests/script/general/parser/constCol.sim | 7 +++++++ tests/script/general/parser/union.sim | 12 +++++++++++- 3 files changed, 35 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 751c4bf39f..5ad372f633 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1606,6 +1606,22 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) { return false; } +static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) { + size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList); + for (int32_t i = 0; i < numOfExprs; ++i) { + SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, i); + + if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) { + continue; + } + + return true; + } + + return false; +} + + int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectList, bool isSTable, bool joinQuery, bool timeWindowQuery) { assert(pSelectList != NULL && pCmd != NULL); const char* msg1 = "too many columns in selection clause"; @@ -1670,7 +1686,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectLis // there is only one user-defined column in the final result field, add the timestamp column. size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList); - if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) { + if ((numOfSrcCols <= 0 || !hasNoneUserDefineExpr(pQueryInfo)) && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) { addPrimaryTsColIntoResult(pQueryInfo); } diff --git a/tests/script/general/parser/constCol.sim b/tests/script/general/parser/constCol.sim index 716d36e82b..66523517be 100644 --- a/tests/script/general/parser/constCol.sim +++ b/tests/script/general/parser/constCol.sim @@ -358,6 +358,13 @@ if $data00 != 0.300000000 then return -1 endi +print =============================> td-3996 +sql select 'abc' as res from t1 where f1 < 0 +if $rows != 0 then + return -1 +endi + + print ======================udc with normal column group by sql_error select from t1 diff --git a/tests/script/general/parser/union.sim b/tests/script/general/parser/union.sim index d50daea656..6d3446d1c8 100644 --- a/tests/script/general/parser/union.sim +++ b/tests/script/general/parser/union.sim @@ -421,8 +421,18 @@ if $data10 != @union_db0@ then return -1 endi +sql select 'aaa' as option from union_tb1 where c1 < 0 limit 1 union all select 'bbb' as option from union_tb0 limit 1 +if $rows != 1 then + return -1 +endi + +if $data00 != @bbb@ then + return -1 +endi + + sql_error show tables union all show tables sql_error show stables union all show stables sql_error show databases union all show databases -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 879bacd8e10153efa01636d5d5ff10c760faaa39 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 29 Apr 2021 11:42:44 +0800 Subject: [PATCH 016/140] Hotfix/sangshuduo/td 3985 taosdemo timestamp step overflow (#5960) * [TD-3985]: taosdemo timestamp step overflow. * fix specified subscribe test. * replace potential overflow of int32 to int64 * replace potential int32 overflow variables to int64. * replace potential int32 overflow variables to int64. * change max of int16 to int64 after type changed. 
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index e6d7fb6a09..42992b782f 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2898,7 +2898,7 @@ static int startMultiThreadCreateChildTable( startFrom = t_info->end_table_to + 1; t_info->use_metric = true; t_info->cols = cols; - t_info->minDelay = INT16_MAX; + t_info->minDelay = INT64_MAX; pthread_create(pids + i, NULL, createTable, t_info); } @@ -5596,7 +5596,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, t_info->superTblInfo = superTblInfo; t_info->start_time = start_time; - t_info->minDelay = INT16_MAX; + t_info->minDelay = INT64_MAX; if ((NULL == superTblInfo) || (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) { @@ -5641,7 +5641,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, int64_t totalDelay = 0; int64_t maxDelay = 0; - int64_t minDelay = INT16_MAX; + int64_t minDelay = INT64_MAX; int64_t cntDelay = 1; double avgDelay = 0; From 2617bf2b1c9a99ed81fcf8cca60145648b8a2a3d Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 29 Apr 2021 14:24:11 +0800 Subject: [PATCH 017/140] [TD-4001]: taosdemo restful segfault. (#5962) * [TD-4001]: taosdemo restful segfault. due to gethostbyname() is not thread-safe. * fix uninitialized variable when invalid mode. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 91 ++++++++++++++++++++++++------------- 1 file changed, 59 insertions(+), 32 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 42992b782f..12684c63bc 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -241,7 +241,7 @@ typedef struct SSuperTable_S { int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table char childTblPrefix[MAX_TB_NAME_SIZE]; char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample - char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful + char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest int64_t childTblLimit; int64_t childTblOffset; @@ -334,6 +334,8 @@ typedef struct SDataBase_S { typedef struct SDbs_S { char cfgDir[MAX_FILE_NAME_LEN+1]; char host[MAX_HOSTNAME_SIZE]; + struct sockaddr_in serv_addr; + uint16_t port; char user[MAX_USERNAME_SIZE]; char password[MAX_PASSWORD_SIZE]; @@ -393,10 +395,11 @@ typedef struct SQueryMetaInfo_S { char cfgDir[MAX_FILE_NAME_LEN+1]; char host[MAX_HOSTNAME_SIZE]; uint16_t port; + struct sockaddr_in serv_addr; char user[MAX_USERNAME_SIZE]; char password[MAX_PASSWORD_SIZE]; char dbName[MAX_DB_NAME_SIZE+1]; - char queryMode[MAX_TB_NAME_SIZE]; // taosc, restful + char queryMode[MAX_TB_NAME_SIZE]; // taosc, rest SpecifiedQueryInfo specifiedQueryInfo; SuperQueryInfo superQueryInfo; @@ -1955,14 +1958,12 @@ static void printfQuerySystemInfo(TAOS * taos) { free(dbInfos); } -static int postProceSql(char* host, uint16_t port, char* sqlstr) +static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port, char* sqlstr) { char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s"; char *url = "/rest/sql"; - struct hostent *server; - struct sockaddr_in serv_addr; int bytes, sent, received, req_str_len, resp_len; char *request_buf; char response_buf[RESP_BUF_LEN]; @@ -2011,27 +2012,7 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) 
ERROR_EXIT("ERROR opening socket"); } - server = gethostbyname(host); - if (server == NULL) { - free(request_buf); - ERROR_EXIT("ERROR, no such host"); - } - - debugPrint("h_name: %s\nh_addretype: %s\nh_length: %d\n", - server->h_name, - (server->h_addrtype == AF_INET)?"ipv4":"ipv6", - server->h_length); - - memset(&serv_addr, 0, sizeof(serv_addr)); - serv_addr.sin_family = AF_INET; - serv_addr.sin_port = htons(rest_port); -#ifdef WINDOWS - serv_addr.sin_addr.s_addr = inet_addr(host); -#else - memcpy(&serv_addr.sin_addr.s_addr,server->h_addr,server->h_length); -#endif - - int retConn = connect(sockfd,(struct sockaddr *)&serv_addr,sizeof(serv_addr)); + int retConn = connect(sockfd, (struct sockaddr *)pServAddr, sizeof(struct sockaddr)); debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn); if (retConn < 0) { free(request_buf); @@ -3724,7 +3705,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } - cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , restful + cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest if (insertMode && insertMode->type == cJSON_String && insertMode->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].insertMode, @@ -4619,14 +4600,18 @@ static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, int k) if (superTblInfo) { if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) { affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false); - } else { - if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) { + } else if (0 == strncasecmp(superTblInfo->insertMode, "rest", strlen("rest"))) { + if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port, buffer)) { affectedRows = -1; printf("========restful return fail, threadID[%d]\n", pThreadInfo->threadID); } else { affectedRows = k; } + } else { + errorPrint("%s() LN%d: unknown insert mode: %s\n", + __func__, __LINE__, superTblInfo->insertMode); + affectedRows = 0; } } else { affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false); @@ -5425,6 +5410,32 @@ static void *asyncWrite(void *sarg) { return NULL; } +static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *serv_addr) +{ + uint16_t rest_port = port + TSDB_PORT_HTTP; + struct hostent *server = gethostbyname(host); + if ((server == NULL) || (server->h_addr == NULL)) { + errorPrint("%s", "ERROR, no such host"); + return -1; + } + + debugPrint("h_name: %s\nh_addr=%p\nh_addretype: %s\nh_length: %d\n", + server->h_name, + server->h_addr, + (server->h_addrtype == AF_INET)?"ipv4":"ipv6", + server->h_length); + + memset(serv_addr, 0, sizeof(struct sockaddr_in)); + serv_addr->sin_family = AF_INET; + serv_addr->sin_port = htons(rest_port); +#ifdef WINDOWS + serv_addr->sin_addr.s_addr = inet_addr(host); +#else + memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length); +#endif + return 0; +} + static void startMultiThreadInsertData(int threads, char* db_name, char* precision,SSuperTable* superTblInfo) { @@ -5588,6 +5599,12 @@ static void startMultiThreadInsertData(int threads, char* db_name, b = ntables % threads; } + if ((superTblInfo) + && (0 == strncasecmp(superTblInfo->insertMode, "rest", strlen("rest")))) { + if (convertHostToServAddr(g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) + exit(-1); + } + for (int i = 0; i < threads; i++) { threadInfo *t_info = infos + i; t_info->threadID = i; @@ -5996,7 +6013,7 @@ static void *specifiedTableQuery(void *sarg) { st = 
taosGetTimestampMs(); - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { int64_t t1 = taosGetTimestampMs(); char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { @@ -6009,9 +6026,9 @@ static void *specifiedTableQuery(void *sarg) { int64_t t2 = taosGetTimestampMs(); printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %10.3f s\n", taosGetSelfPthreadId(), (t2 - t1)/1000.0); - } else { + } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { int64_t t1 = taosGetTimestampMs(); - int retCode = postProceSql(g_queryInfo.host, + int retCode = postProceSql(g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port, g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); if (0 != retCode) { @@ -6022,6 +6039,10 @@ static void *specifiedTableQuery(void *sarg) { printf("=[restful] thread[%"PRId64"] complete one sql, Spent %10.3f s\n", taosGetSelfPthreadId(), (t2 - t1)/1000.0); + } else { + errorPrint("%s() LN%d, unknown query mode: %s\n", + __func__, __LINE__, g_queryInfo.queryMode); + return NULL; } totalQueried ++; g_queryInfo.specifiedQueryInfo.totalQueried ++; @@ -6171,6 +6192,12 @@ static int queryTestProcess() { printfQuerySystemInfo(taos); + if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { + if (convertHostToServAddr( + g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0) + exit(-1); + } + pthread_t *pids = NULL; threadInfo *infos = NULL; //==== create sub threads for query from specify table From 2742159fde19020be9dec4d28b94aecaa4ffbdf7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 29 Apr 2021 23:16:33 +0800 Subject: [PATCH 018/140] [td-3967]1). fix escape char error in sql. 2) refactor. 
--- src/client/inc/tschemautil.h | 2 +- src/client/src/tscParseInsert.c | 33 ++++++++++++------- src/client/src/tscPrepare.c | 2 +- src/client/src/tscSQLParser.c | 14 ++++---- src/client/src/tscSql.c | 2 +- src/client/src/tscUtil.c | 8 ++--- src/client/tests/timeParseTest.cpp | 2 +- src/common/inc/tname.h | 2 +- src/common/inc/tvariant.h | 2 +- src/common/src/tname.c | 2 +- src/common/src/tvariant.c | 14 ++++---- src/os/tests/test.cpp | 2 +- src/query/inc/qSqlparser.h | 2 +- src/query/src/qSqlParser.c | 6 ++-- src/query/src/sql.c | 6 ++-- src/query/tests/tsBufTest.cpp | 4 +-- src/query/tests/unitTest.cpp | 14 ++++---- src/util/inc/{tstoken.h => ttoken.h} | 29 ++++++++-------- .../qTokenizer.c => util/src/ttokenizer.c} | 32 +++++++++++++----- 19 files changed, 102 insertions(+), 76 deletions(-) rename src/util/inc/{tstoken.h => ttoken.h} (93%) rename src/{query/src/qTokenizer.c => util/src/ttokenizer.c} (92%) diff --git a/src/client/inc/tschemautil.h b/src/client/inc/tschemautil.h index a9dcd230a6..0026a27e19 100644 --- a/src/client/inc/tschemautil.h +++ b/src/client/inc/tschemautil.h @@ -21,8 +21,8 @@ extern "C" { #endif #include "taosmsg.h" -#include "tstoken.h" #include "tsclient.h" +#include "ttoken.h" /** * get the number of tags of this table diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 79ceb81cbc..a9d8afd09e 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -29,8 +29,7 @@ #include "taosdef.h" #include "tscLog.h" -#include "tscSubquery.h" -#include "tstoken.h" +#include "ttoken.h" #include "tdataformat.h" @@ -463,23 +462,33 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1 // Remove quotation marks if (TK_STRING == sToken.type) { // delete escape character: \\, \', \" - char delim = sToken.z[0]; +// char delim = sToken.z[0]; int32_t cnt = 0; int32_t j = 0; for (uint32_t k = 1; k < sToken.n - 1; ++k) { - if (sToken.z[k] == delim || sToken.z[k] == '\\') { - if (sToken.z[k + 1] == delim) { - cnt++; - tmpTokenBuf[j] = sToken.z[k + 1]; - j++; - k++; - continue; - } + if (sToken.z[k] == '\\') { + cnt++; + + tmpTokenBuf[j] = GET_ESCAPE_CHAR(sToken.z[k+1]); + j++; + k++; + continue; } +// if (sToken.z[k] == delim || sToken.z[k] == '\\') { +// if (sToken.z[k + 1] == delim) { +// cnt++; +// tmpTokenBuf[j] = sToken.z[k + 1]; +// j++;s +// k++; +// continue; +// } +// } + tmpTokenBuf[j] = sToken.z[k]; j++; } + tmpTokenBuf[j] = 0; sToken.z = tmpTokenBuf; sToken.n -= 2 + cnt; @@ -1005,7 +1014,7 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken) { psTblToken->n = len; psTblToken->type = TK_ID; - tSQLGetToken(psTblToken->z, &psTblToken->type); + tGetToken(psTblToken->z, &psTblToken->type); return tscValidateName(psTblToken); } diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 4efaf7c2b5..73e39b471b 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -151,7 +151,7 @@ static int normalStmtPrepare(STscStmt* stmt) { while (sql[i] != 0) { SStrToken token = {0}; - token.n = tSQLGetToken(sql + i, &token.type); + token.n = tGetToken(sql + i, &token.type); if (token.type == TK_QUESTION) { sql[i] = 0; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4cf1423c43..e33b8ef33a 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -21,19 +21,19 @@ #endif // __APPLE__ #include "os.h" -#include "ttype.h" -#include "texpr.h" #include "taos.h" #include "taosmsg.h" #include 
"tcompare.h" +#include "texpr.h" #include "tname.h" #include "tscLog.h" #include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" -#include "tstoken.h" #include "tstrbuild.h" +#include "ttoken.h" #include "ttokendef.h" +#include "ttype.h" #include "qUtil.h" #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" @@ -4629,7 +4629,7 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t } } else { SStrToken token = {.z = pRight->value.pz, .n = pRight->value.nLen, .type = TK_ID}; - int32_t len = tSQLGetToken(pRight->value.pz, &token.type); + int32_t len = tGetToken(pRight->value.pz, &token.type); if ((token.type != TK_INTEGER && token.type != TK_FLOAT) || len != pRight->value.nLen) { return TSDB_CODE_TSC_INVALID_SQL; @@ -5498,13 +5498,13 @@ int32_t validateLocalConfig(SMiscInfo* pOptions) { } int32_t validateColumnName(char* name) { - bool ret = isKeyWord(name, (int32_t)strlen(name)); + bool ret = taosIsKeyWordToken(name, (int32_t)strlen(name)); if (ret) { return TSDB_CODE_TSC_INVALID_SQL; } SStrToken token = {.z = name}; - token.n = tSQLGetToken(name, &token.type); + token.n = tGetToken(name, &token.type); if (token.type != TK_STRING && token.type != TK_ID) { return TSDB_CODE_TSC_INVALID_SQL; @@ -5515,7 +5515,7 @@ int32_t validateColumnName(char* name) { strntolower(token.z, token.z, token.n); token.n = (uint32_t)strtrim(token.z); - int32_t k = tSQLGetToken(token.z, &token.type); + int32_t k = tGetToken(token.z, &token.type); if (k != token.n) { return TSDB_CODE_TSC_INVALID_SQL; } diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 36d5cec06f..02cd2bd5ef 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -962,7 +962,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t len = (int32_t)strtrim(tblName); SStrToken sToken = {.n = len, .type = TK_ID, .z = tblName}; - tSQLGetToken(tblName, &sToken.type); + tGetToken(tblName, &sToken.type); // Check if the table name available or not if (tscValidateName(&sToken) != TSDB_CODE_SUCCESS) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index e8d7d5d03c..a9826f2fdf 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1519,7 +1519,7 @@ void tscColumnListDestroy(SArray* pColumnList) { static int32_t validateQuoteToken(SStrToken* pToken) { tscDequoteAndTrimToken(pToken); - int32_t k = tSQLGetToken(pToken->z, &pToken->type); + int32_t k = tGetToken(pToken->z, &pToken->type); if (pToken->type == TK_STRING) { return tscValidateName(pToken); @@ -1587,7 +1587,7 @@ int32_t tscValidateName(SStrToken* pToken) { tscStrToLower(pToken->z, pToken->n); //pToken->n = (uint32_t)strtrim(pToken->z); - int len = tSQLGetToken(pToken->z, &pToken->type); + int len = tGetToken(pToken->z, &pToken->type); // single token, validate it if (len == pToken->n) { @@ -1613,7 +1613,7 @@ int32_t tscValidateName(SStrToken* pToken) { pToken->n = (uint32_t)strtrim(pToken->z); } - pToken->n = tSQLGetToken(pToken->z, &pToken->type); + pToken->n = tGetToken(pToken->z, &pToken->type); if (pToken->z[pToken->n] != TS_PATH_DELIMITER[0]) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -1630,7 +1630,7 @@ int32_t tscValidateName(SStrToken* pToken) { pToken->z = sep + 1; pToken->n = (uint32_t)(oldLen - (sep - pStr) - 1); - int32_t len = tSQLGetToken(pToken->z, &pToken->type); + int32_t len = tGetToken(pToken->z, &pToken->type); if (len != pToken->n || (pToken->type != TK_STRING && pToken->type != TK_ID)) { return TSDB_CODE_TSC_INVALID_SQL; } diff --git 
a/src/client/tests/timeParseTest.cpp b/src/client/tests/timeParseTest.cpp index d7325430cd..ba06a6b9aa 100644 --- a/src/client/tests/timeParseTest.cpp +++ b/src/client/tests/timeParseTest.cpp @@ -4,7 +4,7 @@ #include #include "taos.h" -#include "tstoken.h" +#include "ttoken.h" #include "tutil.h" int main(int argc, char** argv) { diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h index 465b298973..cacd6d2ae7 100644 --- a/src/common/inc/tname.h +++ b/src/common/inc/tname.h @@ -18,7 +18,7 @@ #include "os.h" #include "taosmsg.h" -#include "tstoken.h" +#include "ttoken.h" #include "tvariant.h" typedef struct SDataStatis { diff --git a/src/common/inc/tvariant.h b/src/common/inc/tvariant.h index f8f715c6ca..21b7fd8223 100644 --- a/src/common/inc/tvariant.h +++ b/src/common/inc/tvariant.h @@ -16,8 +16,8 @@ #ifndef TDENGINE_TVARIANT_H #define TDENGINE_TVARIANT_H -#include "tstoken.h" #include "tarray.h" +#include "ttoken.h" #ifdef __cplusplus extern "C" { diff --git a/src/common/src/tname.c b/src/common/src/tname.c index 787aa1e95b..65725455e8 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -2,7 +2,7 @@ #include "tutil.h" #include "tname.h" -#include "tstoken.h" +#include "ttoken.h" #include "tvariant.h" #define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS) diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index c872d8731b..9988450c30 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -14,14 +14,14 @@ */ #include "os.h" -#include "tvariant.h" #include "hash.h" #include "taos.h" #include "taosdef.h" -#include "tstoken.h" +#include "ttoken.h" #include "ttokendef.h" -#include "tutil.h" #include "ttype.h" +#include "tutil.h" +#include "tvariant.h" void tVariantCreate(tVariant *pVar, SStrToken *token) { int32_t ret = 0; @@ -49,7 +49,7 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) { ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, true); if (ret != 0) { SStrToken t = {0}; - tSQLGetToken(token->z, &t.type); + tGetToken(token->z, &t.type); if (t.type == TK_MINUS) { // it is a signed number which is greater than INT64_MAX or less than INT64_MIN pVar->nType = -1; // -1 means error type return; @@ -460,7 +460,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result *result = (int64_t) pVariant->dKey; } else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) { SStrToken token = {.z = pVariant->pz, .n = pVariant->nLen}; - /*int32_t n = */tSQLGetToken(pVariant->pz, &token.type); + /*int32_t n = */tGetToken(pVariant->pz, &token.type); if (token.type == TK_NULL) { if (releaseVariantPtr) { @@ -495,10 +495,10 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result wchar_t *endPtr = NULL; SStrToken token = {0}; - token.n = tSQLGetToken(pVariant->pz, &token.type); + token.n = tGetToken(pVariant->pz, &token.type); if (token.type == TK_MINUS || token.type == TK_PLUS) { - token.n = tSQLGetToken(pVariant->pz + token.n, &token.type); + token.n = tGetToken(pVariant->pz + token.n, &token.type); } if (token.type == TK_FLOAT) { diff --git a/src/os/tests/test.cpp b/src/os/tests/test.cpp index 600e5d71a7..bdd3114363 100644 --- a/src/os/tests/test.cpp +++ b/src/os/tests/test.cpp @@ -4,7 +4,7 @@ #include #include "taos.h" -#include "tstoken.h" +#include "ttoken.h" #include "tutil.h" int main(int argc, char** argv) { diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index 0a0587f701..70ba48e551 100644 --- a/src/query/inc/qSqlparser.h +++ 
b/src/query/inc/qSqlparser.h @@ -22,8 +22,8 @@ extern "C" { #include "taos.h" #include "taosmsg.h" -#include "tstoken.h" #include "tstrbuild.h" +#include "ttoken.h" #include "tvariant.h" #define ParseTOKENTYPE SStrToken diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index 6b38536b15..56d395e498 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -13,13 +13,13 @@ * along with this program. If not, see . */ -#include "os.h" #include "qSqlparser.h" +#include "os.h" #include "taosdef.h" #include "taosmsg.h" #include "tcmdtype.h" -#include "tstoken.h" #include "tstrbuild.h" +#include "ttoken.h" #include "ttokendef.h" #include "tutil.h" @@ -38,7 +38,7 @@ SSqlInfo qSqlParse(const char *pStr) { goto abort_parse; } - t0.n = tSQLGetToken((char *)&pStr[i], &t0.type); + t0.n = tGetToken((char *)&pStr[i], &t0.type); t0.z = (char *)(pStr + i); i += t0.n; diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 96d33a8ed6..8f36638dc9 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -26,14 +26,14 @@ #include /************ Begin %include sections from the grammar ************************/ +#include +#include #include #include #include -#include -#include #include "qSqlparser.h" #include "tcmdtype.h" -#include "tstoken.h" +#include "ttoken.h" #include "ttokendef.h" #include "tutil.h" #include "tvariant.h" diff --git a/src/query/tests/tsBufTest.cpp b/src/query/tests/tsBufTest.cpp index 8ca636b834..dd7f03a494 100644 --- a/src/query/tests/tsBufTest.cpp +++ b/src/query/tests/tsBufTest.cpp @@ -3,10 +3,10 @@ #include #include +#include "qTsbuf.h" #include "taos.h" #include "tsdb.h" -#include "qTsbuf.h" -#include "tstoken.h" +#include "ttoken.h" #include "tutil.h" namespace { diff --git a/src/query/tests/unitTest.cpp b/src/query/tests/unitTest.cpp index 3406d83090..d2b058cf7c 100644 --- a/src/query/tests/unitTest.cpp +++ b/src/query/tests/unitTest.cpp @@ -21,7 +21,7 @@ int32_t testValidateName(char* name) { token.n = strlen(name); token.type = 0; - tSQLGetToken(name, &token.type); + tGetToken(name, &token.type); return tscValidateName(&token); } } @@ -691,32 +691,32 @@ TEST(testCase, tGetToken_Test) { char* s = ".123 "; uint32_t type = 0; - int32_t len = tSQLGetToken(s, &type); + int32_t len = tGetToken(s, &type); EXPECT_EQ(type, TK_FLOAT); EXPECT_EQ(len, strlen(s) - 1); char s1[] = "1.123e10 "; - len = tSQLGetToken(s1, &type); + len = tGetToken(s1, &type); EXPECT_EQ(type, TK_FLOAT); EXPECT_EQ(len, strlen(s1) - 1); char s4[] = "0xff "; - len = tSQLGetToken(s4, &type); + len = tGetToken(s4, &type); EXPECT_EQ(type, TK_HEX); EXPECT_EQ(len, strlen(s4) - 1); // invalid data type char s2[] = "e10 "; - len = tSQLGetToken(s2, &type); + len = tGetToken(s2, &type); EXPECT_FALSE(type == TK_FLOAT); char s3[] = "1.1.1.1"; - len = tSQLGetToken(s3, &type); + len = tGetToken(s3, &type); EXPECT_EQ(type, TK_IPTOKEN); EXPECT_EQ(len, strlen(s3)); char s5[] = "0x "; - len = tSQLGetToken(s5, &type); + len = tGetToken(s5, &type); EXPECT_FALSE(type == TK_HEX); } diff --git a/src/util/inc/tstoken.h b/src/util/inc/ttoken.h similarity index 93% rename from src/util/inc/tstoken.h rename to src/util/inc/ttoken.h index ab1ef7b279..c1e2170ac3 100644 --- a/src/util/inc/tstoken.h +++ b/src/util/inc/ttoken.h @@ -37,13 +37,25 @@ typedef struct SStrToken { char *z; } SStrToken; +extern const char escapeChar[]; + +/** + * check if it is a number or not + * @param pToken + * @return + */ +#define isNumber(tk) \ +((tk)->type == TK_INTEGER || (tk)->type == TK_FLOAT || (tk)->type == TK_HEX || 
(tk)->type == TK_BIN) + +#define GET_ESCAPE_CHAR(c) (escapeChar[(uint8_t)(c)]) + /** * tokenizer for sql string * @param z * @param tokenType * @return */ -uint32_t tSQLGetToken(char *z, uint32_t *tokenType); +uint32_t tGetToken(char *z, uint32_t *tokenType); /** * enhanced tokenizer for sql string. @@ -61,21 +73,12 @@ SStrToken tStrGetToken(char *str, int32_t *i, bool isPrevOptr); * @param len * @return */ -bool isKeyWord(const char *z, int32_t len); - -/** - * check if it is a number or not - * @param pToken - * @return - */ -#define isNumber(tk) \ -((tk)->type == TK_INTEGER || (tk)->type == TK_FLOAT || (tk)->type == TK_HEX || (tk)->type == TK_BIN) - +bool taosIsKeyWordToken(const char *z, int32_t len); /** * check if it is a token or not - * @param pToken - * @return token type, if it is not a number, TK_ILLEGAL will return + * @param pToken + * @return token type, if it is not a number, TK_ILLEGAL will return */ static FORCE_INLINE int32_t tGetNumericStringType(const SStrToken* pToken) { const char* z = pToken->z; diff --git a/src/query/src/qTokenizer.c b/src/util/src/ttokenizer.c similarity index 92% rename from src/query/src/qTokenizer.c rename to src/util/src/ttokenizer.c index 7869e27707..794420d55b 100644 --- a/src/query/src/qTokenizer.c +++ b/src/util/src/ttokenizer.c @@ -18,7 +18,7 @@ #include "hash.h" #include "hashfunc.h" #include "taosdef.h" -#include "tstoken.h" +#include "ttoken.h" #include "ttokendef.h" #include "tutil.h" @@ -232,6 +232,18 @@ static const char isIdChar[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ }; +const char escapeChar[] = { + /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, /* 0x */ + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, /* 1x */ + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, /* 2x */ + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, /* 3x */ + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,/* 4x */ + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,/* 5x */ + 0x60, 0x07, 0x08, 0x63, 0x64, 0x65, 0x0C, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x0A, 0x6F,/* 6x */ + 0x70, 0x71, 0x0D, 0x73, 0x09, 0x75, 0x0B, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,/* 7x */ +}; + static void* keywordHashTable = NULL; static void doInitKeywordsTable(void) { @@ -247,7 +259,7 @@ static void doInitKeywordsTable(void) { static pthread_once_t keywordsHashTableInit = PTHREAD_ONCE_INIT; -int tSQLKeywordCode(const char* z, int n) { +static int32_t tKeywordCode(const char* z, int n) { pthread_once(&keywordsHashTableInit, doInitKeywordsTable); char key[512] = {0}; @@ -271,7 +283,7 @@ int tSQLKeywordCode(const char* z, int n) { * Return the length of the token that begins at z[0]. * Store the token type in *type before returning. 
*/ -uint32_t tSQLGetToken(char* z, uint32_t* tokenId) { +uint32_t tGetToken(char* z, uint32_t* tokenId) { uint32_t i; switch (*z) { case ' ': @@ -408,7 +420,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenId) { continue; } - if (z[i] == delim ) { + if (z[i] == delim) { if (z[i + 1] == delim) { i++; } else { @@ -551,7 +563,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenId) { } for (i = 1; ((z[i] & 0x80) == 0) && isIdChar[(uint8_t) z[i]]; i++) { } - *tokenId = tSQLKeywordCode(z, i); + *tokenId = tKeywordCode(z, i); return i; } } @@ -584,7 +596,7 @@ SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr) { t = str[++(*i)]; } - t0.n = tSQLGetToken(&str[*i], &t0.type); + t0.n = tGetToken(&str[*i], &t0.type); break; // not support user specfied ignored symbol list @@ -613,7 +625,7 @@ SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr) { // support parse the 'db.tbl' format, notes: There should be no space on either side of the dot! if ('.' == str[*i + t0.n]) { - len = tSQLGetToken(&str[*i + t0.n + 1], &type); + len = tGetToken(&str[*i + t0.n + 1], &type); // only id and string are valid if ((TK_STRING != t0.type) && (TK_ID != t0.type)) { @@ -628,7 +640,7 @@ SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr) { } else { // support parse the -/+number format if ((isPrevOptr) && (t0.type == TK_MINUS || t0.type == TK_PLUS)) { - len = tSQLGetToken(&str[*i + t0.n], &type); + len = tGetToken(&str[*i + t0.n], &type); if (type == TK_INTEGER || type == TK_FLOAT) { t0.type = type; t0.n += len; @@ -642,7 +654,9 @@ SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr) { return t0; } -bool isKeyWord(const char* z, int32_t len) { return (tSQLKeywordCode((char*)z, len) != TK_ID); } +bool taosIsKeyWordToken(const char* z, int32_t len) { + return (tKeywordCode((char*)z, len) != TK_ID); +} void taosCleanupKeywordsTable() { void* m = keywordHashTable; From 160516665c46971d62d17ad551e06ac0d87c2eed Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 30 Apr 2021 09:50:15 +0800 Subject: [PATCH 019/140] Hotfix/sangshuduo/td 4025 fix travis ci broken (#5973) * [TD-4025]: travis ci broken due to valgrind dependency missed. * [TD-4025]: travis ci broken due to valgrind dependency missed. change focal to bionic. * [TD-4025]: travis ci broken due to valgrind dependency missed. install python3.8 * [TD-4025]: travis ci broken due to valgrind dependency missed. install python3.8 and pip, setuptools * [TD-4025]: travis ci broken due to valgrind dependency missed. install python3.8, and install pip and setuptools * [TD-4025]: travis ci broken due to valgrind dependency missed. 
modify smoketest.sh to python3.8 Co-authored-by: Shuduo Sang --- .travis.yml | 8 ++++--- tests/pytest/smoketest.sh | 44 +++++++++++++++++++-------------------- 2 files changed, 27 insertions(+), 25 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0617d75976..efe7917105 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,7 +13,7 @@ branches: matrix: - os: linux - dist: focal + dist: bionic language: c git: @@ -28,8 +28,8 @@ matrix: - build-essential - cmake - net-tools - - python3-pip - - python3-setuptools + - python3.8 + - libc6-dbg - valgrind - psmisc - unixodbc @@ -39,6 +39,8 @@ matrix: before_script: - export TZ=Asia/Harbin - date + - curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && python3.8 get-pip.py + - python3.8 -m pip install --upgrade pip setuptools - cd ${TRAVIS_BUILD_DIR} - mkdir debug - cd debug diff --git a/tests/pytest/smoketest.sh b/tests/pytest/smoketest.sh index 7c14b673e5..0eb850749f 100755 --- a/tests/pytest/smoketest.sh +++ b/tests/pytest/smoketest.sh @@ -2,36 +2,36 @@ ulimit -c unlimited # insert -python3 ./test.py $1 -f insert/basic.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f insert/bigint.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f insert/nchar.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f insert/multi.py -python3 ./test.py $1 -s && sleep 1 +python3.8 ./test.py $1 -f insert/basic.py +python3.8 ./test.py $1 -s && sleep 1 +python3.8 ./test.py $1 -f insert/bigint.py +python3.8 ./test.py $1 -s && sleep 1 +python3.8 ./test.py $1 -f insert/nchar.py +python3.8 ./test.py $1 -s && sleep 1 +python3.8 ./test.py $1 -f insert/multi.py +python3.8 ./test.py $1 -s && sleep 1 # table -python3 ./test.py $1 -f table/column_name.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f table/column_num.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f table/db_table.py -python3 ./test.py $1 -s && sleep 1 +python3.8 ./test.py $1 -f table/column_name.py +python3.8 ./test.py $1 -s && sleep 1 +python3.8 ./test.py $1 -f table/column_num.py +python3.8 ./test.py $1 -s && sleep 1 +python3.8 ./test.py $1 -f table/db_table.py +python3.8 ./test.py $1 -s && sleep 1 # import -python3 ./test.py $1 -f import_merge/importDataLastSub.py -python3 ./test.py $1 -s && sleep 1 +python3.8 ./test.py $1 -f import_merge/importDataLastSub.py +python3.8 ./test.py $1 -s && sleep 1 #tag -python3 ./test.py $1 -f tag_lite/filter.py -python3 ./test.py $1 -s && sleep 1 +python3.8 ./test.py $1 -f tag_lite/filter.py +python3.8 ./test.py $1 -s && sleep 1 #query -python3 ./test.py $1 -f query/filter.py -python3 ./test.py $1 -s && sleep 1 +python3.8 ./test.py $1 -f query/filter.py +python3.8 ./test.py $1 -s && sleep 1 # client -python3 ./test.py $1 -f client/client.py -python3 ./test.py $1 -s && sleep 1 +python3.8 ./test.py $1 -f client/client.py +python3.8 ./test.py $1 -s && sleep 1 From c64fd41be7f4dba7f929565acdf033a89bd07042 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 30 Apr 2021 10:46:13 +0800 Subject: [PATCH 020/140] [td-3967]refactor. 
--- src/client/src/tscParseInsert.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index a9d8afd09e..35dc7bca55 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -475,16 +475,6 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1 continue; } -// if (sToken.z[k] == delim || sToken.z[k] == '\\') { -// if (sToken.z[k + 1] == delim) { -// cnt++; -// tmpTokenBuf[j] = sToken.z[k + 1]; -// j++;s -// k++; -// continue; -// } -// } - tmpTokenBuf[j] = sToken.z[k]; j++; } From bdc887741ece56dc7e1afa9789338d55b652c35c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 30 Apr 2021 10:47:42 +0800 Subject: [PATCH 021/140] [td-3967] --- src/client/src/tscParseInsert.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 35dc7bca55..298f1245e7 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -462,7 +462,6 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1 // Remove quotation marks if (TK_STRING == sToken.type) { // delete escape character: \\, \', \" -// char delim = sToken.z[0]; int32_t cnt = 0; int32_t j = 0; for (uint32_t k = 1; k < sToken.n - 1; ++k) { @@ -704,6 +703,8 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock } code = TSDB_CODE_TSC_INVALID_SQL; + + // todo the size should be limited to the current sql length char *tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \" if (NULL == tmpTokenBuf) { return TSDB_CODE_TSC_OUT_OF_MEMORY; From 5eac06b287e9db625d28d61bfbf86579d1b9cf3f Mon Sep 17 00:00:00 2001 From: zyyang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Fri, 30 Apr 2021 11:41:24 +0800 Subject: [PATCH 022/140] merge taos-jdbcdriver-2.0.28 all changes to master branch (#5974) * merge taos-jdbcdriver-2.0.28 all changes to master branch * change version in cmake/install.inc --- cmake/install.inc | 2 +- src/connector/jdbc/CMakeLists.txt | 2 +- src/connector/jdbc/deploy-pom.xml | 2 +- src/connector/jdbc/pom.xml | 2 +- .../com/taosdata/jdbc/AbstractConnection.java | 18 +- .../com/taosdata/jdbc/AbstractResultSet.java | 3 +- .../com/taosdata/jdbc/TSDBConnection.java | 1 + .../java/com/taosdata/jdbc/TSDBDriver.java | 7 +- .../taosdata/jdbc/TSDBPreparedStatement.java | 163 +------- .../java/com/taosdata/jdbc/TSDBResultSet.java | 7 +- .../taosdata/jdbc/TSDBResultSetRowData.java | 15 +- .../taosdata/jdbc/rs/RestfulConnection.java | 1 + .../com/taosdata/jdbc/rs/RestfulDriver.java | 2 +- .../jdbc/rs/RestfulPreparedStatement.java | 47 +-- .../taosdata/jdbc/rs/RestfulResultSet.java | 108 ++++- .../taosdata/jdbc/rs/RestfulStatement.java | 46 ++- .../taosdata/jdbc/utils/UtcTimestampUtil.java | 12 + .../java/com/taosdata/jdbc/utils/Utils.java | 135 +++++++ .../jdbc/TSDBPreparedStatementTest.java | 47 ++- .../com/taosdata/jdbc/TSDBResultSetTest.java | 1 + .../jdbc/cases/DriverAutoloadTest.java | 2 + .../cases/InsertSpecialCharacterJniTest.java | 375 +++++++++++++++++ .../InsertSpecialCharacterRestfulTest.java | 376 ++++++++++++++++++ .../NullValueInResultSetForJdbcJniTest.java | 64 +++ .../com/taosdata/jdbc/cases/TD3841Test.java | 91 +++++ .../TwoTypeTimestampPercisionInJniTest.java | 89 +++++ ...woTypeTimestampPercisionInRestfulTest.java | 168 ++++++++ .../jdbc/rs/RestfulPreparedStatementTest.java | 49 ++- 
.../jdbc/rs/RestfulResultSetTest.java | 1 + .../com/taosdata/jdbc/utils/UtilsTest.java | 24 ++ 30 files changed, 1624 insertions(+), 236 deletions(-) create mode 100644 src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java create mode 100644 src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD3841Test.java create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java diff --git a/cmake/install.inc b/cmake/install.inc index 5823ef743e..9e325531d5 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.25-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.28-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index eb158b1f76..de4b8f6bfb 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.25-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.28-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index eb8c92575c..a31796ffde 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.25 + 2.0.28 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 1f75754b0c..3400a82e73 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.25 + 2.0.28 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java index 976078da95..2970f6c2d3 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java @@ -4,13 +4,23 @@ import java.sql.*; import 
java.util.Enumeration; import java.util.Map; import java.util.Properties; +import java.util.Set; import java.util.concurrent.*; public abstract class AbstractConnection extends WrapperImpl implements Connection { protected volatile boolean isClosed; protected volatile String catalog; - protected volatile Properties clientInfoProps = new Properties(); + protected final Properties clientInfoProps = new Properties(); + + protected AbstractConnection(Properties properties) { + Set propNames = properties.stringPropertyNames(); + for (String propName : propNames) { + clientInfoProps.setProperty(propName, properties.getProperty(propName)); + } + String timestampFormat = properties.getProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "STRING"); + clientInfoProps.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, timestampFormat); + } @Override public abstract Statement createStatement() throws SQLException; @@ -35,7 +45,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti } - @Override public void setAutoCommit(boolean autoCommit) throws SQLException { if (isClosed()) @@ -441,9 +450,8 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti if (isClosed) throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED); - if (clientInfoProps == null) - clientInfoProps = new Properties(); - clientInfoProps.setProperty(name, value); + if (clientInfoProps != null) + clientInfoProps.setProperty(name, value); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java index abd348e68c..4b5b88d93b 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java @@ -10,6 +10,7 @@ import java.util.Map; public abstract class AbstractResultSet extends WrapperImpl implements ResultSet { private int fetchSize; + protected boolean wasNull; protected void checkAvailability(int columnIndex, int bounds) throws SQLException { if (isClosed()) @@ -28,7 +29,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet @Override public boolean wasNull() throws SQLException { - return false; + return wasNull; } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 5b4615485d..c8ab9fb15a 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -28,6 +28,7 @@ public class TSDBConnection extends AbstractConnection { } public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException { + super(info); this.databaseMetaData = meta; connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST), Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")), diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index 5f599df130..55533bd28c 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -95,11 +95,16 @@ public class TSDBDriver extends AbstractDriver { */ public static final String PROPERTY_KEY_BATCH_LOAD = "batchfetch"; + /** + * 
timestamp format for JDBC-RESTful,should one of the options: string or timestamp or utc + */ + public static final String PROPERTY_KEY_TIMESTAMP_FORMAT = "timestampFormat"; + private TSDBDatabaseMetaData dbMetaData = null; static { try { - java.sql.DriverManager.registerDriver(new TSDBDriver()); + DriverManager.registerDriver(new TSDBDriver()); } catch (SQLException e) { throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_CANNOT_REGISTER_JNI_DRIVER, e); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index 728b537f71..56f971a35e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -14,11 +14,12 @@ *****************************************************************************/ package com.taosdata.jdbc; +import com.taosdata.jdbc.utils.Utils; + import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; import java.net.URL; -import java.nio.charset.Charset; import java.sql.*; import java.util.ArrayList; import java.util.Calendar; @@ -33,17 +34,9 @@ import java.util.regex.Pattern; public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement { private String rawSql; - private String sql; - // private ArrayList parameters = new ArrayList<>(); private Object[] parameters; private boolean isPrepared; - //start with insert or import and is case-insensitive - private static Pattern savePattern = Pattern.compile("(?i)^\\s*(insert|import)"); - // is insert or import - private boolean isSaved; - - // private SavedPreparedStatement savedPreparedStatement; private volatile TSDBParameterMetaData parameterMetaData; TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) { @@ -65,35 +58,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat private void init(String sql) { this.rawSql = sql; preprocessSql(); -// this.isSaved = isSavedSql(this.rawSql); -// if (this.isSaved) { -// try { -// this.savedPreparedStatement = new SavedPreparedStatement(this.rawSql, this); -// } catch (SQLException e) { -// e.printStackTrace(); -// } -// } - - } - - /** - * if the precompiled sql is insert or import - * - * @param sql - * @return - */ - private boolean isSavedSql(String sql) { - Matcher matcher = savePattern.matcher(sql); - return matcher.find(); } @Override public int[] executeBatch() throws SQLException { -// if (isSaved) { -// return this.savedPreparedStatement.executeBatch(); -// } else { return super.executeBatch(); -// } } /* @@ -157,152 +126,64 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat * * @return a string of the native sql statement for TSDB */ -// private String getNativeSql(String rawSql) { -// for (int i = 0; i < parameters.length; i++) { -// Object para = parameters[i]; -// if (para != null) { -// String paraStr = para.toString(); -// if (para instanceof Timestamp || para instanceof String) { -// paraStr = "'" + paraStr + "'"; -// } -// this.sql = this.sql.replaceFirst("[?]", paraStr); -// } else { -// this.sql = this.sql.replaceFirst("[?]", "NULL"); -// } -// } -// parameters = new Object[parameters.length]; -// return sql; -// } - private String getNativeSql(String rawSql) throws SQLException { - String sql = rawSql; - for (int i = 0; i < parameters.length; ++i) { - Object para = parameters[i]; - if 
(para != null) { - String paraStr; - if (para instanceof byte[]) { - paraStr = new String((byte[]) para, Charset.forName("UTF-8")); - } else { - paraStr = para.toString(); - } - // if para is timestamp or String or byte[] need to translate ' character - if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) { - paraStr = paraStr.replaceAll("'", "\\\\\\\\'"); - paraStr = "'" + paraStr + "'"; - } - sql = sql.replaceFirst("[?]", paraStr); - } else { - sql = sql.replaceFirst("[?]", "NULL"); - } - } - clearParameters(); - return sql; + return Utils.getNativeSql(rawSql, this.parameters); } @Override public ResultSet executeQuery() throws SQLException { -// if (isSaved) { -// this.savedPreparedStatement.executeBatchInternal(); -// return null; -// } else { - if (!isPrepared) return executeQuery(this.rawSql); final String sql = getNativeSql(this.rawSql); return executeQuery(sql); -// } } @Override public int executeUpdate() throws SQLException { -// if (isSaved) { -// return this.savedPreparedStatement.executeBatchInternal(); -// } else { if (!isPrepared) return executeUpdate(this.rawSql); String sql = getNativeSql(this.rawSql); return executeUpdate(sql); -// } - } - - private boolean isSupportedSQLType(int sqlType) { - switch (sqlType) { - case Types.TIMESTAMP: - case Types.INTEGER: - case Types.BIGINT: - case Types.FLOAT: - case Types.DOUBLE: - case Types.SMALLINT: - case Types.TINYINT: - case Types.BOOLEAN: - case Types.BINARY: - case Types.NCHAR: - return true; - default: - return false; - } } @Override public void setNull(int parameterIndex, int sqlType) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - if (!isSupportedSQLType(sqlType) || parameterIndex < 0) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); -// if (parameterIndex >= parameters.size()) -// throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_BOUNDARY); - - setObject(parameterIndex, "NULL"); + setObject(parameterIndex, null); } @Override public void setBoolean(int parameterIndex, boolean x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - setObject(parameterIndex, x); } @Override public void setByte(int parameterIndex, byte x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - setObject(parameterIndex,x); + setObject(parameterIndex, x); } @Override public void setShort(int parameterIndex, short x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setInt(int parameterIndex, int x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setLong(int parameterIndex, long x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setFloat(int parameterIndex, float x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setDouble(int parameterIndex, double x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); 
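// note: the per-setter isClosed() guards removed here are not lost; every typed setter now delegates to setObject(parameterIndex, x), which performs the closed-statement check once (see setObject further below).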
setObject(parameterIndex, x); } @@ -315,17 +196,12 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setString(int parameterIndex, String x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setBytes(int parameterIndex, byte[] x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - - setObject(parameterIndex,x); + setObject(parameterIndex, x); } @Override @@ -344,8 +220,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @@ -360,7 +234,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @@ -375,8 +248,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void clearParameters() throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - -// parameters.clear(); parameters = new Object[parameters.length]; } @@ -384,43 +255,29 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + setObject(parameterIndex, x); } @Override public void setObject(int parameterIndex, Object x) throws SQLException { -// if (isSaved) { -// this.savedPreparedStatement.setParam(parameterIndex, x); -// } else { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); if (parameterIndex < 1 && parameterIndex >= parameters.length) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); - parameters[parameterIndex - 1] = x; -// parameters.add(x); -// } } @Override public boolean execute() throws SQLException { -// if (isSaved) { -// int result = this.savedPreparedStatement.executeBatchInternal(); -// return result > 0; -// } else { if (!isPrepared) return execute(this.rawSql); final String sql = getNativeSql(this.rawSql); - return execute(sql); -// } } @Override public void addBatch() throws SQLException { -// if (isSaved) { -// this.savedPreparedStatement.addBatch(); -// } else { if (this.batchedArgs == null) { batchedArgs = new ArrayList<>(); } @@ -431,7 +288,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat String sql = this.getConnection().nativeSQL(this.rawSql); addBatch(sql); } -// } } @Override @@ -475,7 +331,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public ResultSetMetaData getMetaData() throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); -// return this.getResultSet().getMetaData(); throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java index a20ddaa836..2576a25f0d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java @@ -203,7 +203,11 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { this.lastWasNull = this.rowData.wasNull(columnIndex - 1); if (!lastWasNull) { - res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); + Object value = this.rowData.get(columnIndex - 1); + if (value instanceof Timestamp) + res = ((Timestamp) value).getTime(); + else + res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); } return res; } @@ -273,7 +277,6 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, this.columnMetaDataList.size()); Timestamp res = null; - if (this.getBatchFetch()) return this.blockData.getTimestamp(columnIndex - 1); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java index 7cf5f0d79a..34470fbc4e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java @@ -17,6 +17,7 @@ package com.taosdata.jdbc; import java.math.BigDecimal; import java.sql.SQLException; import java.sql.Timestamp; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; @@ -299,7 +300,19 @@ public class TSDBResultSetRowData { } public void setTimestamp(int col, long ts) { - data.set(col, new Timestamp(ts)); + //TODO: this implementation contains logical error + // when precision is us the (long ts) is 16 digital number + // when precision is ms, the (long ts) is 13 digital number + // we need a JNI function like this: + // public void setTimestamp(int col, long epochSecond, long nanoAdjustment) + if (ts < 1_0000_0000_0000_0L) { + data.set(col, new Timestamp(ts)); + } else { + long epochSec = ts / 1000_000l; + long nanoAdjustment = ts % 1000_000l * 1000l; + Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + data.set(col, timestamp); + } } public Timestamp getTimestamp(int col) { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java index 1f3ed2d144..b810f9aeb5 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java @@ -22,6 +22,7 @@ public class RestfulConnection extends AbstractConnection { private final DatabaseMetaData metadata; public RestfulConnection(String host, String port, Properties props, String database, String url) { + super(props); this.host = host; this.port = Integer.parseInt(port); this.database = database; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index 6efe13561d..a94cfa6e07 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -17,7 +17,7 @@ public class RestfulDriver extends AbstractDriver { static { try { - java.sql.DriverManager.registerDriver(new RestfulDriver()); + DriverManager.registerDriver(new RestfulDriver()); } catch (SQLException e) { throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_URL_NOT_SET, e); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java index f82955ca9d..f846a1162e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java @@ -1,7 +1,12 @@ package com.taosdata.jdbc.rs; +import com.google.common.collect.Range; +import com.google.common.collect.RangeSet; +import com.google.common.collect.TreeRangeSet; import com.taosdata.jdbc.TSDBError; import com.taosdata.jdbc.TSDBErrorNumbers; +import com.taosdata.jdbc.utils.SqlSyntaxValidator; +import com.taosdata.jdbc.utils.Utils; import java.io.InputStream; import java.io.Reader; @@ -10,6 +15,12 @@ import java.net.URL; import java.nio.charset.Charset; import java.sql.*; import java.util.Calendar; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.IntStream; public class RestfulPreparedStatement extends RestfulStatement implements PreparedStatement { @@ -21,6 +32,7 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar public RestfulPreparedStatement(RestfulConnection conn, String database, String sql) { super(conn, database); this.rawSql = sql; + if (sql.contains("?")) { int parameterCnt = 0; for (int i = 0; i < sql.length(); i++) { @@ -58,29 +70,14 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar return executeUpdate(sql); } - private String getNativeSql(String rawSql) throws SQLException { - String sql = rawSql; - for (int i = 0; i < parameters.length; ++i) { - Object para = parameters[i]; - if (para != null) { - String paraStr; - if (para instanceof byte[]) { - paraStr = new String((byte[]) para, Charset.forName("UTF-8")); - } else { - paraStr = para.toString(); - } - // if para is timestamp or String or byte[] need to translate ' character - if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) { - paraStr = paraStr.replaceAll("'", "\\\\\\\\'"); - paraStr = "'" + paraStr + "'"; - } - sql = sql.replaceFirst("[?]", paraStr); - } else { - sql = sql.replaceFirst("[?]", "NULL"); - } - } - clearParameters(); - return sql; + /**** + * 将rawSql转换成一条可执行的sql语句,使用属性parameters中的变脸进行替换 + * 对于insert into ?.? (?,?,?) using ?.? (?,?,?) tags(?, ?, ?) values(?, ?, ?) 
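+     * (in English: converts rawSql into an executable SQL statement by substituting the '?' placeholders with the values held in the parameters field; rawSql may be an insert/import, a select, or another statement that uses '?' as placeholder, e.g. insert into ?.? (?,?,?) using ?.? (?,?,?) tags(?,?,?) values(?,?,?))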
+ * @param rawSql,可能是insert、select或其他,使用?做占位符 + * @return + */ + private String getNativeSql(String rawSql) { + return Utils.getNativeSql(rawSql, this.parameters); } @Override @@ -220,8 +217,8 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - - setObject(parameterIndex,x); + + setObject(parameterIndex, x); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index 5c2d4c45b0..db635f5f79 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -5,13 +5,12 @@ import com.alibaba.fastjson.JSONObject; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; import com.google.common.primitives.Shorts; -import com.taosdata.jdbc.AbstractResultSet; -import com.taosdata.jdbc.TSDBConstants; -import com.taosdata.jdbc.TSDBError; -import com.taosdata.jdbc.TSDBErrorNumbers; +import com.taosdata.jdbc.*; import java.math.BigDecimal; import java.sql.*; +import java.time.Instant; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Calendar; @@ -19,6 +18,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { private volatile boolean isClosed; private int pos = -1; + private final String database; private final Statement statement; // data @@ -65,7 +65,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { } } - private Object parseColumnData(JSONArray row, int colIndex, int taosType) { + private Object parseColumnData(JSONArray row, int colIndex, int taosType) throws SQLException { switch (taosType) { case TSDBConstants.TSDB_DATA_TYPE_BOOL: return row.getBoolean(colIndex); @@ -81,8 +81,44 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { return row.getFloat(colIndex); case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: return row.getDouble(colIndex); - case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: - return new Timestamp(row.getDate(colIndex).getTime()); + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: { + if (row.get(colIndex) == null) + return null; + String timestampFormat = this.statement.getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT); + if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) { + Long value = row.getLong(colIndex); + //TODO: this implementation has bug if the timestamp bigger than 9999_9999_9999_9 + if (value < 1_0000_0000_0000_0L) + return new Timestamp(value); + long epochSec = value / 1000_000l; + long nanoAdjustment = value % 1000_000l * 1000l; + return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + } + if ("UTC".equalsIgnoreCase(timestampFormat)) { + String value = row.getString(colIndex); + long epochSec = Timestamp.valueOf(value.substring(0, 19).replace("T", " ")).getTime() / 1000; + int fractionalSec = Integer.parseInt(value.substring(20, value.length() - 5)); + long nanoAdjustment = 0; + if (value.length() > 28) { + // ms timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSS+0x00 + nanoAdjustment = fractionalSec * 1000l; + } else { + // ms timestamp: yyyy-MM-ddTHH:mm:ss.SSS+0x00 + nanoAdjustment = fractionalSec * 1000_000l; + } + ZoneOffset zoneOffset = 
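/* worked example for the TIMESTAMP (long) branch above, a sketch based on the code as written: a 13-digit value such as 1609430400123L is below 1_0000_0000_0000_0L and is treated as millisecond precision, giving new Timestamp(1609430400123L); a 16-digit value such as 1609430400123456L is treated as microsecond precision and becomes epochSec = 1609430400 with nanoAdjustment = 123456000, i.e. the same instant carrying 123456 microseconds. */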
ZoneOffset.of(value.substring(value.length() - 5)); + Instant instant = Instant.ofEpochSecond(epochSec, nanoAdjustment).atOffset(zoneOffset).toInstant(); + return Timestamp.from(instant); + } + String value = row.getString(colIndex); + if (value.length() <= 23) // ms timestamp: yyyy-MM-dd HH:mm:ss.SSS + return row.getTimestamp(colIndex); + // us timestamp: yyyy-MM-dd HH:mm:ss.SSSSSS + long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000; + long nanoAdjustment = Integer.parseInt(value.substring(20)) * 1000l; + Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + return timestamp; + } case TSDBConstants.TSDB_DATA_TYPE_BINARY: return row.getString(colIndex) == null ? null : row.getString(colIndex).getBytes(); case TSDBConstants.TSDB_DATA_TYPE_NCHAR: @@ -126,12 +162,12 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { } } - @Override - public boolean wasNull() throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - return resultSet.isEmpty(); - } +// @Override +// public boolean wasNull() throws SQLException { +// if (isClosed()) +// throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); +// return resultSet.isEmpty(); +// } @Override public String getString(int columnIndex) throws SQLException { @@ -150,8 +186,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return false; + } + wasNull = false; if (value instanceof Boolean) return (boolean) value; return Boolean.valueOf(value.toString()); @@ -162,8 +201,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return 0; + } + wasNull = false; long valueAsLong = Long.parseLong(value.toString()); if (valueAsLong == Byte.MIN_VALUE) return 0; @@ -183,8 +225,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return 0; + } + wasNull = false; long valueAsLong = Long.parseLong(value.toString()); if (valueAsLong == Short.MIN_VALUE) return 0; @@ -198,8 +243,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return 0; + } + wasNull = false; long valueAsLong = Long.parseLong(value.toString()); if (valueAsLong == Integer.MIN_VALUE) return 0; @@ -213,9 +261,14 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return 0; - + } + wasNull = false; + if (value instanceof Timestamp) { + return ((Timestamp) value).getTime(); + } long valueAsLong = 0; try { valueAsLong = Long.parseLong(value.toString()); @@ -232,8 +285,11 @@ public class RestfulResultSet extends 
AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return 0; + } + wasNull = false; if (value instanceof Float || value instanceof Double) return (float) value; return Float.parseFloat(value.toString()); @@ -244,8 +300,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return 0; + } + wasNull = false; if (value instanceof Double || value instanceof Float) return (double) value; return Double.parseDouble(value.toString()); @@ -307,6 +366,13 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { return null; if (value instanceof Timestamp) return (Timestamp) value; +// if (value instanceof Long) { +// if (1_0000_0000_0000_0L > (long) value) +// return Timestamp.from(Instant.ofEpochMilli((long) value)); +// long epochSec = (long) value / 1000_000L; +// long nanoAdjustment = (long) ((long) value % 1000_000L * 1000); +// return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); +// } return Timestamp.valueOf(value.toString()); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java index 9071c04672..fbc3a50a27 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java @@ -4,6 +4,7 @@ import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONArray; import com.alibaba.fastjson.JSONObject; import com.taosdata.jdbc.AbstractStatement; +import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.TSDBError; import com.taosdata.jdbc.TSDBErrorNumbers; import com.taosdata.jdbc.utils.HttpClientPoolUtil; @@ -34,14 +35,11 @@ public class RestfulStatement extends AbstractStatement { if (!SqlSyntaxValidator.isValidForExecuteQuery(sql)) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql); - final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) { - return executeOneQuery(url, sql); + return executeOneQuery(sql); } -// if (this.database != null && !this.database.trim().replaceAll("\\s","").isEmpty()) -// HttpClientPoolUtil.execute(url, "use " + this.database); - return executeOneQuery(url, sql); + return executeOneQuery(sql); } @Override @@ -56,8 +54,6 @@ public class RestfulStatement extends AbstractStatement { return executeOneUpdate(url, sql); } -// if (this.database != null && !this.database.trim().replaceAll("\\s", "").isEmpty()) -// HttpClientPoolUtil.execute(url, "use " + this.database); return executeOneUpdate(url, sql); } @@ -78,14 +74,21 @@ public class RestfulStatement extends AbstractStatement { //如果执行了use操作应该将当前Statement的catalog设置为新的database boolean result = true; - final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; + String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; + if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("TIMESTAMP")) { + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt"; + } + if 
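/* usage sketch (illustrative values only, not part of the patch): the REST endpoint is chosen from the client property set at connect time, e.g.
   Properties props = new Properties();
   props.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "UTC");
   Connection c = DriverManager.getConnection("jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata", props);
   // queries issued through this connection then go to /rest/sqlutc instead of /rest/sql */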
(conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("UTC")) { + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc"; + } + if (SqlSyntaxValidator.isUseSql(sql)) { HttpClientPoolUtil.execute(url, sql); this.database = sql.trim().replace("use", "").trim(); this.conn.setCatalog(this.database); result = false; } else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) { - executeOneQuery(url, sql); + executeOneQuery(sql); } else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) { executeOneUpdate(url, sql); result = false; @@ -101,11 +104,18 @@ public class RestfulStatement extends AbstractStatement { return result; } - private ResultSet executeOneQuery(String url, String sql) throws SQLException { + private ResultSet executeOneQuery(String sql) throws SQLException { if (!SqlSyntaxValidator.isValidForExecuteQuery(sql)) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql); // row data + String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; + String timestampFormat = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT); + if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt"; + if ("UTC".equalsIgnoreCase(timestampFormat)) + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc"; + String result = HttpClientPoolUtil.execute(url, sql); JSONObject resultJson = JSON.parseObject(result); if (resultJson.getString("status").equals("error")) { @@ -126,21 +136,21 @@ public class RestfulStatement extends AbstractStatement { throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc")); } this.resultSet = null; - this.affectedRows = checkJsonResultSet(jsonObject); + this.affectedRows = getAffectedRows(jsonObject); return this.affectedRows; } - private int checkJsonResultSet(JSONObject jsonObject) { + private int getAffectedRows(JSONObject jsonObject) throws SQLException { // create ... 
SQLs should return 0 , and Restful result is this: // {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1} JSONArray head = jsonObject.getJSONArray("head"); + if (head.size() != 1 || !"affected_rows".equals(head.getString(0))) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); JSONArray data = jsonObject.getJSONArray("data"); - int rows = Integer.parseInt(jsonObject.getString("rows")); - if (head.size() == 1 && "affected_rows".equals(head.getString(0)) - && data.size() == 1 && data.getJSONArray(0).getInteger(0) == 0 && rows == 1) { - return 0; - } - return rows; + if (data != null) + return data.getJSONArray(0).getInteger(0); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java new file mode 100644 index 0000000000..04a11a2beb --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java @@ -0,0 +1,12 @@ +package com.taosdata.jdbc.utils; + +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; + +public class UtcTimestampUtil { + public static final DateTimeFormatter formatter = new DateTimeFormatterBuilder() + .appendPattern("yyyy-MM-ddTHH:mm:ss.SSS+") +// .appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true) + .toFormatter(); + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java new file mode 100644 index 0000000000..a7cb71ad38 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java @@ -0,0 +1,135 @@ +package com.taosdata.jdbc.utils; + +import com.google.common.collect.Range; +import com.google.common.collect.RangeSet; +import com.google.common.collect.TreeRangeSet; + +import java.nio.charset.Charset; +import java.sql.Timestamp; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class Utils { + + private static Pattern ptn = Pattern.compile(".*?'"); + + public static String escapeSingleQuota(String origin) { + Matcher m = ptn.matcher(origin); + StringBuffer sb = new StringBuffer(); + int end = 0; + while (m.find()) { + end = m.end(); + String seg = origin.substring(m.start(), end); + int len = seg.length(); + if (len == 1) { + if ('\'' == seg.charAt(0)) { + sb.append("\\'"); + } else { + sb.append(seg); + } + } else { // len > 1 + sb.append(seg.substring(0, seg.length() - 2)); + char lastcSec = seg.charAt(seg.length() - 2); + if (lastcSec == '\\') { + sb.append("\\'"); + } else { + sb.append(lastcSec); + sb.append("\\'"); + } + } + } + + if (end < origin.length()) { + sb.append(origin.substring(end)); + } + return sb.toString(); + } + + public static String getNativeSql(String rawSql, Object[] parameters) { + // toLowerCase + String preparedSql = rawSql.trim().toLowerCase(); + + String[] clause = new String[0]; + if (SqlSyntaxValidator.isInsertSql(preparedSql)) { + // insert or import + clause = new String[]{"values\\s*\\(.*?\\)", "tags\\s*\\(.*?\\)"}; + } + if (SqlSyntaxValidator.isSelectSql(preparedSql)) { + // select + clause = new String[]{"where\\s*.*"}; + } + Map placeholderPositions = new HashMap<>(); + RangeSet clauseRangeSet = TreeRangeSet.create(); + 
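/* illustrative sketch of the substitution performed below (assumed example values): for rawSql = "insert into t1 values(?, ?)" with parameters = {new Timestamp(1609430400000L), "ab'c"}, both placeholders fall inside the values(...) clause, so the Timestamp and the String are quoted and the single quote escaped, yielding roughly insert into t1 values('2021-01-01 ...', 'ab\'c'); a numeric parameter bound outside a values/tags/where clause (for example the table-name index in "insert into t? ...") is substituted without quotes. */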
findPlaceholderPosition(preparedSql, placeholderPositions); + findClauseRangeSet(preparedSql, clause, clauseRangeSet); + + return transformSql(preparedSql, parameters, placeholderPositions, clauseRangeSet); + } + + private static void findClauseRangeSet(String preparedSql, String[] regexArr, RangeSet clauseRangeSet) { + clauseRangeSet.clear(); + for (String regex : regexArr) { + Matcher matcher = Pattern.compile(regex).matcher(preparedSql); + while (matcher.find()) { + int start = matcher.start(); + int end = matcher.end(); + clauseRangeSet.add(Range.closed(start, end)); + } + } + } + + private static void findPlaceholderPosition(String preparedSql, Map placeholderPosition) { + placeholderPosition.clear(); + Matcher matcher = Pattern.compile("\\?").matcher(preparedSql); + int index = 0; + while (matcher.find()) { + int pos = matcher.start(); + placeholderPosition.put(index, pos); + index++; + } + } + + /*** + * + * @param preparedSql + * @param paramArr + * @param placeholderPosition + * @param clauseRangeSet + * @return + */ + private static String transformSql(String preparedSql, Object[] paramArr, Map placeholderPosition, RangeSet clauseRangeSet) { + String[] sqlArr = preparedSql.split("\\?"); + return IntStream.range(0, sqlArr.length).mapToObj(index -> { + if (index == paramArr.length) + return sqlArr[index]; + + Object para = paramArr[index]; + String paraStr; + if (para != null) { + if (para instanceof byte[]) { + paraStr = new String((byte[]) para, Charset.forName("UTF-8")); + } else { + paraStr = para.toString(); + } + // if para is timestamp or String or byte[] need to translate ' character + if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) { + paraStr = Utils.escapeSingleQuota(paraStr); + + Integer pos = placeholderPosition.get(index); + boolean contains = clauseRangeSet.contains(pos); + if (contains) { + paraStr = "'" + paraStr + "'"; + } + } + } else { + paraStr = "NULL"; + } + return sqlArr[index] + paraStr; + }).collect(Collectors.joining()); + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java index 434095efa2..dc6fd4c501 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java @@ -50,6 +50,51 @@ public class TSDBPreparedStatementTest { pstmt_insert.setNull(2, Types.INTEGER); int result = pstmt_insert.executeUpdate(); Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(3, Types.BIGINT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(4, Types.FLOAT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(5, Types.DOUBLE); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(6, Types.SMALLINT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(7, Types.TINYINT); + result = pstmt_insert.executeUpdate(); + 
Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(8, Types.BOOLEAN); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(9, Types.BINARY); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(10, Types.NCHAR); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(10, Types.OTHER); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); } @Test @@ -129,7 +174,7 @@ public class TSDBPreparedStatementTest { Assert.assertFalse(pstmt_insert.execute()); } - class Person implements Serializable { + class Person { String name; int age; boolean sex; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java index 374b1335fc..c5c6f7bca5 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java @@ -160,6 +160,7 @@ public class TSDBResultSetTest { @Test public void getTime() throws SQLException { Time f1 = rs.getTime("f1"); + Assert.assertNotNull(f1); Assert.assertEquals("00:00:00", f1.toString()); } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java index 9826e6ed76..6c8aed1b06 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java @@ -20,6 +20,7 @@ public class DriverAutoloadTest { final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(url, properties); Assert.assertNotNull(conn); + conn.close(); } @Test @@ -27,6 +28,7 @@ public class DriverAutoloadTest { final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(url, properties); Assert.assertNotNull(conn); + conn.close(); } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java new file mode 100644 index 0000000000..e7d3274136 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java @@ -0,0 +1,375 @@ +package com.taosdata.jdbc.cases; + +import org.junit.*; + +import java.sql.*; + +public class InsertSpecialCharacterJniTest { + + private static final String host = "127.0.0.1"; + private static Connection conn; + private static String dbName = "spec_char_test"; + private static String tbname1 = "test"; + private static String tbname2 = "weather"; + private static String special_character_str_1 = "$asd$$fsfsf$"; + private static String special_character_str_2 = "\\asdfsfsf\\\\"; + private static String special_character_str_3 = "\\\\asdfsfsf\\"; + private static String special_character_str_4 = "?asd??fsf?sf?"; + private static String special_character_str_5 = 
"?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$"; + + @Test + public void testCase01() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setBytes(2, special_character_str_1.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from ?"; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + pstmt.setString(1, tbname1); + + ResultSet rs = pstmt.executeQuery(); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_1, f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + } + + + @Test + public void testCase02() throws SQLException { + //TODO: + // Expected :\asdfsfsf\\ + // Actual :\asdfsfsf\ + + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setBytes(2, special_character_str_2.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from " + tbname1; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + ResultSet rs = pstmt.executeQuery(); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + //TODO: bug to be fixed +// Assert.assertEquals(special_character_str_2, f1); + Assert.assertEquals(special_character_str_2.substring(0, special_character_str_1.length() - 2), f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + } + + @Test(expected = SQLException.class) + public void testCase03() throws SQLException { + //TODO: + // TDengine ERROR (216): Syntax error in SQL + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setBytes(2, special_character_str_3.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from " + tbname1; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + ResultSet rs = pstmt.executeQuery(); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_3, f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + } + + @Test + public void testCase04() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setBytes(2, special_character_str_4.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from " + tbname1; + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery(query); + rs.next(); + long timestamp = 
rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_4, f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + } + + @Test + public void testCase05() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setBytes(2, special_character_str_5.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from " + tbname1; + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery(query); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_5, f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + } + + @Test + public void testCase06() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setInt(1, 1); + pstmt.setString(2, special_character_str_4); + pstmt.setTimestamp(3, new Timestamp(now)); + pstmt.setBytes(4, special_character_str_4.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query t1 + final String query = "select * from t1"; + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery(query); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_4, f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + } + + @Test + public void testCase07() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, ?, ?) ; "; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setBytes(2, special_character_str_4.getBytes()); + pstmt.setString(3, special_character_str_4); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from " + tbname1; + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery(query); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_4, f1); + String f2 = rs.getString(3); + Assert.assertEquals(special_character_str_4, f2); + } + } + + @Test(expected = SQLException.class) + public void testCase08() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?) ? 
"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setInt(1, 1); + pstmt.setString(2, special_character_str_5); + pstmt.setTimestamp(3, new Timestamp(now)); + pstmt.setBytes(4, special_character_str_5.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + } + + @Test + public void testCase09() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into ?.t? using " + tbname2 + " tags(?) values(?, ?, ?) t? using weather tags(?) values(?,?,?) "; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + // t1 + pstmt.setString(1, dbName); + pstmt.setInt(2, 1); + pstmt.setString(3, special_character_str_5); + pstmt.setTimestamp(4, new Timestamp(now)); + pstmt.setBytes(5, special_character_str_5.getBytes()); + // t2 + pstmt.setInt(7, 2); + pstmt.setString(8, special_character_str_5); + pstmt.setTimestamp(9, new Timestamp(now)); + pstmt.setString(11, special_character_str_5); + + int ret = pstmt.executeUpdate(); + Assert.assertEquals(2, ret); + } + // query t1 + String query = "select * from t?"; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + pstmt.setInt(1, 1); + + ResultSet rs = pstmt.executeQuery(); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_5, f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + // query t2 + query = "select * from t2"; + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery(query); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + byte[] f1 = rs.getBytes(2); + Assert.assertNull(f1); + String f2 = new String(rs.getBytes(3)); + Assert.assertEquals(special_character_str_5, f2); + } + } + + @Test + public void testCase10() throws SQLException { + final long now = System.currentTimeMillis(); + + // insert + final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) "; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + // t1 + pstmt.setInt(1, 1); + pstmt.setString(2, tbname2); + pstmt.setString(3, special_character_str_5); + pstmt.setTimestamp(4, new Timestamp(now)); + pstmt.setBytes(5, special_character_str_5.getBytes()); + // t2 + pstmt.setInt(7, 2); + pstmt.setString(8, special_character_str_5); + pstmt.setTimestamp(9, new Timestamp(now)); + pstmt.setString(11, special_character_str_5); + + int ret = pstmt.executeUpdate(); + Assert.assertEquals(2, ret); + } + //query t1 + String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null"; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + pstmt.setString(1, dbName); + pstmt.setInt(2, 1); + pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis())); + pstmt.setTimestamp(4, new Timestamp(0)); + pstmt.setString(5, "f1"); + + ResultSet rs = pstmt.executeQuery(); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_5, f1); + byte[] f2 = rs.getBytes(3); + Assert.assertNull(f2); + } + // query t2 + query = "select * from t? where ts < ? and ts >= ? and ? 
is not null"; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + pstmt.setInt(1, 2); + pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis())); + pstmt.setTimestamp(3, new Timestamp(0)); + pstmt.setString(4, "f2"); + + ResultSet rs = pstmt.executeQuery(); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + byte[] f1 = rs.getBytes(2); + Assert.assertNull(f1); + String f2 = new String(rs.getBytes(3)); + Assert.assertEquals(special_character_str_5, f2); + } + } + + @Test(expected = SQLException.class) + public void testCase11() throws SQLException { + final String speicalCharacterStr = "?#sd@$f(((s[P)){]}f?s[]{}%vs^a&d*jhg)(j))(f@~!?$"; + final long now = System.currentTimeMillis(); + + final String sql = "insert into t? using " + tbname2 + " values(?, ?, 'abc?abc') "; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setInt(1, 1); + pstmt.setTimestamp(2, new Timestamp(now)); + pstmt.setBytes(3, speicalCharacterStr.getBytes()); + + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + } + + @Before + public void before() throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop table if exists " + tbname1 + ""); + stmt.execute("create table " + tbname1 + "(ts timestamp,f1 binary(64),f2 nchar(64))"); + stmt.execute("drop table if exists " + tbname2); + stmt.execute("create table " + tbname2 + "(ts timestamp, f1 binary(64), f2 nchar(64)) tags(loc nchar(64))"); + } + } + + @BeforeClass + public static void beforeClass() throws SQLException { + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url); + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists " + dbName); + stmt.execute("create database if not exists " + dbName); + stmt.execute("use " + dbName); + } + } + + @AfterClass + public static void afterClass() throws SQLException { + if (conn != null) + conn.close(); + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java new file mode 100644 index 0000000000..64a0e97684 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java @@ -0,0 +1,376 @@ +package com.taosdata.jdbc.cases; + +import org.junit.*; + +import java.sql.*; + +public class InsertSpecialCharacterRestfulTest { + + private static final String host = "127.0.0.1"; + // private static final String host = "master"; + private static Connection conn; + private static String dbName = "spec_char_test"; + private static String tbname1 = "test"; + private static String tbname2 = "weather"; + private static String special_character_str_1 = "$asd$$fsfsf$"; + private static String special_character_str_2 = "\\asdfsfsf\\\\"; + private static String special_character_str_3 = "\\\\asdfsfsf\\"; + private static String special_character_str_4 = "?asd??fsf?sf?"; + private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$"; + + @Test + public void testCase01() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setBytes(2, 
special_character_str_1.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from ?"; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + pstmt.setString(1, tbname1); + + ResultSet rs = pstmt.executeQuery(); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_1, f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + } + + + @Test + public void testCase02() throws SQLException { + //TODO: + // Expected :\asdfsfsf\\ + // Actual :\asdfsfsf\ + + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setBytes(2, special_character_str_2.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from " + tbname1; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + ResultSet rs = pstmt.executeQuery(); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + //TODO: bug to be fixed +// Assert.assertEquals(special_character_str_2, f1); + Assert.assertEquals(special_character_str_2.substring(0, special_character_str_1.length() - 2), f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + } + + @Test(expected = SQLException.class) + public void testCase03() throws SQLException { + //TODO: + // TDengine ERROR (216): Syntax error in SQL + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setBytes(2, special_character_str_3.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from " + tbname1; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + ResultSet rs = pstmt.executeQuery(); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_3, f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + } + + @Test + public void testCase04() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setBytes(2, special_character_str_4.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from " + tbname1; + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery(query); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_4, f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + } + + @Test + public void testCase05() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, 
f1) values(?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setBytes(2, special_character_str_5.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from " + tbname1; + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery(query); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_5, f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + } + + @Test + public void testCase06() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setInt(1, 1); + pstmt.setString(2, special_character_str_4); + pstmt.setTimestamp(3, new Timestamp(now)); + pstmt.setBytes(4, special_character_str_4.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query t1 + final String query = "select * from t1"; + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery(query); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_4, f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + } + + @Test + public void testCase07() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, ?, ?) ; "; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setBytes(2, special_character_str_4.getBytes()); + pstmt.setString(3, special_character_str_4); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from " + tbname1; + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery(query); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_4, f1); + String f2 = rs.getString(3); + Assert.assertEquals(special_character_str_4, f2); + } + } + + @Test(expected = SQLException.class) + public void testCase08() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?) ? "; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setInt(1, 1); + pstmt.setString(2, special_character_str_5); + pstmt.setTimestamp(3, new Timestamp(now)); + pstmt.setBytes(4, special_character_str_5.getBytes()); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + } + + @Test + public void testCase09() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into ?.t? using " + tbname2 + " tags(?) values(?, ?, ?) t? using weather tags(?) values(?,?,?) 
"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + // t1 + pstmt.setString(1, dbName); + pstmt.setInt(2, 1); + pstmt.setString(3, special_character_str_5); + pstmt.setTimestamp(4, new Timestamp(now)); + pstmt.setBytes(5, special_character_str_5.getBytes()); + // t2 + pstmt.setInt(7, 2); + pstmt.setString(8, special_character_str_5); + pstmt.setTimestamp(9, new Timestamp(now)); + pstmt.setString(11, special_character_str_5); + + int ret = pstmt.executeUpdate(); + Assert.assertEquals(2, ret); + } + // query t1 + String query = "select * from t?"; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + pstmt.setInt(1, 1); + + ResultSet rs = pstmt.executeQuery(); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_5, f1); + String f2 = rs.getString(3); + Assert.assertNull(f2); + } + // query t2 + query = "select * from t2"; + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery(query); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + byte[] f1 = rs.getBytes(2); + Assert.assertNull(f1); + String f2 = new String(rs.getBytes(3)); + Assert.assertEquals(special_character_str_5, f2); + } + } + + @Test + public void testCase10() throws SQLException { + final long now = System.currentTimeMillis(); + + // insert + final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) "; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + // t1 + pstmt.setInt(1, 1); + pstmt.setString(2, tbname2); + pstmt.setString(3, special_character_str_5); + pstmt.setTimestamp(4, new Timestamp(now)); + pstmt.setBytes(5, special_character_str_5.getBytes()); + // t2 + pstmt.setInt(7, 2); + pstmt.setString(8, special_character_str_5); + pstmt.setTimestamp(9, new Timestamp(now)); + pstmt.setString(11, special_character_str_5); + + int ret = pstmt.executeUpdate(); + Assert.assertEquals(2, ret); + } + //query t1 + String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null"; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + pstmt.setString(1, dbName); + pstmt.setInt(2, 1); + pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis())); + pstmt.setTimestamp(4, new Timestamp(0)); + pstmt.setString(5, "f1"); + + ResultSet rs = pstmt.executeQuery(); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals(special_character_str_5, f1); + byte[] f2 = rs.getBytes(3); + Assert.assertNull(f2); + } + // query t2 + query = "select * from t? where ts < ? and ts >= ? and ? 
is not null"; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + pstmt.setInt(1, 2); + pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis())); + pstmt.setTimestamp(3, new Timestamp(0)); + pstmt.setString(4, "f2"); + + ResultSet rs = pstmt.executeQuery(); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + byte[] f1 = rs.getBytes(2); + Assert.assertNull(f1); + String f2 = new String(rs.getBytes(3)); + Assert.assertEquals(special_character_str_5, f2); + } + } + + @Test(expected = SQLException.class) + public void testCase11() throws SQLException { + final String speicalCharacterStr = "?#sd@$f(((s[P)){]}f?s[]{}%vs^a&d*jhg)(j))(f@~!?$"; + final long now = System.currentTimeMillis(); + + final String sql = "insert into t? using " + tbname2 + " values(?, ?, 'abc?abc') "; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setInt(1, 1); + pstmt.setTimestamp(2, new Timestamp(now)); + pstmt.setBytes(3, speicalCharacterStr.getBytes()); + + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + } + + @Before + public void before() throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop table if exists " + tbname1 + ""); + stmt.execute("create table " + tbname1 + "(ts timestamp,f1 binary(64),f2 nchar(64))"); + stmt.execute("drop table if exists " + tbname2); + stmt.execute("create table " + tbname2 + "(ts timestamp, f1 binary(64), f2 nchar(64)) tags(loc nchar(64))"); + } + } + + @BeforeClass + public static void beforeClass() throws SQLException { + String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url); + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists " + dbName); + stmt.execute("create database if not exists " + dbName); + stmt.execute("use " + dbName); + } + } + + @AfterClass + public static void afterClass() throws SQLException { + if (conn != null) + conn.close(); + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java new file mode 100644 index 0000000000..782125144c --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java @@ -0,0 +1,64 @@ +package com.taosdata.jdbc.cases; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; + +public class NullValueInResultSetForJdbcJniTest { + + private static final String host = "127.0.0.1"; + Connection conn; + + @Test + public void test() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select * from weather"); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + Object value = rs.getObject(i); + System.out.print(meta.getColumnLabel(i) + ": " + value + "\t"); + } + System.out.println(); + } + + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Before + public void before() throws SQLException { + final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url); + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_null"); + stmt.execute("create database if not exists test_null"); + stmt.execute("use test_null"); + 
stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64))"); + stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, 1)"); + stmt.executeUpdate("insert into weather(ts, f2) values(now+2s, 2)"); + stmt.executeUpdate("insert into weather(ts, f3) values(now+3s, 3.0)"); + stmt.executeUpdate("insert into weather(ts, f4) values(now+4s, 4.0)"); + stmt.executeUpdate("insert into weather(ts, f5) values(now+5s, 5)"); + stmt.executeUpdate("insert into weather(ts, f6) values(now+6s, 6)"); + stmt.executeUpdate("insert into weather(ts, f7) values(now+7s, true)"); + stmt.executeUpdate("insert into weather(ts, f8) values(now+8s, 'hello')"); + stmt.executeUpdate("insert into weather(ts, f9) values(now+9s, '涛思数据')"); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @After + public void after() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD3841Test.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD3841Test.java new file mode 100644 index 0000000000..c6fba81eb2 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD3841Test.java @@ -0,0 +1,91 @@ +package com.taosdata.jdbc.cases; + +import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.utils.TimestampUtil; +import org.junit.*; + +import java.sql.*; +import java.util.Properties; + +public class TD3841Test { + private static final String host = "127.0.0.1"; + private static Properties properties; + private static Connection conn_restful; + private static Connection conn_jni; + + @Test + public void testRestful() throws SQLException { + String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + conn_restful = DriverManager.getConnection(url, properties); + + try (Statement stmt = conn_restful.createStatement()) { + stmt.execute("drop database if exists test_null"); + stmt.execute("create database if not exists test_null"); + stmt.execute("use test_null"); + stmt.execute("create table weather(ts timestamp, f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 smallint, f7 tinyint, f8 bool, f9 binary(64), f10 nchar(64))"); + stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, " + TimestampUtil.datetimeToLong("2021-04-21 12:00:00.000") + ")"); + ResultSet rs = stmt.executeQuery("select * from weather"); + rs.next(); + + Assert.assertEquals("2021-04-21 12:00:00.000", TimestampUtil.longToDatetime(rs.getTimestamp(2).getTime())); + Assert.assertEquals(true, rs.getInt(3) == 0 && rs.wasNull()); + Assert.assertEquals(true, rs.getLong(4) == 0 && rs.wasNull()); + Assert.assertEquals(true, rs.getFloat(5) == 0.0f && rs.wasNull()); + Assert.assertEquals(true, rs.getDouble(6) == 0.0f && rs.wasNull()); + Assert.assertEquals(true, rs.getByte(7) == 0 && rs.wasNull()); + Assert.assertEquals(true, rs.getShort(8) == 0 && rs.wasNull()); + Assert.assertEquals(null, rs.getBytes(9)); + Assert.assertEquals(null, rs.getString(10)); + + rs.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testJNI() throws SQLException { + final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + conn_jni = DriverManager.getConnection(url, properties); + + try (Statement stmt = conn_jni.createStatement()) { + stmt.execute("drop database if exists test_null"); + stmt.execute("create database if not exists 
test_null"); + stmt.execute("use test_null"); + stmt.execute("create table weather(ts timestamp, f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 smallint, f7 tinyint, f8 bool, f9 binary(64), f10 nchar(64))"); + stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, " + TimestampUtil.datetimeToLong("2021-04-21 12:00:00.000") + ")"); + ResultSet rs = stmt.executeQuery("select * from weather"); + rs.next(); + + Assert.assertEquals("2021-04-21 12:00:00.000", TimestampUtil.longToDatetime(rs.getTimestamp(2).getTime())); + Assert.assertEquals(true, rs.getInt(3) == 0 && rs.wasNull()); + Assert.assertEquals(true, rs.getLong(4) == 0 && rs.wasNull()); + Assert.assertEquals(true, rs.getFloat(5) == 0.0f && rs.wasNull()); + Assert.assertEquals(true, rs.getDouble(6) == 0.0f && rs.wasNull()); + Assert.assertEquals(true, rs.getByte(7) == 0 && rs.wasNull()); + Assert.assertEquals(true, rs.getShort(8) == 0 && rs.wasNull()); + Assert.assertEquals(null, rs.getBytes(9)); + Assert.assertEquals(null, rs.getString(10)); + + rs.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() { + properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + } + + @AfterClass + public static void afterClass() throws SQLException { + if (conn_restful != null) + conn_restful.close(); + if (conn_jni != null) + conn_jni.close(); + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java new file mode 100644 index 0000000000..f9b111bb12 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java @@ -0,0 +1,89 @@ +package com.taosdata.jdbc.cases; + + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; + +public class TwoTypeTimestampPercisionInJniTest { + + private static final String host = "127.0.0.1"; + private static final String ms_timestamp_db = "ms_precision_test"; + private static final String us_timestamp_db = "us_precision_test"; + private static final long timestamp1 = System.currentTimeMillis(); + private static final long timestamp2 = timestamp1 * 1000 + 123; + + private static Connection conn; + + @Test + public void testCase1() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather"); + rs.next(); + long ts = rs.getTimestamp(1).getTime(); + Assert.assertEquals(timestamp1, ts); + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase2() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather"); + rs.next(); + + Timestamp timestamp = rs.getTimestamp(1); + System.out.println(timestamp); + long ts = timestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + int nanos = timestamp.getNanos(); + Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos); + + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch 
(SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url, properties); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + ms_timestamp_db); + stmt.execute("create database if not exists " + ms_timestamp_db + " precision 'ms'"); + stmt.execute("create table " + ms_timestamp_db + ".weather(ts timestamp, f1 int)"); + stmt.executeUpdate("insert into " + ms_timestamp_db + ".weather(ts,f1) values(" + timestamp1 + ", 127)"); + + stmt.execute("drop database if exists " + us_timestamp_db); + stmt.execute("create database if not exists " + us_timestamp_db + " precision 'us'"); + stmt.execute("create table " + us_timestamp_db + ".weather(ts timestamp, f1 int)"); + stmt.executeUpdate("insert into " + us_timestamp_db + ".weather(ts,f1) values(" + timestamp2 + ", 127)"); + stmt.close(); + } + + @AfterClass + public static void afterClass() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java new file mode 100644 index 0000000000..ed4f979ef3 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java @@ -0,0 +1,168 @@ +package com.taosdata.jdbc.cases; + + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; + +public class TwoTypeTimestampPercisionInRestfulTest { + + private static final String host = "127.0.0.1"; + private static final String ms_timestamp_db = "ms_precision_test"; + private static final String us_timestamp_db = "us_precision_test"; + private static final long timestamp1 = System.currentTimeMillis(); + private static final long timestamp2 = timestamp1 * 1000 + 123; + + private static Connection conn1; + private static Connection conn2; + private static Connection conn3; + + @Test + public void testCase1() { + try (Statement stmt = conn1.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather"); + rs.next(); + long ts = rs.getTimestamp(1).getTime(); + Assert.assertEquals(timestamp1, ts); + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase2() { + try (Statement stmt = conn1.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather"); + rs.next(); + + Timestamp timestamp = rs.getTimestamp(1); + long ts = timestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + int nanos = timestamp.getNanos(); + Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos); + + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase3() { + try (Statement 
stmt = conn2.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather"); + rs.next(); + Timestamp rsTimestamp = rs.getTimestamp(1); + long ts = rsTimestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase4() { + try (Statement stmt = conn2.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather"); + rs.next(); + + Timestamp timestamp = rs.getTimestamp(1); + long ts = timestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + int nanos = timestamp.getNanos(); + Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos); + + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase5() { + try (Statement stmt = conn3.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather"); + rs.next(); + long ts = rs.getTimestamp(1).getTime(); + Assert.assertEquals(timestamp1, ts); + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase6() { + try (Statement stmt = conn3.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather"); + rs.next(); + + Timestamp timestamp = rs.getTimestamp(1); + long ts = timestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + int nanos = timestamp.getNanos(); + Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos); + + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); +// properties.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "TIMESTAMP"); + + String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + conn1 = DriverManager.getConnection(url, properties); + + url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata×tampFormat=timestamp"; + conn2 = DriverManager.getConnection(url, properties); + + url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata×tampFormat=utc"; + conn3 = DriverManager.getConnection(url, properties); + + Statement stmt = conn1.createStatement(); + stmt.execute("drop database if exists " + ms_timestamp_db); + stmt.execute("create database if not exists " + ms_timestamp_db + " precision 'ms'"); + stmt.execute("create table " + ms_timestamp_db + ".weather(ts timestamp, f1 int)"); + stmt.executeUpdate("insert into " + ms_timestamp_db + ".weather(ts,f1) values(" + timestamp1 + ", 127)"); + + stmt.execute("drop database if exists " + us_timestamp_db); + stmt.execute("create database if not exists " + us_timestamp_db + " precision 'us'"); + stmt.execute("create table " + us_timestamp_db + ".weather(ts timestamp, f1 int)"); + stmt.executeUpdate("insert into " + us_timestamp_db + ".weather(ts,f1) values(" + timestamp2 + ", 127)"); + stmt.close(); + } + + @AfterClass + public static void afterClass() { + try { + if (conn1 != null) + 
conn1.close(); + if (conn2 != null) + conn2.close(); + if (conn3 != null) + conn3.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java index 094dff8c8d..e4dd6384f9 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java @@ -6,11 +6,11 @@ import org.junit.BeforeClass; import org.junit.Test; import java.io.IOException; -import java.io.Serializable; import java.sql.*; public class RestfulPreparedStatementTest { private static final String host = "127.0.0.1"; + // private static final String host = "master"; private static Connection conn; private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; private static PreparedStatement pstmt_insert; @@ -50,6 +50,51 @@ public class RestfulPreparedStatementTest { pstmt_insert.setNull(2, Types.INTEGER); int result = pstmt_insert.executeUpdate(); Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(3, Types.BIGINT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(4, Types.FLOAT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(5, Types.DOUBLE); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(6, Types.SMALLINT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(7, Types.TINYINT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(8, Types.BOOLEAN); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(9, Types.BINARY); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(10, Types.NCHAR); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(10, Types.OTHER); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); } @Test @@ -129,7 +174,7 @@ public class RestfulPreparedStatementTest { Assert.assertFalse(pstmt_insert.execute()); } - class Person implements Serializable { + private class Person { String name; int age; boolean sex; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java index d6b2a58127..9bfe9a04ff 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java 
@@ -160,6 +160,7 @@ public class RestfulResultSetTest { @Test public void getTime() throws SQLException { Time f1 = rs.getTime("f1"); + Assert.assertNotNull(f1); Assert.assertEquals("00:00:00", f1.toString()); } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java new file mode 100644 index 0000000000..c861ef2966 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java @@ -0,0 +1,24 @@ +package com.taosdata.jdbc.utils; + +import org.junit.Assert; +import org.junit.Test; + +import static org.junit.Assert.*; + +public class UtilsTest { + + @Test + public void escapeSingleQuota() { + String s = "'''''a\\'"; + String news = Utils.escapeSingleQuota(s); + Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news); + + s = "\'''''a\\'"; + news = Utils.escapeSingleQuota(s); + Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news); + + s = "\'\'\'\''a\\'"; + news = Utils.escapeSingleQuota(s); + Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news); + } +} \ No newline at end of file From 687d3b4c33266a6fa56d7b316f8bdd8979eedc55 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 30 Apr 2021 13:45:40 +0800 Subject: [PATCH 023/140] [TD-3614] avoid tcp send failure --- src/rpc/src/rpcMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index db3c72c2fc..2ec508f050 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) { return NULL; } } else { - pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 30); + pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 20); if ( pRpc->pCache == NULL ) { tError("%s failed to init connection cache", pRpc->label); rpcClose(pRpc); From 52b739abe0d1ae394b4d471766caf754f68bb754 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 30 Apr 2021 14:09:34 +0800 Subject: [PATCH 024/140] [TD-3747]enhance performance on win --- src/client/src/tscParseInsert.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 1b8cfdb329..a53dcc05fa 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -705,19 +705,11 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock } code = TSDB_CODE_TSC_INVALID_SQL; - char *tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \" - if (NULL == tmpTokenBuf) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } + char tmpTokenBuf[16*1024] = {0}; // used for deleting Escape character: \\, \', \" int32_t numOfRows = 0; code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf); - free(tmpTokenBuf); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) { SParamInfo *param = dataBuf->params + i; if (param->idx == -1) { From 460768f067ffc3708d0f088c9541aa9df26f9075 Mon Sep 17 00:00:00 2001 From: Yiqing Liu Date: Fri, 30 Apr 2021 14:20:12 +0800 Subject: [PATCH 025/140] [TD-4032]Eliminate false alarms (#5978) * [TD-4032]Eliminate false alarms * update rules --- tests/pytest/crash_gen/valgrind_taos.supp | 165 ++++++++++++++++++++++ 1 file changed, 165 insertions(+) diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index 5eb5403395..a00b2d830c 
100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -17332,3 +17332,168 @@ fun:PyVectorcall_Call fun:_PyEval_EvalFrameDefault } +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:lib_build_and_cache_attr + fun:lib_getattr + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:PyEval_EvalCode + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:lib_build_and_cache_attr + fun:lib_getattr + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:PyEval_EvalCode + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:_my_Py_InitModule + fun:lib_getattr + fun:b_init_cffi_1_0_external_module + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_CallMethod + fun:_cffi_init + fun:PyInit__bcrypt + fun:_PyImport_LoadDynamicModuleWithSpec + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:_PyObject_GC_New + fun:lib_getattr + fun:ffi_internal_new + fun:b_init_cffi_1_0_external_module + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_CallMethod + fun:_cffi_init + fun:PyInit__bcrypt + fun:_PyImport_LoadDynamicModuleWithSpec + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:lib_build_cpython_func.isra.87 + fun:lib_build_and_cache_attr + fun:lib_getattr + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:lib_build_and_cache_attr + fun:lib_getattr + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:_my_Py_InitModule + fun:b_init_cffi_1_0_external_module + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_CallMethod + fun:_cffi_init + fun:PyInit__bcrypt + fun:_PyImport_LoadDynamicModuleWithSpec + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:_my_Py_InitModule + fun:b_init_cffi_1_0_external_module + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_CallMethod + fun:PyInit__openssl + fun:_PyImport_LoadDynamicModuleWithSpec + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:_PyObject_GC_New + fun:ffi_internal_new + fun:b_init_cffi_1_0_external_module + obj:/usr/bin/python3.8 + 
obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_CallMethod + fun:_cffi_init + fun:PyInit__bcrypt + fun:_PyImport_LoadDynamicModuleWithSpec + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call +} \ No newline at end of file From 4a6a4f9e71c46e7ab38376d59a947502afdb16b7 Mon Sep 17 00:00:00 2001 From: wu champion Date: Fri, 30 Apr 2021 16:23:43 +0800 Subject: [PATCH 026/140] [TD-4000] verify teature of TD-3948 --- tests/pytest/fulltest.sh | 1 + .../insert/insertDynamicColBeforeVal.py | 136 ++++++++++++++++++ 2 files changed, 137 insertions(+) create mode 100644 tests/pytest/insert/insertDynamicColBeforeVal.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 3a6e0d5ba2..54eef46628 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -23,6 +23,7 @@ python3 ./test.py -f insert/insertIntoTwoTables.py python3 ./test.py -f insert/before_1970.py python3 bug2265.py python3 ./test.py -f insert/bug3654.py +python3 ./test.py -f insert/insertDynamicColBeforeVal.py #table python3 ./test.py -f table/alter_wal0.py diff --git a/tests/pytest/insert/insertDynamicColBeforeVal.py b/tests/pytest/insert/insertDynamicColBeforeVal.py new file mode 100644 index 0000000000..4b596049d1 --- /dev/null +++ b/tests/pytest/insert/insertDynamicColBeforeVal.py @@ -0,0 +1,136 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step1:create table") + tdSql.execute( + "create table stb1 (ts timestamp, c11 int, c12 float ) TAGS(t11 int, t12 int )" + ) + + tdLog.printNoPrefix("==========step2:insert data with new syntax") + tdSql.execute( + "insert into t1 using stb1(t11, t12) tags(11, 12) (ts, c11, c12) values (now, 10, 20)" + ) + + # case for tag-value + tdSql.execute( + "insert into t2 using stb1(t11) tags(21) (ts, c11, c12) values (now-1m, 11, 21)" + ) + tdSql.execute( + "insert into t3 using stb1 tags(31, 32) (ts, c11, c12) values (now-2m, 12, 22)" + ) + tdSql.error( + "insert into t4 using stb1(t11, t12) (ts, c11, c12) values (now-3m, 13, 23)" + ) + tdSql.error( + "insert into t5 using stb1(t11, t12) tags() (ts, c11, c12) values (now-4m, 14, 24)" + ) + tdSql.error( + "insert into t6 using stb1(t11, t12) tags(41) (ts, c11, c12) values (now-5m, 15, 25)" + ) + tdSql.error( + "insert into t7 using stb1(t12) tags(51, 52) (ts, c11, c12) values (now-6m, 16, 26)" + ) + tdSql.execute( + "insert into t8 using stb1(t11, t12) tags('61', 62) (ts, c11, c12) values (now-7m, 17, 27)" + ) + + + # case for col-value + tdSql.execute( + "insert into t9 using stb1(t11, t12) tags(71, 72) values (now-8m, 18, 28)" + ) + tdSql.error( + "insert into t10 using stb1(t11, t12) 
tags(81, 82) (ts, c11, c12) values ()" + ) + tdSql.error( + "insert into t11 using stb1(t11, t12) tags(91, 92) (ts, c11, c12) " + ) + tdSql.error( + "insert into t12 using stb1(t11, t12) tags(101, 102) values (now-9m, 19)" + ) + tdSql.error( + "insert into t13 using stb1(t11, t12) tags(111, 112) (ts, c11) values (now-10m, 110, 210)" + ) + tdSql.error( + "insert into t14 using stb1(t11, t12) tags(121, 122) (ts, c11, c12) values (now-11m, 111)" + ) + tdSql.execute( + "insert into t15 using stb1(t11, t12) tags(131, 132) (ts, c11, c12) values (now-12m, NULL , 212)" + ) + tdSql.execute( + "insert into t16 using stb1(t11, t12) tags(141, 142) (ts, c11, c12) values (now-13m, 'NULL', 213)" + ) + tdSql.error( + "insert into t17 using stb1(t11, t12) tags(151, 152) (ts, c11, c12) values (now-14m, Nan, 214)" + ) + tdSql.error( + "insert into t18 using stb1(t11, t12) tags(161, 162) (ts, c11, c12) values (now-15m, 'NaN', 215)" + ) + tdSql.execute( + "insert into t19 using stb1(t11, t12) tags(171, 172) (ts, c11) values (now-16m, 216)" + ) + tdSql.error( + "insert into t20 using stb1(t11, t12) tags(181, 182) (c11, c12) values (117, 217)" + ) + + # multi-col_value + tdSql.execute( + "insert into t21 using stb1(t11, t12) tags(191, 192) (ts, c11, c12) values (now-17m, 118, 218)(now-18m, 119, 219)" + ) + tdSql.execute( + "insert into t22 using stb1(t11, t12) tags(201, 202) values (now-19m, 120, 220)(now-19m, 121, 221)" + ) + tdSql.error( + "insert into t23 using stb1(t11, t12) tags(211, 212) values (now-20m, 122, 222) (ts, c11, c12) values (now-21m, 123, 223)" + ) + tdSql.error( + "insert into t24 using stb1(t11, t12) tags(221, 222) (ts, c11, c12) values (now-22m, 124, 224) (ts, c11, c12) values (now-23m, 125, 225)" + ) + tdSql.execute( + "insert into t25 (ts, c11, c12) using stb1(t11, t12) tags(231, 232) values (now-24m, 126, 226)(now-25m, 127, 227)" + ) + tdSql.error( + "insert into t26 (ts, c11, c12) values (now-24m, 128, 228)(now-25m, 129, 229) using stb1(t11, t12) tags(241, 242) " + ) + tdSql.error( + "insert into t27 (ts, c11, c12) values (now-24m, 130, 230) using stb1(t11, t12) tags(251, 252) " + ) + + tdSql.query("show tables") + tdSql.checkRows(21) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 2f069d9068a9e70d9ffd89ff36fa274ca6fe8ff1 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 30 Apr 2021 16:42:29 +0800 Subject: [PATCH 027/140] Hotfix/sangshuduo/td 3968 taosdemo datalen 16k (#5971) * [TD-3968]: taosdemo data length should be 16*1024 * [TD-3968]: taosdemo datalen should be 16*1024 fix data buffer issue. * [TD-3968]: taosdemo datalen should be 16*1024 commend out unused func for debugging purpose in the future * [TD-3968]: taosdemo datalen should be 16*1024 comment off unused function * [TD-3968]: taosdemo datalen more than 16k. comment off unused array. 
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 100 +++++++++++++++++++++--------------- 1 file changed, 59 insertions(+), 41 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 12684c63bc..fe0d9a0396 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -81,7 +81,7 @@ enum QUERY_MODE { #define MAX_DB_NAME_SIZE 64 #define MAX_HOSTNAME_SIZE 64 #define MAX_TB_NAME_SIZE 64 -#define MAX_DATA_SIZE (16*1024) +#define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space #define MAX_NUM_DATATYPE 10 #define OPT_ABORT 1 /* –abort */ #define STRING_LEN 60000 @@ -1191,13 +1191,31 @@ static float rand_float(){ return randfloat[cursor]; } +#if 0 +static const char charNum[] = "0123456789"; + +static void nonrand_string(char *, int) __attribute__ ((unused)); // reserve for debugging purpose +static void nonrand_string(char *str, int size) +{ + str[0] = 0; + if (size > 0) { + int n; + for (n = 0; n < size; n++) { + str[n] = charNum[n % 10]; + } + str[n] = 0; + } +} +#endif + static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; + static void rand_string(char *str, int size) { str[0] = 0; if (size > 0) { //--size; int n; - for (n = 0; n < size - 1; n++) { + for (n = 0; n < size; n++) { int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1); str[n] = charset[key]; } @@ -4438,11 +4456,11 @@ static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stb char *pstr = recBuf; int64_t maxLen = MAX_DATA_SIZE; - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ",", timestamp); for (int i = 0; i < stbInfo->columnCount; i++) { - if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6)) - || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) { + if ((0 == strncasecmp(stbInfo->columns[i].dataType, "BINARY", strlen("BINARY"))) + || (0 == strncasecmp(stbInfo->columns[i].dataType, "NCHAR", strlen("NCHAR")))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); @@ -4455,47 +4473,47 @@ static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stb return -1; } rand_string(buf, stbInfo->columns[i].dataLen); - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\', ", buf); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf); tmfree(buf); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "int", 3)) { + "INT", 3)) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%d, ", rand_int()); + "%d,", rand_int()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "bigint", 6)) { + "BIGINT", 6)) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%"PRId64", ", rand_bigint()); + "%"PRId64",", rand_bigint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "float", 5)) { + "FLOAT", 5)) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%f, ", rand_float()); + "%f,", rand_float()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "double", 6)) { + "DOUBLE", 6)) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%f, ", rand_double()); + "%f,", rand_double()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "smallint", 8)) { + "SMALLINT", 8)) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%d, ", 
rand_smallint()); + "%d,", rand_smallint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "tinyint", strlen("tinyint"))) { + "TINYINT", strlen("TINYINT"))) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%d, ", rand_tinyint()); + "%d,", rand_tinyint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "bool", strlen("bool"))) { + "BOOL", strlen("BOOL"))) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%d, ", rand_bool()); + "%d,", rand_bool()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "timestamp", strlen("timestamp"))) { + "TIMESTAMP", strlen("TIMESTAMP"))) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%"PRId64", ", rand_bigint()); + "%"PRId64",", rand_bigint()); } else { errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType); return -1; } } - dataLen -= 2; + dataLen -= 1; dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")"); verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); @@ -4522,31 +4540,31 @@ static int64_t generateData(char *recBuf, char **data_type, } for (int i = 0; i < c; i++) { - if (strcasecmp(data_type[i % c], "tinyint") == 0) { - pstr += sprintf(pstr, ", %d", rand_tinyint() ); - } else if (strcasecmp(data_type[i % c], "smallint") == 0) { - pstr += sprintf(pstr, ", %d", rand_smallint()); - } else if (strcasecmp(data_type[i % c], "int") == 0) { - pstr += sprintf(pstr, ", %d", rand_int()); - } else if (strcasecmp(data_type[i % c], "bigint") == 0) { - pstr += sprintf(pstr, ", %" PRId64, rand_bigint()); - } else if (strcasecmp(data_type[i % c], "float") == 0) { - pstr += sprintf(pstr, ", %10.4f", rand_float()); - } else if (strcasecmp(data_type[i % c], "double") == 0) { + if (strcasecmp(data_type[i % c], "TINYINT") == 0) { + pstr += sprintf(pstr, ",%d", rand_tinyint() ); + } else if (strcasecmp(data_type[i % c], "SMALLINT") == 0) { + pstr += sprintf(pstr, ",%d", rand_smallint()); + } else if (strcasecmp(data_type[i % c], "INT") == 0) { + pstr += sprintf(pstr, ",%d", rand_int()); + } else if (strcasecmp(data_type[i % c], "BIGINT") == 0) { + pstr += sprintf(pstr, ",%" PRId64, rand_bigint()); + } else if (strcasecmp(data_type[i % c], "FLOAT") == 0) { + pstr += sprintf(pstr, ",%10.4f", rand_float()); + } else if (strcasecmp(data_type[i % c], "DOUBLE") == 0) { double t = rand_double(); - pstr += sprintf(pstr, ", %20.8f", t); - } else if (strcasecmp(data_type[i % c], "bool") == 0) { + pstr += sprintf(pstr, ",%20.8f", t); + } else if (strcasecmp(data_type[i % c], "BOOL") == 0) { bool b = taosRandom() & 1; - pstr += sprintf(pstr, ", %s", b ? "true" : "false"); - } else if (strcasecmp(data_type[i % c], "binary") == 0) { + pstr += sprintf(pstr, ",%s", b ? "true" : "false"); + } else if (strcasecmp(data_type[i % c], "BINARY") == 0) { char *s = malloc(lenOfBinary); rand_string(s, lenOfBinary); - pstr += sprintf(pstr, ", \"%s\"", s); + pstr += sprintf(pstr, ",\"%s\"", s); free(s); - } else if (strcasecmp(data_type[i % c], "nchar") == 0) { + } else if (strcasecmp(data_type[i % c], "NCHAR") == 0) { char *s = malloc(lenOfBinary); rand_string(s, lenOfBinary); - pstr += sprintf(pstr, ", \"%s\"", s); + pstr += sprintf(pstr, ",\"%s\"", s); free(s); } From d78ac9848c3efd37107b886bd1f6e1780b805d86 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 30 Apr 2021 17:12:20 +0800 Subject: [PATCH 028/140] [td-3967]1) refactor. 
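
The parser change in this patch extends string-literal unescaping in the insert path: besides backslash escapes, a doubled quote delimiter ('' or "") inside a quoted value now collapses to a single quote character. A rough sketch of that rule, written in Python purely for illustration (the helper name is invented here; the real code works on the raw token buffer and maps backslash escapes such as \n through an escape table):

    # Illustrative sketch only -- not the actual tscParseInsert.c logic.
    def unescape_literal(token: str) -> str:
        delim = token[0]                    # the opening quote, ' or "
        body = token[1:-1]                  # strip the enclosing quotes
        out = []
        k = 0
        while k < len(body):
            if body[k] == '\\' and k + 1 < len(body):
                out.append(body[k + 1])     # real parser maps \n, \t, ... via an escape table
                k += 2
            elif body[k] == delim and k + 1 < len(body) and body[k + 1] == delim:
                out.append(delim)           # a doubled quote keeps one quote character
                k += 2
            else:
                out.append(body[k])
                k += 1
        return ''.join(out)

For example, unescape_literal("'it''s'") returns it's; the sim cases added below exercise the backslash side of the same rule.
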
--- src/client/src/tscParseInsert.c | 12 +++++++++--- .../script/general/parser/binary_escapeCharacter.sim | 9 +++++++++ tests/script/general/parser/testSuite.sim | 1 + 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 298f1245e7..9130118f80 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -462,13 +462,19 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1 // Remove quotation marks if (TK_STRING == sToken.type) { // delete escape character: \\, \', \" + char delim = sToken.z[0]; + int32_t cnt = 0; int32_t j = 0; for (uint32_t k = 1; k < sToken.n - 1; ++k) { - if (sToken.z[k] == '\\') { - cnt++; + if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) { + if (sToken.z[k] == '\\') { + tmpTokenBuf[j] = GET_ESCAPE_CHAR(sToken.z[k+1]); + } else { + tmpTokenBuf[j] = sToken.z[k + 1]; + } - tmpTokenBuf[j] = GET_ESCAPE_CHAR(sToken.z[k+1]); + cnt++; j++; k++; continue; diff --git a/tests/script/general/parser/binary_escapeCharacter.sim b/tests/script/general/parser/binary_escapeCharacter.sim index f0589d154f..1c6bc5b37f 100644 --- a/tests/script/general/parser/binary_escapeCharacter.sim +++ b/tests/script/general/parser/binary_escapeCharacter.sim @@ -93,5 +93,14 @@ if $data41 != @udp005@ then print "[ERROR] expect: udp005, act:$data41" endi +print ---------------------> TD-3967 +sql insert into tb values(now, '\\abc\\\\'); +sql insert into tb values(now, '\\abc\\\\'); +sql insert into tb values(now, '\\\\'); + +print ------------->sim bug +# sql_error insert into tb values(now, '\\\'); +# sql_error insert into tb values(now, '\\'); +# sql_error insert into tb values(now, '\\n'); system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index f05474d158..b605a04df7 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -55,4 +55,5 @@ run general/parser/sliding.sim run general/parser/function.sim run general/parser/stableOp.sim run general/parser/slimit_alter_tags.sim +run general/parser/binary_escapeCharacter.sim From b0ab4573bba52b91df50edde52819ecb7e6c1468 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 30 Apr 2021 17:14:57 +0800 Subject: [PATCH 029/140] [td-3967]update the sim. --- tests/script/general/parser/binary_escapeCharacter.sim | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/script/general/parser/binary_escapeCharacter.sim b/tests/script/general/parser/binary_escapeCharacter.sim index 1c6bc5b37f..b5bb10284b 100644 --- a/tests/script/general/parser/binary_escapeCharacter.sim +++ b/tests/script/general/parser/binary_escapeCharacter.sim @@ -100,7 +100,8 @@ sql insert into tb values(now, '\\\\'); print ------------->sim bug # sql_error insert into tb values(now, '\\\'); -# sql_error insert into tb values(now, '\\'); -# sql_error insert into tb values(now, '\\n'); +sql_error insert into tb values(now, '\'); +#sql_error insert into tb values(now, '\\\n'); +sql insert into tb values(now, '\n'); system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file From 5d493f11b3ca9dfdbf6c543d6d7fba4cca917da8 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 30 Apr 2021 17:32:57 +0800 Subject: [PATCH 030/140] Hotfix/sangshuduo/td 3851 taosdemo performance boost (#5968) * [TD-3851]: taosdemo performance boost. 
* change few default values. * [TD-3851]: taosdemo performance boost. change max sql len default value to 1024*1024 Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index fe0d9a0396..f35c0facfa 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -569,7 +569,7 @@ SArguments g_args = { 1, // query_times 0, // interlace_rows; 30000, // num_of_RPR - 1024000, // max_sql_len + (1024*1024), // max_sql_len 10000, // num_of_tables 10000, // num_of_DPT 0, // abort @@ -666,11 +666,11 @@ static void printHelp() { printf("%s%s%s%s\n", indent, "-q", indent, "Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC."); printf("%s%s%s%s\n", indent, "-b", indent, - "The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP."); + "The data_type of columns, default: INT,INT,INT,INT."); printf("%s%s%s%s\n", indent, "-w", indent, "The length of data_type 'BINARY' or 'NCHAR'. Default is 16"); printf("%s%s%s%s\n", indent, "-l", indent, - "The number of columns per record. Default is 10."); + "The number of columns per record. Default is 4."); printf("%s%s%s%s\n", indent, "-T", indent, "The number of threads. Default is 10."); printf("%s%s%s%s\n", indent, "-i", indent, @@ -3334,7 +3334,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (threads2 && threads2->type == cJSON_Number) { g_Dbs.threadCountByCreateTbl = threads2->valueint; } else if (!threads2) { - g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; + g_Dbs.threadCountByCreateTbl = 1; } else { errorPrint("%s() LN%d, failed to read json, threads2 not found\n", __func__, __LINE__); @@ -3378,7 +3378,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (maxSqlLen && maxSqlLen->type == cJSON_Number) { g_args.max_sql_len = maxSqlLen->valueint; } else if (!maxSqlLen) { - g_args.max_sql_len = 1024000; + g_args.max_sql_len = (1024*1024); } else { errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", __func__, __LINE__); From a7e41f2c8677c82dc9a560ce0d8ea87bdd93ffde Mon Sep 17 00:00:00 2001 From: zyyang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Fri, 30 Apr 2021 18:05:29 +0800 Subject: [PATCH 031/140] Hotfix/td 3986 (#5982) * change * [TD-3986]: modify the subscribe test cases --- .../jdbc/rs/RestfulPreparedStatement.java | 11 --- .../java/com/taosdata/jdbc/utils/Utils.java | 8 +- .../java/com/taosdata/jdbc/SubscribeTest.java | 85 +++++++++---------- .../cases/InsertSpecialCharacterJniTest.java | 25 ++++++ .../InsertSpecialCharacterRestfulTest.java | 25 ++++++ 5 files changed, 92 insertions(+), 62 deletions(-) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java index f846a1162e..f58e3f8cd2 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java @@ -1,26 +1,15 @@ package com.taosdata.jdbc.rs; -import com.google.common.collect.Range; -import com.google.common.collect.RangeSet; -import com.google.common.collect.TreeRangeSet; import com.taosdata.jdbc.TSDBError; import com.taosdata.jdbc.TSDBErrorNumbers; -import com.taosdata.jdbc.utils.SqlSyntaxValidator; import com.taosdata.jdbc.utils.Utils; import java.io.InputStream; import java.io.Reader; import 
java.math.BigDecimal; import java.net.URL; -import java.nio.charset.Charset; import java.sql.*; import java.util.Calendar; -import java.util.HashMap; -import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.IntStream; public class RestfulPreparedStatement extends RestfulStatement implements PreparedStatement { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java index a7cb71ad38..0ce4bd3dc1 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java @@ -67,7 +67,7 @@ public class Utils { findPlaceholderPosition(preparedSql, placeholderPositions); findClauseRangeSet(preparedSql, clause, clauseRangeSet); - return transformSql(preparedSql, parameters, placeholderPositions, clauseRangeSet); + return transformSql(rawSql, parameters, placeholderPositions, clauseRangeSet); } private static void findClauseRangeSet(String preparedSql, String[] regexArr, RangeSet clauseRangeSet) { @@ -95,14 +95,14 @@ public class Utils { /*** * - * @param preparedSql + * @param rawSql * @param paramArr * @param placeholderPosition * @param clauseRangeSet * @return */ - private static String transformSql(String preparedSql, Object[] paramArr, Map placeholderPosition, RangeSet clauseRangeSet) { - String[] sqlArr = preparedSql.split("\\?"); + private static String transformSql(String rawSql, Object[] paramArr, Map placeholderPosition, RangeSet clauseRangeSet) { + String[] sqlArr = rawSql.split("\\?"); return IntStream.range(0, sqlArr.length).mapToObj(index -> { if (index == paramArr.length) return sqlArr[index]; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java index 11c3de3052..3a223ed981 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java @@ -12,6 +12,7 @@ import java.util.Properties; import java.util.concurrent.TimeUnit; public class SubscribeTest { + Connection connection; Statement statement; String dbName = "test"; @@ -19,62 +20,53 @@ public class SubscribeTest { String host = "127.0.0.1"; String topic = "test"; - @Before - public void createDatabase() { - try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); - - statement = connection.createStatement(); - statement.execute("drop database if exists " + dbName); - statement.execute("create database if not exists " + dbName); - statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)"); - long ts = System.currentTimeMillis(); - for (int i = 0; i < 2; i++) { - ts += i; - String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")"; - statement.executeUpdate(sql); - } - - } catch (ClassNotFoundException | SQLException e) { - return; - } - } - @Test public void subscribe() { try { String rawSql = "select * from " + dbName + "." 
+ tName + ";"; - System.out.println(rawSql); -// TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false); + TSDBConnection conn = connection.unwrap(TSDBConnection.class); + TSDBSubscribe subscribe = conn.subscribe(topic, rawSql, false); -// int a = 0; -// while (true) { -// TimeUnit.MILLISECONDS.sleep(1000); -// TSDBResultSet resSet = subscribe.consume(); -// while (resSet.next()) { -// for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) { -// System.out.printf(i + ": " + resSet.getString(i) + "\t"); -// } -// System.out.println("\n======" + a + "=========="); -// } -// a++; -// if (a >= 2) { -// break; -// } -// resSet.close(); -// } -// -// subscribe.close(true); + int a = 0; + while (true) { + TimeUnit.MILLISECONDS.sleep(1000); + TSDBResultSet resSet = subscribe.consume(); + while (resSet.next()) { + for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) { + System.out.printf(i + ": " + resSet.getString(i) + "\t"); + } + System.out.println("\n======" + a + "=========="); + } + a++; + if (a >= 2) { + break; + } + resSet.close(); + } + + subscribe.close(true); } catch (Exception e) { e.printStackTrace(); } } + @Before + public void createDatabase() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); + + statement = connection.createStatement(); + statement.execute("drop database if exists " + dbName); + statement.execute("create database if not exists " + dbName); + statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)"); + long ts = System.currentTimeMillis(); + statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 100, 1)"); + statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + (ts + 1) + ", 101, 2)"); + } + @After public void close() { try { @@ -86,6 +78,5 @@ public class SubscribeTest { } catch (SQLException e) { e.printStackTrace(); } - } } \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java index e7d3274136..9014e82a9e 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java @@ -345,6 +345,31 @@ public class InsertSpecialCharacterJniTest { } } + @Test + public void testCase12() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) 
; "; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setString(2, special_character_str_4); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from " + tbname1; + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery(query); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals("HelloTDengine", f1); + String f2 = rs.getString(3); + Assert.assertEquals(special_character_str_4, f2); + } + } + @Before public void before() throws SQLException { try (Statement stmt = conn.createStatement()) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java index 64a0e97684..0cbbe76716 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java @@ -346,6 +346,31 @@ public class InsertSpecialCharacterRestfulTest { } } + @Test + public void testCase12() throws SQLException { + final long now = System.currentTimeMillis(); + // insert + final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; "; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, new Timestamp(now)); + pstmt.setString(2, special_character_str_4); + int ret = pstmt.executeUpdate(); + Assert.assertEquals(1, ret); + } + // query + final String query = "select * from " + tbname1; + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery(query); + rs.next(); + long timestamp = rs.getTimestamp(1).getTime(); + Assert.assertEquals(now, timestamp); + String f1 = new String(rs.getBytes(2)); + Assert.assertEquals("HelloTDengine", f1); + String f2 = rs.getString(3); + Assert.assertEquals(special_character_str_4, f2); + } + } + @Before public void before() throws SQLException { try (Statement stmt = conn.createStatement()) { From b12668ff7fbb697643d58515fe26e24a7abde675 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 30 Apr 2021 22:45:59 +0800 Subject: [PATCH 032/140] [TD-3639] : compare timestamp with datetime in python (#5990) By this commit, It will close TD-3639. It will not require test case to write *correct* datetime string to pass a test case. One test could use seconds, milliseconds or microseconds to check data. 
That means the three tests are equivalent: ```python .checkData(0, 0, "2020-01-01 00:00:00") .checkData(0, 0, "2020-01-01 00:00:00.000") .checkData(0, 0, "2020-01-01 00:00:00.000000") ``` Co-authored-by: Huo Linhe --- tests/pytest/util/sql.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index ba9cb4d53d..8f62c5932b 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -136,6 +136,11 @@ class TDSql: def checkData(self, row, col, data): self.checkRowCol(row, col) if self.queryResult[row][col] != data: + if self.cursor.istype(col, "TIMESTAMP") and self.queryResult[row][col] == datetime.datetime.fromisoformat(data): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (self.sql, row, col, self.queryResult[row][col], data)) + return + if str(self.queryResult[row][col]) == str(data): tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (self.sql, row, col, self.queryResult[row][col], data)) From 29e9ff8ba27c7f82236673e048d041b5aa3f94de Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 1 May 2021 19:50:53 +0800 Subject: [PATCH 033/140] [TD-4054]: taosdump wrong column note length. (#5996) * [TD-4054]: taosdump wrong column note length. * [TD-4054]: taosdump wrong column note length. reserve 2 bytes for NCHAR. --- src/kit/taosdump/taosdump.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 96a1cd16f8..0dca591b04 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -99,11 +99,13 @@ enum _describe_table_index { TSDB_MAX_DESCRIBE_METRIC }; +#define COL_NOTE_LEN 128 + typedef struct { char field[TSDB_COL_NAME_LEN + 1]; char type[16]; int length; - char note[128]; + char note[COL_NOTE_LEN]; } SColDes; typedef struct { @@ -1188,16 +1190,16 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo case TSDB_DATA_TYPE_BINARY: { memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); tableDes->cols[i].note[0] = '\''; - char tbuf[COMMAND_SIZE]; - converStringToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE); + char tbuf[COL_NOTE_LEN]; + converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN); char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf); *(pstr++) = '\''; break; } case TSDB_DATA_TYPE_NCHAR: { memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); - char tbuf[COMMAND_SIZE]; - convertNCharToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE); + char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' ' + convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN); sprintf(tableDes->cols[i].note, "\'%s\'", tbuf); break; } From 29b0751a34c5bda41c777ae69a849b25b0ce82da Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 1 May 2021 22:21:23 +0800 Subject: [PATCH 034/140] [TD-4035]: taosdemo more checking for prevent value out of scope. (#5992) * [TD-4035]: taosdemo more checking for prevent value out of scope. * [TD-4035]: taosdemo more checking for prevent value out of scope. change min rows and max rows to unsigned int and 0 means default. * [TD-4035]: taosdemo more checking for prevent value out of scope. 
change offset=0 to get all childtable count Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 474 ++++++++++++++++++++---------------- 1 file changed, 270 insertions(+), 204 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index f35c0facfa..eb0a2d3822 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -188,7 +188,7 @@ typedef struct { /* Used by main to communicate with parse_opt. */ typedef struct SArguments_S { char * metaFile; - int test_mode; + uint32_t test_mode; char * host; uint16_t port; char * user; @@ -205,31 +205,31 @@ typedef struct SArguments_S { bool verbose_print; bool performance_print; char * output_file; - int query_mode; + uint32_t query_mode; char * datatype[MAX_NUM_DATATYPE + 1]; - int len_of_binary; - int num_of_CPR; - int num_of_threads; - int64_t insert_interval; + uint32_t len_of_binary; + uint32_t num_of_CPR; + uint32_t num_of_threads; + uint64_t insert_interval; int64_t query_times; - int64_t interlace_rows; - int64_t num_of_RPR; // num_of_records_per_req - int64_t max_sql_len; - int64_t num_of_tables; - int64_t num_of_DPT; + uint64_t interlace_rows; + uint64_t num_of_RPR; // num_of_records_per_req + uint64_t max_sql_len; + uint64_t num_of_tables; + uint64_t num_of_DPT; int abort; int disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms or us by database precision - int method_of_delete; + uint32_t method_of_delete; char ** arg_list; - int64_t totalInsertRows; - int64_t totalAffectedRows; + uint64_t totalInsertRows; + uint64_t totalAffectedRows; } SArguments; typedef struct SColumn_S { - char field[TSDB_COL_NAME_LEN + 1]; - char dataType[MAX_TB_NAME_SIZE]; - int dataLen; + char field[TSDB_COL_NAME_LEN + 1]; + char dataType[MAX_TB_NAME_SIZE]; + uint32_t dataLen; char note[128]; } StrColumn; @@ -237,50 +237,50 @@ typedef struct SSuperTable_S { char sTblName[MAX_TB_NAME_SIZE+1]; int64_t childTblCount; bool childTblExists; // 0: no, 1: yes - int64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql - int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table + uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql + uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table char childTblPrefix[MAX_TB_NAME_SIZE]; char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest int64_t childTblLimit; - int64_t childTblOffset; + uint64_t childTblOffset; // int multiThreadWriteOneTbl; // 0: no, 1: yes - int64_t interlaceRows; // + uint64_t interlaceRows; // int disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms or us by database precision - int64_t maxSqlLen; // + uint64_t maxSqlLen; // - int64_t insertInterval; // insert interval, will override global insert interval - int64_t insertRows; + uint64_t insertInterval; // insert interval, will override global insert interval + uint64_t insertRows; int64_t timeStampStep; char startTimestamp[MAX_TB_NAME_SIZE]; char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json char sampleFile[MAX_FILE_NAME_LEN+1]; char tagsFile[MAX_FILE_NAME_LEN+1]; - int columnCount; + uint32_t columnCount; StrColumn columns[MAX_COLUMN_COUNT]; - int tagCount; + uint32_t tagCount; StrColumn tags[MAX_TAG_COUNT]; char* childTblName; char* colsOfCreateChildTable; - int64_t lenOfOneRow; - int64_t lenOfTagOfOneRow; + uint64_t lenOfOneRow; + uint64_t lenOfTagOfOneRow; char* sampleDataBuf; //int sampleRowCount; //int sampleUsePos; - int tagSource; 
// 0: rand, 1: tag sample + uint32_t tagSource; // 0: rand, 1: tag sample char* tagDataBuf; - int tagSampleCount; - int tagUsePos; + uint32_t tagSampleCount; + uint32_t tagUsePos; // statistics - int64_t totalInsertRows; - int64_t totalAffectedRows; + uint64_t totalInsertRows; + uint64_t totalAffectedRows; } SSuperTable; typedef struct { @@ -307,8 +307,8 @@ typedef struct { typedef struct SDbCfg_S { // int maxtablesPerVnode; - int minRows; - int maxRows; + uint32_t minRows; // 0 means default + uint32_t maxRows; // 0 means default int comp; int walLevel; int cacheLast; @@ -327,7 +327,7 @@ typedef struct SDataBase_S { char dbName[MAX_DB_NAME_SIZE]; bool drop; // 0: use exists, 1: if exists, drop then new create SDbCfg dbCfg; - int64_t superTblCount; + uint64_t superTblCount; SSuperTable superTbls[MAX_SUPER_TABLE_COUNT]; } SDataBase; @@ -345,57 +345,57 @@ typedef struct SDbs_S { bool do_aggreFunc; bool queryMode; - int threadCount; - int threadCountByCreateTbl; - int dbCount; + uint32_t threadCount; + uint32_t threadCountByCreateTbl; + uint32_t dbCount; SDataBase db[MAX_DB_COUNT]; // statistics - int64_t totalInsertRows; - int64_t totalAffectedRows; + uint64_t totalInsertRows; + uint64_t totalAffectedRows; } SDbs; typedef struct SpecifiedQueryInfo_S { - int64_t queryInterval; // 0: unlimit > 0 loop/s - int64_t concurrent; - int64_t sqlCount; - int mode; // 0: sync, 1: async - int64_t subscribeInterval; // ms - int64_t queryTimes; + uint64_t queryInterval; // 0: unlimit > 0 loop/s + uint64_t concurrent; + uint64_t sqlCount; + uint32_t mode; // 0: sync, 1: async + uint64_t subscribeInterval; // ms + uint64_t queryTimes; int subscribeRestart; int subscribeKeepProgress; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; - int64_t totalQueried; + uint64_t totalQueried; } SpecifiedQueryInfo; typedef struct SuperQueryInfo_S { char sTblName[MAX_TB_NAME_SIZE+1]; - int64_t queryInterval; // 0: unlimit > 0 loop/s - int threadCnt; - int mode; // 0: sync, 1: async - int64_t subscribeInterval; // ms + uint64_t queryInterval; // 0: unlimit > 0 loop/s + uint32_t threadCnt; + uint32_t mode; // 0: sync, 1: async + uint64_t subscribeInterval; // ms int subscribeRestart; int subscribeKeepProgress; - int64_t queryTimes; - int64_t childTblCount; + uint64_t queryTimes; + uint64_t childTblCount; char childTblPrefix[MAX_TB_NAME_SIZE]; - int64_t sqlCount; + uint64_t sqlCount; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; char* childTblName; - int64_t totalQueried; + uint64_t totalQueried; } SuperQueryInfo; typedef struct SQueryMetaInfo_S { char cfgDir[MAX_FILE_NAME_LEN+1]; char host[MAX_HOSTNAME_SIZE]; uint16_t port; - struct sockaddr_in serv_addr; + struct sockaddr_in serv_addr; char user[MAX_USERNAME_SIZE]; char password[MAX_PASSWORD_SIZE]; char dbName[MAX_DB_NAME_SIZE+1]; @@ -403,47 +403,47 @@ typedef struct SQueryMetaInfo_S { SpecifiedQueryInfo specifiedQueryInfo; SuperQueryInfo superQueryInfo; - int64_t totalQueried; + uint64_t totalQueried; } SQueryMetaInfo; typedef struct SThreadInfo_S { - TAOS *taos; - int threadID; - char db_name[MAX_DB_NAME_SIZE+1]; - uint32_t time_precision; - char fp[4096]; - char tb_prefix[MAX_TB_NAME_SIZE]; - int64_t start_table_from; - int64_t end_table_to; - int64_t ntables; - int64_t data_of_rate; - int64_t start_time; - char* cols; - bool use_metric; + TAOS * taos; + int threadID; + char 
db_name[MAX_DB_NAME_SIZE+1]; + uint32_t time_precision; + char fp[4096]; + char tb_prefix[MAX_TB_NAME_SIZE]; + uint64_t start_table_from; + uint64_t end_table_to; + uint64_t ntables; + uint64_t data_of_rate; + int64_t start_time; + char* cols; + bool use_metric; SSuperTable* superTblInfo; // for async insert - tsem_t lock_sem; - int64_t counter; + tsem_t lock_sem; + int64_t counter; uint64_t st; uint64_t et; - int64_t lastTs; + uint64_t lastTs; // sample data - int64_t samplePos; + int64_t samplePos; // statistics - int64_t totalInsertRows; - int64_t totalAffectedRows; + uint64_t totalInsertRows; + uint64_t totalAffectedRows; // insert delay statistics - int64_t cntDelay; - int64_t totalDelay; - int64_t avgDelay; - int64_t maxDelay; - int64_t minDelay; + uint64_t cntDelay; + uint64_t totalDelay; + uint64_t avgDelay; + uint64_t maxDelay; + uint64_t minDelay; // query - int64_t querySeq; // sequence number of sql command + uint64_t querySeq; // sequence number of sql command } threadInfo; #ifdef WINDOWS @@ -1004,17 +1004,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { break; printf("\n"); } - printf("# Insertion interval: %"PRId64"\n", + printf("# Insertion interval: %"PRIu64"\n", arguments->insert_interval); - printf("# Number of records per req: %"PRId64"\n", + printf("# Number of records per req: %"PRIu64"\n", arguments->num_of_RPR); - printf("# Max SQL length: %"PRId64"\n", + printf("# Max SQL length: %"PRIu64"\n", arguments->max_sql_len); printf("# Length of Binary: %d\n", arguments->len_of_binary); printf("# Number of Threads: %d\n", arguments->num_of_threads); - printf("# Number of Tables: %"PRId64"\n", + printf("# Number of Tables: %"PRIu64"\n", arguments->num_of_tables); - printf("# Number of Data per Table: %"PRId64"\n", + printf("# Number of Data per Table: %"PRIu64"\n", arguments->num_of_DPT); printf("# Database name: %s\n", arguments->database); printf("# Table prefix: %s\n", arguments->tb_prefix); @@ -1270,11 +1270,11 @@ static int printfInsertMeta() { printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl); - printf("top insert interval: \033[33m%"PRId64"\033[0m\n", + printf("top insert interval: \033[33m%"PRIu64"\033[0m\n", g_args.insert_interval); - printf("number of records per req: \033[33m%"PRId64"\033[0m\n", + printf("number of records per req: \033[33m%"PRIu64"\033[0m\n", g_args.num_of_RPR); - printf("max sql length: \033[33m%"PRId64"\033[0m\n", + printf("max sql length: \033[33m%"PRIu64"\033[0m\n", g_args.max_sql_len); printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); @@ -1336,10 +1336,10 @@ static int printfInsertMeta() { } } - printf(" super table count: \033[33m%"PRId64"\033[0m\n", + printf(" super table count: \033[33m%"PRIu64"\033[0m\n", g_Dbs.db[i].superTblCount); - for (int64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - printf(" super table[\033[33m%"PRId64"\033[0m]:\n", j); + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { + printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j); printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName); @@ -1360,7 +1360,7 @@ static int printfInsertMeta() { printf(" childTblExists: \033[33m%s\033[0m\n", "error"); } - printf(" childTblCount: \033[33m%"PRId64"\033[0m\n", + printf(" childTblCount: \033[33m%"PRIu64"\033[0m\n", g_Dbs.db[i].superTbls[j].childTblCount); printf(" childTblPrefix: 
\033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].childTblPrefix); @@ -1373,10 +1373,10 @@ static int printfInsertMeta() { g_Dbs.db[i].superTbls[j].childTblLimit); } if (g_Dbs.db[i].superTbls[j].childTblOffset >= 0) { - printf(" childTblOffset: \033[33m%"PRId64"\033[0m\n", + printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n", g_Dbs.db[i].superTbls[j].childTblOffset); } - printf(" insertRows: \033[33m%"PRId64"\033[0m\n", + printf(" insertRows: \033[33m%"PRIu64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertRows); /* if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { @@ -1385,11 +1385,11 @@ static int printfInsertMeta() { printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); } */ - printf(" interlaceRows: \033[33m%"PRId64"\033[0m\n", + printf(" interlaceRows: \033[33m%"PRIu64"\033[0m\n", g_Dbs.db[i].superTbls[j].interlaceRows); if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - printf(" stable insert interval: \033[33m%"PRId64"\033[0m\n", + printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertInterval); } @@ -1397,7 +1397,7 @@ static int printfInsertMeta() { g_Dbs.db[i].superTbls[j].disorderRange); printf(" disorderRatio: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRatio); - printf(" maxSqlLen: \033[33m%"PRId64"\033[0m\n", + printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n", g_Dbs.db[i].superTbls[j].maxSqlLen); printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].timeStampStep); @@ -1463,8 +1463,8 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl); - fprintf(fp, "number of records per req: %"PRId64"\n", g_args.num_of_RPR); - fprintf(fp, "max sql length: %"PRId64"\n", g_args.max_sql_len); + fprintf(fp, "number of records per req: %"PRIu64"\n", g_args.num_of_RPR); + fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len); fprintf(fp, "database count: %d\n", g_Dbs.dbCount); for (int i = 0; i < g_Dbs.dbCount; i++) { @@ -1521,7 +1521,7 @@ static void printfInsertMetaToFile(FILE* fp) { } } - fprintf(fp, " super table count: %"PRId64"\n", g_Dbs.db[i].superTblCount); + fprintf(fp, " super table count: %"PRIu64"\n", g_Dbs.db[i].superTblCount); for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { fprintf(fp, " super table[%d]:\n", j); @@ -1543,7 +1543,7 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, " childTblExists: %s\n", "error"); } - fprintf(fp, " childTblCount: %"PRId64"\n", + fprintf(fp, " childTblCount: %"PRIu64"\n", g_Dbs.db[i].superTbls[j].childTblCount); fprintf(fp, " childTblPrefix: %s\n", g_Dbs.db[i].superTbls[j].childTblPrefix); @@ -1551,12 +1551,12 @@ static void printfInsertMetaToFile(FILE* fp) { g_Dbs.db[i].superTbls[j].dataSource); fprintf(fp, " insertMode: %s\n", g_Dbs.db[i].superTbls[j].insertMode); - fprintf(fp, " insertRows: %"PRId64"\n", + fprintf(fp, " insertRows: %"PRIu64"\n", g_Dbs.db[i].superTbls[j].insertRows); - fprintf(fp, " interlace rows: %"PRId64"\n", + fprintf(fp, " interlace rows: %"PRIu64"\n", g_Dbs.db[i].superTbls[j].interlaceRows); if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - fprintf(fp, " stable insert interval: %"PRId64"\n", + fprintf(fp, " stable insert interval: %"PRIu64"\n", g_Dbs.db[i].superTbls[j].insertInterval); } /* @@ -1566,11 +1566,11 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, " multiThreadWriteOneTbl: yes\n"); } */ - fprintf(fp, " 
interlaceRows: %"PRId64"\n", + fprintf(fp, " interlaceRows: %"PRIu64"\n", g_Dbs.db[i].superTbls[j].interlaceRows); fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange); fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio); - fprintf(fp, " maxSqlLen: %"PRId64"\n", + fprintf(fp, " maxSqlLen: %"PRIu64"\n", g_Dbs.db[i].superTbls[j].maxSqlLen); fprintf(fp, " timeStampStep: %"PRId64"\n", @@ -1631,21 +1631,21 @@ static void printfQueryMeta() { printf("\n"); printf("specified table query info: \n"); - printf("query interval: \033[33m%"PRId64" ms\033[0m\n", + printf("query interval: \033[33m%"PRIu64" ms\033[0m\n", g_queryInfo.specifiedQueryInfo.queryInterval); - printf("top query times:\033[33m%"PRId64"\033[0m\n", g_args.query_times); - printf("concurrent: \033[33m%"PRId64"\033[0m\n", + printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times); + printf("concurrent: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.specifiedQueryInfo.concurrent); - printf("sqlCount: \033[33m%"PRId64"\033[0m\n", + printf("sqlCount: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.specifiedQueryInfo.sqlCount); printf("specified tbl query times:\n"); - printf(" \033[33m%"PRId64"\033[0m\n", + printf(" \033[33m%"PRIu64"\033[0m\n", g_queryInfo.specifiedQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { printf("mod: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.mode); - printf("interval: \033[33m%"PRId64"\033[0m\n", + printf("interval: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeInterval); printf("restart: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeRestart); @@ -1653,27 +1653,27 @@ static void printfQueryMeta() { g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); } - for (int64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%"PRId64"]: \033[33m%s\033[0m\n", + for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n", i, g_queryInfo.specifiedQueryInfo.sql[i]); } printf("\n"); printf("super table query info:\n"); - printf("query interval: \033[33m%"PRId64"\033[0m\n", + printf("query interval: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.superQueryInfo.queryInterval); printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.threadCnt); - printf("childTblCount: \033[33m%"PRId64"\033[0m\n", + printf("childTblCount: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.superQueryInfo.childTblCount); printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.superQueryInfo.sTblName); - printf("stb query times:\033[33m%"PRId64"\033[0m\n", + printf("stb query times:\033[33m%"PRIu64"\033[0m\n", g_queryInfo.superQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.mode); - printf("interval: \033[33m%"PRId64"\033[0m\n", + printf("interval: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval); printf("restart: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeRestart); @@ -1681,7 +1681,7 @@ static void printfQueryMeta() { g_queryInfo.superQueryInfo.subscribeKeepProgress); } - printf("sqlCount: \033[33m%"PRId64"\033[0m\n", + printf("sqlCount: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { printf(" sql[%d]: \033[33m%s\033[0m\n", @@ -2290,7 +2290,7 @@ static int calcRowLen(SSuperTable* superTbls) { static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * 
taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, - int64_t* childTblCountOfSuperTbl, int64_t limit, int64_t offset) { + uint64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) { char command[BUFFER_SIZE] = "\0"; char limitBuf[100] = "\0"; @@ -2301,7 +2301,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* childTblName = *childTblNameOfSuperTbl; if (offset >= 0) { - snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRId64"", + snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"", limit, offset); } @@ -2367,11 +2367,11 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, - int64_t* childTblCountOfSuperTbl) { + uint64_t* childTblCountOfSuperTbl) { return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName, childTblNameOfSuperTbl, childTblCountOfSuperTbl, - -1, -1); + -1, 0); } static int getSuperTableFromServer(TAOS * taos, char* dbName, @@ -2707,7 +2707,7 @@ static int createDatabasesAndStables() { printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); } - debugPrint("%s() LN%d supertbl count:%"PRId64"\n", + debugPrint("%s() LN%d supertbl count:%"PRIu64"\n", __func__, __LINE__, g_Dbs.db[i].superTblCount); int validStbCount = 0; @@ -2766,15 +2766,15 @@ static void* createTable(void *sarg) int len = 0; int batchNum = 0; - verbosePrint("%s() LN%d: Creating table from %"PRId64" to %"PRId64"\n", + verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n", __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->end_table_to); - for (int64_t i = pThreadInfo->start_table_from; + for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { if (0 == g_Dbs.use_metric) { snprintf(buffer, buff_len, - "create table if not exists %s.%s%"PRId64" %s;", + "create table if not exists %s.%s%"PRIu64" %s;", pThreadInfo->db_name, g_args.tb_prefix, i, pThreadInfo->cols); @@ -2805,7 +2805,7 @@ static void* createTable(void *sarg) } len += snprintf(buffer + len, buff_len - len, - "if not exists %s.%s%"PRId64" using %s.%s tags %s ", + "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", pThreadInfo->db_name, superTblInfo->childTblPrefix, i, pThreadInfo->db_name, superTblInfo->sTblName, tagsValBuf); @@ -2829,7 +2829,7 @@ static void* createTable(void *sarg) int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] already create %"PRId64" - %"PRId64" tables\n", + printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n", pThreadInfo->threadID, pThreadInfo->start_table_from, i); lastPrintTime = currentPrintTime; } @@ -2897,7 +2897,7 @@ static int startMultiThreadCreateChildTable( startFrom = t_info->end_table_to + 1; t_info->use_metric = true; t_info->cols = cols; - t_info->minDelay = INT64_MAX; + t_info->minDelay = UINT64_MAX; pthread_create(pids + i, NULL, createTable, t_info); } @@ -2963,7 +2963,7 @@ static void createChildTables() { snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); - verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n", + verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRIu64" schema: %s\n", __func__, __LINE__, g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); startMultiThreadCreateChildTable( @@ -3091,7 +3091,7 @@ static int readSampleFromCsvFileToMem( } if (readLen > superTblInfo->lenOfOneRow) { - printf("sample row len[%d] overflow define 
schema len[%"PRId64"], so discard this row\n", + printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n", (int32_t)readLen, superTblInfo->lenOfOneRow); continue; } @@ -3343,6 +3343,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval"); if (gInsertInterval && gInsertInterval->type == cJSON_Number) { + if (gInsertInterval->valueint <0) { + errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } g_args.insert_interval = gInsertInterval->valueint; } else if (!gInsertInterval) { g_args.insert_interval = 0; @@ -3354,13 +3359,19 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); if (interlaceRows && interlaceRows->type == cJSON_Number) { + if (interlaceRows->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + + } g_args.interlace_rows = interlaceRows->valueint; // rows per table need be less than insert batch if (g_args.interlace_rows > g_args.num_of_RPR) { - printf("NOTICE: interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n", + printf("NOTICE: interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n", g_args.interlace_rows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n", + printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n", g_args.num_of_RPR); printf(" press Enter key to continue or Ctrl-C to stop."); (void)getchar(); @@ -3376,6 +3387,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len"); if (maxSqlLen && maxSqlLen->type == cJSON_Number) { + if (maxSqlLen->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } g_args.max_sql_len = maxSqlLen->valueint; } else if (!maxSqlLen) { g_args.max_sql_len = (1024*1024); @@ -3387,9 +3403,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req"); if (numRecPerReq && numRecPerReq->type == cJSON_Number) { + if (numRecPerReq->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } g_args.num_of_RPR = numRecPerReq->valueint; } else if (!numRecPerReq) { - g_args.num_of_RPR = INT64_MAX; + g_args.num_of_RPR = UINT64_MAX; } else { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); @@ -3549,7 +3570,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (minRows && minRows->type == cJSON_Number) { g_Dbs.db[i].dbCfg.minRows = minRows->valueint; } else if (!minRows) { - g_Dbs.db[i].dbCfg.minRows = -1; + g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default } else { printf("ERROR: failed to read json, minRows not found\n"); goto PARSE_OVER; @@ -3559,7 +3580,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (maxRows && maxRows->type == cJSON_Number) { g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint; } else if (!maxRows) { - g_Dbs.db[i].dbCfg.maxRows = -1; + g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default } else { printf("ERROR: failed to read json, maxRows not found\n"); goto PARSE_OVER; @@ -3704,7 +3725,7 @@ static bool 
getMetaFromInsertJsonFile(cJSON* root) { cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); if (!count || count->type != cJSON_Number || 0 >= count->valueint) { - errorPrint("%s() LN%d, failed to read json, childtable_count not found\n", + errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n", __func__, __LINE__); goto PARSE_OVER; } @@ -3858,12 +3879,17 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { */ cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows"); if (interlaceRows && interlaceRows->type == cJSON_Number) { + if (interlaceRows->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint; // rows per table need be less than insert batch if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) { - printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n", + printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n", i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n", + printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n", g_args.num_of_RPR); printf(" press Enter key to continue or Ctrl-C to stop."); (void)getchar(); @@ -3906,6 +3932,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows"); if (insertRows && insertRows->type == cJSON_Number) { + if (insertRows->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint; } else if (!insertRows) { g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; @@ -3918,8 +3949,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* insertInterval = cJSON_GetObjectItem(stbInfo, "insert_interval"); if (insertInterval && insertInterval->type == cJSON_Number) { g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint; + if (insertInterval->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } } else if (!insertInterval) { - verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRId64".\n", + verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRIu64".\n", __func__, __LINE__, g_args.insert_interval); g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; } else { @@ -4000,6 +4036,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); if (gQueryTimes && gQueryTimes->type == cJSON_Number) { + if (gQueryTimes->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } g_args.query_times = gQueryTimes->valueint; } else if (!gQueryTimes) { g_args.query_times = 1; @@ -4027,10 +4068,10 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { goto PARSE_OVER; } - // super_table_query + // specified_table_query cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query"); if (!specifiedQuery) { - g_queryInfo.specifiedQueryInfo.concurrent = 0; + g_queryInfo.specifiedQueryInfo.concurrent = 1; 
g_queryInfo.specifiedQueryInfo.sqlCount = 0; } else if (specifiedQuery->type != cJSON_Object) { printf("ERROR: failed to read json, super_table_query not found\n"); @@ -4046,6 +4087,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, "query_times"); if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { + if (specifiedQueryTimes->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + + } g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint; } else if (!specifiedQueryTimes) { g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; @@ -4057,13 +4104,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); if (concurrent && concurrent->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; - if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n", - __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, + if (concurrent->valueint <= 0) { + errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n", + __func__, __LINE__, + g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.concurrent); goto PARSE_OVER; } + g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; } else if (!concurrent) { g_queryInfo.specifiedQueryInfo.concurrent = 1; } @@ -4167,7 +4215,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { // sub_table_query cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); if (!superQuery) { - g_queryInfo.superQueryInfo.threadCnt = 0; + g_queryInfo.superQueryInfo.threadCnt = 1; g_queryInfo.superQueryInfo.sqlCount = 0; } else if (superQuery->type != cJSON_Object) { printf("ERROR: failed to read json, sub_table_query not found\n"); @@ -4183,6 +4231,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); if (superQueryTimes && superQueryTimes->type == cJSON_Number) { + if (superQueryTimes->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; } else if (!superQueryTimes) { g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; @@ -4194,6 +4247,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* threads = cJSON_GetObjectItem(superQuery, "threads"); if (threads && threads->type == cJSON_Number) { + if (threads->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, threads input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + + } g_queryInfo.superQueryInfo.threadCnt = threads->valueint; } else if (!threads) { g_queryInfo.superQueryInfo.threadCnt = 1; @@ -4233,10 +4292,15 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE; } - cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval"); - if (subinterval && subinterval->type == cJSON_Number) { - g_queryInfo.superQueryInfo.subscribeInterval = subinterval->valueint; - } else if (!subinterval) { + cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval"); + if (superInterval && superInterval->type == cJSON_Number) { + if (superInterval->valueint < 0) { 
+ errorPrint("%s() LN%d, failed to read json, interval input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint; + } else if (!superInterval) { //printf("failed to read json, subscribe interval no found\n"); //goto PARSE_OVER; g_queryInfo.superQueryInfo.subscribeInterval = 10000; @@ -4587,7 +4651,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { sampleDataBuf = calloc( superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { - errorPrint("%s() LN%d, Failed to calloc %"PRId64" Bytes, reason:%s\n", + errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", __func__, __LINE__, superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); @@ -4638,7 +4702,7 @@ static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, int k) return affectedRows; } -static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableSeq) +static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq) { SSuperTable* superTblInfo = pThreadInfo->superTblInfo; if (superTblInfo) { @@ -4649,7 +4713,7 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableS (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); } else { - verbosePrint("[%d] %s() LN%d: from=%"PRId64" count=%"PRId64" seq=%"PRId64"\n", + verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRIu64" seq=%"PRIu64"\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->ntables, tableSeq); @@ -4657,31 +4721,31 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableS superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); } } else { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRId64"", + snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", g_args.tb_prefix, tableSeq); } } static int64_t generateDataTail( SSuperTable* superTblInfo, - int64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows, + uint64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows, int64_t startFrom, int64_t startTime, int64_t *pSamplePos, int64_t *dataLen) { - int64_t len = 0; - int ncols_per_record = 1; // count first col ts + uint64_t len = 0; + uint32_t ncols_per_record = 1; // count first col ts char *pstr = buffer; if (superTblInfo == NULL) { - int datatypeSeq = 0; + uint32_t datatypeSeq = 0; while(g_args.datatype[datatypeSeq]) { datatypeSeq ++; ncols_per_record ++; } } - verbosePrint("%s() LN%d batch=%"PRId64"\n", __func__, __LINE__, batch); + verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch); - int64_t k = 0; + uint64_t k = 0; for (k = 0; k < batch;) { char data[MAX_DATA_SIZE]; memset(data, 0, MAX_DATA_SIZE); @@ -4756,7 +4820,7 @@ static int64_t generateDataTail( remainderBufLen -= retLen; } - verbosePrint("%s() LN%d len=%"PRId64" k=%"PRId64" \nbuffer=%s\n", + verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n", __func__, __LINE__, len, k, buffer); startFrom ++; @@ -4838,12 +4902,12 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, } static int64_t generateInterlaceDataBuffer( - char *tableName, int64_t batchPerTbl, int64_t i, int64_t batchPerTblTimes, - int64_t tableSeq, + char *tableName, uint64_t batchPerTbl, uint64_t i, uint64_t batchPerTblTimes, + uint64_t tableSeq, threadInfo *pThreadInfo, char *buffer, - int64_t insertRows, + uint64_t insertRows, int64_t startTime, - int64_t *pRemainderBufLen) 
+ uint64_t *pRemainderBufLen) { assert(buffer); char *pstr = buffer; @@ -4856,7 +4920,7 @@ static int64_t generateInterlaceDataBuffer( return 0; } // generate data buffer - verbosePrint("[%d] %s() LN%d i=%"PRId64" buffer:\n%s\n", + verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n", pThreadInfo->threadID, __func__, __LINE__, i, buffer); pstr += headLen; @@ -4864,7 +4928,7 @@ static int64_t generateInterlaceDataBuffer( int64_t dataLen = 0; - verbosePrint("[%d] %s() LN%d i=%"PRId64" batchPerTblTimes=%"PRId64" batchPerTbl = %"PRId64"\n", + verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%"PRIu64" batchPerTbl = %"PRIu64"\n", pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, batchPerTbl); @@ -4886,7 +4950,7 @@ static int64_t generateInterlaceDataBuffer( pstr += dataLen; *pRemainderBufLen -= dataLen; } else { - debugPrint("%s() LN%d, generated data tail: %"PRId64", not equal batch per table: %"PRId64"\n", + debugPrint("%s() LN%d, generated data tail: %"PRIu64", not equal batch per table: %"PRIu64"\n", __func__, __LINE__, k, batchPerTbl); pstr -= headLen; pstr[0] = '\0'; @@ -4981,10 +5045,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { // TODO: prompt tbl count multple interlace rows and batch // - int64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; char* buffer = calloc(maxSqlLen, 1); if (NULL == buffer) { - errorPrint( "%s() LN%d, Failed to alloc %"PRId64" Bytes, reason:%s\n", + errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", __func__, __LINE__, maxSqlLen, strerror(errno)); return NULL; } @@ -4996,18 +5060,18 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int64_t nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; - int insert_interval = + uint64_t insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; - int64_t st = 0; - int64_t et = 0xffffffff; + uint64_t st = 0; + uint64_t et = UINT64_MAX; - int64_t lastPrintTime = taosGetTimestampMs(); - int64_t startTs = taosGetTimestampMs(); - int64_t endTs; + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; - int64_t tableSeq = pThreadInfo->start_table_from; + uint64_t tableSeq = pThreadInfo->start_table_from; - debugPrint("[%d] %s() LN%d: start_table_from=%"PRId64" ntables=%"PRId64" insertRows=%"PRId64"\n", + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRIu64" insertRows=%"PRIu64"\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); @@ -5039,7 +5103,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } // generate data memset(buffer, 0, maxSqlLen); - int64_t remainderBufLen = maxSqlLen; + uint64_t remainderBufLen = maxSqlLen; char *pstr = buffer; @@ -5058,7 +5122,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { return NULL; } - int64_t oldRemainderLen = remainderBufLen; + uint64_t oldRemainderLen = remainderBufLen; int64_t generated = generateInterlaceDataBuffer( tableName, batchPerTbl, i, batchPerTblTimes, tableSeq, @@ -5539,7 +5603,8 @@ static void startMultiThreadInsertData(int threads, char* db_name, int startFrom; if (superTblInfo) { - int limit, offset; + int64_t limit; + uint64_t offset; if ((NULL != g_args.sqlFile) && (superTblInfo->childTblExists == TBL_NO_EXISTS) && ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 
0))) { @@ -5592,7 +5657,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(-1); } - int64_t childTblCount; + uint64_t childTblCount; getChildNameOfSuperTableWithLimitAndOffset( taos, db_name, superTblInfo->sTblName, @@ -5631,7 +5696,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, t_info->superTblInfo = superTblInfo; t_info->start_time = start_time; - t_info->minDelay = INT64_MAX; + t_info->minDelay = UINT64_MAX; if ((NULL == superTblInfo) || (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) { @@ -5676,7 +5741,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, int64_t totalDelay = 0; int64_t maxDelay = 0; - int64_t minDelay = INT64_MAX; + int64_t minDelay = UINT64_MAX; int64_t cntDelay = 1; double avgDelay = 0; @@ -5780,11 +5845,11 @@ static void *readTable(void *sarg) { printf("%d records:\n", totalData); fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n"); - for (int j = 0; j < n; j++) { + for (uint64_t j = 0; j < n; j++) { double totalT = 0; - int count = 0; - for (int i = 0; i < num_of_tables; i++) { - sprintf(command, "select %s from %s%d where ts>= %" PRId64, + uint64_t count = 0; + for (uint64_t i = 0; i < num_of_tables; i++) { + sprintf(command, "select %s from %s%"PRIu64" where ts>= %" PRIu64, aggreFunc[j], tb_prefix, i, sTime); double t = taosGetTimestampMs(); @@ -6020,8 +6085,8 @@ static void *specifiedTableQuery(void *sarg) { int queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes; int totalQueried = 0; - int64_t lastPrintTime = taosGetTimestampMs(); - int64_t startTs = taosGetTimestampMs(); + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); while(queryTimes --) { if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < @@ -6067,10 +6132,10 @@ static void *specifiedTableQuery(void *sarg) { et = taosGetTimestampMs(); - int64_t currentPrintTime = taosGetTimestampMs(); - int64_t endTs = taosGetTimestampMs(); + uint64_t currentPrintTime = taosGetTimestampMs(); + uint64_t endTs = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { - debugPrint("%s() LN%d, endTs=%"PRId64"ms, startTs=%"PRId64"ms\n", + debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n", __func__, __LINE__, endTs, startTs); printf("thread[%d] has currently completed queries: %d, QPS: %10.6f\n", pThreadInfo->threadID, @@ -6167,7 +6232,7 @@ static void *superTableQuery(void *sarg) { } } et = taosGetTimestampMs(); - printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRId64" - %"PRId64"] once queries duration:%.4fs\n\n", + printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRIu64" - %"PRIu64"] once queries duration:%.4fs\n\n", taosGetSelfPthreadId(), pThreadInfo->start_table_from, pThreadInfo->end_table_to, @@ -6433,7 +6498,7 @@ static void *superSubscribe(void *sarg) { } } //et = taosGetTimestampMs(); - //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); + //printf("========thread[%"PRIu64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); } while(0); // start loop to consume result @@ -6499,7 +6564,7 @@ static void *specifiedSubscribe(void *sarg) { do { //if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < g_queryInfo.specifiedQueryInfo.queryInterval) { // taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval- (et 
- st)); // ms - // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); + // //printf("========sleep duration:%"PRIu64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); //} //st = taosGetTimestampMs(); @@ -6519,7 +6584,7 @@ static void *specifiedSubscribe(void *sarg) { } } //et = taosGetTimestampMs(); - //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); + //printf("========thread[%"PRIu64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); } while(0); // start loop to consume result @@ -6589,8 +6654,9 @@ static int subscribeTestProcess() { //==== create sub threads for query from super table if ((g_queryInfo.specifiedQueryInfo.sqlCount <= 0) || (g_queryInfo.specifiedQueryInfo.concurrent <= 0)) { - errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n", - __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, + errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n", + __func__, __LINE__, + g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.concurrent); exit(-1); } From 142d89f3fce3bb82583b2734d7e72c8e0b64efb2 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Sat, 1 May 2021 12:25:14 +0000 Subject: [PATCH 035/140] modify drone CI on arm32 platform --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 4009b563b4..9930e81416 100644 --- a/.drone.yml +++ b/.drone.yml @@ -82,7 +82,7 @@ platform: steps: - name: build - image: gcc + image: gccarm32v7/ubuntu:bionic commands: - apt-get update - apt-get install -y cmake build-essential From 22f6ff282429da4e08501b58e55835e3a1b27d5c Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Sat, 1 May 2021 12:27:09 +0000 Subject: [PATCH 036/140] update --- .drone.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.drone.yml b/.drone.yml index 9930e81416..eb1601fd3f 100644 --- a/.drone.yml +++ b/.drone.yml @@ -8,7 +8,7 @@ platform: steps: - name: build - image: gcc + image: ubuntu:focal commands: - apt-get update - apt-get install -y cmake build-essential git @@ -82,7 +82,7 @@ platform: steps: - name: build - image: gccarm32v7/ubuntu:bionic + image: arm32v7/ubuntu:bionic commands: - apt-get update - apt-get install -y cmake build-essential From 8ca809d3766873e7e6604c4fab71d5304a22f8b8 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Sat, 1 May 2021 12:32:51 +0000 Subject: [PATCH 037/140] update --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index eb1601fd3f..9053953c42 100644 --- a/.drone.yml +++ b/.drone.yml @@ -11,7 +11,7 @@ steps: image: ubuntu:focal commands: - apt-get update - - apt-get install -y cmake build-essential git + - apt-get install -y cmake build-essential - mkdir debug - cd debug - cmake .. 
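
[Editorial note, not part of the patch series] The taosdemo patch above ([TD-4035]) converts most counters from int64_t to uint64_t, swaps the matching printf format specifiers from PRId64 to PRIu64, and rejects negative JSON values before they reach unsigned fields. The sketch below is a minimal standalone illustration of that pattern under assumed, hypothetical names (insertRows, parsedValue); it is not code from taosdemo.c itself.

```c
/*
 * Minimal sketch of the [TD-4035] pattern: keep a counter unsigned,
 * validate the (signed) parsed input before assigning it, and print
 * the unsigned value with PRIu64. All identifiers are illustrative.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t insertRows = 0;

    int64_t parsedValue = -5;   /* stands in for a cJSON valueint read from the config */
    if (parsedValue < 0) {
        /* reject out-of-scope input instead of wrapping it into a huge unsigned value */
        fprintf(stderr, "insert_rows input mistake: %" PRId64 "\n", parsedValue);
        return 1;
    }
    insertRows = (uint64_t)parsedValue;

    printf("insertRows: %" PRIu64 "\n", insertRows);
    return 0;
}
```

Printing a uint64_t with %"PRId64" (or vice versa) is a mismatched-specifier case with undefined behavior, which is why the patch changes the format strings together with the field types rather than either one alone.
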
From d0aa7146d2daba873ae6770f626d573a753b2e28 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Sat, 1 May 2021 12:35:17 +0000 Subject: [PATCH 038/140] update --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 9053953c42..79a5b1cec8 100644 --- a/.drone.yml +++ b/.drone.yml @@ -8,7 +8,7 @@ platform: steps: - name: build - image: ubuntu:focal + image: ubuntu:groovy commands: - apt-get update - apt-get install -y cmake build-essential From 8e146460b0f9005f739f14a1120876ac2a4fc5f0 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Sat, 1 May 2021 13:00:18 +0000 Subject: [PATCH 039/140] update --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 79a5b1cec8..b71b99e488 100644 --- a/.drone.yml +++ b/.drone.yml @@ -8,7 +8,7 @@ platform: steps: - name: build - image: ubuntu:groovy + image: gcc commands: - apt-get update - apt-get install -y cmake build-essential From 4c6f4a140b3f0aa7c4a01491e9caaabd1ff41896 Mon Sep 17 00:00:00 2001 From: zyyang Date: Sat, 1 May 2021 15:49:18 +0800 Subject: [PATCH 040/140] change release version --- cmake/version.inc | 2 +- snap/snapcraft.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/version.inc b/cmake/version.inc index 8035b31cc7..a560c7f598 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.20.0") + SET(TD_VER_NUMBER "2.0.20.2") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 31343ed293..9c7400c616 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.20.0' +version: '2.0.20.2' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.20.0 + - usr/lib/libtaos.so.2.0.20.2 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so From 1466505e4b1e9906288beee358add4258cbce384 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 5 May 2021 17:32:46 +0800 Subject: [PATCH 041/140] [TD-3197]: taosdemo and taosdump coverity scan issues. 
(#6004) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 16 +++++++--------- src/kit/taosdump/taosdump.c | 2 +- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index eb0a2d3822..37023913c4 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -725,7 +725,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrint("%s", "\n\t-c need a valid path following!\n"); exit(EXIT_FAILURE); } - tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN); + tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); } else if (strcmp(argv[i], "-h") == 0) { if (argc == i+1) { @@ -967,9 +967,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } } else if (strcmp(argv[i], "-D") == 0) { arguments->method_of_delete = atoi(argv[++i]); - if (arguments->method_of_delete < 0 - || arguments->method_of_delete > 3) { - arguments->method_of_delete = 0; + if (arguments->method_of_delete > 3) { + errorPrint("%s", "\n\t-D need a valud (0~3) number following!\n"); + exit(EXIT_FAILURE); } } else if ((strcmp(argv[i], "--version") == 0) || (strcmp(argv[i], "-V") == 0)){ @@ -1372,7 +1372,7 @@ static int printfInsertMeta() { printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].childTblLimit); } - if (g_Dbs.db[i].superTbls[j].childTblOffset >= 0) { + if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) { printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n", g_Dbs.db[i].superTbls[j].childTblOffset); } @@ -4706,8 +4706,7 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t table { SSuperTable* superTblInfo = pThreadInfo->superTblInfo; if (superTblInfo) { - if ((superTblInfo->childTblOffset >= 0) - && (superTblInfo->childTblLimit > 0)) { + if (superTblInfo->childTblLimit > 0) { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", superTblInfo->childTblName + (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); @@ -5611,8 +5610,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, printf("WARNING: offset and limit will not be used since the child tables not exists!\n"); } - if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS) - && (superTblInfo->childTblOffset >= 0)) { + if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) { if ((superTblInfo->childTblLimit < 0) || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit) > (superTblInfo->childTblCount))) { diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 0dca591b04..f7cb3e93d1 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -525,7 +525,7 @@ int main(int argc, char *argv[]) { /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. 
*/ - if (argc > 1) + if (argc > 2) parse_args(argc, argv, &g_args); argp_parse(&argp, argc, argv, 0, 0, &g_args); From fa902a35b36d4b7ad3711af45a4dc36f842dd5d0 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 11:44:39 +0800 Subject: [PATCH 042/140] [TD-4059]update case version.py --- tests/pytest/client/version.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/tests/pytest/client/version.py b/tests/pytest/client/version.py index 93b302f619..7cbeeb60df 100644 --- a/tests/pytest/client/version.py +++ b/tests/pytest/client/version.py @@ -28,20 +28,22 @@ class TDTestCase: sql = "select server_version()" ret = tdSql.query(sql) version = tdSql.getData(0, 0)[0:3] - expectedVersion = "2.0" - if(version == expectedVersion): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, 0, 0, version, expectedVersion)) + expectedVersion_dev = "2.0" + expectedVersion_master = "2.1" + if(version == expectedVersion_dev or version == expectedVersion_master): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version)) else: - tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s" % (sql, 0, 0, version, expectedVersion)) + tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s or %s " % (sql, 0, 0, version, expectedVersion_dev, expectedVersion_master)) sql = "select client_version()" ret = tdSql.query(sql) version = tdSql.getData(0, 0)[0:3] - expectedVersion = "2.0" - if(version == expectedVersion): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, 0, 0, version, expectedVersion)) + expectedVersion_dev = "2.0" + expectedVersion_master = "2.1" + if(version == expectedVersion_dev or version == expectedVersion_master): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version)) else: - tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s" % (sql, 0, 0, version, expectedVersion)) + tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s or %s " % (sql, 0, 0, version, expectedVersion_dev, expectedVersion_master)) def stop(self): From f1098e1cc397b20d648db1b0560712794cc34c25 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 6 May 2021 13:18:45 +0800 Subject: [PATCH 043/140] [TD-3747] --- src/client/inc/tsclient.h | 1 + src/client/src/tscServer.c | 5 +++++ src/client/src/tscUtil.c | 5 ++++- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index c91943e232..6d94d270a7 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -83,6 +83,7 @@ typedef struct STableMeta { typedef struct STableMetaInfo { STableMeta *pTableMeta; // table meta, cached in client side and acquired by name + uint32_t tableMetaSize; SVgroupsInfo *vgroupList; SArray *pVgroupTables; // SArray diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 5a7bd92994..706055668d 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2533,6 +2533,11 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { uint32_t size = tscGetTableMetaMaxSize(); if (pTableMetaInfo->pTableMeta == NULL) { pTableMetaInfo->pTableMeta = calloc(1, size); + } else if (pTableMetaInfo->tableMetaSize < size) { + char *tmp = realloc(pTableMetaInfo->pTableMeta, size); + if (tmp == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY;} + pTableMetaInfo->pTableMeta = (STableMeta *)tmp; + pTableMetaInfo->tableMetaSize = size; } else { uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta); memset(pTableMetaInfo->pTableMeta, 0, s); diff 
--git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index a1416284c7..df48486e66 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -693,7 +693,8 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) { tfree(pTableMetaInfo->pTableMeta); } - pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta); + pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta); + pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pDataBlock->pTableMeta); } /* @@ -2078,6 +2079,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM } pTableMetaInfo->pTableMeta = pTableMeta; + pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta); if (vgroupList != NULL) { pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList); @@ -2352,6 +2354,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables); + } else { // transfer the ownership of pTableMeta to the newly create sql object. STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0); if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) { From 014ac9144320f9d253d00589523fc9bc04c9c3ef Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 6 May 2021 13:30:16 +0800 Subject: [PATCH 044/140] [TD-3747] --- src/client/src/tscUtil.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index df48486e66..37875790dd 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2079,7 +2079,11 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM } pTableMetaInfo->pTableMeta = pTableMeta; - pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta); + if (pTableMetaInfo->pTableMeta == NULL) { + pTableMetaInfo->tableMetaSize = 0; + } else { + pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta); + } if (vgroupList != NULL) { pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList); From 17ddb4587ddce910fb3ccf9bc3a806e50c301f99 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 6 May 2021 14:36:40 +0800 Subject: [PATCH 045/140] [TD-4056]: fix possible char string buffer length overflow --- src/plugins/http/inc/httpInt.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h index 634468f3cc..0a5822b908 100644 --- a/src/plugins/http/inc/httpInt.h +++ b/src/plugins/http/inc/httpInt.h @@ -171,7 +171,7 @@ typedef struct HttpThread { EpollFd pollFd; int32_t numOfContexts; int32_t threadId; - char label[HTTP_LABEL_SIZE]; + char label[HTTP_LABEL_SIZE << 1]; bool (*processData)(HttpContext *pContext); } HttpThread; From 702cb8549918bdaf44ad31204d8d57ad7e0ad69e Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 14:55:18 +0800 Subject: [PATCH 046/140] smoke test --- .drone.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.drone.yml b/.drone.yml index b71b99e488..1619b031d7 100644 --- a/.drone.yml +++ b/.drone.yml @@ -16,6 +16,8 @@ steps: - cd debug - cmake .. 
- make + - cd ../tests + - ./test-all.sh smoke when: branch: - develop From 8258fc25334cd83d962a78a7728afcda82e366e2 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 14:55:36 +0800 Subject: [PATCH 047/140] smoke --- tests/test-all.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test-all.sh b/tests/test-all.sh index 47e5de6aa0..997894c509 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -66,7 +66,7 @@ function runSimCaseOneByOne { echo -e "${RED} failed${NC}" | tee -a out.log else echo -n $case - ./test.sh -f $case > /dev/null 2>&1 && \ + ./test.sh -f $case && \ ( grep -q 'script.*'$case'.*failed.*, err.*lineNum' ../../sim/tsim/log/taoslog0.0 && echo -e "${RED} failed${NC}" | tee -a out.log || echo -e "${GREEN} success${NC}" | tee -a out.log )|| \ ( grep -q 'script.*success.*m$' ../../sim/tsim/log/taoslog0.0 && echo -e "${GREEN} success${NC}" | tee -a out.log ) || \ echo -e "${RED} failed${NC}" | tee -a out.log From ee6d50e059adfa2f0b53afc64b492dae28c0c0c5 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 15:05:51 +0800 Subject: [PATCH 048/140] test --- .drone.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.drone.yml b/.drone.yml index 1619b031d7..b71b99e488 100644 --- a/.drone.yml +++ b/.drone.yml @@ -16,8 +16,6 @@ steps: - cd debug - cmake .. - make - - cd ../tests - - ./test-all.sh smoke when: branch: - develop From 642830a10e672d26f1081c9f1fee184ca2d52d9a Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 15:10:35 +0800 Subject: [PATCH 049/140] test --- .drone.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.drone.yml b/.drone.yml index b71b99e488..ae536ac2c7 100644 --- a/.drone.yml +++ b/.drone.yml @@ -24,6 +24,8 @@ steps: - name: smoke_test image: python:3.8 commands: + - apt-get update + - apt-get install -y cmake build-essential gcc - pip3 install psutil - pip3 install guppy3 - pip3 install src/connector/python/linux/python3/ From 5d5027f82087a51e71a24a7a8f88cec2009fdffa Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 15:16:16 +0800 Subject: [PATCH 050/140] test --- tests/script/general/cache/new_metrics.sim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/script/general/cache/new_metrics.sim b/tests/script/general/cache/new_metrics.sim index eb9b042483..b12938f25e 100644 --- a/tests/script/general/cache/new_metrics.sim +++ b/tests/script/general/cache/new_metrics.sim @@ -5,7 +5,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 +sleep 20000 sql connect $i = 0 From 6bba155857aa8d902b6fd2ae72eb56674d649f89 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 15:23:25 +0800 Subject: [PATCH 051/140] test --- .drone.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.drone.yml b/.drone.yml index ae536ac2c7..5ecc78490c 100644 --- a/.drone.yml +++ b/.drone.yml @@ -24,11 +24,10 @@ steps: - name: smoke_test image: python:3.8 commands: - - apt-get update - - apt-get install -y cmake build-essential gcc - pip3 install psutil - pip3 install guppy3 - pip3 install src/connector/python/linux/python3/ + - mkdir sim/tsim - cd tests - ./test-all.sh smoke when: From cef351408a7d4347b08815ef06ac072a1fd4649e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 6 May 2021 15:39:54 +0800 Subject: [PATCH 052/140] [TD-4060]: taos shell startup message. 
(#6009) Co-authored-by: Shuduo Sang --- src/util/src/tnettest.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c index 131063b0de..318a2d4860 100644 --- a/src/util/src/tnettest.c +++ b/src/util/src/tnettest.c @@ -539,7 +539,7 @@ static void taosNetTestServer(char *host, int32_t startPort, int32_t pkgLen) { } void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) { -// tscEmbedded = 1; + tscEmbedded = 1; if (host == NULL) host = tsLocalFqdn; if (port == 0) port = tsServerPort; if (pkgLen <= 10) pkgLen = 1000; @@ -550,6 +550,7 @@ void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) { } else if (0 == strcmp("server", role)) { taosNetTestServer(host, port, pkgLen); } else if (0 == strcmp("rpc", role)) { + tscEmbedded = 0; taosNetTestRpc(host, port, pkgLen); } else if (0 == strcmp("sync", role)) { taosNetCheckSync(host, port); @@ -559,5 +560,5 @@ void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) { taosNetTestStartup(host, port); } -// tscEmbedded = 0; + tscEmbedded = 0; } From 2268535d28ea9655bde6f33b101b7496f5774272 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 15:58:36 +0800 Subject: [PATCH 053/140] test --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 5ecc78490c..22ef72f80d 100644 --- a/.drone.yml +++ b/.drone.yml @@ -27,7 +27,7 @@ steps: - pip3 install psutil - pip3 install guppy3 - pip3 install src/connector/python/linux/python3/ - - mkdir sim/tsim + - mkdir -p /drone/src/sim/tsim - cd tests - ./test-all.sh smoke when: From fc57fad2db36b7226c8cd33b6a3c9132eb09675d Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 16:08:26 +0800 Subject: [PATCH 054/140] test --- .drone.yml | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/.drone.yml b/.drone.yml index 22ef72f80d..f0e2974a2a 100644 --- a/.drone.yml +++ b/.drone.yml @@ -11,24 +11,15 @@ steps: image: gcc commands: - apt-get update - - apt-get install -y cmake build-essential + - apt-get install -y cmake build-essential python3.8 python3-pip + - pip3 install psutil + - pip3 install guppy3 + - pip3 install src/connector/python/linux/python3/ - mkdir debug - cd debug - cmake .. - make - when: - branch: - - develop - - master - -- name: smoke_test - image: python:3.8 - commands: - - pip3 install psutil - - pip3 install guppy3 - - pip3 install src/connector/python/linux/python3/ - - mkdir -p /drone/src/sim/tsim - - cd tests + - cd ../tests - ./test-all.sh smoke when: branch: @@ -36,6 +27,7 @@ steps: - master + - name: crash_gen image: python:3.8 commands: From dd16e2d95c4360ea23ec03502222e6918f6ce599 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 16:33:39 +0800 Subject: [PATCH 055/140] test --- .drone.yml | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/.drone.yml b/.drone.yml index f0e2974a2a..07d31cb118 100644 --- a/.drone.yml +++ b/.drone.yml @@ -11,7 +11,24 @@ steps: image: gcc commands: - apt-get update - - apt-get install -y cmake build-essential python3.8 python3-pip + - apt-get install -y cmake build-essential + - pip3 install psutil + - pip3 install guppy3 + - pip3 install src/connector/python/linux/python3/ + - mkdir debug + - cd debug + - cmake .. 
+ - make + when: + branch: + - develop + - master + +- name: smoke_test + image: python:3.8 + commands: + - apt-get update + - apt-get install -y cmake build-essential gcc - pip3 install psutil - pip3 install guppy3 - pip3 install src/connector/python/linux/python3/ @@ -27,7 +44,6 @@ steps: - master - - name: crash_gen image: python:3.8 commands: From e2f8b44617213f0ef4d1ebde6d17262096317e16 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 16:36:09 +0800 Subject: [PATCH 056/140] test --- .drone.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.drone.yml b/.drone.yml index 07d31cb118..fac8efc0bf 100644 --- a/.drone.yml +++ b/.drone.yml @@ -12,9 +12,6 @@ steps: commands: - apt-get update - apt-get install -y cmake build-essential - - pip3 install psutil - - pip3 install guppy3 - - pip3 install src/connector/python/linux/python3/ - mkdir debug - cd debug - cmake .. From 302cb46953f4dcad9cb3d9446ce020a46adaeaff Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 16:40:51 +0800 Subject: [PATCH 057/140] test --- .drone.yml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/.drone.yml b/.drone.yml index fac8efc0bf..926883b0ba 100644 --- a/.drone.yml +++ b/.drone.yml @@ -7,19 +7,6 @@ platform: arch: amd64 steps: -- name: build - image: gcc - commands: - - apt-get update - - apt-get install -y cmake build-essential - - mkdir debug - - cd debug - - cmake .. - - make - when: - branch: - - develop - - master - name: smoke_test image: python:3.8 From 0056a7f39ed3ffd8876089ccf798ef143db57638 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 16:44:47 +0800 Subject: [PATCH 058/140] test --- .drone.yml | 19 ++++++++++++++----- tests/test-all.sh | 2 +- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/.drone.yml b/.drone.yml index 926883b0ba..5d997560d4 100644 --- a/.drone.yml +++ b/.drone.yml @@ -7,6 +7,19 @@ platform: arch: amd64 steps: +- name: build + image: gcc + commands: + - apt-get update + - apt-get install -y cmake build-essential + - mkdir debug + - cd debug + - cmake .. + - make + when: + branch: + - develop + - master - name: smoke_test image: python:3.8 @@ -16,11 +29,7 @@ steps: - pip3 install psutil - pip3 install guppy3 - pip3 install src/connector/python/linux/python3/ - - mkdir debug - - cd debug - - cmake .. 
- - make - - cd ../tests + - cd tests - ./test-all.sh smoke when: branch: diff --git a/tests/test-all.sh b/tests/test-all.sh index 997894c509..47e5de6aa0 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -66,7 +66,7 @@ function runSimCaseOneByOne { echo -e "${RED} failed${NC}" | tee -a out.log else echo -n $case - ./test.sh -f $case && \ + ./test.sh -f $case > /dev/null 2>&1 && \ ( grep -q 'script.*'$case'.*failed.*, err.*lineNum' ../../sim/tsim/log/taoslog0.0 && echo -e "${RED} failed${NC}" | tee -a out.log || echo -e "${GREEN} success${NC}" | tee -a out.log )|| \ ( grep -q 'script.*success.*m$' ../../sim/tsim/log/taoslog0.0 && echo -e "${GREEN} success${NC}" | tee -a out.log ) || \ echo -e "${RED} failed${NC}" | tee -a out.log From 828628ab2ec504af21b31d00a7ba6c525c997682 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 16:49:11 +0800 Subject: [PATCH 059/140] test --- .drone.yml | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/.drone.yml b/.drone.yml index 5d997560d4..e7ae6ebbda 100644 --- a/.drone.yml +++ b/.drone.yml @@ -7,20 +7,6 @@ platform: arch: amd64 steps: -- name: build - image: gcc - commands: - - apt-get update - - apt-get install -y cmake build-essential - - mkdir debug - - cd debug - - cmake .. - - make - when: - branch: - - develop - - master - - name: smoke_test image: python:3.8 commands: @@ -29,7 +15,11 @@ steps: - pip3 install psutil - pip3 install guppy3 - pip3 install src/connector/python/linux/python3/ - - cd tests + - mkdir debug + - cd debug + - cmake .. + - make + - cd ../tests - ./test-all.sh smoke when: branch: From 8dc64ed8e7f56aeb499d4be37765c8cb76f4d46b Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 6 May 2021 16:55:51 +0800 Subject: [PATCH 060/140] delete some code --- src/client/src/tscParseInsert.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index a53dcc05fa..17f9f27abc 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -1349,11 +1349,8 @@ int tsParseSql(SSqlObj *pSql, bool initial) { ret = tsParseInsertSql(pSql); if (/*(sqlstr == NULL) || */(pSql->parseRetry >= 1) || (ret != TSDB_CODE_TSC_SQL_SYNTAX_ERROR && ret != TSDB_CODE_TSC_INVALID_SQL)) { -// free(sqlstr); } else { tscResetSqlCmd(pCmd, true); -// free(pSql->sqlstr); -// pSql->sqlstr = sqlstr; pSql->parseRetry++; if ((ret = tsInsertInitialCheck(pSql)) == TSDB_CODE_SUCCESS) { ret = tsParseInsertSql(pSql); From d8b9ed31da00d38eebfbb059f7d001c2f448eaed Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 6 May 2021 17:46:09 +0800 Subject: [PATCH 061/140] recovery new_metrics.sim --- tests/script/general/cache/new_metrics.sim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/script/general/cache/new_metrics.sim b/tests/script/general/cache/new_metrics.sim index b12938f25e..eb9b042483 100644 --- a/tests/script/general/cache/new_metrics.sim +++ b/tests/script/general/cache/new_metrics.sim @@ -5,7 +5,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 20000 +sleep 2000 sql connect $i = 0 From f8500b04ed87981bc0bd6a64483dc9c73c54a917 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 6 May 2021 18:39:39 +0800 Subject: [PATCH 062/140] Hotfix/sangshuduo/td 3976 taosdemo print insert perf per batch (#6013) * [TD-3976]: taosdemo print each insert performance data. * [TD-3976]: taosdemo print insert performance per thread. 
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 195 ++++++++++++++++++++---------------- 1 file changed, 108 insertions(+), 87 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 37023913c4..6facdf7eee 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -4672,7 +4672,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { return 0; } -static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, int k) +static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, uint64_t k) { int affectedRows; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; @@ -4744,7 +4744,7 @@ static int64_t generateDataTail( verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch); - uint64_t k = 0; + int64_t k = 0; for (k = 0; k < batch;) { char data[MAX_DATA_SIZE]; memset(data, 0, MAX_DATA_SIZE); @@ -4959,7 +4959,7 @@ static int64_t generateInterlaceDataBuffer( return k; } -static int generateProgressiveDataBuffer( +static int64_t generateProgressiveDataBuffer( char *tableName, int64_t tableSeq, threadInfo *pThreadInfo, char *buffer, @@ -5004,12 +5004,21 @@ static int generateProgressiveDataBuffer( return k; } +static void printStatPerThread(threadInfo *pThreadInfo) +{ + fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows, + (double)(pThreadInfo->totalAffectedRows / (pThreadInfo->totalDelay/1000.0))); +} + static void* syncWriteInterlace(threadInfo *pThreadInfo) { debugPrint("[%d] %s() LN%d: ### interlace write\n", pThreadInfo->threadID, __func__, __LINE__); - int64_t insertRows; - int64_t interlaceRows; + uint64_t insertRows; + uint64_t interlaceRows; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; @@ -5078,9 +5087,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { assert(pThreadInfo->ntables > 0); - int64_t batchPerTbl = interlaceRows; + uint64_t batchPerTbl = interlaceRows; + uint64_t batchPerTblTimes; - int64_t batchPerTblTimes; if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { batchPerTblTimes = g_args.num_of_RPR / interlaceRows; @@ -5088,9 +5097,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { batchPerTblTimes = 1; } - int64_t generatedRecPerTbl = 0; + uint64_t generatedRecPerTbl = 0; bool flagSleep = true; - int64_t sleepTimeTotal = 0; + uint64_t sleepTimeTotal = 0; char *strInsertInto = "insert into "; int nInsertBufLen = strlen(strInsertInto); @@ -5110,9 +5119,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pstr += len; remainderBufLen -= len; - int64_t recOfBatch = 0; + uint64_t recOfBatch = 0; - for (int64_t i = 0; i < batchPerTblTimes; i ++) { + for (uint64_t i = 0; i < batchPerTblTimes; i ++) { getTableName(tableName, pThreadInfo, tableSeq); if (0 == strlen(tableName)) { errorPrint("[%d] %s() LN%d, getTableName return null\n", @@ -5130,10 +5139,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { startTime, &remainderBufLen); - if (generated < 0) { - debugPrint("[%d] %s() LN%d, generated data is %"PRId64"\n", + debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, generated); - goto free_and_statistics_interlace; + if (generated < 0) { + errorPrint("[%d] %s() LN%d, generated records is %"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_of_interlace; } else if 
(generated == 0) { break; } @@ -5177,7 +5188,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { break; } - verbosePrint("[%d] %s() LN%d recOfBatch=%"PRId64" totalInsertRows=%"PRId64"\n", + verbosePrint("[%d] %s() LN%d recOfBatch=%"PRIu64" totalInsertRows=%"PRIu64"\n", pThreadInfo->threadID, __func__, __LINE__, recOfBatch, pThreadInfo->totalInsertRows); verbosePrint("[%d] %s() LN%d, buffer=%s\n", @@ -5188,30 +5199,30 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int64_t affectedRows = execInsert(pThreadInfo, buffer, recOfBatch); endTs = taosGetTimestampMs(); - int64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n", + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %"PRIu64"ms\n", __func__, __LINE__, delay); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; pThreadInfo->cntDelay++; pThreadInfo->totalDelay += delay; - verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, affectedRows); - if ((affectedRows < 0) || (recOfBatch != affectedRows)) { - errorPrint("[%d] %s() LN%d execInsert insert %"PRId64", affected rows: %"PRId64"\n%s\n", + if (recOfBatch != affectedRows) { + errorPrint("[%d] %s() LN%d execInsert insert %"PRIu64", affected rows: %"PRId64"\n%s\n", pThreadInfo->threadID, __func__, __LINE__, recOfBatch, affectedRows, buffer); - goto free_and_statistics_interlace; + goto free_of_interlace; } pThreadInfo->totalAffectedRows += affectedRows; int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", pThreadInfo->threadID, pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows); @@ -5231,13 +5242,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } } -free_and_statistics_interlace: +free_of_interlace: tmfree(buffer); - - printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); + printStatPerThread(pThreadInfo); return NULL; } @@ -5253,19 +5260,19 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; char* buffer = calloc(maxSqlLen, 1); if (NULL == buffer) { - errorPrint( "Failed to alloc %d Bytes, reason:%s\n", + errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n", maxSqlLen, strerror(errno)); return NULL; } - int64_t lastPrintTime = taosGetTimestampMs(); - int64_t startTs = taosGetTimestampMs(); - int64_t endTs; + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; int64_t timeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; @@ -5280,15 +5287,15 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->samplePos = 0; - for (int64_t tableSeq = + for 
(uint64_t tableSeq = pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to; tableSeq ++) { int64_t start_time = pThreadInfo->start_time; - int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; + uint64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); - for (int64_t i = 0; i < insertRows;) { + for (uint64_t i = 0; i < insertRows;) { /* if (insert_interval) { st = taosGetTimestampMs(); @@ -5310,7 +5317,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pstr += len; remainderBufLen -= len; - int generated = generateProgressiveDataBuffer( + int64_t generated = generateProgressiveDataBuffer( tableName, tableSeq, pThreadInfo, pstr, insertRows, i, start_time, &(pThreadInfo->samplePos), @@ -5318,7 +5325,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { if (generated > 0) i += generated; else - goto free_and_statistics_2; + goto free_of_progressive; start_time += generated * timeStampStep; pThreadInfo->totalInsertRows += generated; @@ -5328,17 +5335,23 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { int64_t affectedRows = execInsert(pThreadInfo, buffer, generated); endTs = taosGetTimestampMs(); - int64_t delay = endTs - startTs; + uint64_t delay = endTs - startTs; performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n", __func__, __LINE__, delay); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; pThreadInfo->cntDelay++; pThreadInfo->totalDelay += delay; - if (affectedRows < 0) - goto free_and_statistics_2; + if (affectedRows < 0) { + errorPrint("%s() LN%d, affected rows: %"PRId64"\n", + __func__, __LINE__, affectedRows); + goto free_of_progressive; + } pThreadInfo->totalAffectedRows += affectedRows; @@ -5377,13 +5390,9 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { } } // tableSeq -free_and_statistics_2: +free_of_progressive: tmfree(buffer); - - printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); + printStatPerThread(pThreadInfo); return NULL; } @@ -5412,6 +5421,7 @@ static void* syncWrite(void *sarg) { // progressive mode return syncWriteProgressive(pThreadInfo); } + } static void callBack(void *param, TAOS_RES *res, int code) { @@ -5737,10 +5747,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, pthread_join(pids[i], NULL); } - int64_t totalDelay = 0; - int64_t maxDelay = 0; - int64_t minDelay = UINT64_MAX; - int64_t cntDelay = 1; + uint64_t totalDelay = 0; + uint64_t maxDelay = 0; + uint64_t minDelay = UINT64_MAX; + uint64_t cntDelay = 1; double avgDelay = 0; for (int i = 0; i < threads; i++) { @@ -5749,7 +5759,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, tsem_destroy(&(t_info->lock_sem)); taos_close(t_info->taos); - debugPrint("%s() LN%d, [%d] totalInsert=%"PRId64" totalAffected=%"PRId64"\n", + debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n", __func__, __LINE__, t_info->threadID, t_info->totalInsertRows, t_info->totalAffectedRows); @@ -5775,35 +5785,42 @@ static void startMultiThreadInsertData(int threads, char* db_name, int64_t t = end - start; if 
(superTblInfo) { - printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n", + fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", t / 1000.0, superTblInfo->totalInsertRows, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName, (double)superTblInfo->totalInsertRows / (t / 1000.0)); - fprintf(g_fpOfInsertResult, - "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n", + + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, + "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", t / 1000.0, superTblInfo->totalInsertRows, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName, (double)superTblInfo->totalInsertRows / (t / 1000.0)); + } } else { - printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n", + fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", t / 1000.0, g_args.totalInsertRows, g_args.totalAffectedRows, threads, db_name, (double)g_args.totalInsertRows / (t / 1000.0)); - fprintf(g_fpOfInsertResult, - "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n", + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, + "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", t * 1000.0, g_args.totalInsertRows, g_args.totalAffectedRows, threads, db_name, (double)g_args.totalInsertRows / (t / 1000.0)); + } } - printf("insert delay, avg: %10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n", + fprintf(stderr, "insert delay, avg: %10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n", avgDelay, maxDelay, minDelay); - fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n", + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n", avgDelay, maxDelay, minDelay); + } //taos_close(taos); @@ -5973,7 +5990,8 @@ static int insertTestProcess() { return -1; } - printfInsertMetaToFile(g_fpOfInsertResult); + if (g_fpOfInsertResult) + printfInsertMetaToFile(g_fpOfInsertResult); if (!g_args.answer_yes) { printf("Press enter key to continue\n\n"); @@ -5984,7 +6002,8 @@ static int insertTestProcess() { // create database and super tables if(createDatabasesAndStables() != 0) { - fclose(g_fpOfInsertResult); + if (g_fpOfInsertResult) + fclose(g_fpOfInsertResult); return -1; } @@ -6000,11 +6019,13 @@ static int insertTestProcess() { end = taosGetTimestampMs(); if (g_totalChildTables > 0) { - printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n", + fprintf(stderr, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n", (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); - fprintf(g_fpOfInsertResult, + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n", (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); + } } taosMsleep(1000); @@ -6077,14 +6098,14 @@ static void *specifiedTableQuery(void *sarg) { return NULL; } - int64_t st = 0; - 
int64_t et = 0; + uint64_t st = 0; + uint64_t et = 0; - int queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes; + uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes; - int totalQueried = 0; - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); + uint64_t totalQueried = 0; + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); while(queryTimes --) { if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < @@ -6135,7 +6156,7 @@ static void *specifiedTableQuery(void *sarg) { if (currentPrintTime - lastPrintTime > 30*1000) { debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n", __func__, __LINE__, endTs, startTs); - printf("thread[%d] has currently completed queries: %d, QPS: %10.6f\n", + printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n", pThreadInfo->threadID, totalQueried, (double)(totalQueried/((endTs-startTs)/1000.0))); @@ -6187,14 +6208,14 @@ static void *superTableQuery(void *sarg) { } } - int64_t st = 0; - int64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval; + uint64_t st = 0; + uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval; - int queryTimes = g_queryInfo.superQueryInfo.queryTimes; - int totalQueried = 0; - int64_t startTs = taosGetTimestampMs(); + uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes; + uint64_t totalQueried = 0; + uint64_t startTs = taosGetTimestampMs(); - int64_t lastPrintTime = taosGetTimestampMs(); + uint64_t lastPrintTime = taosGetTimestampMs(); while(queryTimes --) { if (g_queryInfo.superQueryInfo.queryInterval && (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) { @@ -6221,7 +6242,7 @@ static void *superTableQuery(void *sarg) { int64_t currentPrintTime = taosGetTimestampMs(); int64_t endTs = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently completed queries: %d, QPS: %10.3f\n", + printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.3f\n", pThreadInfo->threadID, totalQueried, (double)(totalQueried/((endTs-startTs)/1000.0))); @@ -6285,7 +6306,7 @@ static int queryTestProcess() { int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent; int nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount; - int64_t startTs = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); if ((nSqlCount > 0) && (nConcurrent > 0)) { @@ -6345,16 +6366,16 @@ static int queryTestProcess() { ERROR_EXIT("memory allocation failed for create threads\n"); } - int ntables = g_queryInfo.superQueryInfo.childTblCount; + uint64_t ntables = g_queryInfo.superQueryInfo.childTblCount; int threads = g_queryInfo.superQueryInfo.threadCnt; - int a = ntables / threads; + uint64_t a = ntables / threads; if (a < 1) { threads = ntables; a = 1; } - int b = 0; + uint64_t b = 0; if (threads != 0) { b = ntables % threads; } @@ -6396,12 +6417,12 @@ static int queryTestProcess() { tmfree((char*)infosOfSub); // taos_close(taos);// TODO: workaround to use separate taos connection; - int64_t endTs = taosGetTimestampMs(); + uint64_t endTs = taosGetTimestampMs(); - int totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + + uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + g_queryInfo.superQueryInfo.totalQueried; - printf("==== completed total queries: %d, the QPS of all threads: %10.3f====\n", + fprintf(stderr, "==== completed total queries: %"PRIu64", the QPS of all threads: %10.3f====\n", totalQueried, 
(double)(totalQueried/((endTs-startTs)/1000.0))); return 0; From 9cd45522ccbff69626613114bd91872565386b49 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 6 May 2021 20:34:11 +0800 Subject: [PATCH 063/140] delete some code --- src/client/src/tscServer.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 706055668d..9e7d778ddd 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2535,7 +2535,9 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { pTableMetaInfo->pTableMeta = calloc(1, size); } else if (pTableMetaInfo->tableMetaSize < size) { char *tmp = realloc(pTableMetaInfo->pTableMeta, size); - if (tmp == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY;} + if (tmp == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } pTableMetaInfo->pTableMeta = (STableMeta *)tmp; pTableMetaInfo->tableMetaSize = size; } else { From cf3b1b12fd6a39e5ea567264af62c84c9fd51a86 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 6 May 2021 21:03:54 +0800 Subject: [PATCH 064/140] reset code --- src/client/src/tscParseInsert.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 17f9f27abc..f17afa0f78 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -1345,12 +1345,15 @@ int tsParseSql(SSqlObj *pSql, bool initial) { } // make a backup as tsParseInsertSql may modify the string -// char* sqlstr = strdup(pSql->sqlstr); + char* sqlstr = strdup(pSql->sqlstr); ret = tsParseInsertSql(pSql); - if (/*(sqlstr == NULL) || */(pSql->parseRetry >= 1) || + if ((sqlstr == NULL) || (pSql->parseRetry >= 1) || (ret != TSDB_CODE_TSC_SQL_SYNTAX_ERROR && ret != TSDB_CODE_TSC_INVALID_SQL)) { + free(sqlstr); } else { tscResetSqlCmd(pCmd, true); + free(pSql->sqlstr); + pSql->sqlstr = sqlstr; pSql->parseRetry++; if ((ret = tsInsertInitialCheck(pSql)) == TSDB_CODE_SUCCESS) { ret = tsParseInsertSql(pSql); From 980df53800d9b2408d8447e9578ddc201c2267ff Mon Sep 17 00:00:00 2001 From: tomchon Date: Thu, 6 May 2021 21:15:20 +0800 Subject: [PATCH 065/140] [TD-3998]: add taodemo-testcase that inserting too many rows won't make ts disorder --- .../taosdemoAllTest/insert-1s1tnt1r.json | 8 +- .../taosdemoAllTest/insert-1s1tntmr.json | 8 +- .../taosdemoAllTest/insert-disorder.json | 2 +- ...lumns-count-0.json => insert-illegal.json} | 16 +- .../insert-interval-speed.json | 6 +- .../tools/taosdemoAllTest/insert-newdb.json | 2 +- ...sertBinaryLenLarge16374AllcolLar16384.json | 140 ++++++ .../insertColumnsAndTagNum1024.json | 62 +++ .../insertColumnsAndTagNumLarge1024.json | 62 +++ .../taosdemoAllTest/insertColumnsNum0.json | 62 +++ ...mns.json => insertNumOfrecordPerReq0.json} | 16 +- ...json => insertNumOfrecordPerReqless0.json} | 18 +- .../insertSigcolumnsNum1024.json | 62 +++ ...unt129.json => insertTagsNumLarge128.json} | 26 -- .../insertTimestepMulRowsLargeint16.json | 65 +++ .../taosdemoAllTest/query-interrupt.json | 62 +++ .../tools/taosdemoAllTest/query-interrupt.py | 88 ++++ .../tools/taosdemoAllTest/queryQps.json | 37 ++ .../taosdemoAllTest/querySpeciMutisql100.json | 429 ++++++++++++++++++ .../taosdemoAllTest/querySuperMutisql100.json | 419 +++++++++++++++++ .../tools/taosdemoAllTest/queryall.json | 14 + .../taosdemoAllTest/speciQueryInsertdata.json | 2 +- .../taosdemoAllTest/speciQueryRestful.json | 38 ++ .../{speciQuery.json => speciQueryTaosc.json} | 1 + 
tests/pytest/tools/taosdemoAllTest/sub.json | 37 ++ .../tools/taosdemoAllTest/subInsertdata.json | 61 +++ .../taosdemoTestInsertWithJson.py | 53 ++- .../taosdemoTestQueryWithJson.py | 52 ++- .../taosdemoTestSubWithJson.py | 99 ++++ 29 files changed, 1869 insertions(+), 78 deletions(-) rename tests/pytest/tools/taosdemoAllTest/{insert-illegal-columns-count-0.json => insert-illegal.json} (89%) create mode 100644 tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum1024.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json rename tests/pytest/tools/taosdemoAllTest/{insert-illegal-columns.json => insertNumOfrecordPerReq0.json} (80%) rename tests/pytest/tools/taosdemoAllTest/{insert-illegal-columns-lmax.json => insertNumOfrecordPerReqless0.json} (79%) create mode 100644 tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum1024.json rename tests/pytest/tools/taosdemoAllTest/{insert-illegal-tags-count129.json => insertTagsNumLarge128.json} (65%) create mode 100644 tests/pytest/tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json create mode 100644 tests/pytest/tools/taosdemoAllTest/query-interrupt.json create mode 100644 tests/pytest/tools/taosdemoAllTest/query-interrupt.py create mode 100644 tests/pytest/tools/taosdemoAllTest/queryQps.json create mode 100644 tests/pytest/tools/taosdemoAllTest/querySpeciMutisql100.json create mode 100644 tests/pytest/tools/taosdemoAllTest/querySuperMutisql100.json create mode 100644 tests/pytest/tools/taosdemoAllTest/queryall.json create mode 100644 tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json rename tests/pytest/tools/taosdemoAllTest/{speciQuery.json => speciQueryTaosc.json} (96%) create mode 100644 tests/pytest/tools/taosdemoAllTest/sub.json create mode 100644 tests/pytest/tools/taosdemoAllTest/subInsertdata.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json index 8e40ad812d..c67582fb56 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json @@ -55,8 +55,8 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] }, { "name": "stb1", @@ -81,8 +81,8 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":4}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] }] }] } diff --git 
a/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json index e741fd5c05..e3db5476b8 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json @@ -55,8 +55,8 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] }, { "name": "stb1", @@ -81,8 +81,8 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] }] }] } diff --git a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json index fddaa4b4b9..f2dca662fd 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "": 4, + "thread_count_create_tbl": 4, "result_file":"./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-count-0.json b/tests/pytest/tools/taosdemoAllTest/insert-illegal.json similarity index 89% rename from tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-count-0.json rename to tests/pytest/tools/taosdemoAllTest/insert-illegal.json index f6a103f001..614402236a 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-count-0.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-illegal.json @@ -11,8 +11,8 @@ "confirm_parameter_prompt": "no", "insert_interval": 0, "interlace_rows": 10, - "num_of_records_per_req": 100, - "max_sql_len": 10240000000, + "num_of_records_per_req": "-asdf", + "max_sql_len": 1024000, "databases": [{ "dbinfo": { "name": "db", @@ -41,12 +41,12 @@ "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 1000, + "insert_rows": 10000, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", "interlace_rows": 0, - "insert_interval":0, + "insert_interval":-4, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, @@ -55,8 +55,8 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":0}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", 
"len": 16, "count":5}] }, { "name": "stb1", @@ -64,10 +64,10 @@ "childtable_count": 20, "childtable_prefix": "stb01_", "auto_create_table": "no", - "batch_create_tbl_num": 12, + "batch_create_tbl_num": "asdf", "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 2000, + "insert_rows": 20000, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", diff --git a/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json b/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json index c7c5150a06..38975a75a7 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json @@ -55,7 +55,7 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":6}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }, { @@ -81,8 +81,8 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":9}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] }] }] } diff --git a/tests/pytest/tools/taosdemoAllTest/insert-newdb.json b/tests/pytest/tools/taosdemoAllTest/insert-newdb.json index 72e380a66c..1a19ea00ac 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-newdb.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-newdb.json @@ -55,7 +55,7 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }, { diff --git a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json new file mode 100644 index 0000000000..55be019891 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json @@ -0,0 +1,140 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 10240000000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + 
"child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16374, "count":1}], + "tags": [{"type": "TINYINT", "count":12}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16370, "count":1},{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16375, "count":1},{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16371, "count":1},{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum1024.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum1024.json new file mode 100644 index 0000000000..42f6ef2f2f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum1024.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": 
"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 1000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json new file mode 100644 index 0000000000..42461b2f6f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10000, + "num_of_records_per_req": 10000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json new file mode 100644 index 0000000000..fd75f3b43f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": 
"root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":0}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns.json b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json similarity index 80% rename from tests/pytest/tools/taosdemoAllTest/insert-illegal-columns.json rename to tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json index 53735dc413..813eb9af04 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns.json +++ b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json @@ -11,7 +11,7 @@ "confirm_parameter_prompt": "no", "insert_interval": 0, "interlace_rows": 10, - "num_of_records_per_req": 100, + "num_of_records_per_req": 0, "max_sql_len": 10240000000, "databases": [{ "dbinfo": { @@ -35,13 +35,13 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 10, + "childtable_count": 1, "childtable_prefix": "stb00_", "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 1000, + "insert_rows": 1, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", @@ -55,19 +55,19 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] }, { "name": "stb1", "child_table_exists":"no", - "childtable_count": 20, + "childtable_count": 2, "childtable_prefix": "stb01_", "auto_create_table": "no", "batch_create_tbl_num": 12, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 2000, + "insert_rows": 2, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", @@ -81,7 +81,7 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": 
"BINARY", "len": 32, "count":6}], + "columns": [{"type": "BINARY", "len": 1, "count":1}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }] }] diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-lmax.json b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json similarity index 79% rename from tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-lmax.json rename to tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json index 17050278c8..554115f397 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-lmax.json +++ b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json @@ -11,7 +11,7 @@ "confirm_parameter_prompt": "no", "insert_interval": 0, "interlace_rows": 10, - "num_of_records_per_req": 100, + "num_of_records_per_req": -1, "max_sql_len": 10240000000, "databases": [{ "dbinfo": { @@ -35,13 +35,13 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 10, + "childtable_count": 1, "childtable_prefix": "stb00_", "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 1000, + "insert_rows": 1, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", @@ -55,19 +55,19 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1024}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] }, { "name": "stb1", "child_table_exists":"no", - "childtable_count": 20, + "childtable_count": 2, "childtable_prefix": "stb01_", "auto_create_table": "no", "batch_create_tbl_num": 12, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 2000, + "insert_rows": 2, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", @@ -81,8 +81,8 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }] }] } diff --git a/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum1024.json b/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum1024.json new file mode 100644 index 0000000000..7c12a62764 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum1024.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 
0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "DOUBLE", "count":1024}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal-tags-count129.json b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json similarity index 65% rename from tests/pytest/tools/taosdemoAllTest/insert-illegal-tags-count129.json rename to tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json index 115c42b502..5cf8114472 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-illegal-tags-count129.json +++ b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json @@ -57,32 +57,6 @@ "tags_file": "", "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BIGINT", "count":1}, {"type": "float", "count":1}, {"type": "double", "count":1}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], "tags": [{"type": "TINYINT", "count":127}, {"type": "BINARY", "len": 16, "count":2}] - }, - { - "name": "stb1", - "child_table_exists":"no", - "childtable_count": 20, - "childtable_prefix": "stb01_", - "auto_create_table": "no", - "batch_create_tbl_num": 12, - "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 2000, - "childtable_limit": 0, - "childtable_offset":0, - "multi_thread_write_one_tbl": "no", - "interlace_rows": 0, - "insert_interval":0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 1, - "start_timestamp": "2020-10-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./sample.csv", - "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }] }] } diff --git a/tests/pytest/tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json b/tests/pytest/tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json new file mode 100644 index 0000000000..b563dcc94b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json @@ -0,0 +1,65 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "localhost", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "databases": [ + { + "dbinfo": { + "name": "blf", + "drop": "yes" + }, + "super_tables": [ + { + "name": "p_0_topics", + "child_table_exists": "no", + "childtable_count": 10, + "childtable_prefix": "p_0_topics_", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 525600, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + 
"timestamp_step": 60000, + "start_timestamp": "2019-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ + { + "type": "INT", + "count": 1 + }, + { + "type": "FLOAT", + "count": 1 + }, + { + "type": "BINARY", + "len": 12, + "count": 1 + } + ], + "tags": [ + { + "type": "BINARY", + "len": 12, + "count": 10 + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/query-interrupt.json b/tests/pytest/tools/taosdemoAllTest/query-interrupt.json new file mode 100644 index 0000000000..643cbf09c8 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/query-interrupt.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 150000, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/query-interrupt.py b/tests/pytest/tools/taosdemoAllTest/query-interrupt.py new file mode 100644 index 0000000000..270bfd8b60 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/query-interrupt.py @@ -0,0 +1,88 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import subprocess +import time +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # # insert 1000w rows in stb0 + os.system("%staosdemo -f tools/taosdemoAllTest/query-interrupt.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0,60) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 6000000) + os.system('%staosdemo -f tools/taosdemoAllTest/queryall.json -y & ' % binPath) + time.sleep(2) + query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/queryall.json" |grep -v "grep"|awk \'{print $2}\'')[1]) + taosd_cpu_load_1 = float(subprocess.getstatusoutput('top -n 1 -b -p $(ps aux|grep "bin/taosd -c"|grep -v "grep" |awk \'{print $2}\')|awk \'END{print}\' |awk \'{print $9}\'')[1]) + if taosd_cpu_load_1 > 10.0 : + os.system("kill -9 %d" % query_pid) + time.sleep(5) + taosd_cpu_load_2 = float(subprocess.getstatusoutput('top -n 1 -b -p $(ps aux|grep "bin/taosd -c"|grep -v "grep" |awk \'{print $2}\')|awk \'END{print}\' |awk \'{print $9}\'')[1]) + if taosd_cpu_load_2 < 10.0 : + suc_kill = 60 + else: + suc_kill = 10 + print("taosd_cpu_load is higher than 10%") + else: + suc_kill = 20 + print("taosd_cpu_load is still less than 10%") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, "%d" % suc_kill) + os.system("rm -rf querySystemInfo*") + os.system("rm -rf insert_res.txt") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/queryQps.json b/tests/pytest/tools/taosdemoAllTest/queryQps.json new file mode 100644 index 0000000000..67a1cf3eb3 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/queryQps.json @@ -0,0 +1,37 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 1, + "specified_table_query": { + "query_interval": 0, + "concurrent": 1, + "sqls": [ + { + "sql": "select last_row(*) from stb00_0", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_99 ", + "result": "./query_res1.txt" + + }] + }, + "super_table_query": { + "stblname": "stb1", + 
"query_interval":0, + "threads": 1, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } + } + \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/querySpeciMutisql100.json b/tests/pytest/tools/taosdemoAllTest/querySpeciMutisql100.json new file mode 100644 index 0000000000..4aa1c0b4dd --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/querySpeciMutisql100.json @@ -0,0 +1,429 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 2, + "specified_table_query": { + "query_interval": 1, + "concurrent": 3, + "sqls": [ + { + "sql": "select last_row(*) from stb00_0", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_1", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_2", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_3", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_4", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_5", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_6", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_7", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_8", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_9", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_10 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_11 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_12 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_13 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_14 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_15 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_16 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_17 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_18 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_19 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_20 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_21 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_22 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_23 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_24 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_25 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_26 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_27 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_28 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_29 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_30 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_31 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_32 ", + "result": 
"./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_33 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_34 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_35 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_36 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_37 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_38 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_39 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_40 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_41 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_42 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_43 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_44 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_45 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_46 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_47 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_48 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_49 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_50 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_51 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_52 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_53 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_54 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_55 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_56 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_57 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_58 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_59 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_60", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_61", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_62", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_63", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_64", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_65", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_66", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_67", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_68", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_69", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_70 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_71 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_72 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_73 ", + "result": "./query_res0.txt" + }, + { + "sql": 
"select last_row(*) from stb00_74 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_75 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_76 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_77 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_78 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_79 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_80 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_81 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_82 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_83 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_84 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_85 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_86 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_87 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_88 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_89 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_90 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_91 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_92 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_93 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_94 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_95 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_96 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_97 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_98 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_99 ", + "result": "./query_res0.txt" + + }] + }, + "super_table_query": { + "stblname": "stb1", + "query_interval": 1, + "threads": 3, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } +} + \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/querySuperMutisql100.json b/tests/pytest/tools/taosdemoAllTest/querySuperMutisql100.json new file mode 100644 index 0000000000..c85713c94c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/querySuperMutisql100.json @@ -0,0 +1,419 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 3, + "super_table_query": { + "stblname": "stb0", + "query_interval": 10000, + "concurrent": 9, + "sqls": [ + { + "sql": "select last_row(*) from xxxx", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx", + "result": "./query_res0.txt" + }, + { + "sql": 
"select last_row(*) from xxxx", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", 
+ "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": 
"./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from xxxx ", + "result": "./query_res0.txt" + }, + { + "sql": "select * from xxxx ", + "result": "./query_res0.txt" + + }] + } + } + \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/queryall.json b/tests/pytest/tools/taosdemoAllTest/queryall.json new file mode 100644 index 0000000000..bbc3b9717c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/queryall.json @@ -0,0 +1,14 @@ +{ + "filetype":"query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "specified_table_query": + {"query_interval":1, "concurrent":1, + "sqls": [{"sql": "select * from stb0", "result": ""}] + } +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json b/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json index ec9cb5a40d..79471be204 100644 --- a/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json +++ b/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json @@ -69,7 +69,7 @@ "insert_rows": 200, "childtable_limit": 0, "childtable_offset": 0, - "interlace_rows": 0, + "interlace_rows": 0 , "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json b/tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json new file mode 100644 index 0000000000..98e9b7a4e8 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json @@ -0,0 +1,38 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 2, + "query_mode": "restful", + "specified_table_query": { + "query_interval": 1, + "concurrent": 3, + "sqls": [ + { + "sql": "select last_row(*) from db.stb0 ", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from db.stb00_1", + "result": "./query_res1.txt" + } + ] + }, + "super_table_query": { + "stblname": "stb1", + "query_interval": 1, + "threads": 3, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } + } + \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/speciQuery.json b/tests/pytest/tools/taosdemoAllTest/speciQueryTaosc.json similarity index 96% rename from tests/pytest/tools/taosdemoAllTest/speciQuery.json rename to tests/pytest/tools/taosdemoAllTest/speciQueryTaosc.json index 5e99e80108..fece4e71c5 100644 --- a/tests/pytest/tools/taosdemoAllTest/speciQuery.json +++ b/tests/pytest/tools/taosdemoAllTest/speciQueryTaosc.json @@ -8,6 +8,7 @@ "confirm_parameter_prompt": "no", "databases": "db", "query_times": 2, + "query_mode": "taosc", "specified_table_query": { "query_interval": 1, "concurrent": 3, diff --git a/tests/pytest/tools/taosdemoAllTest/sub.json b/tests/pytest/tools/taosdemoAllTest/sub.json new file mode 100644 index 0000000000..fe3c892a76 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sub.json @@ -0,0 
+1,37 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":1, + "mode":"sync", + "interval":0, + "restart":"yes", + "keepProgress":"yes", + "sqls": [ + { + "sql": "select * from stb00_0 ;", + "result": "./subscribe_res0.txt" + }] + }, + "super_table_query": + { + "stblname": "stb0", + "threads":1, + "mode":"sync", + "interval":10000, + "restart":"yes", + "keepProgress":"yes", + "sqls": [ + { + "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;", + "result": "./subscribe_res1.txt" + }] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json new file mode 100644 index 0000000000..7d14d0ad4b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json @@ -0,0 +1,61 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 0, + "timestamp_step": 1000, + "start_timestamp": "2021-02-25 10:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len":50, "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py index 2dd50bf639..d4525e9764 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py @@ -153,19 +153,54 @@ class TDTestCase: tdSql.checkData(0, 0, 160) - # insert: let parament in json file is illegal ,i need know how to write exception. + # insert: the parameters in the json file are illegal, so errors are expected.
tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-columns.json -y " % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json -y " % binPath) tdSql.error("use db") - os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-columns-lmax.json -y " % binPath) - tdSql.error("select * from db.stb0") - os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-columns-count-0.json -y " % binPath) - tdSql.execute("use db") - tdSql.query("select count(*) from db.stb0") - tdSql.checkData(0, 0, 10000) tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-tags-count129.json -y " % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/insertSigcolumnsNum1024.json -y " % binPath) + tdSql.error("select * from db.stb0") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNum1024.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") + tdSql.checkData(0, 0, 10000) + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsNum0.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("show stables like 'stb0%' ") + tdSql.checkData(0, 2, 11) + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath) tdSql.error("use db1") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") + tdSql.checkRows(1) + tdSql.query("select count(*) from db.stb1") + tdSql.checkRows(1) + tdSql.error("select * from db.stb3") + tdSql.error("select * from db.stb2") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") + tdSql.checkRows(0) + tdSql.query("select * from db.stb1") + tdSql.checkRows(0) + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath) + tdSql.error("select count(*) from db.stb0") + tdSql.error("select * from db.stb1") + tdSql.execute("drop database if exists blf") + os.system("%staosdemo -f tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath) + tdSql.execute("use blf") + tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1") + tdSql.checkData(0, 0, "2020-03-31 12:00:00.000") + tdSql.query("select first(ts) from blf.p_0_topics_2") + tdSql.checkData(0, 0, "2019-10-01 00:00:00") + tdSql.query("select last(ts) from blf.p_0_topics_6 ") + tdSql.checkData(0, 0, "2020-09-29 23:59:00") + + # insert: timestamp and step os.system("%staosdemo -f tools/taosdemoAllTest/insert-timestep.json -y " % binPath) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py index 00b387e398..57848b8d72 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py @@ -49,10 +49,9 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - # insert: drop and child_table_exists combination test - # insert: using parament "childtable_offset and childtable_limit" to control table'offset 
point and offset + # query: query specified table and query super table os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryInsertdata.json" % binPath) - os.system("%staosdemo -f tools/taosdemoAllTest/speciQuery.json" % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryTaosc.json" % binPath) os.system("cat query_res0.txt* |sort -u > all_query_res0.txt") os.system("cat query_res1.txt* |sort -u > all_query_res1.txt") os.system("cat query_res2.txt* |sort -u > all_query_res2.txt") @@ -75,9 +74,54 @@ class TDTestCase: timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f") tdSql.query("select last_row(ts) from stb1") tdSql.checkData(0, 0, "%s" % timest) + + # # delete useless files + # os.system("rm -rf ./insert_res.txt") + # os.system("rm -rf tools/taosdemoAllTest/*.py.sql") + # os.system("rm -rf ./querySystemInfo*") + # os.system("rm -rf ./query_res*") + # os.system("rm -rf ./all_query*") + # os.system("rm -rf ./test_query_res0.txt") + + # # use restful api to query + # os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryRestful.json" % binPath) + # os.system("cat query_res0.txt* |sort -u > all_query_res0.txt") + # os.system("cat query_res1.txt* |sort -u > all_query_res1.txt") + # os.system("cat query_res2.txt* |sort -u > all_query_res2.txt") + # tdSql.execute("use db") + # tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")') + # os.system("python3 tools/taosdemoAllTest/convertResFile.py") + # tdSql.execute("insert into result0 file './test_query_res0.txt'") + # tdSql.query("select ts from result0") + # tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000") + # tdSql.query("select count(*) from result0") + # tdSql.checkData(0, 0, 1) + # with open('./all_query_res1.txt','r+') as f1: + # result1 = int(f1.readline()) + # tdSql.query("select count(*) from stb00_1") + # tdSql.checkData(0, 0, "%d" % result1) + + # with open('./all_query_res2.txt','r+') as f2: + # result2 = int(f2.readline()) + # d2 = datetime.fromtimestamp(result2/1000) + # timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f") + # tdSql.query("select last_row(ts) from stb1") + # tdSql.checkData(0, 0, "%s" % timest) + + + + # query times less than or equal to 100 + os.system("%staosdemo -f tools/taosdemoAllTest/querySpeciMutisql100.json" % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/querySuperMutisql100.json" % binPath) + + # query result print QPS + os.system("%staosdemo -f tools/taosdemoAllTest/queryQps.json" % binPath) + + + # delete useless files os.system("rm -rf ./insert_res.txt") - os.system("rm -rf tools/taosdemoAllTest/taosdemoTestQuerytWithJson.py.sql") + os.system("rm -rf tools/taosdemoAllTest/*.py.sql") os.system("rm -rf ./querySystemInfo*") os.system("rm -rf ./query_res*") os.system("rm -rf ./all_query*") diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py new file mode 100644 index 0000000000..1275b6a8b5 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py @@ -0,0 +1,99 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import time +from datetime import datetime + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # query: query specified table and query super table + # os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdata.json" % binPath) + # os.system("%staosdemo -f tools/taosdemoAllTest/sub.json" % binPath) + # os.system("cat query_res0.txt* |sort -u > all_query_res0.txt") + # os.system("cat query_res1.txt* |sort -u > all_query_res1.txt") + # os.system("cat query_res2.txt* |sort -u > all_query_res2.txt") + # tdSql.execute("use db") + # tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")') + # os.system("python3 tools/taosdemoAllTest/convertResFile.py") + # tdSql.execute("insert into result0 file './test_query_res0.txt'") + # tdSql.query("select ts from result0") + # tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000") + # tdSql.query("select count(*) from result0") + # tdSql.checkData(0, 0, 1) + # with open('./all_query_res1.txt','r+') as f1: + # result1 = int(f1.readline()) + # tdSql.query("select count(*) from stb00_1") + # tdSql.checkData(0, 0, "%d" % result1) + + # with open('./all_query_res2.txt','r+') as f2: + # result2 = int(f2.readline()) + # d2 = datetime.fromtimestamp(result2/1000) + # timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f") + # tdSql.query("select last_row(ts) from stb1") + # tdSql.checkData(0, 0, "%s" % timest) + + + # # query times less than or equal to 100 + # os.system("%staosdemo -f tools/taosdemoAllTest/QuerySpeciMutisql100.json" % binPath) + # os.system("%staosdemo -f tools/taosdemoAllTest/QuerySuperMutisql100.json" % binPath) + + + + + # delete useless files + # os.system("rm -rf ./insert_res.txt") + # os.system("rm -rf tools/taosdemoAllTest/*.py.sql") + # os.system("rm -rf ./querySystemInfo*") + # os.system("rm -rf ./query_res*") + # os.system("rm -rf ./all_query*") + # os.system("rm -rf ./test_query_res0.txt") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 12cad8c1b1e4d53a1e63fe740cf7634abc6d21ff Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 6 May 2021 22:12:30 +0800 Subject: [PATCH 066/140] [TD-3747]fix crash --- src/client/src/tscServer.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git 
a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 9e7d778ddd..b691a7ad4a 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2532,7 +2532,8 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { uint32_t size = tscGetTableMetaMaxSize(); if (pTableMetaInfo->pTableMeta == NULL) { - pTableMetaInfo->pTableMeta = calloc(1, size); + pTableMetaInfo->pTableMeta = calloc(1, size); + pTableMetaInfo->tableMetaSize = size; } else if (pTableMetaInfo->tableMetaSize < size) { char *tmp = realloc(pTableMetaInfo->pTableMeta, size); if (tmp == NULL) { @@ -2541,8 +2542,8 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { pTableMetaInfo->pTableMeta = (STableMeta *)tmp; pTableMetaInfo->tableMetaSize = size; } else { - uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta); - memset(pTableMetaInfo->pTableMeta, 0, s); + //uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta); + memset(pTableMetaInfo->pTableMeta, 0, size); } pTableMetaInfo->pTableMeta->tableType = -1; From c4bc564931a37c94199da8be03ea1e754d7c041e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 6 May 2021 23:38:40 +0800 Subject: [PATCH 067/140] [TD-3857]: taosdump database properties. (#6019) Co-authored-by: Shuduo Sang --- src/kit/taosdump/taosdump.c | 230 ++++++++++++++++++------------------ 1 file changed, 117 insertions(+), 113 deletions(-) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index f7cb3e93d1..e706ddefd6 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -72,7 +72,8 @@ enum _show_db_index { TSDB_SHOW_DB_WALLEVEL_INDEX, TSDB_SHOW_DB_FSYNC_INDEX, TSDB_SHOW_DB_COMP_INDEX, - TSDB_SHOW_DB_PRECISION_INDEX, + TSDB_SHOW_DB_CACHELAST_INDEX, + TSDB_SHOW_DB_PRECISION_INDEX, TSDB_SHOW_DB_UPDATE_INDEX, TSDB_SHOW_DB_STATUS_INDEX, TSDB_MAX_SHOW_DB @@ -83,10 +84,10 @@ enum _show_tables_index { TSDB_SHOW_TABLES_NAME_INDEX, TSDB_SHOW_TABLES_CREATED_TIME_INDEX, TSDB_SHOW_TABLES_COLUMNS_INDEX, - TSDB_SHOW_TABLES_METRIC_INDEX, - TSDB_SHOW_TABLES_UID_INDEX, + TSDB_SHOW_TABLES_METRIC_INDEX, + TSDB_SHOW_TABLES_UID_INDEX, TSDB_SHOW_TABLES_TID_INDEX, - TSDB_SHOW_TABLES_VGID_INDEX, + TSDB_SHOW_TABLES_VGID_INDEX, TSDB_MAX_SHOW_TABLES }; @@ -134,6 +135,7 @@ typedef struct { int8_t wallevel; int32_t fsync; int8_t comp; + int8_t cachelast; char precision[8]; // time resolution int8_t update; char status[16]; @@ -360,19 +362,19 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { arguments->data_batch = atoi(arg); if (arguments->data_batch >= INT16_MAX) { arguments->data_batch = INT16_MAX - 1; - } + } break; - case 'L': + case 'L': { int32_t len = atoi(arg); if (len > TSDB_MAX_ALLOWED_SQL_LEN) { len = TSDB_MAX_ALLOWED_SQL_LEN; } else if (len < TSDB_MAX_SQL_LEN) { len = TSDB_MAX_SQL_LEN; - } + } arguments->max_sql_len = len; break; - } + } case 't': arguments->table_batch = atoi(arg); break; @@ -415,12 +417,12 @@ static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, i struct arguments g_args = { // connection option - NULL, - "root", + NULL, + "root", #ifdef _TD_POWER_ - "powerdb", + "powerdb", #else - "taosdata", + "taosdata", #endif 0, "", @@ -677,10 +679,10 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS } sprintf(tempCommand, "show tables like %s", table); - - result = taos_query(taosCon, tempCommand); + + result = taos_query(taosCon, tempCommand); int32_t code = taos_errno(result); - + if (code != 0) { fprintf(stderr, "failed 
to run command %s\n", tempCommand); free(tempCommand); @@ -707,12 +709,12 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS free(tempCommand); return 0; } - + sprintf(tempCommand, "show stables like %s", table); - - result = taos_query(taosCon, tempCommand); + + result = taos_query(taosCon, tempCommand); code = taos_errno(result); - + if (code != 0) { fprintf(stderr, "failed to run command %s\n", tempCommand); free(tempCommand); @@ -750,7 +752,7 @@ int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric return -1; } } - + memset(&tableRecord, 0, sizeof(STableRecord)); tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN); tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); @@ -772,7 +774,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu } sprintf(tmpCommand, "select tbname from %s", metric); - + TAOS_RES *res = taos_query(taosCon, tmpCommand); int32_t code = taos_errno(res); if (code != 0) { @@ -794,20 +796,20 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu } TAOS_FIELD *fields = taos_fetch_fields(res); - + int32_t numOfTable = 0; - while ((row = taos_fetch_row(res)) != NULL) { + while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes); tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); - - taosWrite(fd, &tableRecord, sizeof(STableRecord)); + + taosWrite(fd, &tableRecord, sizeof(STableRecord)); numOfTable++; } taos_free_result(res); lseek(fd, 0, SEEK_SET); - + int maxThreads = arguments->thread_num; int tableOfPerFile ; if (numOfTable <= arguments->thread_num) { @@ -817,16 +819,16 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu tableOfPerFile = numOfTable / arguments->thread_num; if (0 != numOfTable % arguments->thread_num) { tableOfPerFile += 1; - } + } } char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord)); if (NULL == tblBuf){ - fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord)); + fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord)); close(fd); return -1; } - + int32_t numOfThread = *totalNumOfThread; int subFd = -1; for (; numOfThread < maxThreads; numOfThread++) { @@ -840,7 +842,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu (void)remove(tmpBuf); } sprintf(tmpBuf, ".select-tbname.tmp"); - (void)remove(tmpBuf); + (void)remove(tmpBuf); free(tblBuf); close(fd); return -1; @@ -858,11 +860,11 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu sprintf(tmpBuf, ".select-tbname.tmp"); (void)remove(tmpBuf); - + if (fd >= 0) { close(fd); fd = -1; - } + } *totalNumOfThread = numOfThread; @@ -886,7 +888,7 @@ int taosDumpOut(struct arguments *arguments) { } else { sprintf(tmpBuf, "dbs.sql"); } - + fp = fopen(tmpBuf, "w"); if (fp == NULL) { fprintf(stderr, "failed to open file %s\n", tmpBuf); @@ -918,9 +920,9 @@ int taosDumpOut(struct arguments *arguments) { taosDumpCharset(fp); sprintf(command, "show databases"); - result = taos_query(taos, command); + result = taos_query(taos, command); int32_t code = taos_errno(result); - + if (code != 0) { fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result)); goto _exit_failure; @@ -960,12 +962,12 @@ int taosDumpOut(struct arguments *arguments) { strncpy(dbInfos[count]->name, (char 
*)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); if (arguments->with_property) { dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); - dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); + dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); + dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); //dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); //dbInfos[count]->daysToKeep1; //dbInfos[count]->daysToKeep2; @@ -976,8 +978,9 @@ int taosDumpOut(struct arguments *arguments) { dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); + dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); + strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); //dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); } @@ -1009,8 +1012,8 @@ int taosDumpOut(struct arguments *arguments) { g_resultStatistics.totalDatabasesOfDumpOut++; sprintf(command, "use %s", dbInfos[0]->name); - - result = taos_query(taos, command); + + result = taos_query(taos, command); int32_t code = taos_errno(result); if (code != 0) { fprintf(stderr, "invalid database %s\n", dbInfos[0]->name); @@ -1040,7 +1043,7 @@ int taosDumpOut(struct arguments *arguments) { int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name); if (0 == ret) { superTblCnt++; - } + } } retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd); } @@ -1052,7 +1055,7 @@ int taosDumpOut(struct arguments *arguments) { goto _clean_tmp_file; } } - + // TODO: save dump super table into result_output.txt fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt); g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; @@ -1078,7 +1081,7 @@ int taosDumpOut(struct arguments *arguments) { taos_close(taos); taos_free_result(result); tfree(command); - taosFreeDbInfos(); + taosFreeDbInfos(); fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows); return 0; @@ -1099,8 +1102,8 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo char sqlstr[COMMAND_SIZE]; sprintf(sqlstr, "describe %s.%s;", dbName, table); - - res = taos_query(taosCon, sqlstr); + + res = taos_query(taosCon, sqlstr); int32_t code = taos_errno(res); if (code != 0) { fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); @@ -1130,23 +1133,23 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo if (isSuperTable) { return count; } - + // if chidl-table have tag, using select tagName 
from table to get tagValue for (int i = 0 ; i < count; i++) { if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue; sprintf(sqlstr, "select %s from %s.%s", tableDes->cols[i].field, dbName, table); - - res = taos_query(taosCon, sqlstr); + + res = taos_query(taosCon, sqlstr); code = taos_errno(res); if (code != 0) { fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); taos_free_result(res); return -1; } - - fields = taos_fetch_fields(res); + + fields = taos_fetch_fields(res); row = taos_fetch_row(res); if (NULL == row) { @@ -1161,7 +1164,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo res = NULL; continue; } - + int32_t* length = taos_fetch_lengths(res); //int32_t* length = taos_fetch_lengths(tmpResult); @@ -1198,7 +1201,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo } case TSDB_DATA_TYPE_NCHAR: { memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); - char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' ' + char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' ' convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN); sprintf(tableDes->cols[i].note, "\'%s\'", tbuf); break; @@ -1221,9 +1224,9 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo default: break; } - + taos_free_result(res); - res = NULL; + res = NULL; } return count; @@ -1282,9 +1285,10 @@ void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name); if (isDumpProperty) { pstr += sprintf(pstr, - "TABLES %d VGROUPS %d REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION '%s' UPDATE %d", - dbInfo->ntables, dbInfo->vgroups, dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache, - dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->wallevel, dbInfo->fsync, dbInfo->comp, dbInfo->precision, dbInfo->update); + "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d", + dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache, + dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->fsync, dbInfo->cachelast, + dbInfo->comp, dbInfo->precision, dbInfo->update); } pstr += sprintf(pstr, ";"); @@ -1295,8 +1299,8 @@ void* taosDumpOutWorkThreadFp(void *arg) { SThreadParaObj *pThread = (SThreadParaObj*)arg; STableRecord tableRecord; - int fd; - + int fd; + char tmpBuf[TSDB_FILENAME_LEN*4] = {0}; sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex); fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); @@ -1307,13 +1311,13 @@ void* taosDumpOutWorkThreadFp(void *arg) FILE *fp = NULL; memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128); - + if (g_args.outpath[0] != 0) { sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex); } else { sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex); } - + fp = fopen(tmpBuf, "w"); if (fp == NULL) { fprintf(stderr, "failed to open file %s\n", tmpBuf); @@ -1323,13 +1327,13 @@ void* taosDumpOutWorkThreadFp(void *arg) memset(tmpBuf, 0, TSDB_FILENAME_LEN); sprintf(tmpBuf, "use %s", pThread->dbName); - - TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf); + + TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf); int32_t code = taos_errno(tmpResult); if (code != 0) { 
fprintf(stderr, "invalid database %s\n", pThread->dbName); taos_free_result(tmpResult); - fclose(fp); + fclose(fp); close(fd); return NULL; } @@ -1347,7 +1351,7 @@ void* taosDumpOutWorkThreadFp(void *arg) // TODO: sum table count and table rows by self pThread->tablesOfDumpOut++; pThread->rowsOfDumpOut += ret; - + if (pThread->rowsOfDumpOut >= lastRowsPrint) { printf(" %"PRId64 " rows already be dumpout from database %s\n", pThread->rowsOfDumpOut, pThread->dbName); lastRowsPrint += 5000000; @@ -1357,15 +1361,15 @@ void* taosDumpOutWorkThreadFp(void *arg) if (tablesInOneFile >= g_args.table_batch) { fclose(fp); tablesInOneFile = 0; - - memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128); + + memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128); if (g_args.outpath[0] != 0) { sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex); } else { sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex); } fileNameIndex++; - + fp = fopen(tmpBuf, "w"); if (fp == NULL) { fprintf(stderr, "failed to open file %s\n", tmpBuf); @@ -1379,7 +1383,7 @@ void* taosDumpOutWorkThreadFp(void *arg) taos_free_result(tmpResult); close(fd); - fclose(fp); + fclose(fp); return NULL; } @@ -1395,7 +1399,7 @@ static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, i pThread->threadIndex = t; pThread->totalThreads = numOfThread; tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN); - pThread->taosCon = taosCon; + pThread->taosCon = taosCon; pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); @@ -1410,7 +1414,7 @@ static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, i pthread_join(threadObj[t].threadID, NULL); } - // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt + // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt int64_t totalRowsOfDumpOut = 0; int64_t totalChildTblsOfDumpOut = 0; for (int32_t t = 0; t < numOfThread; ++t) { @@ -1451,7 +1455,7 @@ int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName) { } -int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) +int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) { TAOS_ROW row; int fd = -1; @@ -1459,8 +1463,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) char sqlstr[TSDB_MAX_SQL_LEN] = {0}; sprintf(sqlstr, "show %s.stables", dbName); - - TAOS_RES* res = taos_query(taosCon, sqlstr); + + TAOS_RES* res = taos_query(taosCon, sqlstr); int32_t code = taos_errno(res); if (code != 0) { fprintf(stderr, "failed to run command <%s>, reason: %s\n", sqlstr, taos_errstr(res)); @@ -1480,13 +1484,13 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) (void)remove(".stables.tmp"); exit(-1); } - - while ((row = taos_fetch_row(res)) != NULL) { + + while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); taosWrite(fd, &tableRecord, sizeof(STableRecord)); - } - + } + taos_free_result(res); (void)lseek(fd, 0, SEEK_SET); @@ -1494,7 +1498,7 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) while (1) { ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); if (readLen <= 0) break; - + int ret = taosDumpStable(tableRecord.name, fp, 
taosCon, dbName); if (0 == ret) { superTblCnt++; @@ -1507,8 +1511,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) close(fd); (void)remove(".stables.tmp"); - - return 0; + + return 0; } @@ -1518,19 +1522,19 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao STableRecord tableRecord; taosDumpCreateDbClause(dbInfo, arguments->with_property, fp); - + fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfo->name); g_resultStatistics.totalDatabasesOfDumpOut++; char sqlstr[TSDB_MAX_SQL_LEN] = {0}; fprintf(fp, "USE %s;\n\n", dbInfo->name); - + (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp); sprintf(sqlstr, "show %s.tables", dbInfo->name); - - TAOS_RES* res = taos_query(taosCon, sqlstr); + + TAOS_RES* res = taos_query(taosCon, sqlstr); int code = taos_errno(res); if (code != 0) { fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); @@ -1549,15 +1553,15 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao } TAOS_FIELD *fields = taos_fetch_fields(res); - + int32_t numOfTable = 0; - while ((row = taos_fetch_row(res)) != NULL) { + while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); - + taosWrite(fd, &tableRecord, sizeof(STableRecord)); - + numOfTable++; } taos_free_result(res); @@ -1572,7 +1576,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao tableOfPerFile = numOfTable / g_args.thread_num; if (0 != numOfTable % g_args.thread_num) { tableOfPerFile += 1; - } + } } char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord)); @@ -1581,7 +1585,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao close(fd); return -1; } - + int32_t numOfThread = 0; int subFd = -1; for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) { @@ -1618,7 +1622,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao close(fd); fd = -1; } - + taos_free_result(res); // start multi threads to dumpout @@ -1626,7 +1630,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) { sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); (void)remove(tmpBuf); - } + } free(tblBuf); return 0; @@ -1737,7 +1741,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* char *pstr = NULL; TAOS_ROW row = NULL; int numFields = 0; - + if (arguments->schemaonly) { return 0; } @@ -1752,11 +1756,11 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* pstr = tmpBuffer; char sqlstr[1024] = {0}; - sprintf(sqlstr, - "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", + sprintf(sqlstr, + "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", dbName, tbname, arguments->start_time, arguments->end_time); - - TAOS_RES* tmpResult = taos_query(taosCon, sqlstr); + + TAOS_RES* tmpResult = taos_query(taosCon, sqlstr); int32_t code = taos_errno(tmpResult); if (code != 0) { fprintf(stderr, "failed to run command %s, reason: %s\n", sqlstr, taos_errstr(tmpResult)); @@ -1776,7 +1780,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct 
arguments *arguments, TAOS* while ((row = taos_fetch_row(tmpResult)) != NULL) { pstr = tmpBuffer; curr_sqlstr_len = 0; - + int32_t* length = taos_fetch_lengths(tmpResult); // act len if (count == 0) { @@ -1831,7 +1835,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); //pstr = stpcpy(pstr, tbuf); //*(pstr++) = '\''; - pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); + pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); break; } case TSDB_DATA_TYPE_NCHAR: { @@ -1859,10 +1863,10 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") "); - totalRows++; + totalRows++; count++; fprintf(fp, "%s", tmpBuffer); - + if (totalRows >= lastRowsPrint) { printf(" %"PRId64 " rows already be dumpout from %s.%s\n", totalRows, dbName, tbname); lastRowsPrint += 5000000; @@ -2208,7 +2212,7 @@ static FILE* taosOpenDumpInFile(char *fptr) { } char *fname = full_path.we_wordv[0]; - + FILE *f = fopen(fname, "r"); if (f == NULL) { fprintf(stderr, "ERROR: failed to open file %s\n", fname); @@ -2242,7 +2246,7 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c line[--read_len] = '\0'; //if (read_len == 0 || isCommentLine(line)) { // line starts with # - if (read_len == 0 ) { + if (read_len == 0 ) { continue; } @@ -2261,8 +2265,8 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c } memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); - cmd_len = 0; - + cmd_len = 0; + if (lineNo >= lastRowsPrint) { printf(" %d lines already be executed from file %s\n", lineNo, fileName); lastRowsPrint += 5000000; @@ -2302,7 +2306,7 @@ static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args) if (totalThreads > tsSqlFileNum) { totalThreads = tsSqlFileNum; } - + SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj)); for (int32_t t = 0; t < totalThreads; ++t) { pThread = threadObj + t; @@ -2332,7 +2336,7 @@ static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args) int taosDumpIn(struct arguments *arguments) { assert(arguments->isDumpIn); - + TAOS *taos = NULL; FILE *fp = NULL; @@ -2347,22 +2351,22 @@ int taosDumpIn(struct arguments *arguments) { int32_t tsSqlFileNumOfTbls = tsSqlFileNum; if (tsDbSqlFile[0] != 0) { tsSqlFileNumOfTbls--; - + fp = taosOpenDumpInFile(tsDbSqlFile); if (NULL == fp) { fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile); return -1; } fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile); - + taosLoadFileCharset(fp, tsfCharset); - + taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile); } if (0 != tsSqlFileNumOfTbls) { taosStartDumpInWorkThreads(taos, arguments); - } + } taos_close(taos); taosFreeSQLFiles(); From 2242be054929222a7cbe7032a5d9417e5e9f299a Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 7 May 2021 00:17:30 +0800 Subject: [PATCH 068/140] [TD-3747] fix stream case failed --- src/client/src/tscServer.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index b691a7ad4a..d9405f300a 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2544,6 +2544,7 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { } else { //uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta); memset(pTableMetaInfo->pTableMeta, 0, size); + 
pTableMetaInfo->tableMetaSize = size; } pTableMetaInfo->pTableMeta->tableType = -1; From bc1e1e2b4ea9c63caca2d8714d07052bab505231 Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Fri, 7 May 2021 10:38:09 +0800 Subject: [PATCH 069/140] [TD-4808] : fix python connector error where ts is null (#6025) --- src/connector/python/linux/python2/setup.py | 2 +- .../python/linux/python2/taos/cinterface.py | 14 ++++++++++---- src/connector/python/linux/python3/setup.py | 2 +- .../python/linux/python3/taos/cinterface.py | 14 ++++++++++---- src/connector/python/osx/python3/setup.py | 2 +- .../python/osx/python3/taos/cinterface.py | 14 ++++++++++---- src/connector/python/windows/python2/setup.py | 2 +- .../python/windows/python2/taos/cinterface.py | 14 ++++++++++---- src/connector/python/windows/python3/setup.py | 2 +- .../python/windows/python3/taos/cinterface.py | 14 ++++++++++---- 10 files changed, 55 insertions(+), 25 deletions(-) diff --git a/src/connector/python/linux/python2/setup.py b/src/connector/python/linux/python2/setup.py index ff2d90fcb3..3f065e0348 100644 --- a/src/connector/python/linux/python2/setup.py +++ b/src/connector/python/linux/python2/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="taos", - version="2.0.8", + version="2.0.9", author="Taosdata Inc.", author_email="support@taosdata.com", description="TDengine python client package", diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py index 4367947341..3d0ecd2901 100644 --- a/src/connector/python/linux/python2/taos/cinterface.py +++ b/src/connector/python/linux/python2/taos/cinterface.py @@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)])) + return [ + None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_int64))[ + :abs(num_of_rows)]] else: - return list(map(_timestamp_converter, ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)])) + return [ + None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_int64))[ + :abs(num_of_rows)]] def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): diff --git a/src/connector/python/linux/python3/setup.py b/src/connector/python/linux/python3/setup.py index 296e79b973..0bd7d51b6a 100644 --- a/src/connector/python/linux/python3/setup.py +++ b/src/connector/python/linux/python3/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="taos", - version="2.0.7", + version="2.0.9", author="Taosdata Inc.", author_email="support@taosdata.com", description="TDengine python client package", diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/linux/python3/taos/cinterface.py index 4367947341..3d0ecd2901 100644 --- a/src/connector/python/linux/python3/taos/cinterface.py +++ b/src/connector/python/linux/python3/taos/cinterface.py @@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)])) + return [ + None if 
ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_int64))[ + :abs(num_of_rows)]] else: - return list(map(_timestamp_converter, ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)])) + return [ + None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_int64))[ + :abs(num_of_rows)]] def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): diff --git a/src/connector/python/osx/python3/setup.py b/src/connector/python/osx/python3/setup.py index 9bce1a976f..4c865676c9 100644 --- a/src/connector/python/osx/python3/setup.py +++ b/src/connector/python/osx/python3/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="taos", - version="2.0.7", + version="2.0.9", author="Taosdata Inc.", author_email="support@taosdata.com", description="TDengine python client package", diff --git a/src/connector/python/osx/python3/taos/cinterface.py b/src/connector/python/osx/python3/taos/cinterface.py index dca9bd42e8..720fbef6f5 100644 --- a/src/connector/python/osx/python3/taos/cinterface.py +++ b/src/connector/python/osx/python3/taos/cinterface.py @@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)])) + return [ + None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_int64))[ + :abs(num_of_rows)]] else: - return list(map(_timestamp_converter, ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)])) + return [ + None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_int64))[ + :abs(num_of_rows)]] def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): diff --git a/src/connector/python/windows/python2/setup.py b/src/connector/python/windows/python2/setup.py index 47d374fe67..24d75f937c 100644 --- a/src/connector/python/windows/python2/setup.py +++ b/src/connector/python/windows/python2/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="taos", - version="2.0.7", + version="2.0.9", author="Taosdata Inc.", author_email="support@taosdata.com", description="TDengine python client package", diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py index ec72474df9..65cb183f26 100644 --- a/src/connector/python/windows/python2/taos/cinterface.py +++ b/src/connector/python/windows/python2/taos/cinterface.py @@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)])) + return [ + None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_int64))[ + :abs(num_of_rows)]] else: - return list(map(_timestamp_converter, ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)])) + return [ + None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + 
ctypes.c_int64))[ + :abs(num_of_rows)]] def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): diff --git a/src/connector/python/windows/python3/setup.py b/src/connector/python/windows/python3/setup.py index cdcec62a21..2659c493aa 100644 --- a/src/connector/python/windows/python3/setup.py +++ b/src/connector/python/windows/python3/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="taos", - version="2.0.7", + version="2.0.9", author="Taosdata Inc.", author_email="support@taosdata.com", description="TDengine python client package", diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py index ec72474df9..65cb183f26 100644 --- a/src/connector/python/windows/python3/taos/cinterface.py +++ b/src/connector/python/windows/python3/taos/cinterface.py @@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)])) + return [ + None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_int64))[ + :abs(num_of_rows)]] else: - return list(map(_timestamp_converter, ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)])) + return [ + None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_int64))[ + :abs(num_of_rows)]] def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): From 3259c3de4099a759662e0edd065f6e86d36d08be Mon Sep 17 00:00:00 2001 From: wu champion Date: Fri, 7 May 2021 10:58:03 +0800 Subject: [PATCH 070/140] [TD-3954] add case for resolve the bug of TD-3899 --- tests/pytest/fulltest.sh | 1 + tests/pytest/query/queryTsisNull.py | 53 +++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) create mode 100644 tests/pytest/query/queryTsisNull.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 54eef46628..8ead9bb00e 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -227,6 +227,7 @@ python3 ./test.py -f query/querySecondtscolumnTowherenow.py python3 ./test.py -f query/queryFilterTswithDateUnit.py python3 ./test.py -f query/queryTscomputWithNow.py python3 ./test.py -f query/computeErrorinWhere.py +python3 ./test.py -f query/queryTsisNull.py diff --git a/tests/pytest/query/queryTsisNull.py b/tests/pytest/query/queryTsisNull.py new file mode 100644 index 0000000000..df783f2fb8 --- /dev/null +++ b/tests/pytest/query/queryTsisNull.py @@ -0,0 +1,53 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step1:create table and insert data") + tdSql.execute( + "create table stb1 (ts timestamp, c1 timestamp , c2 int) TAGS(t1 int )" + ) + + tdLog.printNoPrefix("==========step2:query data where timestamp data is null") + tdSql.execute( + "insert into t1 using stb1(t1) tags(1) (ts, c1, c2) values (now-1m, null, 1)" + ) + tdSql.execute( + "insert into t1 using stb1(t1) tags(1) (ts, c2) values (now-2m, 2)" + ) + tdSql.query("select * from t1 where c1 is NULL") + tdSql.checkRows(2) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 75bb4e8d03895305c12b10d4609b7d13015abbbd Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 7 May 2021 11:02:54 +0800 Subject: [PATCH 071/140] [TD-3990]remove travis --- .travis.yml | 298 ---------------------------------------------------- 1 file changed, 298 deletions(-) delete mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index efe7917105..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,298 +0,0 @@ -# -# Configuration -# -# -# Build Matrix -# -branches: - only: - - master - - develop - - coverity_scan - - /^.*ci-.*$/ - -matrix: - - os: linux - dist: bionic - language: c - - git: - - depth: 1 - - compiler: gcc - env: DESC="linux/gcc build and test" - - addons: - apt: - packages: - - build-essential - - cmake - - net-tools - - python3.8 - - libc6-dbg - - valgrind - - psmisc - - unixodbc - - unixodbc-dev - - mono-complete - - before_script: - - export TZ=Asia/Harbin - - date - - curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && python3.8 get-pip.py - - python3.8 -m pip install --upgrade pip setuptools - - cd ${TRAVIS_BUILD_DIR} - - mkdir debug - - cd debug - - script: - - cmake .. > /dev/null - - make > /dev/null - - after_success: - - travis_wait 20 - - |- - case $TRAVIS_OS_NAME in - linux) - cd ${TRAVIS_BUILD_DIR}/debug - make install > /dev/null || travis_terminate $? - - py3ver=`python3 --version|awk '{print $2}'|cut -d "." -f 1,2` && apt install python$py3ver-dev - pip3 install psutil - pip3 install guppy3 - pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/ - - cd ${TRAVIS_BUILD_DIR}/tests/examples/C#/taosdemo - mcs -out:taosdemo *.cs || travis_terminate $? - pkill -TERM -x taosd - fuser -k -n tcp 6030 - sleep 1 - ${TRAVIS_BUILD_DIR}/debug/build/bin/taosd -c ${TRAVIS_BUILD_DIR}/debug/test/cfg > /dev/null & - sleep 5 - mono taosdemo -Q DEFAULT -y || travis_terminate $? - pkill -KILL -x taosd - fuser -k -n tcp 6030 - sleep 1 - - cd ${TRAVIS_BUILD_DIR}/tests - ./test-all.sh smoke || travis_terminate $? 
- sleep 1 - - cd ${TRAVIS_BUILD_DIR}/tests/pytest - pkill -TERM -x taosd - fuser -k -n tcp 6030 - sleep 1 - ./crash_gen.sh -a -p -t 4 -s 2000|| travis_terminate $? - sleep 1 - - cd ${TRAVIS_BUILD_DIR}/tests/pytest - ./valgrind-test.sh 2>&1 > mem-error-out.log - sleep 1 - - - # Color setting - RED='\033[0;31m' - GREEN='\033[1;32m' - GREEN_DARK='\033[0;32m' - GREEN_UNDERLINE='\033[4;32m' - NC='\033[0m' - - grep 'start to execute\|ERROR SUMMARY' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-mem-error-out.log - - for memError in `grep 'ERROR SUMMARY' uniq-mem-error-out.log | awk '{print $4}'` - do - if [ -n "$memError" ]; then - if [ "$memError" -gt 12 ]; then - echo -e "${RED} ## Memory errors number valgrind reports is $memError.\ - More than our threshold! ## ${NC}" - travis_terminate $memError - fi - fi - done - - grep 'start to execute\|definitely lost:' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-definitely-lost-out.log - for defiMemError in `grep 'definitely lost:' uniq-definitely-lost-out.log | awk '{print $7}'` - do - if [ -n "$defiMemError" ]; then - if [ "$defiMemError" -gt 13 ]; then - echo -e "${RED} ## Memory errors number valgrind reports \ - Definitely lost is $defiMemError. More than our threshold! ## ${NC}" - travis_terminate $defiMemError - fi - fi - done - - ;; - esac - - - os: linux - dist: bionic - language: c - compiler: gcc - env: COVERITY_SCAN=true - git: - - depth: 1 - - script: - - echo "this job is for coverity scan" - - addons: - coverity_scan: - # GitHub project metadata - # ** specific to your project ** - project: - name: TDengine - version: 2.x - description: TDengine - - # Where email notification of build analysis results will be sent - notification_email: sdsang@taosdata.com, slguan@taosdata.com - - # Commands to prepare for build_command - # ** likely specific to your build ** - build_command_prepend: cmake . > /dev/null - - # The command that will be added as an argument to "cov-build" to compile your project for analysis, - # ** likely specific to your build ** - build_command: make - - # Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'. - # Take care in resource usage, and consider the build frequency allowances per - # https://scan.coverity.com/faq#frequency - branch_pattern: coverity_scan - - - os: linux - dist: trusty - language: c - git: - - depth: 1 - - addons: - apt: - packages: - - build-essential - - cmake - - binutils-2.26 - - unixodbc - - unixodbc-dev - env: - - DESC="trusty/gcc-4.8/bintuils-2.26 build" - - before_script: - - export TZ=Asia/Harbin - - date - - cd ${TRAVIS_BUILD_DIR} - - mkdir debug - - cd debug - - script: - - cmake .. > /dev/null - - export PATH=/usr/lib/binutils-2.26/bin:$PATH && make - - - os: linux - dist: bionic - language: c - compiler: clang - env: DESC="linux/clang build" - git: - - depth: 1 - - addons: - apt: - packages: - - build-essential - - cmake - - unixodbc - - unixodbc-dev - - before_script: - - export TZ=Asia/Harbin - - date - - cd ${TRAVIS_BUILD_DIR} - - mkdir debug - - cd debug - - script: - - cmake .. > /dev/null - - make > /dev/null - - - os: linux - arch: arm64 - dist: bionic - language: c - compiler: clang - env: DESC="arm64 linux/clang build" - git: - - depth: 1 - - addons: - apt: - packages: - - build-essential - - cmake - - before_script: - - export TZ=Asia/Harbin - - date - - cd ${TRAVIS_BUILD_DIR} - - mkdir debug - - cd debug - - script: - - if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then - cmake .. 
-DCPUTYPE=aarch64 > /dev/null; - else - cmake .. > /dev/null; - fi - - make > /dev/null - - - os: linux - arch: arm64 - dist: xenial - language: c - git: - - depth: 1 - - addons: - apt: - packages: - - build-essential - - cmake - - unixodbc - - unixodbc-dev - env: - - DESC="arm64 xenial build" - - before_script: - - export TZ=Asia/Harbin - - date - - cd ${TRAVIS_BUILD_DIR} - - mkdir debug - - cd debug - - script: - - if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then - cmake .. -DCPUTYPE=aarch64 > /dev/null; - else - cmake .. > /dev/null; - fi - - make > /dev/null - - - os: osx - osx_image: xcode11.4 - language: c - compiler: clang - env: DESC="mac/clang build" - git: - - depth: 1 - addons: - homebrew: - - cmake - - unixodbc - - script: - - cd ${TRAVIS_BUILD_DIR} - - mkdir debug - - cd debug - - cmake .. > /dev/null - - make > /dev/null From afd1b650fa57f386e2b2cd96da2e8f65724dcee5 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 7 May 2021 12:02:17 +0800 Subject: [PATCH 072/140] Hotfix/sangshuduo/td 4053 taosdemo more value scope check (#6022) * [TD-4035]: taosdemo more checking for prevent value out of scope. * [TD-4035]: taosdemo more checking for prevent value out of scope. change min rows and max rows to unsigned int and 0 means default. * [TD-4035]: taosdemo more checking for prevent value out of scope. change offset=0 to get all childtable count Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 6facdf7eee..20d7f67138 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -3403,7 +3403,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req"); if (numRecPerReq && numRecPerReq->type == cJSON_Number) { - if (numRecPerReq->valueint < 0) { + if (numRecPerReq->valueint <= 0) { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n", __func__, __LINE__); goto PARSE_OVER; @@ -4744,7 +4744,7 @@ static int64_t generateDataTail( verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch); - int64_t k = 0; + uint64_t k = 0; for (k = 0; k < batch;) { char data[MAX_DATA_SIZE]; memset(data, 0, MAX_DATA_SIZE); From 4c06f3986e23c154457e6b360aff28c86d9a1f32 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 7 May 2021 12:03:01 +0800 Subject: [PATCH 073/140] [TD-4005]: fix repeated vnode open when tsdb state not ok --- src/inc/taoserror.h | 1 + src/util/src/terror.c | 2 ++ src/vnode/src/vnodeMain.c | 6 +++--- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index eff4eecbc1..855afe923d 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -218,6 +218,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended") #define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied") #define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing") +#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0513) //"Invalid tsdb state") // tsdb #define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID") diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 586a886f47..fc24b28dc3 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -230,6 +230,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_BALANCING, 
"Database is balancing TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, "Database suspended") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, "Database write operation denied") TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_SYNCING, "Database is syncing") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_TSDB_STATE, "Invalid tsdb state") // tsdb TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, "Invalid table ID") @@ -253,6 +254,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECONFIGURE, "Need to reconfigure t TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO, "Invalid information to create table") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_AVAIL_DISK, "No available disk") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_MESSED_MSG, "TSDB messed message") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVLD_TAG_VAL, "TSDB invalid tag value") // query TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, "Invalid handle") diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index ee8ed9e0fa..8ec66316e3 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -310,11 +310,11 @@ int32_t vnodeOpen(int32_t vgId) { vnodeCleanUp(pVnode); return terrno; } else if (tsdbGetState(pVnode->tsdb) != TSDB_STATE_OK) { - vError("vgId:%d, failed to open tsdb, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica, - tstrerror(terrno)); + vError("vgId:%d, failed to open tsdb(state: %d), replica:%d reason:%s", pVnode->vgId, + tsdbGetState(pVnode->tsdb), pVnode->syncCfg.replica, tstrerror(terrno)); if (pVnode->syncCfg.replica <= 1) { vnodeCleanUp(pVnode); - return terrno; + return TSDB_CODE_VND_INVALID_TSDB_STATE; } else { pVnode->fversion = 0; pVnode->version = 0; From c4a9fda17a1fefd58b102d6f8d4e37b6a7b6f4ad Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 5 May 2021 10:32:08 +0800 Subject: [PATCH 074/140] [TD-3663]: add test case for docker cluster --- tests/pytest/cluster/clusterEnvSetup/basic.py | 119 -------------- .../clusterEnvSetup/cleanClusterEnv.sh | 39 ----- .../pytest/cluster/clusterEnvSetup/node4.yml | 62 ------- .../pytest/cluster/clusterEnvSetup/node5.yml | 62 ------- .../Dockerfile | 0 .../OneMnodeMultipleVnodesTest.py | 39 +++++ tests/pytest/dockerCluster/__init__.py | 0 tests/pytest/dockerCluster/basic.py | 152 ++++++++++++++++++ .../buildClusterEnv.sh | 52 ++---- .../docker-compose.yml | 7 +- .../insert.json | 0 .../node3.yml | 1 + .../taosdemoWrapper.py | 0 13 files changed, 208 insertions(+), 325 deletions(-) delete mode 100644 tests/pytest/cluster/clusterEnvSetup/basic.py delete mode 100755 tests/pytest/cluster/clusterEnvSetup/cleanClusterEnv.sh delete mode 100644 tests/pytest/cluster/clusterEnvSetup/node4.yml delete mode 100644 tests/pytest/cluster/clusterEnvSetup/node5.yml rename tests/pytest/{cluster/clusterEnvSetup => dockerCluster}/Dockerfile (100%) create mode 100644 tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py create mode 100644 tests/pytest/dockerCluster/__init__.py create mode 100644 tests/pytest/dockerCluster/basic.py rename tests/pytest/{cluster/clusterEnvSetup => dockerCluster}/buildClusterEnv.sh (69%) rename tests/pytest/{cluster/clusterEnvSetup => dockerCluster}/docker-compose.yml (94%) rename tests/pytest/{cluster/clusterEnvSetup => dockerCluster}/insert.json (100%) rename tests/pytest/{cluster/clusterEnvSetup => dockerCluster}/node3.yml (98%) rename tests/pytest/{cluster/clusterEnvSetup => dockerCluster}/taosdemoWrapper.py (100%) diff --git a/tests/pytest/cluster/clusterEnvSetup/basic.py b/tests/pytest/cluster/clusterEnvSetup/basic.py deleted file mode 100644 index dc7e07fd5c..0000000000 --- 
a/tests/pytest/cluster/clusterEnvSetup/basic.py +++ /dev/null @@ -1,119 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import os -import taos -import random -import argparse - -class BuildDockerCluser: - - def __init__(self, hostName, user, password, configDir, numOfNodes, clusterVersion, dockerDir, removeFlag): - self.hostName = hostName - self.user = user - self.password = password - self.configDir = configDir - self.numOfNodes = numOfNodes - self.clusterVersion = clusterVersion - self.dockerDir = dockerDir - self.removeFlag = removeFlag - - def getConnection(self): - self.conn = taos.connect( - host = self.hostName, - user = self.user, - password = self.password, - config = self.configDir) - - def createDondes(self): - self.cursor = self.conn.cursor() - for i in range(2, self.numOfNodes + 1): - self.cursor.execute("create dnode tdnode%d" % i) - - def startArbitrator(self): - print("start arbitrator") - os.system("docker exec -d $(docker ps|grep tdnode1|awk '{print $1}') tarbitrator") - - def run(self): - if self.numOfNodes < 2 or self.numOfNodes > 10: - print("the number of nodes must be between 2 and 10") - exit(0) - print("remove Flag value %s" % self.removeFlag) - if self.removeFlag == False: - os.system("./cleanClusterEnv.sh -d %s" % self.dockerDir) - os.system("./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.clusterVersion, self.dockerDir)) - self.getConnection() - self.createDondes() - self.startArbitrator() - -parser = argparse.ArgumentParser() -parser.add_argument( - '-H', - '--host', - action='store', - default='tdnode1', - type=str, - help='host name to be connected (default: tdnode1)') -parser.add_argument( - '-u', - '--user', - action='store', - default='root', - type=str, - help='user (default: root)') -parser.add_argument( - '-p', - '--password', - action='store', - default='taosdata', - type=str, - help='password (default: taosdata)') -parser.add_argument( - '-c', - '--config-dir', - action='store', - default='/etc/taos', - type=str, - help='configuration directory (default: /etc/taos)') -parser.add_argument( - '-n', - '--num-of-nodes', - action='store', - default=2, - type=int, - help='number of nodes in the cluster (default: 2, min: 2, max: 5)') -parser.add_argument( - '-v', - '--version', - action='store', - default='2.0.18.1', - type=str, - help='the version of the cluster to be build, Default is 2.0.17.1') -parser.add_argument( - '-d', - '--docker-dir', - action='store', - default='/data', - type=str, - help='the data dir for docker, default is /data') -parser.add_argument( - '--flag', - action='store_true', - help='remove docker containers flag, default: True') - -args = parser.parse_args() -cluster = BuildDockerCluser(args.host, args.user, args.password, args.config_dir, args.num_of_nodes, args.version, args.docker_dir, args.flag) -cluster.run() - -# usage 1: python3 basic.py -n 2 --flag (flag is True) -# usage 2: python3 basic.py -n 2 (flag should be False when it is not specified) \ No newline at end of file diff --git a/tests/pytest/cluster/clusterEnvSetup/cleanClusterEnv.sh 
b/tests/pytest/cluster/clusterEnvSetup/cleanClusterEnv.sh deleted file mode 100755 index 675cae5fef..0000000000 --- a/tests/pytest/cluster/clusterEnvSetup/cleanClusterEnv.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -echo "Executing cleanClusterEnv.sh" -CURR_DIR=`pwd` - -if [ $# != 2 ]; then - echo "argument list need input : " - echo " -d docker dir" - exit 1 -fi - -DOCKER_DIR= -while getopts "d:" arg -do - case $arg in - d) - DOCKER_DIR=$OPTARG - ;; - ?) - echo "unkonwn argument" - ;; - esac -done - -function removeDockerContainers { - cd $DOCKER_DIR - docker-compose down --remove-orphans -} - -function cleanEnv { - echo "Clean up docker environment" - for i in {1..10} - do - rm -rf $DOCKER_DIR/node$i/data/* - rm -rf $DOCKER_DIR/node$i/log/* - done -} - -removeDockerContainers -cleanEnv \ No newline at end of file diff --git a/tests/pytest/cluster/clusterEnvSetup/node4.yml b/tests/pytest/cluster/clusterEnvSetup/node4.yml deleted file mode 100644 index f542c22c45..0000000000 --- a/tests/pytest/cluster/clusterEnvSetup/node4.yml +++ /dev/null @@ -1,62 +0,0 @@ -version: '3.7' - -services: - td2.0-node4: - build: - context: . - args: - - PACKAGE=${PACKAGE} - - TARBITRATORPKG=${TARBITRATORPKG} - - EXTRACTDIR=${DIR} - - EXTRACTDIR2=${DIR2} - - DATADIR=${DATADIR} - image: 'tdengine:${VERSION}' - container_name: 'tdnode4' - cap_add: - - ALL - stdin_open: true - tty: true - environment: - TZ: "Asia/Shanghai" - command: > - sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && - echo $TZ > /etc/timezone && - mkdir /coredump && - echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf && - sysctl -p && - exec my-main-application" - extra_hosts: - - "tdnode2:172.27.0.8" - - "tdnode3:172.27.0.9" - - "tdnode4:172.27.0.10" - - "tdnode5:172.27.0.11" - - "tdnode6:172.27.0.12" - - "tdnode7:172.27.0.13" - - "tdnode8:172.27.0.14" - - "tdnode9:172.27.0.15" - - "tdnode10:172.27.0.16" - volumes: - # bind data directory - - type: bind - source: ${DATADIR}/node4/data - target: /var/lib/taos - # bind log directory - - type: bind - source: ${DATADIR}/node4/log - target: /var/log/taos - # bind configuration - - type: bind - source: ${DATADIR}/node4/cfg - target: /etc/taos - # bind core dump path - - type: bind - source: ${DATADIR}/node4/core - target: /coredump - - type: bind - source: ${DATADIR} - target: /root - hostname: tdnode4 - networks: - taos_update_net: - ipv4_address: 172.27.0.10 - command: taosd \ No newline at end of file diff --git a/tests/pytest/cluster/clusterEnvSetup/node5.yml b/tests/pytest/cluster/clusterEnvSetup/node5.yml deleted file mode 100644 index dd5941ac76..0000000000 --- a/tests/pytest/cluster/clusterEnvSetup/node5.yml +++ /dev/null @@ -1,62 +0,0 @@ -version: '3.7' - -services: - td2.0-node5: - build: - context: . 
- args: - - PACKAGE=${PACKAGE} - - TARBITRATORPKG=${TARBITRATORPKG} - - EXTRACTDIR=${DIR} - - EXTRACTDIR2=${DIR2} - - DATADIR=${DATADIR} - image: 'tdengine:${VERSION}' - container_name: 'tdnode5' - cap_add: - - ALL - stdin_open: true - tty: true - environment: - TZ: "Asia/Shanghai" - command: > - sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && - echo $TZ > /etc/timezone && - mkdir /coredump && - echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf && - sysctl -p && - exec my-main-application" - extra_hosts: - - "tdnode2:172.27.0.8" - - "tdnode3:172.27.0.9" - - "tdnode4:172.27.0.10" - - "tdnode5:172.27.0.11" - - "tdnode6:172.27.0.12" - - "tdnode7:172.27.0.13" - - "tdnode8:172.27.0.14" - - "tdnode9:172.27.0.15" - - "tdnode10:172.27.0.16" - volumes: - # bind data directory - - type: bind - source: ${DATADIR}/node5/data - target: /var/lib/taos - # bind log directory - - type: bind - source: ${DATADIR}/node5/log - target: /var/log/taos - # bind configuration - - type: bind - source: ${DATADIR}/node5/cfg - target: /etc/taos - # bind core dump path - - type: bind - source: ${DATADIR}/node5/core - target: /coredump - - type: bind - source: ${DATADIR} - target: /root - hostname: tdnode5 - networks: - taos_update_net: - ipv4_address: 172.27.0.11 - command: taosd \ No newline at end of file diff --git a/tests/pytest/cluster/clusterEnvSetup/Dockerfile b/tests/pytest/dockerCluster/Dockerfile similarity index 100% rename from tests/pytest/cluster/clusterEnvSetup/Dockerfile rename to tests/pytest/dockerCluster/Dockerfile diff --git a/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py b/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py new file mode 100644 index 0000000000..ee663f89b0 --- /dev/null +++ b/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py @@ -0,0 +1,39 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from basic import * +from util.sql import tdSql + + + +class TDTestCase: + + def init(self): + # tdLog.debug("start to execute %s" % __file__) + + self.numOfNodes = 5 + self.dockerDir = "/data" + cluster.init(self.numOfNodes, self.dockerDir) + cluster.prepardBuild() + for i in range(self.numOfNodes): + if i == 0: + cluster.cfg("role", "1", i + 1) + else: + cluster.cfg("role", "2", i + 1) + cluster.run() + +td = TDTestCase() +td.init() + + + diff --git a/tests/pytest/dockerCluster/__init__.py b/tests/pytest/dockerCluster/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/pytest/dockerCluster/basic.py b/tests/pytest/dockerCluster/basic.py new file mode 100644 index 0000000000..50914b0be9 --- /dev/null +++ b/tests/pytest/dockerCluster/basic.py @@ -0,0 +1,152 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import os +import taos + +class BuildDockerCluser: + + def init(self, numOfNodes, dockerDir): + self.numOfNodes = numOfNodes + self.dockerDir = dockerDir + + self.hostName = "tdnode1" + self.user = "root" + self.password = "taosdata" + self.configDir = "/etc/taos" + self.dirs = ["data", "cfg", "log", "core"] + self.cfgDict = { + "numOfLogLines":"100000000", + "mnodeEqualVnodeNum":"0", + "walLevel":"1", + "numOfThreadsPerCore":"2.0", + "monitor":"0", + "vnodeBak":"1", + "dDebugFlag":"135", + "mDebugFlag":"135", + "sdbDebugFlag":"135", + "rpcDebugFlag":"135", + "tmrDebugFlag":"131", + "cDebugFlag":"135", + "httpDebugFlag":"135", + "monitorDebugFlag":"135", + "udebugFlag":"135", + "jnidebugFlag":"135", + "qdebugFlag":"135", + "maxSQLLength":"1048576" + } + + # execute command, and return the output + # ref: https://blog.csdn.net/wowocpp/article/details/80775650 + def execCmdAndGetOutput(self, cmd): + r = os.popen(cmd) + text = r.read() + r.close() + return text + + def execCmd(self, cmd): + if os.system(cmd) != 0: + quit() + + def getTaosdVersion(self): + cmd = "taosd -V |grep version|awk '{print $3}'" + taosdVersion = self.execCmdAndGetOutput(cmd) + cmd = "find %s -name '*server*.tar.gz' | awk -F- '{print $(NF-2)}'|sort|awk 'END {print}'" % self.dockerDir + packageVersion = self.execCmdAndGetOutput(cmd) + + if (taosdVersion is None or taosdVersion.isspace()) and (packageVersion is None or packageVersion.isspace()): + print("Please install taosd or have a install package ready") + quit() + else: + self.version = taosdVersion if taosdVersion >= packageVersion else packageVersion + return self.version.strip() + + def getConnection(self): + self.conn = taos.connect( + host = self.hostName, + user = self.user, + password = self.password, + config = self.configDir) + + def removeFile(self, rootDir, index, dir): + cmd = "rm -rf %s/node%d/%s/*" % (rootDir, index, dir) + self.execCmd(cmd) + + def clearEnv(self): + cmd = "cd %s && docker-compose down --remove-orphans" % self.dockerDir + self.execCmd(cmd) + for i in range(1, self.numOfNodes + 1): + self.removeFile(self.dockerDir, i, self.dirs[0]) + self.removeFile(self.dockerDir, i, self.dirs[1]) + self.removeFile(self.dockerDir, i, self.dirs[2]) + + def createDir(self, rootDir, index, dir): + cmd = "mkdir -p %s/node%d/%s" % (rootDir, index, dir) + self.execCmd(cmd) + + def createDirs(self): + for i in range(1, self.numOfNodes + 1): + for j in range(len(self.dirs)): + self.createDir(self.dockerDir, i, self.dirs[j]) + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def cfg(self, option, value, nodeIndex): + cfgPath = "%s/node%d/cfg/taos.cfg" % (self.dockerDir, nodeIndex) + cmd = "echo '%s %s' >> %s" % (option, value, cfgPath) + self.execCmd(cmd) + + def updateLocalhosts(self): + cmd = "grep '172.27.0.7 *tdnode1' /etc/hosts" + result = self.execCmdAndGetOutput(cmd) + if result and not result.isspace(): + cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts" + self.execCmd(cmd) + + def deploy(self): + self.clearEnv() + self.createDirs() + for i in range(1, self.numOfNodes + 1): + self.cfg("firstEp", "tdnode1:6030", i) + + for key, value in self.cfgDict.items(): + self.cfg(key, value, i) + + def createDondes(self): + self.cursor = 
self.conn.cursor() + for i in range(2, self.numOfNodes + 1): + self.cursor.execute("create dnode tdnode%d" % i) + + def startArbitrator(self): + for i in range(1, self.numOfNodes + 1): + self.cfg("arbitrator", "tdnode1:6042", i) + cmd = "docker exec -d $(docker ps|grep tdnode1|awk '{print $1}') tarbitrator" + self.execCmd(cmd) + + def prepardBuild(self): + if self.numOfNodes < 2 or self.numOfNodes > 10: + print("the number of nodes must be between 2 and 10") + exit(0) + self.clearEnv() + self.createDirs() + self.updateLocalhosts() + self.deploy() + + def run(self): + cmd = "./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.getTaosdVersion(), self.dockerDir) + self.execCmd(cmd) + self.getConnection() + self.createDondes() + +cluster = BuildDockerCluser() \ No newline at end of file diff --git a/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh b/tests/pytest/dockerCluster/buildClusterEnv.sh similarity index 69% rename from tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh rename to tests/pytest/dockerCluster/buildClusterEnv.sh index 60c81cd82b..7bd92cad72 100755 --- a/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh +++ b/tests/pytest/dockerCluster/buildClusterEnv.sh @@ -32,43 +32,14 @@ do esac done -function addTaoscfg { - for((i=1;i<=$NUM_OF_NODES;i++)) - do - touch $DOCKER_DIR/node$i/cfg/taos.cfg - echo 'firstEp tdnode1:6030' > $DOCKER_DIR/node$i/cfg/taos.cfg - echo 'fqdn tdnode'$i >> $DOCKER_DIR/node$i/cfg/taos.cfg - echo 'arbitrator tdnode1:6042' >> $DOCKER_DIR/node$i/cfg/taos.cfg - done -} - -function createDIR { - for((i=1;i<=$NUM_OF_NODES;i++)) - do - mkdir -p $DOCKER_DIR/node$i/data - mkdir -p $DOCKER_DIR/node$i/log - mkdir -p $DOCKER_DIR/node$i/cfg - mkdir -p $DOCKER_DIR/node$i/core - done -} - -function cleanEnv { - echo "Clean up docker environment" - for((i=1;i<=$NUM_OF_NODES;i++)) - do - rm -rf $DOCKER_DIR/node$i/data/* - rm -rf $DOCKER_DIR/node$i/log/* - done -} - function prepareBuild { - if [ -d $CURR_DIR/../../../../release ]; then + if [ -d $CURR_DIR/../../../release ]; then echo release exists - rm -rf $CURR_DIR/../../../../release/* + rm -rf $CURR_DIR/../../../release/* fi - cd $CURR_DIR/../../../../packaging + cd $CURR_DIR/../../../packaging if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then if [ ! -e $DOCKER_DIR/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then @@ -76,17 +47,17 @@ function prepareBuild { echo "generating TDeninge enterprise packages" ./release.sh -v cluster -n $VERSION >> /dev/null 2>&1 - if [ ! -e $CURR_DIR/../../../../release/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ]; then + if [ ! -e $CURR_DIR/../../../release/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ]; then echo "no TDengine install package found" exit 1 fi - if [ ! -e $CURR_DIR/../../../../release/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then + if [ ! -e $CURR_DIR/../../../release/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then echo "no arbitrator install package found" exit 1 fi - cd $CURR_DIR/../../../../release + cd $CURR_DIR/../../../release mv TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR mv TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR fi @@ -96,17 +67,17 @@ function prepareBuild { echo "generating TDeninge community packages" ./release.sh -v edge -n $VERSION >> /dev/null 2>&1 - if [ ! 
-e $CURR_DIR/../../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then + if [ ! -e $CURR_DIR/../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then echo "no TDengine install package found" exit 1 fi - if [ ! -e $CURR_DIR/../../../../release/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then + if [ ! -e $CURR_DIR/../../../release/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then echo "no arbitrator install package found" exit 1 fi - cd $CURR_DIR/../../../../release + cd $CURR_DIR/../../../release mv TDengine-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR mv TDengine-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR fi @@ -147,13 +118,10 @@ function clusterUp { done docker_run=$docker_run" up -d" fi - echo $docker_run |sh + echo $docker_run |sh echo "docker compose finish" } -createDIR -cleanEnv -addTaoscfg prepareBuild clusterUp \ No newline at end of file diff --git a/tests/pytest/cluster/clusterEnvSetup/docker-compose.yml b/tests/pytest/dockerCluster/docker-compose.yml similarity index 94% rename from tests/pytest/cluster/clusterEnvSetup/docker-compose.yml rename to tests/pytest/dockerCluster/docker-compose.yml index d241062a2d..7855f30136 100644 --- a/tests/pytest/cluster/clusterEnvSetup/docker-compose.yml +++ b/tests/pytest/dockerCluster/docker-compose.yml @@ -53,7 +53,7 @@ services: source: ${DATADIR}/node1/core target: /coredump - type: bind - source: /data + source: ${DATADIR} target: /root hostname: tdnode1 networks: @@ -90,6 +90,11 @@ services: - "tdnode3:172.27.0.9" - "tdnode4:172.27.0.10" - "tdnode5:172.27.0.11" + - "tdnode6:172.27.0.12" + - "tdnode7:172.27.0.13" + - "tdnode8:172.27.0.14" + - "tdnode9:172.27.0.15" + - "tdnode10:172.27.0.16" volumes: # bind data directory - type: bind diff --git a/tests/pytest/cluster/clusterEnvSetup/insert.json b/tests/pytest/dockerCluster/insert.json similarity index 100% rename from tests/pytest/cluster/clusterEnvSetup/insert.json rename to tests/pytest/dockerCluster/insert.json diff --git a/tests/pytest/cluster/clusterEnvSetup/node3.yml b/tests/pytest/dockerCluster/node3.yml similarity index 98% rename from tests/pytest/cluster/clusterEnvSetup/node3.yml rename to tests/pytest/dockerCluster/node3.yml index 18f1b37c1c..86e37c2f30 100644 --- a/tests/pytest/cluster/clusterEnvSetup/node3.yml +++ b/tests/pytest/dockerCluster/node3.yml @@ -26,6 +26,7 @@ services: sysctl -p && exec my-main-application" extra_hosts: + - "tdnode1:172.27.0.7" - "tdnode2:172.27.0.8" - "tdnode3:172.27.0.9" - "tdnode4:172.27.0.10" diff --git a/tests/pytest/cluster/clusterEnvSetup/taosdemoWrapper.py b/tests/pytest/dockerCluster/taosdemoWrapper.py similarity index 100% rename from tests/pytest/cluster/clusterEnvSetup/taosdemoWrapper.py rename to tests/pytest/dockerCluster/taosdemoWrapper.py From bd0b8df12d79baa0b5d2751d89d26a499025afc2 Mon Sep 17 00:00:00 2001 From: tomchon Date: Fri, 7 May 2021 14:39:19 +0800 Subject: [PATCH 075/140] [TD-4037]: add taodemo-testcase that childtable_count is less than or equal zero and num_of_records_per_req is equal zero --- tests/pytest/fulltest.sh | 6 +- .../taosdemoAllTest/insertChildTab0.json | 88 +++++++++++++++++++ .../taosdemoAllTest/insertChildTabLess0.json | 88 +++++++++++++++++++ .../taosdemoTestInsertWithJson.py | 14 +-- .../taosdemoTestQueryWithJson.py | 3 +- 5 files changed, 190 insertions(+), 9 deletions(-) create mode 100644 tests/pytest/tools/taosdemoAllTest/insertChildTab0.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json diff --git a/tests/pytest/fulltest.sh 
b/tests/pytest/fulltest.sh index 54eef46628..1a8745d7ce 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -152,8 +152,7 @@ python3 test.py -f tools/taosdemoTestTblAlt.py python3 test.py -f tools/taosdemoTestSampleData.py python3 test.py -f tools/taosdemoTestInterlace.py python3 test.py -f tools/taosdemoTestQuery.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py + # update @@ -329,4 +328,7 @@ python3 ./test.py -f alter/alter_debugFlag.py python3 ./test.py -f query/queryBetweenAnd.py python3 ./test.py -f tag_lite/alter_tag.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py + #======================p4-end=============== diff --git a/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json b/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json new file mode 100644 index 0000000000..1634e1cf06 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 10, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 0, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json b/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json new file mode 100644 index 0000000000..f4e3ec8e9f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json @@ 
-0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 10, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": -1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py index d4525e9764..5ecc4d70b2 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py @@ -182,14 +182,16 @@ class TDTestCase: tdSql.error("select * from db.stb2") tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath) - tdSql.query("select count(*) from db.stb0") - tdSql.checkRows(0) - tdSql.query("select * from db.stb1") - tdSql.checkRows(0) + tdSql.error("select count(*) from db.stb0") tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath) - tdSql.error("select count(*) from db.stb0") - tdSql.error("select * from db.stb1") + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTab0.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTabLess0.json -y " % binPath) + tdSql.error("use db") tdSql.execute("drop database if exists blf") os.system("%staosdemo -f 
tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath) tdSql.execute("use blf") diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py index 57848b8d72..643cad942c 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py @@ -85,10 +85,11 @@ class TDTestCase: # # use restful api to query + # os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryInsertdata.json" % binPath) # os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryRestful.json" % binPath) # os.system("cat query_res0.txt* |sort -u > all_query_res0.txt") # os.system("cat query_res1.txt* |sort -u > all_query_res1.txt") - # os.system("cat query_res2.txt* |sort -u > all_query_res2.txt") + # # os.system("cat query_res2.txt* |sort -u > all_query_res2.txt") # tdSql.execute("use db") # tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")') # os.system("python3 tools/taosdemoAllTest/convertResFile.py") From f4afe2f9d8f270e27c63c9130be86152a5ce2aaa Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Fri, 7 May 2021 15:00:40 +0800 Subject: [PATCH 076/140] [TD-4019] add test case --- tests/pytest/query/unionAllTest.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tests/pytest/query/unionAllTest.py b/tests/pytest/query/unionAllTest.py index 1b69c8ac4d..49d745ebd5 100644 --- a/tests/pytest/query/unionAllTest.py +++ b/tests/pytest/query/unionAllTest.py @@ -56,6 +56,24 @@ class TDTestCase: tdSql.query(sql) tdSql.checkRows(6) + tdSql.execute("create table stb(ts timestamp, options binary(7), city binary(10)) tags(type int)") + tdSql.execute("insert into tb1 using stb tags(1) values(%d, 'option1', 'beijing')" % self.ts) + tdSql.execute("insert into tb2 using stb tags(2) values(%d, 'option2', 'shanghai')" % self.ts) + + tdSql.query("select options from stb where type = 1 limit 1 union all select options from stb where type = 2 limit 1") + tdSql.checkData(0, 0, "option1") + tdSql.checkData(1, 0, "option2") + + tdSql.query("select 'dc' as options from stb where type = 1 limit 1 union all select 'ad' as options from stb where type = 2 limit 1") + tdSql.checkData(0, 0, "dc") + tdSql.checkData(1, 0, "ad") + + tdSql.query("select 'dc' as options from stb where type = 1 limit 1 union all select 'adc' as options from stb where type = 2 limit 1") + tdSql.checkData(0, 0, "dc") + tdSql.checkData(1, 0, "adc") + + tdSql.error("select 'dc' as options from stb where type = 1 limit 1 union all select 'ad' as city from stb where type = 2 limit 1") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From 6df8ed9f4eec85a66569b8247d89cfe156599431 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 7 May 2021 15:06:31 +0800 Subject: [PATCH 077/140] support bind multiple tables --- src/client/inc/tscSubquery.h | 2 + src/client/inc/tsclient.h | 3 +- src/client/src/tscParseInsert.c | 7 +- src/client/src/tscPrepare.c | 376 ++++++++++++++++++++++++++++++-- src/client/src/tscUtil.c | 116 +++++----- src/inc/taos.h | 2 + src/query/src/qTokenizer.c | 22 ++ src/util/inc/tstoken.h | 3 + tests/examples/c/makefile | 2 + 9 files changed, 457 insertions(+), 76 deletions(-) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index 15ef54b7b1..f0349c2b3d 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -48,6 
+48,8 @@ void tscLockByThread(int64_t *lockedBy); void tscUnlockByThread(int64_t *lockedBy); +int tsInsertInitialCheck(SSqlObj *pSql); + #ifdef __cplusplus } #endif diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index ec3b0c4421..73b0172e85 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -372,7 +372,8 @@ typedef struct SSqlObj { tsem_t rspSem; SSqlCmd cmd; SSqlRes res; - + bool isBind; + SSubqueryState subState; struct SSqlObj **pSubs; diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 6b88c90747..f964c5173e 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -386,7 +386,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha * The server time/client time should not be mixed up in one sql string * Do not employ sort operation is not involved if server time is used. */ -static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) { +int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) { // once the data block is disordered, we do NOT keep previous timestamp any more if (!pDataBlocks->ordered) { return TSDB_CODE_SUCCESS; @@ -411,6 +411,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start if (k <= pDataBlocks->prevTS && (pDataBlocks->tsSource == TSDB_USE_CLI_TS)) { pDataBlocks->ordered = false; + tscWarn("NOT ordered input timestamp"); } pDataBlocks->prevTS = k; @@ -693,6 +694,8 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) { pBlocks->numOfRows = i + 1; dataBuf->size = sizeof(SSubmitBlk) + dataBuf->rowSize * pBlocks->numOfRows; } + + dataBuf->prevTS = INT64_MIN; } static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) { @@ -1262,7 +1265,7 @@ int tsParseInsertSql(SSqlObj *pSql) { goto _clean; } - if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId + if ((pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) && taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) { goto _clean; } diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index c3c8986e2f..baff65d6b9 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -24,6 +24,7 @@ #include "tscSubquery.h" int tsParseInsertSql(SSqlObj *pSql); +int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start); //////////////////////////////////////////////////////////////////////////////// // functions for normal statement preparation @@ -43,10 +44,21 @@ typedef struct SNormalStmt { tVariant* params; } SNormalStmt; +typedef struct SMultiTbStmt { + bool nameSet; + uint64_t currentUid; + uint32_t tbNum; + SStrToken tbname; + SHashObj *pTableHash; +} SMultiTbStmt; + typedef struct STscStmt { bool isInsert; + bool multiTbInsert; + int64_t prevTs; STscObj* taos; SSqlObj* pSql; + SMultiTbStmt mtb; SNormalStmt normal; } STscStmt; @@ -255,7 +267,7 @@ static char* normalStmtBuildSql(STscStmt* stmt) { //////////////////////////////////////////////////////////////////////////////// // functions for insertion statement preparation -static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) { +static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind) { if (bind->is_null != NULL && *(bind->is_null)) { setNull(data + param->offset, param->type, 
param->bytes); return TSDB_CODE_SUCCESS; @@ -690,29 +702,52 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) { } memcpy(data + param->offset, bind->buffer, size); + if (param->offset == 0) { + if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) { + tscError("invalid timestamp"); + return TSDB_CODE_TSC_INVALID_VALUE; + } + } + return TSDB_CODE_SUCCESS; } static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { SSqlCmd* pCmd = &stmt->pSql->cmd; - - STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0); - - STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - if (pCmd->pTableBlockHashList == NULL) { - pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); - } - + STscStmt* pStmt = (STscStmt*)stmt; + STableDataBlocks* pBlock = NULL; + + if (pStmt->multiTbInsert) { + if (pCmd->pTableBlockHashList == NULL) { + tscError("Table block hash list is empty"); + return TSDB_CODE_TSC_APP_ERROR; + } + + STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid)); + if (t1 == NULL) { + tscError("no table data block in hash list, uid:%" PRId64 , pStmt->mtb.currentUid); + return TSDB_CODE_TSC_APP_ERROR; + } - int32_t ret = - tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk), - pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL); - if (ret != 0) { - // todo handle error + pBlock = *t1; + } else { + STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0); + + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + if (pCmd->pTableBlockHashList == NULL) { + pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); + } + + int32_t ret = + tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk), + pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL); + if (ret != 0) { + return ret; + } } - uint32_t totalDataSize = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize; + uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + 1) * pBlock->rowSize; if (totalDataSize > pBlock->nAllocSize) { const double factor = 1.5; @@ -729,7 +764,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { for (uint32_t j = 0; j < pBlock->numOfParams; ++j) { SParamInfo* param = &pBlock->params[j]; - int code = doBindParam(data, param, &bind[param->idx]); + int code = doBindParam(pBlock, data, param, &bind[param->idx]); if (code != TSDB_CODE_SUCCESS) { tscDebug("param %d: type mismatch or invalid", param->idx); return code; @@ -739,9 +774,98 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { return TSDB_CODE_SUCCESS; } + +static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_BIND* bind, int32_t num) { + SSqlCmd* pCmd = &stmt->pSql->cmd; + STscStmt* pStmt = (STscStmt*)stmt; + + STableDataBlocks* pBlock = NULL; + + if (pCmd->pTableBlockHashList == NULL) { + tscError("Table block hash list is empty"); + return TSDB_CODE_TSC_APP_ERROR; + } + + STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid)); + if (t1 == NULL) { + tscError("no table data block in hash list, uid:%" PRId64 , pStmt->mtb.currentUid); + return TSDB_CODE_TSC_APP_ERROR; + } + + pBlock = 
*t1; + + uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + num) * pBlock->rowSize; + if (totalDataSize > pBlock->nAllocSize) { + const double factor = 1.5; + + void* tmp = realloc(pBlock->pData, (uint32_t)(totalDataSize * factor)); + if (tmp == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + pBlock->pData = (char*)tmp; + pBlock->nAllocSize = (uint32_t)(totalDataSize * factor); + } + + for (uint32_t i = 0; i < num; ++i) { + char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (pCmd->batchSize + i); + TAOS_BIND* tbind = bind + pBlock->numOfParams * i; + + for (uint32_t j = 0; j < pBlock->numOfParams; ++j) { + SParamInfo* param = &pBlock->params[j]; + + int code = doBindParam(pBlock, data, param, &tbind[param->idx]); + if (code != TSDB_CODE_SUCCESS) { + tscError("param %d: type mismatch or invalid", param->idx); + return code; + } + } + } + + pCmd->batchSize += num - 1; + + return TSDB_CODE_SUCCESS; +} + + +static int insertStmtUpdateBatch(STscStmt* stmt) { + SSqlObj* pSql = stmt->pSql; + SSqlCmd* pCmd = &pSql->cmd; + STableDataBlocks* pBlock = NULL; + + assert(pCmd->numOfClause == 1); + if (taosHashGetSize(pCmd->pTableBlockHashList) == 0) { + return TSDB_CODE_SUCCESS; + } + + STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&stmt->mtb.currentUid, sizeof(stmt->mtb.currentUid)); + if (t1 == NULL) { + tscError("no table data block in hash list, uid:%" PRId64 , stmt->mtb.currentUid); + return TSDB_CODE_TSC_APP_ERROR; + } + + pBlock = *t1; + + STableMeta* pTableMeta = pBlock->pTableMeta; + + pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize; + SSubmitBlk* pBlk = (SSubmitBlk*) pBlock->pData; + pBlk->numOfRows = pCmd->batchSize; + pBlk->dataLen = 0; + pBlk->uid = pTableMeta->id.uid; + pBlk->tid = pTableMeta->id.tid; + + return TSDB_CODE_SUCCESS; +} + static int insertStmtAddBatch(STscStmt* stmt) { SSqlCmd* pCmd = &stmt->pSql->cmd; ++pCmd->batchSize; + + if (stmt->multiTbInsert) { + return insertStmtUpdateBatch(stmt); + } + return TSDB_CODE_SUCCESS; } @@ -835,6 +959,80 @@ static int insertStmtExecute(STscStmt* stmt) { return pSql->res.code; } +static void insertBatchClean(STscStmt* pStmt) { + SSqlCmd *pCmd = &pStmt->pSql->cmd; + SSqlObj *pSql = pStmt->pSql; + int32_t size = taosHashGetSize(pCmd->pTableBlockHashList); + + // data block reset + pCmd->batchSize = 0; + + for(int32_t i = 0; i < size; ++i) { + if (pCmd->pTableNameList && pCmd->pTableNameList[i]) { + tfree(pCmd->pTableNameList[i]); + } + } + + tfree(pCmd->pTableNameList); + + STableDataBlocks** p = taosHashIterate(pCmd->pTableBlockHashList, NULL); + + STableDataBlocks* pOneTableBlock = *p; + + while (1) { + SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData; + + pOneTableBlock->size = sizeof(SSubmitBlk); + + pBlocks->numOfRows = 0; + + p = taosHashIterate(pCmd->pTableBlockHashList, p); + if (p == NULL) { + break; + } + + pOneTableBlock = *p; + } + + pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); + pCmd->numOfTables = 0; + + tscFreeSqlResult(pSql); + tscFreeSubobj(pSql); + tfree(pSql->pSubs); + pSql->subState.numOfSub = 0; +} + +static int insertBatchStmtExecute(STscStmt* pStmt) { + int32_t code = 0; + + if(pStmt->mtb.nameSet == false) { + tscError("no table name set"); + return TSDB_CODE_TSC_APP_ERROR; + } + + pStmt->pSql->retry = pStmt->pSql->maxRetry + 1; //no retry + + if (taosHashGetSize(pStmt->pSql->cmd.pTableBlockHashList) > 0) { // merge according to vgId + if ((code = tscMergeTableDataBlocks(pStmt->pSql, 
false)) != TSDB_CODE_SUCCESS) { + return code; + } + } + + code = tscHandleMultivnodeInsert(pStmt->pSql); + + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + // wait for the callback function to post the semaphore + tsem_wait(&pStmt->pSql->rspSem); + + insertBatchClean(pStmt); + + return pStmt->pSql->res.code; +} + //////////////////////////////////////////////////////////////////////////////// // interface functions @@ -866,6 +1064,7 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) { pSql->signature = pSql; pSql->pTscObj = pObj; pSql->maxRetry = TSDB_MAX_REPLICA; + pSql->isBind = true; pStmt->pSql = pSql; return pStmt; @@ -917,6 +1116,32 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { registerSqlObj(pSql); + int32_t ret = TSDB_CODE_SUCCESS; + + if ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS) { + return ret; + } + + int32_t index = 0; + SStrToken sToken = tStrGetToken(pCmd->curSql, &index, false); + + if (sToken.n == 0) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + if (sToken.n == 1 && sToken.type == TK_QUESTION) { + pStmt->multiTbInsert = true; + pStmt->mtb.tbname = sToken; + pStmt->mtb.nameSet = false; + if (pStmt->mtb.pTableHash == NULL) { + pStmt->mtb.pTableHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); + } + return TSDB_CODE_SUCCESS; + } + + pStmt->multiTbInsert = false; + memset(&pStmt->mtb, 0, sizeof(pStmt->mtb)); + int32_t code = tsParseSql(pSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { // wait for the callback function to post the semaphore @@ -931,9 +1156,100 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { return normalStmtPrepare(pStmt); } + +int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { + STscStmt* pStmt = (STscStmt*)stmt; + SSqlObj* pSql = pStmt->pSql; + SSqlCmd* pCmd = &pSql->cmd; + + if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) { + terrno = TSDB_CODE_TSC_DISCONNECTED; + return TSDB_CODE_TSC_DISCONNECTED; + } + + if (name == NULL) { + terrno = TSDB_CODE_TSC_APP_ERROR; + tscError("name is NULL"); + return TSDB_CODE_TSC_APP_ERROR; + } + + if (pStmt->multiTbInsert == false || !tscIsInsertData(pSql->sqlstr)) { + terrno = TSDB_CODE_TSC_APP_ERROR; + tscError("not multi table insert"); + return TSDB_CODE_TSC_APP_ERROR; + } + + uint64_t* uid = (uint64_t*)taosHashGet(pStmt->mtb.pTableHash, name, strlen(name)); + if (uid != NULL) { + pStmt->mtb.currentUid = *uid; + + STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid)); + if (t1 == NULL) { + tscError("no table data block in hash list, uid:%" PRId64 , pStmt->mtb.currentUid); + return TSDB_CODE_TSC_APP_ERROR; + } + + SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData; + pCmd->batchSize = pBlk->numOfRows; + + tscDebug("table:%s is already prepared, uid:%" PRIu64, name, pStmt->mtb.currentUid); + return TSDB_CODE_SUCCESS; + } + + pStmt->mtb.tbname = tscReplaceStrToken(&pSql->sqlstr, &pStmt->mtb.tbname, name); + + pStmt->mtb.nameSet = true; + + tscDebug("sqlstr set to %s", pSql->sqlstr); + + pSql->cmd.parseFinished = 0; + pSql->cmd.numOfParams = 0; + pSql->cmd.batchSize = 0; + + if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { + SHashObj* hashList = pCmd->pTableBlockHashList; + pCmd->pTableBlockHashList = NULL; + tscResetSqlCmd(pCmd, true); + pCmd->pTableBlockHashList = hashList; + } + + int32_t code = tsParseSql(pStmt->pSql, true); + if (code == 
TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + // wait for the callback function to post the semaphore + tsem_wait(&pStmt->pSql->rspSem); + + code = pStmt->pSql->res.code; + } + + if (code == TSDB_CODE_SUCCESS) { + STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0); + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + STableDataBlocks* pBlock = NULL; + code = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk), + pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + SSubmitBlk* blk = (SSubmitBlk*)pBlock->pData; + blk->numOfRows = 0; + + pStmt->mtb.currentUid = pTableMeta->id.uid; + pStmt->mtb.tbNum++; + + taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid)); + + tscDebug("table:%s is prepared, uid:%" PRIu64, name, pStmt->mtb.currentUid); + } + + return code; +} + int taos_stmt_close(TAOS_STMT* stmt) { STscStmt* pStmt = (STscStmt*)stmt; if (!pStmt->isInsert) { + taosHashCleanup(pStmt->mtb.pTableHash); + SNormalStmt* normal = &pStmt->normal; if (normal->params != NULL) { for (uint16_t i = 0; i < normal->numParams; i++) { @@ -953,12 +1269,34 @@ int taos_stmt_close(TAOS_STMT* stmt) { int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) { STscStmt* pStmt = (STscStmt*)stmt; if (pStmt->isInsert) { + if (pStmt->multiTbInsert && pStmt->mtb.nameSet == false) { + tscError("no table name set"); + return TSDB_CODE_TSC_APP_ERROR; + } + return insertStmtBindParam(pStmt, bind); } else { return normalStmtBindParam(pStmt, bind); } } + +int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_BIND* bind, int32_t num) { + STscStmt* pStmt = (STscStmt*)stmt; + if (num <= 0 || bind == NULL) { + tscError("invalid parameter"); + return TSDB_CODE_TSC_APP_ERROR; + } + + if (!pStmt->isInsert || !pStmt->multiTbInsert || !pStmt->mtb.nameSet) { + tscError("not or invalid batch insert"); + return TSDB_CODE_TSC_APP_ERROR; + } + + return insertStmtBindParamBatch(pStmt, bind, num); +} + + int taos_stmt_add_batch(TAOS_STMT* stmt) { STscStmt* pStmt = (STscStmt*)stmt; if (pStmt->isInsert) { @@ -979,7 +1317,11 @@ int taos_stmt_execute(TAOS_STMT* stmt) { int ret = 0; STscStmt* pStmt = (STscStmt*)stmt; if (pStmt->isInsert) { - ret = insertStmtExecute(pStmt); + if (pStmt->multiTbInsert) { + ret = insertBatchStmtExecute(pStmt); + } else { + ret = insertStmtExecute(pStmt); + } } else { // normal stmt query char* sql = normalStmtBuildSql(pStmt); if (sql == NULL) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 420b78f64d..eb31048433 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1255,67 +1255,71 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) { STableDataBlocks* pOneTableBlock = *p; while(pOneTableBlock) { - // the maximum expanded size in byte when a row-wise data is converted to SDataRow format - int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta); - STableDataBlocks* dataBuf = NULL; - - int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, - INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList); - if (ret != TSDB_CODE_SUCCESS) { - tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret); - taosHashCleanup(pVnodeDataBlockHashList); - 
tscDestroyBlockArrayList(pVnodeDataBlockList); - return ret; - } - SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData; - int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); - - if (dataBuf->nAllocSize < destSize) { - while (dataBuf->nAllocSize < destSize) { - dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5); - } - - char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize); - if (tmp != NULL) { - dataBuf->pData = tmp; - memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size); - } else { // failed to allocate memory, free already allocated memory and return error code - tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize); - + if (pBlocks->numOfRows > 0) { + // the maximum expanded size in byte when a row-wise data is converted to SDataRow format + int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta); + STableDataBlocks* dataBuf = NULL; + + int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, + INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList); + if (ret != TSDB_CODE_SUCCESS) { + tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret); taosHashCleanup(pVnodeDataBlockHashList); tscDestroyBlockArrayList(pVnodeDataBlockList); - tfree(dataBuf->pData); - - return TSDB_CODE_TSC_OUT_OF_MEMORY; + return ret; } + + int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); + + if (dataBuf->nAllocSize < destSize) { + while (dataBuf->nAllocSize < destSize) { + dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5); + } + + char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize); + if (tmp != NULL) { + dataBuf->pData = tmp; + memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size); + } else { // failed to allocate memory, free already allocated memory and return error code + tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize); + + taosHashCleanup(pVnodeDataBlockHashList); + tscDestroyBlockArrayList(pVnodeDataBlockList); + tfree(dataBuf->pData); + + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + } + + tscSortRemoveDataBlockDupRows(pOneTableBlock); + char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1); + + tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName), + pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey)); + + int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); + + pBlocks->tid = htonl(pBlocks->tid); + pBlocks->uid = htobe64(pBlocks->uid); + pBlocks->sversion = htonl(pBlocks->sversion); + pBlocks->numOfRows = htons(pBlocks->numOfRows); + pBlocks->schemaLen = 0; + + // erase the empty space reserved for binary data + int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->submitSchema); + assert(finalLen <= len); + + dataBuf->size += (finalLen + sizeof(SSubmitBlk)); + assert(dataBuf->size <= dataBuf->nAllocSize); + + // the length 
does not include the SSubmitBlk structure + pBlocks->dataLen = htonl(finalLen); + dataBuf->numOfTables += 1; + }else { + tscWarn("table %s data block is empty", pOneTableBlock->tableName.tname); } - - tscSortRemoveDataBlockDupRows(pOneTableBlock); - char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1); - - tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName), - pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey)); - - int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); - - pBlocks->tid = htonl(pBlocks->tid); - pBlocks->uid = htobe64(pBlocks->uid); - pBlocks->sversion = htonl(pBlocks->sversion); - pBlocks->numOfRows = htons(pBlocks->numOfRows); - pBlocks->schemaLen = 0; - - // erase the empty space reserved for binary data - int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->submitSchema); - assert(finalLen <= len); - - dataBuf->size += (finalLen + sizeof(SSubmitBlk)); - assert(dataBuf->size <= dataBuf->nAllocSize); - - // the length does not include the SSubmitBlk structure - pBlocks->dataLen = htonl(finalLen); - dataBuf->numOfTables += 1; - + p = taosHashIterate(pCmd->pTableBlockHashList, p); if (p == NULL) { break; diff --git a/src/inc/taos.h b/src/inc/taos.h index cd8e116053..3f6609f6df 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -101,10 +101,12 @@ typedef struct TAOS_BIND { TAOS_STMT *taos_stmt_init(TAOS *taos); int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length); +int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name); int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert); int taos_stmt_num_params(TAOS_STMT *stmt, int *nums); int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes); int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind); +int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_BIND* bind, int32_t num); int taos_stmt_add_batch(TAOS_STMT *stmt); int taos_stmt_execute(TAOS_STMT *stmt); TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt); diff --git a/src/query/src/qTokenizer.c b/src/query/src/qTokenizer.c index 7869e27707..00fcaf82f5 100644 --- a/src/query/src/qTokenizer.c +++ b/src/query/src/qTokenizer.c @@ -560,6 +560,28 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenId) { return 0; } +SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* new) { + char *src = *str; + int32_t nsize = strlen(new); + int32_t size = strlen(*str) - token->n + nsize + 1; + int32_t bsize = (uint64_t)token->z - (uint64_t)src; + SStrToken ntoken; + + *str = calloc(1, size); + + strncpy(*str, src, bsize); + strcat(*str, new); + strcat(*str, token->z + token->n); + + ntoken.n = nsize; + ntoken.z = *str + bsize; + + tfree(src); + + return ntoken; +} + + SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr) { SStrToken t0 = {0}; diff --git a/src/util/inc/tstoken.h b/src/util/inc/tstoken.h index ab1ef7b279..93d48e01cb 100644 --- a/src/util/inc/tstoken.h +++ b/src/util/inc/tstoken.h @@ -182,6 +182,9 @@ static FORCE_INLINE int32_t tGetNumericStringType(const SStrToken* pToken) { void taosCleanupKeywordsTable(); +SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* new); + + #ifdef __cplusplus } #endif diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile index b06fe551db..09682d35ef 100644 
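The makefile change below adds a batchprepare example that exercises the two
statement calls introduced above, taos_stmt_set_tbname() and
taos_stmt_bind_param_batch().  A minimal sketch of the intended flow (the
connection, the database and a child table t1 with schema (ts timestamp, v int)
are assumed to exist already; error checking is omitted):

    #include <string.h>
    #include <taos.h>

    void batch_insert_sketch(TAOS *taos) {
      TAOS_STMT *stmt = taos_stmt_init(taos);
      /* "?" as the table name switches the statement into multi-table mode */
      taos_stmt_prepare(stmt, "insert into ? values(?, ?)", 0);

      taos_stmt_set_tbname(stmt, "t1");            /* pick the target table  */

      int64_t ts[2] = {1620000000000, 1620000001000};
      int32_t v[2]  = {10, 20};
      TAOS_BIND binds[4];                          /* 2 rows x 2 columns,    */
      memset(binds, 0, sizeof(binds));             /* laid out row by row    */
      for (int r = 0; r < 2; r++) {
        binds[2 * r + 0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
        binds[2 * r + 0].buffer        = &ts[r];
        binds[2 * r + 0].buffer_length = sizeof(int64_t);
        binds[2 * r + 1].buffer_type   = TSDB_DATA_TYPE_INT;
        binds[2 * r + 1].buffer        = &v[r];
        binds[2 * r + 1].buffer_length = sizeof(int32_t);
      }
      taos_stmt_bind_param_batch(stmt, binds, 2);  /* num = number of rows   */
      taos_stmt_add_batch(stmt);

      /* further taos_stmt_set_tbname()/bind/add_batch rounds for other
         tables may go here; a single execute then flushes all buffered
         blocks to their vnodes */
      taos_stmt_execute(stmt);
      taos_stmt_close(stmt);
    }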
--- a/tests/examples/c/makefile +++ b/tests/examples/c/makefile @@ -14,6 +14,7 @@ exe: gcc $(CFLAGS) ./asyncdemo.c -o $(ROOT)asyncdemo $(LFLAGS) gcc $(CFLAGS) ./demo.c -o $(ROOT)demo $(LFLAGS) gcc $(CFLAGS) ./prepare.c -o $(ROOT)prepare $(LFLAGS) + gcc $(CFLAGS) ./batchprepare.c -o $(ROOT)batchprepare $(LFLAGS) gcc $(CFLAGS) ./stream.c -o $(ROOT)stream $(LFLAGS) gcc $(CFLAGS) ./subscribe.c -o $(ROOT)subscribe $(LFLAGS) gcc $(CFLAGS) ./apitest.c -o $(ROOT)apitest $(LFLAGS) @@ -22,6 +23,7 @@ clean: rm $(ROOT)asyncdemo rm $(ROOT)demo rm $(ROOT)prepare + rm $(ROOT)batchprepare rm $(ROOT)stream rm $(ROOT)subscribe rm $(ROOT)apitest From b6e04604cb268e731e24f0141fac529752c80ab9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 7 May 2021 17:29:00 +0800 Subject: [PATCH 078/140] [td-3967] --- src/client/src/tscParseInsert.c | 4 ---- src/util/src/ttokenizer.c | 2 +- tests/pytest/insert/nchar-unicode.py | 5 ++++- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 3882be56dc..923d95c888 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -468,11 +468,7 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1 int32_t j = 0; for (uint32_t k = 1; k < sToken.n - 1; ++k) { if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) { - if (sToken.z[k] == '\\') { - tmpTokenBuf[j] = GET_ESCAPE_CHAR(sToken.z[k+1]); - } else { tmpTokenBuf[j] = sToken.z[k + 1]; - } cnt++; j++; diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c index 794420d55b..54da75cae0 100644 --- a/src/util/src/ttokenizer.c +++ b/src/util/src/ttokenizer.c @@ -415,7 +415,7 @@ uint32_t tGetToken(char* z, uint32_t* tokenId) { int delim = z[0]; bool strEnd = false; for (i = 1; z[i]; i++) { - if (z[i] == '\\') { + if (z[i] == '\\') { // ignore the escaped character that follows this backslash i++; continue; } diff --git a/tests/pytest/insert/nchar-unicode.py b/tests/pytest/insert/nchar-unicode.py index c417a6bca2..4afcf5b760 100644 --- a/tests/pytest/insert/nchar-unicode.py +++ b/tests/pytest/insert/nchar-unicode.py @@ -57,12 +57,15 @@ class TDTestCase: # https://www.ltg.ed.ac.uk/~richard/unicode-sample.html # Basic Latin - data = r'! # $ % & ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~' + data = r'! # $ % & ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \\ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~' tdLog.info("insert Basic Latin %d length data: %s" % (len(data), data)) tdSql.execute("insert into tb values (now, '%s')" % data) tdSql.query("select * from tb") tdSql.checkRows(3) + + data = data.replace('\\\\', '\\') tdSql.checkData(2, 1, data) + # tdSql.execute("insert into tb values(now, 'abc')") # Latin-1 Supplement data = ' ¡ ¢ £ ¤ ¥ ¦ § ¨ © ª « ¬ ­ ® ¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö × Ø Ù Ú Û Ü Ý Þ ß à á â ã ä å æ ç è é ê ë ì í î ï ð ñ ò ó ô õ ö ÷ ø ù ú û ü ý þ ÿ' From 3d7650d12c454944c4030cad24a3b8bd10e739f5 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 7 May 2021 18:18:40 +0800 Subject: [PATCH 079/140] [TD-3902]: taosdemo subscribe. 
(#6028) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 143 +++++++++++++++++++----------------- 1 file changed, 77 insertions(+), 66 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 20d7f67138..847a8045af 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -1630,64 +1630,68 @@ static void printfQueryMeta() { printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName); printf("\n"); - printf("specified table query info: \n"); - printf("query interval: \033[33m%"PRIu64" ms\033[0m\n", - g_queryInfo.specifiedQueryInfo.queryInterval); - printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times); - printf("concurrent: \033[33m%"PRIu64"\033[0m\n", - g_queryInfo.specifiedQueryInfo.concurrent); - printf("sqlCount: \033[33m%"PRIu64"\033[0m\n", + + if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) { + printf("specified table query info: \n"); + printf("sqlCount: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.specifiedQueryInfo.sqlCount); - printf("specified tbl query times:\n"); - printf(" \033[33m%"PRIu64"\033[0m\n", + if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) { + printf("specified tbl query times:\n"); + printf(" \033[33m%"PRIu64"\033[0m\n", g_queryInfo.specifiedQueryInfo.queryTimes); - - if (SUBSCRIBE_TEST == g_args.test_mode) { - printf("mod: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.mode); - printf("interval: \033[33m%"PRIu64"\033[0m\n", + printf("query interval: \033[33m%"PRIu64" ms\033[0m\n", + g_queryInfo.specifiedQueryInfo.queryInterval); + printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times); + printf("concurrent: \033[33m%"PRIu64"\033[0m\n", + g_queryInfo.specifiedQueryInfo.concurrent); + printf("mod: \033[33m%s\033[0m\n", + (g_queryInfo.specifiedQueryInfo.mode)?"async":"sync"); + printf("interval: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", + printf("restart: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", + printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - } - for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n", - i, g_queryInfo.specifiedQueryInfo.sql[i]); - } - printf("\n"); - printf("super table query info:\n"); - printf("query interval: \033[33m%"PRIu64"\033[0m\n", - g_queryInfo.superQueryInfo.queryInterval); - printf("threadCnt: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.threadCnt); - printf("childTblCount: \033[33m%"PRIu64"\033[0m\n", - g_queryInfo.superQueryInfo.childTblCount); - printf("stable name: \033[33m%s\033[0m\n", - g_queryInfo.superQueryInfo.sTblName); - printf("stb query times:\033[33m%"PRIu64"\033[0m\n", - g_queryInfo.superQueryInfo.queryTimes); + for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n", + i, g_queryInfo.specifiedQueryInfo.sql[i]); + } + printf("\n"); + } - if (SUBSCRIBE_TEST == g_args.test_mode) { - printf("mod: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.mode); - printf("interval: \033[33m%"PRIu64"\033[0m\n", - g_queryInfo.superQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeKeepProgress); - } - - 
printf("sqlCount: \033[33m%"PRIu64"\033[0m\n", + printf("super table query info:\n"); + printf("sqlCount: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", - i, g_queryInfo.superQueryInfo.sql[i]); + + if (g_queryInfo.superQueryInfo.sqlCount > 0) { + printf("query interval: \033[33m%"PRIu64"\033[0m\n", + g_queryInfo.superQueryInfo.queryInterval); + printf("threadCnt: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.threadCnt); + printf("childTblCount: \033[33m%"PRIu64"\033[0m\n", + g_queryInfo.superQueryInfo.childTblCount); + printf("stable name: \033[33m%s\033[0m\n", + g_queryInfo.superQueryInfo.sTblName); + printf("stb query times:\033[33m%"PRIu64"\033[0m\n", + g_queryInfo.superQueryInfo.queryTimes); + + printf("mod: \033[33m%s\033[0m\n", + (g_queryInfo.superQueryInfo.mode)?"async":"sync"); + printf("interval: \033[33m%"PRIu64"\033[0m\n", + g_queryInfo.superQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeKeepProgress); + + for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + printf(" sql[%d]: \033[33m%s\033[0m\n", + i, g_queryInfo.superQueryInfo.sql[i]); + } + printf("\n"); + } } - printf("\n"); SHOW_PARSE_RESULT_END(); } @@ -2847,7 +2851,7 @@ static void* createTable(void *sarg) } static int startMultiThreadCreateChildTable( - char* cols, int threads, int64_t startFrom, int64_t ntables, + char* cols, int threads, uint64_t startFrom, uint64_t ntables, char* db_name, SSuperTable* superTblInfo) { pthread_t *pids = malloc(threads * sizeof(pthread_t)); @@ -2862,13 +2866,13 @@ static int startMultiThreadCreateChildTable( threads = 1; } - int64_t a = ntables / threads; + uint64_t a = ntables / threads; if (a < 1) { threads = ntables; a = 1; } - int64_t b = 0; + uint64_t b = 0; b = ntables % threads; for (int64_t i = 0; i < threads; i++) { @@ -4212,7 +4216,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } } - // sub_table_query + // super_table_query cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); if (!superQuery) { g_queryInfo.superQueryInfo.threadCnt = 1; @@ -5679,13 +5683,13 @@ static void startMultiThreadInsertData(int threads, char* db_name, taos_close(taos); - int a = ntables / threads; + uint64_t a = ntables / threads; if (a < 1) { threads = ntables; a = 1; } - int b = 0; + uint64_t b = 0; if (threads != 0) { b = ntables % threads; } @@ -6380,7 +6384,7 @@ static int queryTestProcess() { b = ntables % threads; } - int startFrom = 0; + uint64_t startFrom = 0; for (int i = 0; i < threads; i++) { threadInfo *t_info = infosOfSub + i; t_info->threadID = i; @@ -6436,13 +6440,14 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c } getResult(res, (char*)param); - taos_free_result(res); + // tao_unscribe() will free result. 
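  /* For reference: with the asynchronous subscription used here the TAOS_RES
   * handle belongs to the subscription and is released by taos_unsubscribe(),
   * so the callback must not free it itself.  A synchronous consumer would
   * poll instead, roughly:
   *
   *     TAOS_SUB *tsub = taos_subscribe(taos, restart, topic, sql,
   *                                     NULL, NULL, interval);  // no callback
   *     TAOS_RES *res  = taos_consume(tsub);                    // fetch rows
   *     taos_unsubscribe(tsub, keepProgress);                   // frees state
   *
   * where restart, topic, sql, interval and keepProgress stand for the same
   * values taken from g_queryInfo in subscribeImpl() below.
   */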
} -static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) { +static TAOS_SUB* subscribeImpl( + TAOS *taos, char *sql, char* topic, char* resultFileName) { TAOS_SUB* tsub = NULL; - if (g_queryInfo.specifiedQueryInfo.mode) { + if (ASYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) { tsub = taos_subscribe(taos, g_queryInfo.specifiedQueryInfo.subscribeRestart, topic, sql, subscribe_callback, (void*)resultFileName, @@ -6466,6 +6471,9 @@ static void *superSubscribe(void *sarg) { char subSqlstr[1024]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; + if (g_queryInfo.superQueryInfo.sqlCount == 0) + return NULL; + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, @@ -6524,7 +6532,7 @@ static void *superSubscribe(void *sarg) { TAOS_RES* res = NULL; while(1) { for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - if (1 == g_queryInfo.superQueryInfo.mode) { + if (ASYNC_QUERY_MODE == g_queryInfo.superQueryInfo.mode) { continue; } @@ -6554,6 +6562,9 @@ static void *specifiedSubscribe(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; + if (g_queryInfo.specifiedQueryInfo.sqlCount == 0) + return NULL; + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, @@ -6591,7 +6602,7 @@ static void *specifiedSubscribe(void *sarg) { for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { sprintf(topic, "taosdemo-subscribe-%d", i); char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.superQueryInfo.result[i][0] != 0) { + if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID); } @@ -6610,7 +6621,7 @@ static void *specifiedSubscribe(void *sarg) { TAOS_RES* res = NULL; while(1) { for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - if (SYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) { + if (ASYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) { continue; } @@ -6710,21 +6721,21 @@ static int subscribeTestProcess() { exit(-1); } - int ntables = g_queryInfo.superQueryInfo.childTblCount; + uint64_t ntables = g_queryInfo.superQueryInfo.childTblCount; int threads = g_queryInfo.superQueryInfo.threadCnt; - int a = ntables / threads; + uint64_t a = ntables / threads; if (a < 1) { threads = ntables; a = 1; } - int b = 0; + uint64_t b = 0; if (threads != 0) { b = ntables % threads; } - int startFrom = 0; + uint64_t startFrom = 0; for (int i = 0; i < threads; i++) { threadInfo *t_info = infosOfSub + i; t_info->threadID = i; From e91dd72c0ee0c6320486d40d2c72c50bf7eae941 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 8 May 2021 03:33:24 +0800 Subject: [PATCH 080/140] [TD-4096] support 'show create stable xx' --- src/client/src/tscLocal.c | 6 +++++- src/client/src/tscSQLParser.c | 1 + src/client/src/tscServer.c | 1 + src/client/src/tscSql.c | 1 + src/common/inc/tcmdtype.h | 1 + src/query/inc/sql.y | 4 ++++ 6 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index a7882ffa61..6b55780af9 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -326,6 +326,7 @@ TAOS_ROW tscFetchRow(void *param) { pCmd->command == TSDB_SQL_FETCH || pCmd->command == TSDB_SQL_SHOW || pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE || + pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE || pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE || pCmd->command == TSDB_SQL_SELECT 
|| pCmd->command == TSDB_SQL_DESCRIBE_TABLE || @@ -679,6 +680,9 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) { assert(pTableMetaInfo->pTableMeta != NULL); const char* tableName = tNameGetTableName(&pTableMetaInfo->name); + if (pSql->cmd.command == TSDB_SQL_SHOW_CREATE_STABLE && !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { + return TSDB_CODE_TSC_INVALID_VALUE; + } char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN); int32_t code = TSDB_CODE_SUCCESS; @@ -907,7 +911,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) { */ pRes->qId = 0x1; pRes->numOfRows = 0; - } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE) { + } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE || pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE) { pRes->code = tscProcessShowCreateTable(pSql); } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) { pRes->code = tscProcessShowCreateDatabase(pSql); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 87b4669a04..931189e603 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -446,6 +446,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { return tscGetTableMeta(pSql, pTableMetaInfo); } + case TSDB_SQL_SHOW_CREATE_STABLE: case TSDB_SQL_SHOW_CREATE_TABLE: { const char* msg1 = "invalid table name"; const char* msg2 = "table name is too long"; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 9528a553b2..d2bf458e58 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2641,6 +2641,7 @@ void tscInitMsgsFp() { tscProcessMsgRsp[TSDB_SQL_ALTER_DB] = tscProcessAlterDbMsgRsp; tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_TABLE] = tscProcessShowCreateRsp; + tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_STABLE] = tscProcessShowCreateRsp; tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_DATABASE] = tscProcessShowCreateRsp; tscKeepConn[TSDB_SQL_SHOW] = 1; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 8dbb1c0a52..364af4e8b1 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -457,6 +457,7 @@ static bool needToFetchNewBlock(SSqlObj* pSql) { pCmd->command == TSDB_SQL_FETCH || pCmd->command == TSDB_SQL_SHOW || pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE || + pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE || pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE || pCmd->command == TSDB_SQL_SELECT || pCmd->command == TSDB_SQL_DESCRIBE_TABLE || diff --git a/src/common/inc/tcmdtype.h b/src/common/inc/tcmdtype.h index be16e80124..adf210cfeb 100644 --- a/src/common/inc/tcmdtype.h +++ b/src/common/inc/tcmdtype.h @@ -80,6 +80,7 @@ enum { TSDB_DEFINE_SQL_TYPE( TSDB_SQL_TABLE_JOIN_RETRIEVE, "join-retrieve" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_TABLE, "show-create-table") + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_STABLE, "show-create-stable") TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database") /* diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index fd922240c2..8ef8ef0e2b 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -94,6 +94,10 @@ cmd ::= SHOW CREATE TABLE ids(X) cpxName(Y). { X.n += Y.n; setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_TABLE, 1, &X); } +cmd ::= SHOW CREATE STABLE ids(X) cpxName(Y). { + X.n += Y.n; + setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_STABLE, 1, &X); +} cmd ::= SHOW CREATE DATABASE ids(X). 
{ setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_DATABASE, 1, &X); From 56bea59c3ba7a99552db9e47170ad55827e59470 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Sat, 8 May 2021 11:34:21 +0800 Subject: [PATCH 081/140] [TD-4093]update python connector every test --- Jenkinsfile | 1 + tests/Jenkinsfile | 1 + 2 files changed, 2 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index dfe9ed4389..33ce784bce 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -94,6 +94,7 @@ def pre_test(){ make > /dev/null make install > /dev/null cd ${WKC}/tests + pip3 install ${WKC}/src/connector/python/linux/python3/ ''' return 1 } diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index 178a0446c3..64a71ef8f9 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -21,6 +21,7 @@ def pre_test(){ cmake .. > /dev/null make > /dev/null make install > /dev/null + pip3 install ${WKC}/src/connector/python/linux/python3/ ''' return 1 } From 020fa3dd06751e50c96b11b5ec82c82d79dc9411 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Sat, 8 May 2021 11:52:24 +0800 Subject: [PATCH 082/140] performance issue --- src/client/src/tscPrepare.c | 16 ++++++++++++++-- src/client/src/tscUtil.c | 4 +++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index baff65d6b9..ec85dff359 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -49,7 +49,8 @@ typedef struct SMultiTbStmt { uint64_t currentUid; uint32_t tbNum; SStrToken tbname; - SHashObj *pTableHash; + SHashObj *pTableHash; + SHashObj *pTableBlockHashList; // data block for each table } SMultiTbStmt; typedef struct STscStmt { @@ -975,6 +976,7 @@ static void insertBatchClean(STscStmt* pStmt) { tfree(pCmd->pTableNameList); +/* STableDataBlocks** p = taosHashIterate(pCmd->pTableBlockHashList, NULL); STableDataBlocks* pOneTableBlock = *p; @@ -993,10 +995,12 @@ static void insertBatchClean(STscStmt* pStmt) { pOneTableBlock = *p; } +*/ pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); pCmd->numOfTables = 0; + taosHashEmpty(pCmd->pTableBlockHashList); tscFreeSqlResult(pSql); tscFreeSubobj(pSql); tfree(pSql->pSubs); @@ -1136,6 +1140,10 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { if (pStmt->mtb.pTableHash == NULL) { pStmt->mtb.pTableHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); } + if (pStmt->mtb.pTableBlockHashList == NULL) { + pStmt->mtb.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); + } + return TSDB_CODE_SUCCESS; } @@ -1183,7 +1191,7 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { if (uid != NULL) { pStmt->mtb.currentUid = *uid; - STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid)); + STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pStmt->mtb.pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid)); if (t1 == NULL) { tscError("no table data block in hash list, uid:%" PRId64 , pStmt->mtb.currentUid); return TSDB_CODE_TSC_APP_ERROR; @@ -1191,6 +1199,8 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData; pCmd->batchSize = pBlk->numOfRows; + + taosHashPut(pCmd->pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)t1, POINTER_BYTES); tscDebug("table:%s is already prepared, uid:%" 
PRIu64, name, pStmt->mtb.currentUid); return TSDB_CODE_SUCCESS; @@ -1236,6 +1246,8 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { pStmt->mtb.currentUid = pTableMeta->id.uid; pStmt->mtb.tbNum++; + + taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES); taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid)); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index eb31048433..7a6842869a 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1316,8 +1316,10 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) { // the length does not include the SSubmitBlk structure pBlocks->dataLen = htonl(finalLen); dataBuf->numOfTables += 1; + + pBlocks->numOfRows = 0; }else { - tscWarn("table %s data block is empty", pOneTableBlock->tableName.tname); + tscDebug("table %s data block is empty", pOneTableBlock->tableName.tname); } p = taosHashIterate(pCmd->pTableBlockHashList, p); From a35c422793ad7b0e89396e4c7072c7dd143cb57f Mon Sep 17 00:00:00 2001 From: wu champion Date: Sat, 8 May 2021 12:29:54 +0800 Subject: [PATCH 083/140] [TD-3921] fix the case for new python connector --- tests/pytest/stream/showStreamExecTimeisNull.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/pytest/stream/showStreamExecTimeisNull.py b/tests/pytest/stream/showStreamExecTimeisNull.py index 39b0259018..0ffbdb2042 100644 --- a/tests/pytest/stream/showStreamExecTimeisNull.py +++ b/tests/pytest/stream/showStreamExecTimeisNull.py @@ -67,7 +67,9 @@ class TDTestCase: self.queryRows = len(self.queryResult) self.queryCols = len(tdSql.cursor.description) # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows)) - if self.queryRows >= timeout: + if self.queryRows >= 1: + tdSql.query(sql) + tdSql.checkData(0, 5, None) return (self.queryRows, i) time.sleep(1) except Exception as e: @@ -75,6 +77,8 @@ class TDTestCase: else: tdLog.exit(f"sql: {sql} except raise {exception}, actually not") + tdSql.checkData(0, 5, None) + def run(self): tdSql.execute("drop database if exists dbcq") From b551731da15355775b34997047e31783548e641b Mon Sep 17 00:00:00 2001 From: wu champion Date: Sat, 8 May 2021 12:39:32 +0800 Subject: [PATCH 084/140] fix the case --- tests/pytest/stream/showStreamExecTimeisNull.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/pytest/stream/showStreamExecTimeisNull.py b/tests/pytest/stream/showStreamExecTimeisNull.py index 0ffbdb2042..8a2a09cec6 100644 --- a/tests/pytest/stream/showStreamExecTimeisNull.py +++ b/tests/pytest/stream/showStreamExecTimeisNull.py @@ -73,12 +73,9 @@ class TDTestCase: return (self.queryRows, i) time.sleep(1) except Exception as e: - tdLog.info(f"sql: {sql} except raise {exception}, actually raise {repr(e)} ") - else: - tdLog.exit(f"sql: {sql} except raise {exception}, actually not") - - tdSql.checkData(0, 5, None) - + tdLog.exit(f"sql: {sql} except raise {exception}, actually raise {repr(e)} ") + # else: + # tdLog.exit(f"sql: {sql} except raise {exception}, actually not") def run(self): tdSql.execute("drop database if exists dbcq") From 70ed9796ff8d179c577d0684ee92e74da4c29525 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Sat, 8 May 2021 15:56:18 +0800 Subject: [PATCH 085/140] [TD-4017]: add test case --- tests/pytest/query/unionAllTest.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git 
a/tests/pytest/query/unionAllTest.py b/tests/pytest/query/unionAllTest.py index 49d745ebd5..3064e2f63e 100644 --- a/tests/pytest/query/unionAllTest.py +++ b/tests/pytest/query/unionAllTest.py @@ -74,6 +74,16 @@ class TDTestCase: tdSql.error("select 'dc' as options from stb where type = 1 limit 1 union all select 'ad' as city from stb where type = 2 limit 1") + # for defect https://jira.taosdata.com:18080/browse/TD-4017 + tdSql.execute("alter table stb add column col int") + tdSql.execute("insert into tb1 values(%d, 'option1', 'beijing', 10)" % (self.ts + 1000)) + + tdSql.query("select 'dc' as options from stb where col > 10 limit 1") + tdSql.checkRows(0) + + tdSql.query("select 'dcs' as options from stb where col > 200 limit 1 union all select 'aaa' as options from stb limit 10") + tdSql.checkData(0, 0, 'aaa') + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From a0020b15ef900c68f23d7b15d9476c9b495e15d6 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Sat, 8 May 2021 16:25:03 +0800 Subject: [PATCH 086/140] add assert info --- src/client/src/tscServer.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index d9405f300a..2035d6261f 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -1973,6 +1973,8 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { return TSDB_CODE_TSC_INVALID_VALUE; } + assert(pTableMeta->tableType == TSDB_SUPER_TABLE || pTableMeta->tableType == TSDB_CHILD_TABLE || pTableMeta->tableType == TSDB_NORMAL_TABLE || pTableMeta->tableType == TSDB_STREAM_TABLE); + if (pTableMeta->tableType == TSDB_CHILD_TABLE) { // check if super table hashmap or not int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN); From 720aea627847800dfa9fb2d201e5c960598f975d Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 8 May 2021 16:36:08 +0800 Subject: [PATCH 087/140] [TD-4083]: taosdemo query times need be more than 0. (#6038) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 45 +++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 847a8045af..56352a3bee 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -771,48 +771,49 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->sqlFile = argv[++i]; } else if (strcmp(argv[i], "-q") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, 1: ASYNC. 
Default is SYNC.\n"); exit(EXIT_FAILURE); } arguments->query_mode = atoi(argv[++i]); } else if (strcmp(argv[i], "-T") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-T need a number following!\n"); exit(EXIT_FAILURE); } arguments->num_of_threads = atoi(argv[++i]); } else if (strcmp(argv[i], "-i") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-i need a number following!\n"); exit(EXIT_FAILURE); } arguments->insert_interval = atoi(argv[++i]); } else if (strcmp(argv[i], "-qt") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1])) + || (atoi(argv[i+1]) <= 0)) { printHelp(); - errorPrint("%s", "\n\t-qt need a number following!\n"); + errorPrint("%s", "\n\t-qt need a valid (>0) number following!\n"); exit(EXIT_FAILURE); } arguments->query_times = atoi(argv[++i]); } else if (strcmp(argv[i], "-B") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-B need a number following!\n"); exit(EXIT_FAILURE); } arguments->interlace_rows = atoi(argv[++i]); } else if (strcmp(argv[i], "-r") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-r need a number following!\n"); exit(EXIT_FAILURE); @@ -4040,9 +4041,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); if (gQueryTimes && gQueryTimes->type == cJSON_Number) { - if (gQueryTimes->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + if (gQueryTimes->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + __func__, __LINE__, gQueryTimes->valueint); goto PARSE_OVER; } g_args.query_times = gQueryTimes->valueint; @@ -4091,9 +4092,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, "query_times"); if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { - if (specifiedQueryTimes->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + if (specifiedQueryTimes->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + __func__, __LINE__, specifiedQueryTimes->valueint); goto PARSE_OVER; } @@ -4235,9 +4236,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); if (superQueryTimes && superQueryTimes->type == cJSON_Number) { - if (superQueryTimes->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + if (superQueryTimes->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + __func__, __LINE__, superQueryTimes->valueint); goto PARSE_OVER; } g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; From 003562bc78de37a5682c3c79000047cbac283337 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Sat, 8 May 2021 17:19:21 +0800 Subject: [PATCH 088/140] fix bug --- 
src/client/src/tscPrepare.c | 119 ++++++++++++++++++++++++++++++------ src/inc/taos.h | 14 ++++- 2 files changed, 113 insertions(+), 20 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index ec85dff359..1c263487df 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -268,12 +268,13 @@ static char* normalStmtBuildSql(STscStmt* stmt) { //////////////////////////////////////////////////////////////////////////////// // functions for insertion statement preparation -static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind) { +static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) { if (bind->is_null != NULL && *(bind->is_null)) { setNull(data + param->offset, param->type, param->bytes); return TSDB_CODE_SUCCESS; } +#if 0 if (0) { // allow user bind param data with different type union { @@ -654,6 +655,7 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, } } } +#endif if (bind->buffer_type != param->type) { return TSDB_CODE_TSC_INVALID_VALUE; @@ -713,6 +715,85 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, return TSDB_CODE_SUCCESS; } + +static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) { + if (bind->buffer_type != param->type) { + return TSDB_CODE_TSC_INVALID_VALUE; + } + + short size = 0; + switch(param->type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + size = 1; + break; + + case TSDB_DATA_TYPE_SMALLINT: + size = 2; + break; + + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_FLOAT: + size = 4; + break; + + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + size = 8; + break; + + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + if (bind->length == NULL) { + tscError("BINARY/NCHAR no length"); + return TSDB_CODE_TSC_INVALID_VALUE; + } + break; + + default: + assert(false); + return TSDB_CODE_TSC_INVALID_VALUE; + } + + + for (int i = 0; i < bind->num; ++i) { + char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (rowNum + i); + + if (bind->is_null != NULL && bind->is_null[i]) { + setNull(data + param->offset, param->type, param->bytes); + return TSDB_CODE_SUCCESS; + } + + if (size > 0) { + memcpy(data + param->offset, bind->buffer + bind->buffer_length * i, size); + + if (param->offset == 0) { + if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) { + tscError("invalid timestamp"); + return TSDB_CODE_TSC_INVALID_VALUE; + } + } + } else if (param->type == TSDB_DATA_TYPE_BINARY) { + if (bind->length[i] > (uintptr_t)param->bytes) { + return TSDB_CODE_TSC_INVALID_VALUE; + } + size = (short)bind->length[i]; + STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer + bind->buffer_length * i, size); + } else if (param->type == TSDB_DATA_TYPE_NCHAR) { + int32_t output = 0; + if (!taosMbsToUcs4(bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { + return TSDB_CODE_TSC_INVALID_VALUE; + } + varDataSetLen(data + param->offset, output); + } + } + + return TSDB_CODE_SUCCESS; +} + + + static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { SSqlCmd* pCmd = &stmt->pSql->cmd; STscStmt* pStmt = (STscStmt*)stmt; @@ -765,7 +846,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { for (uint32_t j = 0; j < 
pBlock->numOfParams; ++j) { SParamInfo* param = &pBlock->params[j]; - int code = doBindParam(pBlock, data, param, &bind[param->idx]); + int code = doBindParam(pBlock, data, param, &bind[param->idx], 1); if (code != TSDB_CODE_SUCCESS) { tscDebug("param %d: type mismatch or invalid", param->idx); return code; @@ -776,9 +857,10 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { } -static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_BIND* bind, int32_t num) { +static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind) { SSqlCmd* pCmd = &stmt->pSql->cmd; STscStmt* pStmt = (STscStmt*)stmt; + int rowNum = bind->num; STableDataBlocks* pBlock = NULL; @@ -795,7 +877,7 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_BIND* bind, int32_t num pBlock = *t1; - uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + num) * pBlock->rowSize; + uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + rowNum) * pBlock->rowSize; if (totalDataSize > pBlock->nAllocSize) { const double factor = 1.5; @@ -808,22 +890,21 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_BIND* bind, int32_t num pBlock->nAllocSize = (uint32_t)(totalDataSize * factor); } - for (uint32_t i = 0; i < num; ++i) { - char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (pCmd->batchSize + i); - TAOS_BIND* tbind = bind + pBlock->numOfParams * i; + for (uint32_t j = 0; j < pBlock->numOfParams; ++j) { + SParamInfo* param = &pBlock->params[j]; + if (bind[param->idx].num != rowNum) { + tscError("param %d: num[%d:%d] not match", param->idx, rowNum, bind[param->idx].num); + return TSDB_CODE_TSC_INVALID_VALUE; + } - for (uint32_t j = 0; j < pBlock->numOfParams; ++j) { - SParamInfo* param = &pBlock->params[j]; - - int code = doBindParam(pBlock, data, param, &tbind[param->idx]); - if (code != TSDB_CODE_SUCCESS) { - tscError("param %d: type mismatch or invalid", param->idx); - return code; - } + int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize); + if (code != TSDB_CODE_SUCCESS) { + tscError("param %d: type mismatch or invalid", param->idx); + return code; } } - pCmd->batchSize += num - 1; + pCmd->batchSize += rowNum - 1; return TSDB_CODE_SUCCESS; } @@ -1293,9 +1374,9 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) { } -int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_BIND* bind, int32_t num) { +int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) { STscStmt* pStmt = (STscStmt*)stmt; - if (num <= 0 || bind == NULL) { + if (bind == NULL || bind->num <= 0) { tscError("invalid parameter"); return TSDB_CODE_TSC_APP_ERROR; } @@ -1305,7 +1386,7 @@ int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_BIND* bind, int32_t num) { return TSDB_CODE_TSC_APP_ERROR; } - return insertStmtBindParamBatch(pStmt, bind, num); + return insertStmtBindParamBatch(pStmt, bind); } diff --git a/src/inc/taos.h b/src/inc/taos.h index 3f6609f6df..ca87337800 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -82,6 +82,7 @@ typedef struct TAOS_BIND { uintptr_t buffer_length; // unused uintptr_t *length; int * is_null; + int is_unsigned; // unused int * error; // unused union { @@ -99,6 +100,17 @@ typedef struct TAOS_BIND { unsigned int allocated; } TAOS_BIND; +typedef struct TAOS_MULTI_BIND { + int buffer_type; + void * buffer; + uintptr_t buffer_length; // unused + uintptr_t *length; + int * is_null; + + int num; +} TAOS_MULTI_BIND; + + TAOS_STMT *taos_stmt_init(TAOS *taos); int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, 
unsigned long length); int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name); @@ -106,7 +118,7 @@ int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert); int taos_stmt_num_params(TAOS_STMT *stmt, int *nums); int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes); int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind); -int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_BIND* bind, int32_t num); +int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind); int taos_stmt_add_batch(TAOS_STMT *stmt); int taos_stmt_execute(TAOS_STMT *stmt); TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt); From 427e67ee4cbd57a4401fe136772b9bec93bc51e0 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 8 May 2021 18:15:17 +0800 Subject: [PATCH 089/140] [TD-4070]: taosdemo use friendly msg if the setting has improper values. (#6040) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 56352a3bee..99befbaf31 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -1073,7 +1073,7 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { if (code != 0) { if (!quiet) { debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); - errorPrint("Failed to run %s, reason: %s\n", command, taos_errstr(res)); + errorPrint("Failed to execute %s, reason: %s\n", command, taos_errstr(res)); } taos_free_result(res); //taos_close(taos); @@ -5201,6 +5201,13 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { startTs = taosGetTimestampMs(); + if (recOfBatch == 0) { + errorPrint("[%d] %s() LN%d try inserting records of batch is %"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + recOfBatch); + errorPrint("%s\n", "\tPlease check if the batch or the buffer length is proper value!\n"); + goto free_of_interlace; + } int64_t affectedRows = execInsert(pThreadInfo, buffer, recOfBatch); endTs = taosGetTimestampMs(); From be06778ff70b166ebe836f706c4c5763e0fc8f63 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 8 May 2021 18:55:48 +0800 Subject: [PATCH 090/140] [TD-4096] support show create stable xx --- src/query/src/sql.c | 1967 ++++++++++++++++++++++--------------------- 1 file changed, 988 insertions(+), 979 deletions(-) diff --git a/src/query/src/sql.c b/src/query/src/sql.c index f3929da022..9436942f71 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -127,17 +127,17 @@ typedef union { #define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo #define ParseARG_STORE yypParser->pInfo = pInfo #define YYFALLBACK 1 -#define YYNSTATE 315 -#define YYNRULE 269 +#define YYNSTATE 317 +#define YYNRULE 270 #define YYNTOKEN 187 -#define YY_MAX_SHIFT 314 -#define YY_MIN_SHIFTREDUCE 508 -#define YY_MAX_SHIFTREDUCE 776 -#define YY_ERROR_ACTION 777 -#define YY_ACCEPT_ACTION 778 -#define YY_NO_ACTION 779 -#define YY_MIN_REDUCE 780 -#define YY_MAX_REDUCE 1048 +#define YY_MAX_SHIFT 316 +#define YY_MIN_SHIFTREDUCE 511 +#define YY_MAX_SHIFTREDUCE 780 +#define YY_ERROR_ACTION 781 +#define YY_ACCEPT_ACTION 782 +#define YY_NO_ACTION 783 +#define YY_MIN_REDUCE 784 +#define YY_MAX_REDUCE 1053 /************* End control #defines *******************************************/ /* Define the yytestcase() macro to be a no-op if is not already defined @@ -203,148 +203,148 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (683) +#define YY_ACTTAB_COUNT (685) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 133, 555, 204, 312, 208, 140, 947, 226, 140, 556, - /* 10 */ 778, 314, 17, 47, 48, 140, 51, 52, 30, 181, - /* 20 */ 214, 41, 181, 50, 262, 55, 53, 57, 54, 1029, - /* 30 */ 926, 211, 1030, 46, 45, 179, 181, 44, 43, 42, - /* 40 */ 47, 48, 924, 51, 52, 210, 1030, 214, 41, 555, - /* 50 */ 50, 262, 55, 53, 57, 54, 938, 556, 185, 205, - /* 60 */ 46, 45, 923, 247, 44, 43, 42, 48, 944, 51, - /* 70 */ 52, 242, 978, 214, 41, 79, 50, 262, 55, 53, - /* 80 */ 57, 54, 979, 634, 257, 30, 46, 45, 278, 225, - /* 90 */ 44, 43, 42, 509, 510, 511, 512, 513, 514, 515, - /* 100 */ 516, 517, 518, 519, 520, 521, 313, 555, 85, 231, - /* 110 */ 70, 288, 287, 47, 48, 556, 51, 52, 298, 219, - /* 120 */ 214, 41, 555, 50, 262, 55, 53, 57, 54, 922, - /* 130 */ 556, 105, 720, 46, 45, 1026, 298, 44, 43, 42, - /* 140 */ 47, 49, 914, 51, 52, 926, 140, 214, 41, 234, - /* 150 */ 50, 262, 55, 53, 57, 54, 1025, 238, 237, 227, - /* 160 */ 46, 45, 285, 284, 44, 43, 42, 23, 276, 307, - /* 170 */ 306, 275, 274, 273, 305, 272, 304, 303, 302, 271, - /* 180 */ 301, 300, 886, 30, 874, 875, 876, 877, 878, 879, - /* 190 */ 880, 881, 882, 883, 884, 885, 887, 888, 51, 52, - /* 200 */ 825, 1024, 214, 41, 166, 50, 262, 55, 53, 57, - /* 210 */ 54, 259, 18, 78, 82, 46, 45, 61, 223, 44, - /* 220 */ 43, 42, 213, 735, 217, 25, 724, 923, 727, 190, - /* 230 */ 730, 221, 213, 735, 198, 191, 724, 912, 727, 62, - /* 240 */ 730, 118, 117, 189, 69, 909, 910, 29, 913, 44, - /* 250 */ 43, 42, 30, 74, 200, 201, 308, 926, 261, 30, - /* 260 */ 23, 36, 307, 306, 200, 201, 938, 305, 30, 304, - /* 270 */ 303, 302, 74, 301, 300, 894, 911, 199, 892, 893, - /* 280 */ 36, 206, 926, 895, 920, 897, 898, 896, 224, 899, - /* 290 */ 900, 280, 658, 218, 834, 655, 923, 656, 166, 657, - /* 300 */ 281, 673, 241, 923, 68, 55, 53, 57, 54, 282, - /* 310 */ 197, 263, 923, 46, 45, 30, 278, 44, 43, 42, - /* 320 */ 80, 103, 108, 228, 229, 56, 220, 97, 107, 113, - /* 330 */ 116, 106, 736, 71, 726, 56, 729, 110, 732, 30, - /* 340 */ 1, 154, 736, 5, 156, 725, 183, 728, 732, 33, - /* 350 */ 155, 92, 87, 91, 731, 680, 286, 184, 826, 923, - /* 360 */ 174, 170, 166, 245, 731, 212, 172, 169, 121, 120, - /* 370 */ 119, 46, 45, 3, 167, 44, 43, 42, 12, 677, - /* 380 */ 290, 722, 84, 923, 81, 670, 311, 310, 126, 701, - /* 390 */ 702, 243, 24, 686, 692, 31, 693, 135, 60, 756, - /* 400 */ 20, 659, 737, 19, 64, 186, 19, 739, 644, 6, - /* 410 */ 180, 265, 31, 187, 646, 31, 267, 723, 60, 645, - /* 420 */ 83, 188, 28, 60, 65, 268, 662, 67, 663, 633, - /* 430 */ 96, 95, 660, 194, 661, 115, 114, 14, 13, 102, - /* 440 */ 101, 195, 16, 15, 131, 129, 733, 193, 178, 192, - /* 450 */ 182, 1040, 925, 989, 988, 215, 985, 734, 239, 984, - /* 460 */ 216, 289, 132, 946, 39, 971, 954, 970, 956, 939, - /* 470 */ 246, 130, 248, 134, 138, 921, 150, 244, 151, 207, - /* 480 */ 250, 299, 685, 149, 919, 255, 142, 936, 143, 141, - /* 490 */ 144, 152, 256, 153, 260, 258, 66, 145, 837, 270, - /* 500 */ 63, 37, 58, 176, 34, 254, 279, 833, 1045, 252, - /* 510 */ 93, 1044, 1042, 249, 147, 157, 283, 1039, 99, 1038, - /* 520 */ 146, 1036, 158, 855, 35, 32, 38, 177, 822, 40, - /* 530 */ 109, 104, 820, 111, 112, 818, 817, 230, 168, 815, - /* 540 */ 814, 813, 812, 811, 810, 171, 173, 807, 805, 803, - /* 550 */ 291, 801, 799, 175, 292, 72, 75, 293, 251, 972, - /* 560 */ 294, 295, 296, 297, 309, 776, 202, 232, 222, 269, - /* 570 */ 233, 
775, 236, 235, 774, 761, 203, 762, 88, 196, - /* 580 */ 89, 240, 245, 264, 8, 73, 76, 665, 687, 690, - /* 590 */ 816, 161, 136, 122, 856, 159, 164, 160, 162, 163, - /* 600 */ 165, 123, 809, 2, 890, 124, 808, 4, 125, 800, - /* 610 */ 137, 209, 77, 148, 253, 26, 694, 139, 9, 902, - /* 620 */ 10, 27, 738, 7, 11, 740, 21, 22, 266, 86, - /* 630 */ 597, 593, 84, 591, 590, 589, 586, 559, 277, 90, - /* 640 */ 31, 94, 98, 59, 100, 636, 635, 632, 581, 579, - /* 650 */ 571, 577, 573, 575, 569, 567, 600, 599, 598, 596, - /* 660 */ 595, 594, 592, 588, 587, 60, 557, 525, 523, 780, - /* 670 */ 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, - /* 680 */ 779, 127, 128, + /* 0 */ 925, 559, 206, 314, 211, 141, 952, 3, 168, 560, + /* 10 */ 782, 316, 134, 47, 48, 141, 51, 52, 30, 183, + /* 20 */ 217, 41, 183, 50, 264, 55, 53, 57, 54, 1034, + /* 30 */ 931, 214, 1035, 46, 45, 17, 183, 44, 43, 42, + /* 40 */ 47, 48, 223, 51, 52, 213, 1035, 217, 41, 559, + /* 50 */ 50, 264, 55, 53, 57, 54, 943, 560, 181, 208, + /* 60 */ 46, 45, 928, 222, 44, 43, 42, 48, 949, 51, + /* 70 */ 52, 244, 983, 217, 41, 249, 50, 264, 55, 53, + /* 80 */ 57, 54, 984, 638, 259, 85, 46, 45, 280, 931, + /* 90 */ 44, 43, 42, 512, 513, 514, 515, 516, 517, 518, + /* 100 */ 519, 520, 521, 522, 523, 524, 315, 943, 187, 207, + /* 110 */ 70, 290, 289, 47, 48, 30, 51, 52, 300, 919, + /* 120 */ 217, 41, 209, 50, 264, 55, 53, 57, 54, 44, + /* 130 */ 43, 42, 724, 46, 45, 674, 224, 44, 43, 42, + /* 140 */ 47, 49, 24, 51, 52, 228, 141, 217, 41, 559, + /* 150 */ 50, 264, 55, 53, 57, 54, 220, 560, 105, 928, + /* 160 */ 46, 45, 931, 300, 44, 43, 42, 23, 278, 309, + /* 170 */ 308, 277, 276, 275, 307, 274, 306, 305, 304, 273, + /* 180 */ 303, 302, 891, 30, 879, 880, 881, 882, 883, 884, + /* 190 */ 885, 886, 887, 888, 889, 890, 892, 893, 51, 52, + /* 200 */ 830, 1031, 217, 41, 167, 50, 264, 55, 53, 57, + /* 210 */ 54, 261, 18, 78, 230, 46, 45, 287, 286, 44, + /* 220 */ 43, 42, 216, 739, 221, 30, 728, 928, 731, 192, + /* 230 */ 734, 216, 739, 310, 1030, 728, 193, 731, 236, 734, + /* 240 */ 30, 118, 117, 191, 677, 559, 240, 239, 55, 53, + /* 250 */ 57, 54, 25, 560, 202, 203, 46, 45, 263, 931, + /* 260 */ 44, 43, 42, 202, 203, 74, 283, 61, 23, 928, + /* 270 */ 309, 308, 74, 36, 730, 307, 733, 306, 305, 304, + /* 280 */ 36, 303, 302, 899, 927, 662, 897, 898, 659, 62, + /* 290 */ 660, 900, 661, 902, 903, 901, 82, 904, 905, 103, + /* 300 */ 97, 108, 243, 917, 68, 30, 107, 113, 116, 106, + /* 310 */ 199, 5, 33, 157, 141, 110, 231, 232, 156, 92, + /* 320 */ 87, 91, 681, 226, 30, 56, 30, 914, 915, 29, + /* 330 */ 918, 729, 740, 732, 56, 175, 173, 171, 736, 1, + /* 340 */ 155, 740, 170, 121, 120, 119, 284, 736, 229, 928, + /* 350 */ 265, 46, 45, 69, 735, 44, 43, 42, 839, 666, + /* 360 */ 12, 667, 167, 735, 84, 288, 81, 292, 928, 215, + /* 370 */ 928, 313, 312, 126, 132, 130, 129, 80, 705, 706, + /* 380 */ 831, 79, 280, 929, 167, 916, 737, 245, 726, 684, + /* 390 */ 71, 31, 227, 994, 663, 282, 690, 247, 696, 697, + /* 400 */ 136, 760, 60, 20, 741, 19, 64, 648, 19, 241, + /* 410 */ 267, 31, 650, 6, 31, 269, 60, 1029, 649, 83, + /* 420 */ 28, 200, 60, 270, 727, 201, 65, 96, 95, 185, + /* 430 */ 14, 13, 993, 102, 101, 67, 218, 637, 16, 15, + /* 440 */ 664, 186, 665, 738, 115, 114, 743, 188, 182, 189, + /* 450 */ 190, 196, 197, 195, 180, 194, 184, 133, 1045, 990, + /* 460 */ 930, 989, 219, 291, 39, 951, 959, 944, 961, 135, + /* 470 */ 139, 976, 248, 975, 926, 131, 152, 151, 924, 153, + /* 480 */ 250, 154, 689, 210, 252, 150, 257, 145, 142, 842, + /* 490 */ 941, 143, 272, 
144, 262, 37, 146, 66, 58, 178, + /* 500 */ 63, 260, 34, 258, 256, 281, 838, 147, 1050, 254, + /* 510 */ 93, 1049, 1047, 158, 285, 1044, 99, 148, 1043, 1041, + /* 520 */ 159, 860, 251, 35, 32, 38, 149, 179, 827, 109, + /* 530 */ 825, 111, 112, 823, 822, 233, 169, 820, 819, 818, + /* 540 */ 817, 816, 815, 172, 174, 40, 812, 810, 808, 806, + /* 550 */ 176, 803, 177, 301, 246, 72, 75, 104, 253, 977, + /* 560 */ 293, 294, 295, 296, 297, 204, 225, 298, 271, 299, + /* 570 */ 311, 780, 205, 198, 234, 88, 89, 235, 779, 237, + /* 580 */ 238, 778, 766, 765, 242, 247, 821, 814, 162, 266, + /* 590 */ 122, 861, 160, 165, 161, 164, 163, 166, 123, 124, + /* 600 */ 813, 805, 895, 125, 804, 2, 8, 73, 4, 669, + /* 610 */ 76, 691, 137, 212, 694, 86, 138, 77, 907, 255, + /* 620 */ 9, 698, 140, 26, 742, 7, 27, 11, 10, 21, + /* 630 */ 84, 744, 22, 268, 601, 597, 595, 594, 593, 590, + /* 640 */ 563, 279, 94, 90, 31, 59, 640, 639, 636, 585, + /* 650 */ 583, 98, 575, 581, 577, 579, 573, 571, 604, 603, + /* 660 */ 602, 600, 599, 100, 598, 596, 592, 591, 60, 561, + /* 670 */ 528, 784, 526, 783, 783, 783, 783, 783, 783, 127, + /* 680 */ 783, 783, 783, 783, 128, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 191, 1, 190, 191, 210, 191, 191, 191, 191, 9, - /* 10 */ 188, 189, 252, 13, 14, 191, 16, 17, 191, 252, + /* 0 */ 191, 1, 190, 191, 210, 191, 191, 194, 195, 9, + /* 10 */ 188, 189, 191, 13, 14, 191, 16, 17, 191, 252, /* 20 */ 20, 21, 252, 23, 24, 25, 26, 27, 28, 262, /* 30 */ 236, 261, 262, 33, 34, 252, 252, 37, 38, 39, - /* 40 */ 13, 14, 226, 16, 17, 261, 262, 20, 21, 1, + /* 40 */ 13, 14, 233, 16, 17, 261, 262, 20, 21, 1, /* 50 */ 23, 24, 25, 26, 27, 28, 234, 9, 252, 232, - /* 60 */ 33, 34, 235, 254, 37, 38, 39, 14, 253, 16, - /* 70 */ 17, 249, 258, 20, 21, 258, 23, 24, 25, 26, - /* 80 */ 27, 28, 258, 5, 260, 191, 33, 34, 79, 67, + /* 60 */ 33, 34, 235, 210, 37, 38, 39, 14, 253, 16, + /* 70 */ 17, 249, 258, 20, 21, 254, 23, 24, 25, 26, + /* 80 */ 27, 28, 258, 5, 260, 197, 33, 34, 79, 236, /* 90 */ 37, 38, 39, 45, 46, 47, 48, 49, 50, 51, - /* 100 */ 52, 53, 54, 55, 56, 57, 58, 1, 197, 61, - /* 110 */ 110, 33, 34, 13, 14, 9, 16, 17, 81, 210, - /* 120 */ 20, 21, 1, 23, 24, 25, 26, 27, 28, 235, - /* 130 */ 9, 76, 105, 33, 34, 252, 81, 37, 38, 39, - /* 140 */ 13, 14, 231, 16, 17, 236, 191, 20, 21, 135, - /* 150 */ 23, 24, 25, 26, 27, 28, 252, 143, 144, 137, - /* 160 */ 33, 34, 140, 141, 37, 38, 39, 88, 89, 90, + /* 100 */ 52, 53, 54, 55, 56, 57, 58, 234, 252, 61, + /* 110 */ 110, 33, 34, 13, 14, 191, 16, 17, 81, 231, + /* 120 */ 20, 21, 249, 23, 24, 25, 26, 27, 28, 37, + /* 130 */ 38, 39, 105, 33, 34, 109, 210, 37, 38, 39, + /* 140 */ 13, 14, 116, 16, 17, 68, 191, 20, 21, 1, + /* 150 */ 23, 24, 25, 26, 27, 28, 232, 9, 76, 235, + /* 160 */ 33, 34, 236, 81, 37, 38, 39, 88, 89, 90, /* 170 */ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, /* 180 */ 101, 102, 209, 191, 211, 212, 213, 214, 215, 216, /* 190 */ 217, 218, 219, 220, 221, 222, 223, 224, 16, 17, /* 200 */ 196, 252, 20, 21, 200, 23, 24, 25, 26, 27, - /* 210 */ 28, 256, 44, 258, 197, 33, 34, 109, 67, 37, - /* 220 */ 38, 39, 1, 2, 232, 104, 5, 235, 7, 61, - /* 230 */ 9, 210, 1, 2, 252, 67, 5, 0, 7, 131, - /* 240 */ 9, 73, 74, 75, 197, 228, 229, 230, 231, 37, - /* 250 */ 38, 39, 191, 104, 33, 34, 210, 236, 37, 191, - /* 260 */ 88, 112, 90, 91, 33, 34, 234, 95, 191, 97, - /* 270 */ 98, 99, 104, 101, 102, 209, 229, 252, 212, 213, - /* 280 */ 112, 249, 236, 217, 191, 219, 220, 221, 137, 223, - /* 290 */ 224, 140, 2, 232, 196, 5, 235, 7, 200, 9, - /* 300 */ 232, 37, 
134, 235, 136, 25, 26, 27, 28, 232, - /* 310 */ 142, 15, 235, 33, 34, 191, 79, 37, 38, 39, - /* 320 */ 237, 62, 63, 33, 34, 104, 233, 68, 69, 70, - /* 330 */ 71, 72, 111, 250, 5, 104, 7, 78, 117, 191, - /* 340 */ 198, 199, 111, 62, 63, 5, 252, 7, 117, 68, - /* 350 */ 69, 70, 71, 72, 133, 105, 232, 252, 196, 235, - /* 360 */ 62, 63, 200, 113, 133, 60, 68, 69, 70, 71, - /* 370 */ 72, 33, 34, 194, 195, 37, 38, 39, 104, 115, - /* 380 */ 232, 1, 108, 235, 110, 109, 64, 65, 66, 124, - /* 390 */ 125, 105, 116, 105, 105, 109, 105, 109, 109, 105, - /* 400 */ 109, 111, 105, 109, 109, 252, 109, 111, 105, 104, - /* 410 */ 252, 105, 109, 252, 105, 109, 105, 37, 109, 105, - /* 420 */ 109, 252, 104, 109, 129, 107, 5, 104, 7, 106, - /* 430 */ 138, 139, 5, 252, 7, 76, 77, 138, 139, 138, - /* 440 */ 139, 252, 138, 139, 62, 63, 117, 252, 252, 252, - /* 450 */ 252, 236, 236, 227, 227, 227, 227, 117, 191, 227, - /* 460 */ 227, 227, 191, 191, 251, 259, 191, 259, 191, 234, - /* 470 */ 234, 60, 255, 191, 191, 234, 238, 192, 191, 255, - /* 480 */ 255, 103, 117, 239, 191, 255, 246, 248, 245, 247, - /* 490 */ 244, 191, 121, 191, 122, 126, 128, 243, 191, 191, - /* 500 */ 130, 191, 127, 191, 191, 120, 191, 191, 191, 119, - /* 510 */ 191, 191, 191, 118, 241, 191, 191, 191, 191, 191, - /* 520 */ 242, 191, 191, 191, 191, 191, 191, 191, 191, 132, - /* 530 */ 191, 87, 191, 191, 191, 191, 191, 191, 191, 191, - /* 540 */ 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, - /* 550 */ 86, 191, 191, 191, 50, 192, 192, 83, 192, 192, - /* 560 */ 85, 54, 84, 82, 79, 5, 192, 145, 192, 192, - /* 570 */ 5, 5, 5, 145, 5, 89, 192, 90, 197, 192, - /* 580 */ 197, 135, 113, 107, 104, 114, 109, 105, 105, 105, - /* 590 */ 192, 202, 104, 193, 208, 207, 204, 206, 205, 203, - /* 600 */ 201, 193, 192, 198, 225, 193, 192, 194, 193, 192, - /* 610 */ 109, 1, 104, 240, 104, 109, 105, 104, 123, 225, - /* 620 */ 123, 109, 105, 104, 104, 111, 104, 104, 107, 76, - /* 630 */ 9, 5, 108, 5, 5, 5, 5, 80, 15, 76, - /* 640 */ 109, 139, 139, 16, 139, 5, 5, 105, 5, 5, - /* 650 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 660 */ 5, 5, 5, 5, 5, 109, 80, 60, 59, 0, - /* 670 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, - /* 680 */ 263, 21, 21, 263, 263, 263, 263, 263, 263, 263, + /* 210 */ 28, 256, 44, 258, 137, 33, 34, 140, 141, 37, + /* 220 */ 38, 39, 1, 2, 232, 191, 5, 235, 7, 61, + /* 230 */ 9, 1, 2, 210, 252, 5, 68, 7, 135, 9, + /* 240 */ 191, 73, 74, 75, 37, 1, 143, 144, 25, 26, + /* 250 */ 27, 28, 104, 9, 33, 34, 33, 34, 37, 236, + /* 260 */ 37, 38, 39, 33, 34, 104, 232, 109, 88, 235, + /* 270 */ 90, 91, 104, 112, 5, 95, 7, 97, 98, 99, + /* 280 */ 112, 101, 102, 209, 235, 2, 212, 213, 5, 131, + /* 290 */ 7, 217, 9, 219, 220, 221, 197, 223, 224, 62, + /* 300 */ 63, 64, 134, 0, 136, 191, 69, 70, 71, 72, + /* 310 */ 142, 62, 63, 64, 191, 78, 33, 34, 69, 70, + /* 320 */ 71, 72, 115, 68, 191, 104, 191, 228, 229, 230, + /* 330 */ 231, 5, 111, 7, 104, 62, 63, 64, 117, 198, + /* 340 */ 199, 111, 69, 70, 71, 72, 232, 117, 191, 235, + /* 350 */ 15, 33, 34, 197, 133, 37, 38, 39, 196, 5, + /* 360 */ 104, 7, 200, 133, 108, 232, 110, 232, 235, 60, + /* 370 */ 235, 65, 66, 67, 62, 63, 64, 237, 124, 125, + /* 380 */ 196, 258, 79, 226, 200, 229, 117, 105, 1, 105, + /* 390 */ 250, 109, 137, 227, 111, 140, 105, 113, 105, 105, + /* 400 */ 109, 105, 109, 109, 105, 109, 109, 105, 109, 191, + /* 410 */ 105, 109, 105, 104, 109, 105, 109, 252, 105, 109, + /* 420 */ 104, 252, 109, 107, 37, 252, 129, 138, 139, 252, + /* 430 */ 138, 139, 227, 138, 139, 104, 227, 106, 138, 139, + /* 
440 */ 5, 252, 7, 117, 76, 77, 111, 252, 252, 252, + /* 450 */ 252, 252, 252, 252, 252, 252, 252, 191, 236, 227, + /* 460 */ 236, 227, 227, 227, 251, 191, 191, 234, 191, 191, + /* 470 */ 191, 259, 234, 259, 234, 60, 191, 238, 191, 191, + /* 480 */ 255, 191, 117, 255, 255, 239, 255, 244, 247, 191, + /* 490 */ 248, 246, 191, 245, 122, 191, 243, 128, 127, 191, + /* 500 */ 130, 126, 191, 121, 120, 191, 191, 242, 191, 119, + /* 510 */ 191, 191, 191, 191, 191, 191, 191, 241, 191, 191, + /* 520 */ 191, 191, 118, 191, 191, 191, 240, 191, 191, 191, + /* 530 */ 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, + /* 540 */ 191, 191, 191, 191, 191, 132, 191, 191, 191, 191, + /* 550 */ 191, 191, 191, 103, 192, 192, 192, 87, 192, 192, + /* 560 */ 86, 50, 83, 85, 54, 192, 192, 84, 192, 82, + /* 570 */ 79, 5, 192, 192, 145, 197, 197, 5, 5, 145, + /* 580 */ 5, 5, 90, 89, 135, 113, 192, 192, 202, 107, + /* 590 */ 193, 208, 207, 204, 206, 203, 205, 201, 193, 193, + /* 600 */ 192, 192, 225, 193, 192, 198, 104, 114, 194, 105, + /* 610 */ 109, 105, 104, 1, 105, 76, 109, 104, 225, 104, + /* 620 */ 123, 105, 104, 109, 105, 104, 109, 104, 123, 104, + /* 630 */ 108, 111, 104, 107, 9, 5, 5, 5, 5, 5, + /* 640 */ 80, 15, 139, 76, 109, 16, 5, 5, 105, 5, + /* 650 */ 5, 139, 5, 5, 5, 5, 5, 5, 5, 5, + /* 660 */ 5, 5, 5, 139, 5, 5, 5, 5, 109, 80, + /* 670 */ 60, 0, 59, 263, 263, 263, 263, 263, 263, 21, + /* 680 */ 263, 263, 263, 263, 21, 263, 263, 263, 263, 263, /* 690 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, /* 700 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, /* 710 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, @@ -363,100 +363,101 @@ static const YYCODETYPE yy_lookahead[] = { /* 840 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, /* 850 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, /* 860 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 870 */ 263, 263, }; -#define YY_SHIFT_COUNT (314) +#define YY_SHIFT_COUNT (316) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (669) +#define YY_SHIFT_MAX (671) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 168, 79, 79, 172, 172, 9, 221, 231, 106, 106, - /* 10 */ 106, 106, 106, 106, 106, 106, 106, 0, 48, 231, - /* 20 */ 290, 290, 290, 290, 121, 149, 106, 106, 106, 237, - /* 30 */ 106, 106, 55, 9, 37, 37, 683, 683, 683, 231, - /* 40 */ 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, - /* 50 */ 231, 231, 231, 231, 231, 231, 231, 231, 231, 290, - /* 60 */ 290, 78, 78, 78, 78, 78, 78, 78, 106, 106, - /* 70 */ 106, 264, 106, 149, 149, 106, 106, 106, 265, 265, - /* 80 */ 276, 149, 106, 106, 106, 106, 106, 106, 106, 106, - /* 90 */ 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, - /* 100 */ 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, - /* 110 */ 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, - /* 120 */ 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, - /* 130 */ 106, 106, 411, 411, 411, 365, 365, 365, 411, 365, - /* 140 */ 411, 368, 370, 375, 372, 369, 371, 385, 390, 395, - /* 150 */ 397, 411, 411, 411, 378, 9, 9, 411, 411, 444, - /* 160 */ 464, 504, 474, 475, 507, 478, 481, 378, 411, 485, - /* 170 */ 485, 411, 485, 411, 485, 411, 683, 683, 27, 100, - /* 180 */ 127, 100, 100, 53, 182, 280, 280, 280, 280, 259, - /* 190 */ 281, 298, 338, 338, 338, 338, 22, 14, 212, 212, - /* 200 */ 329, 340, 274, 151, 322, 286, 250, 288, 289, 291, - /* 210 */ 294, 297, 380, 305, 296, 108, 295, 303, 306, 309, - /* 220 */ 311, 314, 318, 292, 299, 301, 323, 304, 421, 427, - /* 230 */ 359, 382, 560, 422, 565, 566, 428, 567, 569, 487, - /* 
240 */ 486, 446, 469, 476, 480, 471, 482, 477, 483, 488, - /* 250 */ 484, 501, 508, 610, 510, 511, 513, 506, 495, 512, - /* 260 */ 497, 517, 519, 514, 520, 476, 522, 521, 523, 524, - /* 270 */ 553, 621, 626, 628, 629, 630, 631, 557, 623, 563, - /* 280 */ 502, 531, 531, 627, 503, 505, 531, 640, 641, 542, - /* 290 */ 531, 643, 644, 645, 646, 647, 648, 649, 650, 651, - /* 300 */ 652, 653, 654, 655, 656, 657, 658, 659, 556, 586, - /* 310 */ 660, 661, 607, 609, 669, + /* 0 */ 168, 79, 79, 180, 180, 9, 221, 230, 244, 244, + /* 10 */ 244, 244, 244, 244, 244, 244, 244, 0, 48, 230, + /* 20 */ 283, 283, 283, 283, 148, 161, 244, 244, 244, 303, + /* 30 */ 244, 244, 82, 9, 37, 37, 685, 685, 685, 230, + /* 40 */ 230, 230, 230, 230, 230, 230, 230, 230, 230, 230, + /* 50 */ 230, 230, 230, 230, 230, 230, 230, 230, 230, 283, + /* 60 */ 283, 78, 78, 78, 78, 78, 78, 78, 244, 244, + /* 70 */ 244, 207, 244, 161, 161, 244, 244, 244, 254, 254, + /* 80 */ 26, 161, 244, 244, 244, 244, 244, 244, 244, 244, + /* 90 */ 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, + /* 100 */ 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, + /* 110 */ 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, + /* 120 */ 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, + /* 130 */ 244, 244, 244, 415, 415, 415, 365, 365, 365, 415, + /* 140 */ 365, 415, 369, 370, 371, 372, 375, 382, 384, 390, + /* 150 */ 404, 413, 415, 415, 415, 450, 9, 9, 415, 415, + /* 160 */ 470, 474, 511, 479, 478, 510, 483, 487, 450, 415, + /* 170 */ 491, 491, 415, 491, 415, 491, 415, 415, 685, 685, + /* 180 */ 27, 100, 127, 100, 100, 53, 182, 223, 223, 223, + /* 190 */ 223, 237, 249, 273, 318, 318, 318, 318, 77, 103, + /* 200 */ 92, 92, 269, 326, 256, 255, 306, 312, 282, 284, + /* 210 */ 291, 293, 294, 296, 299, 387, 309, 335, 158, 297, + /* 220 */ 302, 305, 307, 310, 313, 316, 289, 292, 295, 331, + /* 230 */ 300, 354, 435, 368, 566, 429, 572, 573, 434, 575, + /* 240 */ 576, 492, 494, 449, 472, 482, 502, 493, 504, 501, + /* 250 */ 506, 508, 509, 507, 513, 612, 515, 516, 518, 514, + /* 260 */ 497, 517, 505, 519, 521, 520, 523, 482, 525, 526, + /* 270 */ 528, 522, 539, 625, 630, 631, 632, 633, 634, 560, + /* 280 */ 626, 567, 503, 535, 535, 629, 512, 524, 535, 641, + /* 290 */ 642, 543, 535, 644, 645, 647, 648, 649, 650, 651, + /* 300 */ 652, 653, 654, 655, 656, 657, 659, 660, 661, 662, + /* 310 */ 559, 589, 658, 663, 610, 613, 671, }; -#define YY_REDUCE_COUNT (177) -#define YY_REDUCE_MIN (-240) -#define YY_REDUCE_MAX (417) +#define YY_REDUCE_COUNT (179) +#define YY_REDUCE_MIN (-233) +#define YY_REDUCE_MAX (414) static const short yy_reduce_ofst[] = { - /* 0 */ -178, -27, -27, 66, 66, 17, -230, -216, -173, -176, - /* 10 */ -45, -8, 61, 68, 77, 124, 148, -185, -188, -233, - /* 20 */ -206, -91, 21, 46, -191, 32, -186, -183, 93, -89, - /* 30 */ -184, -106, 4, 47, 98, 162, 83, 142, 179, -240, - /* 40 */ -217, -194, -117, -96, -51, -18, 25, 94, 105, 153, - /* 50 */ 158, 161, 169, 181, 189, 195, 196, 197, 198, 215, - /* 60 */ 216, 226, 227, 228, 229, 232, 233, 234, 267, 271, - /* 70 */ 272, 213, 275, 235, 236, 277, 282, 283, 206, 208, - /* 80 */ 238, 241, 287, 293, 300, 302, 307, 308, 310, 312, - /* 90 */ 313, 315, 316, 317, 319, 320, 321, 324, 325, 326, - /* 100 */ 327, 328, 330, 331, 332, 333, 334, 335, 336, 337, - /* 110 */ 339, 341, 342, 343, 344, 345, 346, 347, 348, 349, - /* 120 */ 350, 351, 352, 353, 354, 355, 356, 357, 358, 360, - /* 130 */ 361, 362, 285, 363, 364, 217, 224, 225, 366, 230, - /* 140 */ 367, 239, 242, 240, 243, 246, 254, 278, 273, 373, - /* 150 */ 244, 
374, 376, 377, 379, 381, 383, 384, 387, 386, - /* 160 */ 388, 391, 389, 393, 396, 392, 399, 394, 398, 400, - /* 170 */ 408, 410, 412, 414, 415, 417, 405, 413, + /* 0 */ -178, -27, -27, 74, 74, 99, -230, -216, -173, -176, + /* 10 */ -45, -76, -8, 34, 114, 133, 135, -185, -188, -233, + /* 20 */ -206, -147, -74, 23, -179, -127, -186, 123, -191, -112, + /* 30 */ 157, 49, 4, 156, 162, 184, 140, 141, -187, -217, + /* 40 */ -194, -144, -51, -18, 165, 169, 173, 177, 189, 195, + /* 50 */ 196, 197, 198, 199, 200, 201, 202, 203, 204, 222, + /* 60 */ 224, 166, 205, 209, 232, 234, 235, 236, 218, 266, + /* 70 */ 274, 213, 275, 233, 238, 277, 278, 279, 212, 214, + /* 80 */ 239, 240, 285, 287, 288, 290, 298, 301, 304, 308, + /* 90 */ 311, 314, 315, 317, 319, 320, 321, 322, 323, 324, + /* 100 */ 325, 327, 328, 329, 330, 332, 333, 334, 336, 337, + /* 110 */ 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, + /* 120 */ 348, 349, 350, 351, 352, 353, 355, 356, 357, 358, + /* 130 */ 359, 360, 361, 362, 363, 364, 225, 228, 229, 366, + /* 140 */ 231, 367, 242, 241, 245, 248, 243, 253, 265, 276, + /* 150 */ 286, 246, 373, 374, 376, 377, 378, 379, 380, 381, + /* 160 */ 383, 385, 388, 386, 391, 392, 389, 396, 393, 394, + /* 170 */ 397, 405, 395, 406, 408, 410, 409, 412, 407, 414, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 777, 889, 835, 901, 823, 832, 1032, 1032, 777, 777, - /* 10 */ 777, 777, 777, 777, 777, 777, 777, 948, 796, 1032, - /* 20 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 832, - /* 30 */ 777, 777, 838, 832, 838, 838, 943, 873, 891, 777, - /* 40 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, - /* 50 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, - /* 60 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, - /* 70 */ 777, 950, 953, 777, 777, 955, 777, 777, 975, 975, - /* 80 */ 941, 777, 777, 777, 777, 777, 777, 777, 777, 777, - /* 90 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, - /* 100 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 821, - /* 110 */ 777, 819, 777, 777, 777, 777, 777, 777, 777, 777, - /* 120 */ 777, 777, 777, 777, 777, 777, 806, 777, 777, 777, - /* 130 */ 777, 777, 798, 798, 798, 777, 777, 777, 798, 777, - /* 140 */ 798, 982, 986, 980, 968, 976, 967, 963, 961, 960, - /* 150 */ 990, 798, 798, 798, 836, 832, 832, 798, 798, 854, - /* 160 */ 852, 850, 842, 848, 844, 846, 840, 824, 798, 830, - /* 170 */ 830, 798, 830, 798, 830, 798, 873, 891, 777, 991, - /* 180 */ 777, 1031, 981, 1021, 1020, 1027, 1019, 1018, 1017, 777, - /* 190 */ 777, 777, 1013, 1014, 1016, 1015, 777, 777, 1023, 1022, - /* 200 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, - /* 210 */ 777, 777, 777, 993, 777, 987, 983, 777, 777, 777, - /* 220 */ 777, 777, 777, 777, 777, 777, 903, 777, 777, 777, - /* 230 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, - /* 240 */ 777, 777, 940, 777, 777, 777, 777, 951, 777, 777, - /* 250 */ 777, 777, 777, 777, 777, 777, 777, 977, 777, 969, - /* 260 */ 777, 777, 777, 777, 777, 915, 777, 777, 777, 777, - /* 270 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, - /* 280 */ 777, 1043, 1041, 777, 777, 777, 1037, 777, 777, 777, - /* 290 */ 1035, 777, 777, 777, 777, 777, 777, 777, 777, 777, - /* 300 */ 777, 777, 777, 777, 777, 777, 777, 777, 857, 777, - /* 310 */ 804, 802, 777, 794, 777, + /* 0 */ 781, 894, 840, 906, 828, 837, 1037, 1037, 781, 781, + /* 10 */ 781, 781, 781, 781, 781, 781, 781, 953, 800, 1037, + /* 20 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 837, + /* 30 */ 781, 781, 843, 837, 843, 843, 948, 878, 896, 781, + /* 40 */ 781, 781, 
781, 781, 781, 781, 781, 781, 781, 781, + /* 50 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, + /* 60 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, + /* 70 */ 781, 955, 958, 781, 781, 960, 781, 781, 980, 980, + /* 80 */ 946, 781, 781, 781, 781, 781, 781, 781, 781, 781, + /* 90 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, + /* 100 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 826, + /* 110 */ 781, 824, 781, 781, 781, 781, 781, 781, 781, 781, + /* 120 */ 781, 781, 781, 781, 781, 781, 811, 781, 781, 781, + /* 130 */ 781, 781, 781, 802, 802, 802, 781, 781, 781, 802, + /* 140 */ 781, 802, 987, 991, 985, 973, 981, 972, 968, 966, + /* 150 */ 965, 995, 802, 802, 802, 841, 837, 837, 802, 802, + /* 160 */ 859, 857, 855, 847, 853, 849, 851, 845, 829, 802, + /* 170 */ 835, 835, 802, 835, 802, 835, 802, 802, 878, 896, + /* 180 */ 781, 996, 781, 1036, 986, 1026, 1025, 1032, 1024, 1023, + /* 190 */ 1022, 781, 781, 781, 1018, 1019, 1021, 1020, 781, 781, + /* 200 */ 1028, 1027, 781, 781, 781, 781, 781, 781, 781, 781, + /* 210 */ 781, 781, 781, 781, 781, 781, 998, 781, 992, 988, + /* 220 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 908, + /* 230 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, + /* 240 */ 781, 781, 781, 781, 945, 781, 781, 781, 781, 956, + /* 250 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 982, + /* 260 */ 781, 974, 781, 781, 781, 781, 781, 920, 781, 781, + /* 270 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, + /* 280 */ 781, 781, 781, 1048, 1046, 781, 781, 781, 1042, 781, + /* 290 */ 781, 781, 1040, 781, 781, 781, 781, 781, 781, 781, + /* 300 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, + /* 310 */ 862, 781, 809, 807, 781, 798, 781, }; /********** End of lemon-generated parsing tables *****************************/ @@ -539,12 +540,12 @@ static const YYCODETYPE yyFallback[] = { 0, /* DOT => nothing */ 0, /* CREATE => nothing */ 0, /* TABLE => nothing */ + 1, /* STABLE => ID */ 1, /* DATABASE => ID */ 0, /* TABLES => nothing */ 0, /* STABLES => nothing */ 0, /* VGROUPS => nothing */ 0, /* DROP => nothing */ - 1, /* STABLE => ID */ 0, /* TOPIC => nothing */ 0, /* DNODE => nothing */ 0, /* USER => nothing */ @@ -812,12 +813,12 @@ static const char *const yyTokenName[] = { /* 60 */ "DOT", /* 61 */ "CREATE", /* 62 */ "TABLE", - /* 63 */ "DATABASE", - /* 64 */ "TABLES", - /* 65 */ "STABLES", - /* 66 */ "VGROUPS", - /* 67 */ "DROP", - /* 68 */ "STABLE", + /* 63 */ "STABLE", + /* 64 */ "DATABASE", + /* 65 */ "TABLES", + /* 66 */ "STABLES", + /* 67 */ "VGROUPS", + /* 68 */ "DROP", /* 69 */ "TOPIC", /* 70 */ "DNODE", /* 71 */ "USER", @@ -1040,254 +1041,255 @@ static const char *const yyRuleName[] = { /* 18 */ "cpxName ::=", /* 19 */ "cpxName ::= DOT ids", /* 20 */ "cmd ::= SHOW CREATE TABLE ids cpxName", - /* 21 */ "cmd ::= SHOW CREATE DATABASE ids", - /* 22 */ "cmd ::= SHOW dbPrefix TABLES", - /* 23 */ "cmd ::= SHOW dbPrefix TABLES LIKE ids", - /* 24 */ "cmd ::= SHOW dbPrefix STABLES", - /* 25 */ "cmd ::= SHOW dbPrefix STABLES LIKE ids", - /* 26 */ "cmd ::= SHOW dbPrefix VGROUPS", - /* 27 */ "cmd ::= SHOW dbPrefix VGROUPS ids", - /* 28 */ "cmd ::= DROP TABLE ifexists ids cpxName", - /* 29 */ "cmd ::= DROP STABLE ifexists ids cpxName", - /* 30 */ "cmd ::= DROP DATABASE ifexists ids", - /* 31 */ "cmd ::= DROP TOPIC ifexists ids", - /* 32 */ "cmd ::= DROP DNODE ids", - /* 33 */ "cmd ::= DROP USER ids", - /* 34 */ "cmd ::= DROP ACCOUNT ids", - /* 35 */ "cmd ::= USE ids", - /* 36 */ "cmd ::= DESCRIBE ids cpxName", - /* 37 */ "cmd ::= ALTER USER ids PASS 
ids", - /* 38 */ "cmd ::= ALTER USER ids PRIVILEGE ids", - /* 39 */ "cmd ::= ALTER DNODE ids ids", - /* 40 */ "cmd ::= ALTER DNODE ids ids ids", - /* 41 */ "cmd ::= ALTER LOCAL ids", - /* 42 */ "cmd ::= ALTER LOCAL ids ids", - /* 43 */ "cmd ::= ALTER DATABASE ids alter_db_optr", - /* 44 */ "cmd ::= ALTER TOPIC ids alter_topic_optr", - /* 45 */ "cmd ::= ALTER ACCOUNT ids acct_optr", - /* 46 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr", - /* 47 */ "ids ::= ID", - /* 48 */ "ids ::= STRING", - /* 49 */ "ifexists ::= IF EXISTS", - /* 50 */ "ifexists ::=", - /* 51 */ "ifnotexists ::= IF NOT EXISTS", - /* 52 */ "ifnotexists ::=", - /* 53 */ "cmd ::= CREATE DNODE ids", - /* 54 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr", - /* 55 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr", - /* 56 */ "cmd ::= CREATE TOPIC ifnotexists ids topic_optr", - /* 57 */ "cmd ::= CREATE USER ids PASS ids", - /* 58 */ "pps ::=", - /* 59 */ "pps ::= PPS INTEGER", - /* 60 */ "tseries ::=", - /* 61 */ "tseries ::= TSERIES INTEGER", - /* 62 */ "dbs ::=", - /* 63 */ "dbs ::= DBS INTEGER", - /* 64 */ "streams ::=", - /* 65 */ "streams ::= STREAMS INTEGER", - /* 66 */ "storage ::=", - /* 67 */ "storage ::= STORAGE INTEGER", - /* 68 */ "qtime ::=", - /* 69 */ "qtime ::= QTIME INTEGER", - /* 70 */ "users ::=", - /* 71 */ "users ::= USERS INTEGER", - /* 72 */ "conns ::=", - /* 73 */ "conns ::= CONNS INTEGER", - /* 74 */ "state ::=", - /* 75 */ "state ::= STATE ids", - /* 76 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state", - /* 77 */ "keep ::= KEEP tagitemlist", - /* 78 */ "cache ::= CACHE INTEGER", - /* 79 */ "replica ::= REPLICA INTEGER", - /* 80 */ "quorum ::= QUORUM INTEGER", - /* 81 */ "days ::= DAYS INTEGER", - /* 82 */ "minrows ::= MINROWS INTEGER", - /* 83 */ "maxrows ::= MAXROWS INTEGER", - /* 84 */ "blocks ::= BLOCKS INTEGER", - /* 85 */ "ctime ::= CTIME INTEGER", - /* 86 */ "wal ::= WAL INTEGER", - /* 87 */ "fsync ::= FSYNC INTEGER", - /* 88 */ "comp ::= COMP INTEGER", - /* 89 */ "prec ::= PRECISION STRING", - /* 90 */ "update ::= UPDATE INTEGER", - /* 91 */ "cachelast ::= CACHELAST INTEGER", - /* 92 */ "partitions ::= PARTITIONS INTEGER", - /* 93 */ "db_optr ::=", - /* 94 */ "db_optr ::= db_optr cache", - /* 95 */ "db_optr ::= db_optr replica", - /* 96 */ "db_optr ::= db_optr quorum", - /* 97 */ "db_optr ::= db_optr days", - /* 98 */ "db_optr ::= db_optr minrows", - /* 99 */ "db_optr ::= db_optr maxrows", - /* 100 */ "db_optr ::= db_optr blocks", - /* 101 */ "db_optr ::= db_optr ctime", - /* 102 */ "db_optr ::= db_optr wal", - /* 103 */ "db_optr ::= db_optr fsync", - /* 104 */ "db_optr ::= db_optr comp", - /* 105 */ "db_optr ::= db_optr prec", - /* 106 */ "db_optr ::= db_optr keep", - /* 107 */ "db_optr ::= db_optr update", - /* 108 */ "db_optr ::= db_optr cachelast", - /* 109 */ "topic_optr ::= db_optr", - /* 110 */ "topic_optr ::= topic_optr partitions", - /* 111 */ "alter_db_optr ::=", - /* 112 */ "alter_db_optr ::= alter_db_optr replica", - /* 113 */ "alter_db_optr ::= alter_db_optr quorum", - /* 114 */ "alter_db_optr ::= alter_db_optr keep", - /* 115 */ "alter_db_optr ::= alter_db_optr blocks", - /* 116 */ "alter_db_optr ::= alter_db_optr comp", - /* 117 */ "alter_db_optr ::= alter_db_optr wal", - /* 118 */ "alter_db_optr ::= alter_db_optr fsync", - /* 119 */ "alter_db_optr ::= alter_db_optr update", - /* 120 */ "alter_db_optr ::= alter_db_optr cachelast", - /* 121 */ "alter_topic_optr ::= alter_db_optr", - /* 122 */ "alter_topic_optr ::= alter_topic_optr partitions", - 
/* 123 */ "typename ::= ids", - /* 124 */ "typename ::= ids LP signed RP", - /* 125 */ "typename ::= ids UNSIGNED", - /* 126 */ "signed ::= INTEGER", - /* 127 */ "signed ::= PLUS INTEGER", - /* 128 */ "signed ::= MINUS INTEGER", - /* 129 */ "cmd ::= CREATE TABLE create_table_args", - /* 130 */ "cmd ::= CREATE TABLE create_stable_args", - /* 131 */ "cmd ::= CREATE STABLE create_stable_args", - /* 132 */ "cmd ::= CREATE TABLE create_table_list", - /* 133 */ "create_table_list ::= create_from_stable", - /* 134 */ "create_table_list ::= create_table_list create_from_stable", - /* 135 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP", - /* 136 */ "create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP", - /* 137 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP", - /* 138 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP", - /* 139 */ "tagNamelist ::= tagNamelist COMMA ids", - /* 140 */ "tagNamelist ::= ids", - /* 141 */ "create_table_args ::= ifnotexists ids cpxName AS select", - /* 142 */ "columnlist ::= columnlist COMMA column", - /* 143 */ "columnlist ::= column", - /* 144 */ "column ::= ids typename", - /* 145 */ "tagitemlist ::= tagitemlist COMMA tagitem", - /* 146 */ "tagitemlist ::= tagitem", - /* 147 */ "tagitem ::= INTEGER", - /* 148 */ "tagitem ::= FLOAT", - /* 149 */ "tagitem ::= STRING", - /* 150 */ "tagitem ::= BOOL", - /* 151 */ "tagitem ::= NULL", - /* 152 */ "tagitem ::= MINUS INTEGER", - /* 153 */ "tagitem ::= MINUS FLOAT", - /* 154 */ "tagitem ::= PLUS INTEGER", - /* 155 */ "tagitem ::= PLUS FLOAT", - /* 156 */ "select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", - /* 157 */ "select ::= LP select RP", - /* 158 */ "union ::= select", - /* 159 */ "union ::= union UNION ALL select", - /* 160 */ "cmd ::= union", - /* 161 */ "select ::= SELECT selcollist", - /* 162 */ "sclp ::= selcollist COMMA", - /* 163 */ "sclp ::=", - /* 164 */ "selcollist ::= sclp distinct expr as", - /* 165 */ "selcollist ::= sclp STAR", - /* 166 */ "as ::= AS ids", - /* 167 */ "as ::= ids", - /* 168 */ "as ::=", - /* 169 */ "distinct ::= DISTINCT", - /* 170 */ "distinct ::=", - /* 171 */ "from ::= FROM tablelist", - /* 172 */ "from ::= FROM LP union RP", - /* 173 */ "tablelist ::= ids cpxName", - /* 174 */ "tablelist ::= ids cpxName ids", - /* 175 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 176 */ "tablelist ::= tablelist COMMA ids cpxName ids", - /* 177 */ "tmvar ::= VARIABLE", - /* 178 */ "interval_opt ::= INTERVAL LP tmvar RP", - /* 179 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP", - /* 180 */ "interval_opt ::=", - /* 181 */ "session_option ::=", - /* 182 */ "session_option ::= SESSION LP ids cpxName COMMA tmvar RP", - /* 183 */ "fill_opt ::=", - /* 184 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 185 */ "fill_opt ::= FILL LP ID RP", - /* 186 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 187 */ "sliding_opt ::=", - /* 188 */ "orderby_opt ::=", - /* 189 */ "orderby_opt ::= ORDER BY sortlist", - /* 190 */ "sortlist ::= sortlist COMMA item sortorder", - /* 191 */ "sortlist ::= item sortorder", - /* 192 */ "item ::= ids cpxName", - /* 193 */ "sortorder ::= ASC", - /* 194 */ "sortorder ::= DESC", - /* 195 */ "sortorder ::=", - /* 196 */ "groupby_opt ::=", - /* 197 */ "groupby_opt ::= GROUP BY grouplist", - /* 198 */ "grouplist ::= 
grouplist COMMA item", - /* 199 */ "grouplist ::= item", - /* 200 */ "having_opt ::=", - /* 201 */ "having_opt ::= HAVING expr", - /* 202 */ "limit_opt ::=", - /* 203 */ "limit_opt ::= LIMIT signed", - /* 204 */ "limit_opt ::= LIMIT signed OFFSET signed", - /* 205 */ "limit_opt ::= LIMIT signed COMMA signed", - /* 206 */ "slimit_opt ::=", - /* 207 */ "slimit_opt ::= SLIMIT signed", - /* 208 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 209 */ "slimit_opt ::= SLIMIT signed COMMA signed", - /* 210 */ "where_opt ::=", - /* 211 */ "where_opt ::= WHERE expr", - /* 212 */ "expr ::= LP expr RP", - /* 213 */ "expr ::= ID", - /* 214 */ "expr ::= ID DOT ID", - /* 215 */ "expr ::= ID DOT STAR", - /* 216 */ "expr ::= INTEGER", - /* 217 */ "expr ::= MINUS INTEGER", - /* 218 */ "expr ::= PLUS INTEGER", - /* 219 */ "expr ::= FLOAT", - /* 220 */ "expr ::= MINUS FLOAT", - /* 221 */ "expr ::= PLUS FLOAT", - /* 222 */ "expr ::= STRING", - /* 223 */ "expr ::= NOW", - /* 224 */ "expr ::= VARIABLE", - /* 225 */ "expr ::= PLUS VARIABLE", - /* 226 */ "expr ::= MINUS VARIABLE", - /* 227 */ "expr ::= BOOL", - /* 228 */ "expr ::= NULL", - /* 229 */ "expr ::= ID LP exprlist RP", - /* 230 */ "expr ::= ID LP STAR RP", - /* 231 */ "expr ::= expr IS NULL", - /* 232 */ "expr ::= expr IS NOT NULL", - /* 233 */ "expr ::= expr LT expr", - /* 234 */ "expr ::= expr GT expr", - /* 235 */ "expr ::= expr LE expr", - /* 236 */ "expr ::= expr GE expr", - /* 237 */ "expr ::= expr NE expr", - /* 238 */ "expr ::= expr EQ expr", - /* 239 */ "expr ::= expr BETWEEN expr AND expr", - /* 240 */ "expr ::= expr AND expr", - /* 241 */ "expr ::= expr OR expr", - /* 242 */ "expr ::= expr PLUS expr", - /* 243 */ "expr ::= expr MINUS expr", - /* 244 */ "expr ::= expr STAR expr", - /* 245 */ "expr ::= expr SLASH expr", - /* 246 */ "expr ::= expr REM expr", - /* 247 */ "expr ::= expr LIKE expr", - /* 248 */ "expr ::= expr IN LP exprlist RP", - /* 249 */ "exprlist ::= exprlist COMMA expritem", - /* 250 */ "exprlist ::= expritem", - /* 251 */ "expritem ::= expr", - /* 252 */ "expritem ::=", - /* 253 */ "cmd ::= RESET QUERY CACHE", - /* 254 */ "cmd ::= SYNCDB ids REPLICA", - /* 255 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 256 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 257 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 258 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 259 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 260 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 261 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 262 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 263 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 264 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 265 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 266 */ "cmd ::= KILL CONNECTION INTEGER", - /* 267 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 268 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 21 */ "cmd ::= SHOW CREATE STABLE ids cpxName", + /* 22 */ "cmd ::= SHOW CREATE DATABASE ids", + /* 23 */ "cmd ::= SHOW dbPrefix TABLES", + /* 24 */ "cmd ::= SHOW dbPrefix TABLES LIKE ids", + /* 25 */ "cmd ::= SHOW dbPrefix STABLES", + /* 26 */ "cmd ::= SHOW dbPrefix STABLES LIKE ids", + /* 27 */ "cmd ::= SHOW dbPrefix VGROUPS", + /* 28 */ "cmd ::= SHOW dbPrefix VGROUPS ids", + /* 29 */ "cmd ::= DROP TABLE ifexists ids cpxName", + /* 30 */ "cmd ::= DROP STABLE ifexists ids cpxName", + /* 31 */ "cmd ::= 
DROP DATABASE ifexists ids", + /* 32 */ "cmd ::= DROP TOPIC ifexists ids", + /* 33 */ "cmd ::= DROP DNODE ids", + /* 34 */ "cmd ::= DROP USER ids", + /* 35 */ "cmd ::= DROP ACCOUNT ids", + /* 36 */ "cmd ::= USE ids", + /* 37 */ "cmd ::= DESCRIBE ids cpxName", + /* 38 */ "cmd ::= ALTER USER ids PASS ids", + /* 39 */ "cmd ::= ALTER USER ids PRIVILEGE ids", + /* 40 */ "cmd ::= ALTER DNODE ids ids", + /* 41 */ "cmd ::= ALTER DNODE ids ids ids", + /* 42 */ "cmd ::= ALTER LOCAL ids", + /* 43 */ "cmd ::= ALTER LOCAL ids ids", + /* 44 */ "cmd ::= ALTER DATABASE ids alter_db_optr", + /* 45 */ "cmd ::= ALTER TOPIC ids alter_topic_optr", + /* 46 */ "cmd ::= ALTER ACCOUNT ids acct_optr", + /* 47 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr", + /* 48 */ "ids ::= ID", + /* 49 */ "ids ::= STRING", + /* 50 */ "ifexists ::= IF EXISTS", + /* 51 */ "ifexists ::=", + /* 52 */ "ifnotexists ::= IF NOT EXISTS", + /* 53 */ "ifnotexists ::=", + /* 54 */ "cmd ::= CREATE DNODE ids", + /* 55 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr", + /* 56 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr", + /* 57 */ "cmd ::= CREATE TOPIC ifnotexists ids topic_optr", + /* 58 */ "cmd ::= CREATE USER ids PASS ids", + /* 59 */ "pps ::=", + /* 60 */ "pps ::= PPS INTEGER", + /* 61 */ "tseries ::=", + /* 62 */ "tseries ::= TSERIES INTEGER", + /* 63 */ "dbs ::=", + /* 64 */ "dbs ::= DBS INTEGER", + /* 65 */ "streams ::=", + /* 66 */ "streams ::= STREAMS INTEGER", + /* 67 */ "storage ::=", + /* 68 */ "storage ::= STORAGE INTEGER", + /* 69 */ "qtime ::=", + /* 70 */ "qtime ::= QTIME INTEGER", + /* 71 */ "users ::=", + /* 72 */ "users ::= USERS INTEGER", + /* 73 */ "conns ::=", + /* 74 */ "conns ::= CONNS INTEGER", + /* 75 */ "state ::=", + /* 76 */ "state ::= STATE ids", + /* 77 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state", + /* 78 */ "keep ::= KEEP tagitemlist", + /* 79 */ "cache ::= CACHE INTEGER", + /* 80 */ "replica ::= REPLICA INTEGER", + /* 81 */ "quorum ::= QUORUM INTEGER", + /* 82 */ "days ::= DAYS INTEGER", + /* 83 */ "minrows ::= MINROWS INTEGER", + /* 84 */ "maxrows ::= MAXROWS INTEGER", + /* 85 */ "blocks ::= BLOCKS INTEGER", + /* 86 */ "ctime ::= CTIME INTEGER", + /* 87 */ "wal ::= WAL INTEGER", + /* 88 */ "fsync ::= FSYNC INTEGER", + /* 89 */ "comp ::= COMP INTEGER", + /* 90 */ "prec ::= PRECISION STRING", + /* 91 */ "update ::= UPDATE INTEGER", + /* 92 */ "cachelast ::= CACHELAST INTEGER", + /* 93 */ "partitions ::= PARTITIONS INTEGER", + /* 94 */ "db_optr ::=", + /* 95 */ "db_optr ::= db_optr cache", + /* 96 */ "db_optr ::= db_optr replica", + /* 97 */ "db_optr ::= db_optr quorum", + /* 98 */ "db_optr ::= db_optr days", + /* 99 */ "db_optr ::= db_optr minrows", + /* 100 */ "db_optr ::= db_optr maxrows", + /* 101 */ "db_optr ::= db_optr blocks", + /* 102 */ "db_optr ::= db_optr ctime", + /* 103 */ "db_optr ::= db_optr wal", + /* 104 */ "db_optr ::= db_optr fsync", + /* 105 */ "db_optr ::= db_optr comp", + /* 106 */ "db_optr ::= db_optr prec", + /* 107 */ "db_optr ::= db_optr keep", + /* 108 */ "db_optr ::= db_optr update", + /* 109 */ "db_optr ::= db_optr cachelast", + /* 110 */ "topic_optr ::= db_optr", + /* 111 */ "topic_optr ::= topic_optr partitions", + /* 112 */ "alter_db_optr ::=", + /* 113 */ "alter_db_optr ::= alter_db_optr replica", + /* 114 */ "alter_db_optr ::= alter_db_optr quorum", + /* 115 */ "alter_db_optr ::= alter_db_optr keep", + /* 116 */ "alter_db_optr ::= alter_db_optr blocks", + /* 117 */ "alter_db_optr ::= alter_db_optr comp", + /* 118 */ "alter_db_optr ::= 
alter_db_optr wal", + /* 119 */ "alter_db_optr ::= alter_db_optr fsync", + /* 120 */ "alter_db_optr ::= alter_db_optr update", + /* 121 */ "alter_db_optr ::= alter_db_optr cachelast", + /* 122 */ "alter_topic_optr ::= alter_db_optr", + /* 123 */ "alter_topic_optr ::= alter_topic_optr partitions", + /* 124 */ "typename ::= ids", + /* 125 */ "typename ::= ids LP signed RP", + /* 126 */ "typename ::= ids UNSIGNED", + /* 127 */ "signed ::= INTEGER", + /* 128 */ "signed ::= PLUS INTEGER", + /* 129 */ "signed ::= MINUS INTEGER", + /* 130 */ "cmd ::= CREATE TABLE create_table_args", + /* 131 */ "cmd ::= CREATE TABLE create_stable_args", + /* 132 */ "cmd ::= CREATE STABLE create_stable_args", + /* 133 */ "cmd ::= CREATE TABLE create_table_list", + /* 134 */ "create_table_list ::= create_from_stable", + /* 135 */ "create_table_list ::= create_table_list create_from_stable", + /* 136 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP", + /* 137 */ "create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP", + /* 138 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP", + /* 139 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP", + /* 140 */ "tagNamelist ::= tagNamelist COMMA ids", + /* 141 */ "tagNamelist ::= ids", + /* 142 */ "create_table_args ::= ifnotexists ids cpxName AS select", + /* 143 */ "columnlist ::= columnlist COMMA column", + /* 144 */ "columnlist ::= column", + /* 145 */ "column ::= ids typename", + /* 146 */ "tagitemlist ::= tagitemlist COMMA tagitem", + /* 147 */ "tagitemlist ::= tagitem", + /* 148 */ "tagitem ::= INTEGER", + /* 149 */ "tagitem ::= FLOAT", + /* 150 */ "tagitem ::= STRING", + /* 151 */ "tagitem ::= BOOL", + /* 152 */ "tagitem ::= NULL", + /* 153 */ "tagitem ::= MINUS INTEGER", + /* 154 */ "tagitem ::= MINUS FLOAT", + /* 155 */ "tagitem ::= PLUS INTEGER", + /* 156 */ "tagitem ::= PLUS FLOAT", + /* 157 */ "select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", + /* 158 */ "select ::= LP select RP", + /* 159 */ "union ::= select", + /* 160 */ "union ::= union UNION ALL select", + /* 161 */ "cmd ::= union", + /* 162 */ "select ::= SELECT selcollist", + /* 163 */ "sclp ::= selcollist COMMA", + /* 164 */ "sclp ::=", + /* 165 */ "selcollist ::= sclp distinct expr as", + /* 166 */ "selcollist ::= sclp STAR", + /* 167 */ "as ::= AS ids", + /* 168 */ "as ::= ids", + /* 169 */ "as ::=", + /* 170 */ "distinct ::= DISTINCT", + /* 171 */ "distinct ::=", + /* 172 */ "from ::= FROM tablelist", + /* 173 */ "from ::= FROM LP union RP", + /* 174 */ "tablelist ::= ids cpxName", + /* 175 */ "tablelist ::= ids cpxName ids", + /* 176 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 177 */ "tablelist ::= tablelist COMMA ids cpxName ids", + /* 178 */ "tmvar ::= VARIABLE", + /* 179 */ "interval_opt ::= INTERVAL LP tmvar RP", + /* 180 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP", + /* 181 */ "interval_opt ::=", + /* 182 */ "session_option ::=", + /* 183 */ "session_option ::= SESSION LP ids cpxName COMMA tmvar RP", + /* 184 */ "fill_opt ::=", + /* 185 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 186 */ "fill_opt ::= FILL LP ID RP", + /* 187 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 188 */ "sliding_opt ::=", + /* 189 */ "orderby_opt ::=", + /* 190 */ "orderby_opt ::= ORDER BY sortlist", + /* 191 */ "sortlist ::= sortlist COMMA 
item sortorder", + /* 192 */ "sortlist ::= item sortorder", + /* 193 */ "item ::= ids cpxName", + /* 194 */ "sortorder ::= ASC", + /* 195 */ "sortorder ::= DESC", + /* 196 */ "sortorder ::=", + /* 197 */ "groupby_opt ::=", + /* 198 */ "groupby_opt ::= GROUP BY grouplist", + /* 199 */ "grouplist ::= grouplist COMMA item", + /* 200 */ "grouplist ::= item", + /* 201 */ "having_opt ::=", + /* 202 */ "having_opt ::= HAVING expr", + /* 203 */ "limit_opt ::=", + /* 204 */ "limit_opt ::= LIMIT signed", + /* 205 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 206 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 207 */ "slimit_opt ::=", + /* 208 */ "slimit_opt ::= SLIMIT signed", + /* 209 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 210 */ "slimit_opt ::= SLIMIT signed COMMA signed", + /* 211 */ "where_opt ::=", + /* 212 */ "where_opt ::= WHERE expr", + /* 213 */ "expr ::= LP expr RP", + /* 214 */ "expr ::= ID", + /* 215 */ "expr ::= ID DOT ID", + /* 216 */ "expr ::= ID DOT STAR", + /* 217 */ "expr ::= INTEGER", + /* 218 */ "expr ::= MINUS INTEGER", + /* 219 */ "expr ::= PLUS INTEGER", + /* 220 */ "expr ::= FLOAT", + /* 221 */ "expr ::= MINUS FLOAT", + /* 222 */ "expr ::= PLUS FLOAT", + /* 223 */ "expr ::= STRING", + /* 224 */ "expr ::= NOW", + /* 225 */ "expr ::= VARIABLE", + /* 226 */ "expr ::= PLUS VARIABLE", + /* 227 */ "expr ::= MINUS VARIABLE", + /* 228 */ "expr ::= BOOL", + /* 229 */ "expr ::= NULL", + /* 230 */ "expr ::= ID LP exprlist RP", + /* 231 */ "expr ::= ID LP STAR RP", + /* 232 */ "expr ::= expr IS NULL", + /* 233 */ "expr ::= expr IS NOT NULL", + /* 234 */ "expr ::= expr LT expr", + /* 235 */ "expr ::= expr GT expr", + /* 236 */ "expr ::= expr LE expr", + /* 237 */ "expr ::= expr GE expr", + /* 238 */ "expr ::= expr NE expr", + /* 239 */ "expr ::= expr EQ expr", + /* 240 */ "expr ::= expr BETWEEN expr AND expr", + /* 241 */ "expr ::= expr AND expr", + /* 242 */ "expr ::= expr OR expr", + /* 243 */ "expr ::= expr PLUS expr", + /* 244 */ "expr ::= expr MINUS expr", + /* 245 */ "expr ::= expr STAR expr", + /* 246 */ "expr ::= expr SLASH expr", + /* 247 */ "expr ::= expr REM expr", + /* 248 */ "expr ::= expr LIKE expr", + /* 249 */ "expr ::= expr IN LP exprlist RP", + /* 250 */ "exprlist ::= exprlist COMMA expritem", + /* 251 */ "exprlist ::= expritem", + /* 252 */ "expritem ::= expr", + /* 253 */ "expritem ::=", + /* 254 */ "cmd ::= RESET QUERY CACHE", + /* 255 */ "cmd ::= SYNCDB ids REPLICA", + /* 256 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 257 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 258 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 259 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 260 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 261 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 262 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 263 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 264 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 265 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 266 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 267 */ "cmd ::= KILL CONNECTION INTEGER", + /* 268 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 269 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1774,254 +1776,255 @@ static const struct { { 192, 0 }, /* (18) cpxName ::= */ { 192, -2 }, /* (19) cpxName ::= DOT ids */ { 189, -5 }, /* (20) cmd ::= SHOW CREATE TABLE ids cpxName */ - 
{ 189, -4 }, /* (21) cmd ::= SHOW CREATE DATABASE ids */ - { 189, -3 }, /* (22) cmd ::= SHOW dbPrefix TABLES */ - { 189, -5 }, /* (23) cmd ::= SHOW dbPrefix TABLES LIKE ids */ - { 189, -3 }, /* (24) cmd ::= SHOW dbPrefix STABLES */ - { 189, -5 }, /* (25) cmd ::= SHOW dbPrefix STABLES LIKE ids */ - { 189, -3 }, /* (26) cmd ::= SHOW dbPrefix VGROUPS */ - { 189, -4 }, /* (27) cmd ::= SHOW dbPrefix VGROUPS ids */ - { 189, -5 }, /* (28) cmd ::= DROP TABLE ifexists ids cpxName */ - { 189, -5 }, /* (29) cmd ::= DROP STABLE ifexists ids cpxName */ - { 189, -4 }, /* (30) cmd ::= DROP DATABASE ifexists ids */ - { 189, -4 }, /* (31) cmd ::= DROP TOPIC ifexists ids */ - { 189, -3 }, /* (32) cmd ::= DROP DNODE ids */ - { 189, -3 }, /* (33) cmd ::= DROP USER ids */ - { 189, -3 }, /* (34) cmd ::= DROP ACCOUNT ids */ - { 189, -2 }, /* (35) cmd ::= USE ids */ - { 189, -3 }, /* (36) cmd ::= DESCRIBE ids cpxName */ - { 189, -5 }, /* (37) cmd ::= ALTER USER ids PASS ids */ - { 189, -5 }, /* (38) cmd ::= ALTER USER ids PRIVILEGE ids */ - { 189, -4 }, /* (39) cmd ::= ALTER DNODE ids ids */ - { 189, -5 }, /* (40) cmd ::= ALTER DNODE ids ids ids */ - { 189, -3 }, /* (41) cmd ::= ALTER LOCAL ids */ - { 189, -4 }, /* (42) cmd ::= ALTER LOCAL ids ids */ - { 189, -4 }, /* (43) cmd ::= ALTER DATABASE ids alter_db_optr */ - { 189, -4 }, /* (44) cmd ::= ALTER TOPIC ids alter_topic_optr */ - { 189, -4 }, /* (45) cmd ::= ALTER ACCOUNT ids acct_optr */ - { 189, -6 }, /* (46) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - { 191, -1 }, /* (47) ids ::= ID */ - { 191, -1 }, /* (48) ids ::= STRING */ - { 193, -2 }, /* (49) ifexists ::= IF EXISTS */ - { 193, 0 }, /* (50) ifexists ::= */ - { 197, -3 }, /* (51) ifnotexists ::= IF NOT EXISTS */ - { 197, 0 }, /* (52) ifnotexists ::= */ - { 189, -3 }, /* (53) cmd ::= CREATE DNODE ids */ - { 189, -6 }, /* (54) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - { 189, -5 }, /* (55) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - { 189, -5 }, /* (56) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ - { 189, -5 }, /* (57) cmd ::= CREATE USER ids PASS ids */ - { 200, 0 }, /* (58) pps ::= */ - { 200, -2 }, /* (59) pps ::= PPS INTEGER */ - { 201, 0 }, /* (60) tseries ::= */ - { 201, -2 }, /* (61) tseries ::= TSERIES INTEGER */ - { 202, 0 }, /* (62) dbs ::= */ - { 202, -2 }, /* (63) dbs ::= DBS INTEGER */ - { 203, 0 }, /* (64) streams ::= */ - { 203, -2 }, /* (65) streams ::= STREAMS INTEGER */ - { 204, 0 }, /* (66) storage ::= */ - { 204, -2 }, /* (67) storage ::= STORAGE INTEGER */ - { 205, 0 }, /* (68) qtime ::= */ - { 205, -2 }, /* (69) qtime ::= QTIME INTEGER */ - { 206, 0 }, /* (70) users ::= */ - { 206, -2 }, /* (71) users ::= USERS INTEGER */ - { 207, 0 }, /* (72) conns ::= */ - { 207, -2 }, /* (73) conns ::= CONNS INTEGER */ - { 208, 0 }, /* (74) state ::= */ - { 208, -2 }, /* (75) state ::= STATE ids */ - { 196, -9 }, /* (76) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - { 209, -2 }, /* (77) keep ::= KEEP tagitemlist */ - { 211, -2 }, /* (78) cache ::= CACHE INTEGER */ - { 212, -2 }, /* (79) replica ::= REPLICA INTEGER */ - { 213, -2 }, /* (80) quorum ::= QUORUM INTEGER */ - { 214, -2 }, /* (81) days ::= DAYS INTEGER */ - { 215, -2 }, /* (82) minrows ::= MINROWS INTEGER */ - { 216, -2 }, /* (83) maxrows ::= MAXROWS INTEGER */ - { 217, -2 }, /* (84) blocks ::= BLOCKS INTEGER */ - { 218, -2 }, /* (85) ctime ::= CTIME INTEGER */ - { 219, -2 }, /* (86) wal ::= WAL INTEGER */ - { 220, -2 }, /* (87) fsync ::= FSYNC INTEGER */ - { 221, -2 }, /* (88) 
comp ::= COMP INTEGER */ - { 222, -2 }, /* (89) prec ::= PRECISION STRING */ - { 223, -2 }, /* (90) update ::= UPDATE INTEGER */ - { 224, -2 }, /* (91) cachelast ::= CACHELAST INTEGER */ - { 225, -2 }, /* (92) partitions ::= PARTITIONS INTEGER */ - { 198, 0 }, /* (93) db_optr ::= */ - { 198, -2 }, /* (94) db_optr ::= db_optr cache */ - { 198, -2 }, /* (95) db_optr ::= db_optr replica */ - { 198, -2 }, /* (96) db_optr ::= db_optr quorum */ - { 198, -2 }, /* (97) db_optr ::= db_optr days */ - { 198, -2 }, /* (98) db_optr ::= db_optr minrows */ - { 198, -2 }, /* (99) db_optr ::= db_optr maxrows */ - { 198, -2 }, /* (100) db_optr ::= db_optr blocks */ - { 198, -2 }, /* (101) db_optr ::= db_optr ctime */ - { 198, -2 }, /* (102) db_optr ::= db_optr wal */ - { 198, -2 }, /* (103) db_optr ::= db_optr fsync */ - { 198, -2 }, /* (104) db_optr ::= db_optr comp */ - { 198, -2 }, /* (105) db_optr ::= db_optr prec */ - { 198, -2 }, /* (106) db_optr ::= db_optr keep */ - { 198, -2 }, /* (107) db_optr ::= db_optr update */ - { 198, -2 }, /* (108) db_optr ::= db_optr cachelast */ - { 199, -1 }, /* (109) topic_optr ::= db_optr */ - { 199, -2 }, /* (110) topic_optr ::= topic_optr partitions */ - { 194, 0 }, /* (111) alter_db_optr ::= */ - { 194, -2 }, /* (112) alter_db_optr ::= alter_db_optr replica */ - { 194, -2 }, /* (113) alter_db_optr ::= alter_db_optr quorum */ - { 194, -2 }, /* (114) alter_db_optr ::= alter_db_optr keep */ - { 194, -2 }, /* (115) alter_db_optr ::= alter_db_optr blocks */ - { 194, -2 }, /* (116) alter_db_optr ::= alter_db_optr comp */ - { 194, -2 }, /* (117) alter_db_optr ::= alter_db_optr wal */ - { 194, -2 }, /* (118) alter_db_optr ::= alter_db_optr fsync */ - { 194, -2 }, /* (119) alter_db_optr ::= alter_db_optr update */ - { 194, -2 }, /* (120) alter_db_optr ::= alter_db_optr cachelast */ - { 195, -1 }, /* (121) alter_topic_optr ::= alter_db_optr */ - { 195, -2 }, /* (122) alter_topic_optr ::= alter_topic_optr partitions */ - { 226, -1 }, /* (123) typename ::= ids */ - { 226, -4 }, /* (124) typename ::= ids LP signed RP */ - { 226, -2 }, /* (125) typename ::= ids UNSIGNED */ - { 227, -1 }, /* (126) signed ::= INTEGER */ - { 227, -2 }, /* (127) signed ::= PLUS INTEGER */ - { 227, -2 }, /* (128) signed ::= MINUS INTEGER */ - { 189, -3 }, /* (129) cmd ::= CREATE TABLE create_table_args */ - { 189, -3 }, /* (130) cmd ::= CREATE TABLE create_stable_args */ - { 189, -3 }, /* (131) cmd ::= CREATE STABLE create_stable_args */ - { 189, -3 }, /* (132) cmd ::= CREATE TABLE create_table_list */ - { 230, -1 }, /* (133) create_table_list ::= create_from_stable */ - { 230, -2 }, /* (134) create_table_list ::= create_table_list create_from_stable */ - { 228, -6 }, /* (135) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ - { 229, -10 }, /* (136) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ - { 231, -10 }, /* (137) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - { 231, -13 }, /* (138) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ - { 233, -3 }, /* (139) tagNamelist ::= tagNamelist COMMA ids */ - { 233, -1 }, /* (140) tagNamelist ::= ids */ - { 228, -5 }, /* (141) create_table_args ::= ifnotexists ids cpxName AS select */ - { 232, -3 }, /* (142) columnlist ::= columnlist COMMA column */ - { 232, -1 }, /* (143) columnlist ::= column */ - { 235, -2 }, /* (144) column ::= ids typename */ - { 210, -3 }, /* (145) tagitemlist ::= 
tagitemlist COMMA tagitem */ - { 210, -1 }, /* (146) tagitemlist ::= tagitem */ - { 236, -1 }, /* (147) tagitem ::= INTEGER */ - { 236, -1 }, /* (148) tagitem ::= FLOAT */ - { 236, -1 }, /* (149) tagitem ::= STRING */ - { 236, -1 }, /* (150) tagitem ::= BOOL */ - { 236, -1 }, /* (151) tagitem ::= NULL */ - { 236, -2 }, /* (152) tagitem ::= MINUS INTEGER */ - { 236, -2 }, /* (153) tagitem ::= MINUS FLOAT */ - { 236, -2 }, /* (154) tagitem ::= PLUS INTEGER */ - { 236, -2 }, /* (155) tagitem ::= PLUS FLOAT */ - { 234, -13 }, /* (156) select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ - { 234, -3 }, /* (157) select ::= LP select RP */ - { 249, -1 }, /* (158) union ::= select */ - { 249, -4 }, /* (159) union ::= union UNION ALL select */ - { 189, -1 }, /* (160) cmd ::= union */ - { 234, -2 }, /* (161) select ::= SELECT selcollist */ - { 250, -2 }, /* (162) sclp ::= selcollist COMMA */ - { 250, 0 }, /* (163) sclp ::= */ - { 237, -4 }, /* (164) selcollist ::= sclp distinct expr as */ - { 237, -2 }, /* (165) selcollist ::= sclp STAR */ - { 253, -2 }, /* (166) as ::= AS ids */ - { 253, -1 }, /* (167) as ::= ids */ - { 253, 0 }, /* (168) as ::= */ - { 251, -1 }, /* (169) distinct ::= DISTINCT */ - { 251, 0 }, /* (170) distinct ::= */ - { 238, -2 }, /* (171) from ::= FROM tablelist */ - { 238, -4 }, /* (172) from ::= FROM LP union RP */ - { 254, -2 }, /* (173) tablelist ::= ids cpxName */ - { 254, -3 }, /* (174) tablelist ::= ids cpxName ids */ - { 254, -4 }, /* (175) tablelist ::= tablelist COMMA ids cpxName */ - { 254, -5 }, /* (176) tablelist ::= tablelist COMMA ids cpxName ids */ - { 255, -1 }, /* (177) tmvar ::= VARIABLE */ - { 240, -4 }, /* (178) interval_opt ::= INTERVAL LP tmvar RP */ - { 240, -6 }, /* (179) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ - { 240, 0 }, /* (180) interval_opt ::= */ - { 241, 0 }, /* (181) session_option ::= */ - { 241, -7 }, /* (182) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ - { 242, 0 }, /* (183) fill_opt ::= */ - { 242, -6 }, /* (184) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - { 242, -4 }, /* (185) fill_opt ::= FILL LP ID RP */ - { 243, -4 }, /* (186) sliding_opt ::= SLIDING LP tmvar RP */ - { 243, 0 }, /* (187) sliding_opt ::= */ - { 245, 0 }, /* (188) orderby_opt ::= */ - { 245, -3 }, /* (189) orderby_opt ::= ORDER BY sortlist */ - { 256, -4 }, /* (190) sortlist ::= sortlist COMMA item sortorder */ - { 256, -2 }, /* (191) sortlist ::= item sortorder */ - { 258, -2 }, /* (192) item ::= ids cpxName */ - { 259, -1 }, /* (193) sortorder ::= ASC */ - { 259, -1 }, /* (194) sortorder ::= DESC */ - { 259, 0 }, /* (195) sortorder ::= */ - { 244, 0 }, /* (196) groupby_opt ::= */ - { 244, -3 }, /* (197) groupby_opt ::= GROUP BY grouplist */ - { 260, -3 }, /* (198) grouplist ::= grouplist COMMA item */ - { 260, -1 }, /* (199) grouplist ::= item */ - { 246, 0 }, /* (200) having_opt ::= */ - { 246, -2 }, /* (201) having_opt ::= HAVING expr */ - { 248, 0 }, /* (202) limit_opt ::= */ - { 248, -2 }, /* (203) limit_opt ::= LIMIT signed */ - { 248, -4 }, /* (204) limit_opt ::= LIMIT signed OFFSET signed */ - { 248, -4 }, /* (205) limit_opt ::= LIMIT signed COMMA signed */ - { 247, 0 }, /* (206) slimit_opt ::= */ - { 247, -2 }, /* (207) slimit_opt ::= SLIMIT signed */ - { 247, -4 }, /* (208) slimit_opt ::= SLIMIT signed SOFFSET signed */ - { 247, -4 }, /* (209) slimit_opt ::= SLIMIT signed COMMA signed */ - { 239, 0 }, /* (210) where_opt ::= */ - { 
239, -2 }, /* (211) where_opt ::= WHERE expr */ - { 252, -3 }, /* (212) expr ::= LP expr RP */ - { 252, -1 }, /* (213) expr ::= ID */ - { 252, -3 }, /* (214) expr ::= ID DOT ID */ - { 252, -3 }, /* (215) expr ::= ID DOT STAR */ - { 252, -1 }, /* (216) expr ::= INTEGER */ - { 252, -2 }, /* (217) expr ::= MINUS INTEGER */ - { 252, -2 }, /* (218) expr ::= PLUS INTEGER */ - { 252, -1 }, /* (219) expr ::= FLOAT */ - { 252, -2 }, /* (220) expr ::= MINUS FLOAT */ - { 252, -2 }, /* (221) expr ::= PLUS FLOAT */ - { 252, -1 }, /* (222) expr ::= STRING */ - { 252, -1 }, /* (223) expr ::= NOW */ - { 252, -1 }, /* (224) expr ::= VARIABLE */ - { 252, -2 }, /* (225) expr ::= PLUS VARIABLE */ - { 252, -2 }, /* (226) expr ::= MINUS VARIABLE */ - { 252, -1 }, /* (227) expr ::= BOOL */ - { 252, -1 }, /* (228) expr ::= NULL */ - { 252, -4 }, /* (229) expr ::= ID LP exprlist RP */ - { 252, -4 }, /* (230) expr ::= ID LP STAR RP */ - { 252, -3 }, /* (231) expr ::= expr IS NULL */ - { 252, -4 }, /* (232) expr ::= expr IS NOT NULL */ - { 252, -3 }, /* (233) expr ::= expr LT expr */ - { 252, -3 }, /* (234) expr ::= expr GT expr */ - { 252, -3 }, /* (235) expr ::= expr LE expr */ - { 252, -3 }, /* (236) expr ::= expr GE expr */ - { 252, -3 }, /* (237) expr ::= expr NE expr */ - { 252, -3 }, /* (238) expr ::= expr EQ expr */ - { 252, -5 }, /* (239) expr ::= expr BETWEEN expr AND expr */ - { 252, -3 }, /* (240) expr ::= expr AND expr */ - { 252, -3 }, /* (241) expr ::= expr OR expr */ - { 252, -3 }, /* (242) expr ::= expr PLUS expr */ - { 252, -3 }, /* (243) expr ::= expr MINUS expr */ - { 252, -3 }, /* (244) expr ::= expr STAR expr */ - { 252, -3 }, /* (245) expr ::= expr SLASH expr */ - { 252, -3 }, /* (246) expr ::= expr REM expr */ - { 252, -3 }, /* (247) expr ::= expr LIKE expr */ - { 252, -5 }, /* (248) expr ::= expr IN LP exprlist RP */ - { 261, -3 }, /* (249) exprlist ::= exprlist COMMA expritem */ - { 261, -1 }, /* (250) exprlist ::= expritem */ - { 262, -1 }, /* (251) expritem ::= expr */ - { 262, 0 }, /* (252) expritem ::= */ - { 189, -3 }, /* (253) cmd ::= RESET QUERY CACHE */ - { 189, -3 }, /* (254) cmd ::= SYNCDB ids REPLICA */ - { 189, -7 }, /* (255) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - { 189, -7 }, /* (256) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - { 189, -7 }, /* (257) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - { 189, -7 }, /* (258) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - { 189, -8 }, /* (259) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - { 189, -9 }, /* (260) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - { 189, -7 }, /* (261) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - { 189, -7 }, /* (262) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - { 189, -7 }, /* (263) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - { 189, -7 }, /* (264) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - { 189, -8 }, /* (265) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - { 189, -3 }, /* (266) cmd ::= KILL CONNECTION INTEGER */ - { 189, -5 }, /* (267) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - { 189, -5 }, /* (268) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + { 189, -5 }, /* (21) cmd ::= SHOW CREATE STABLE ids cpxName */ + { 189, -4 }, /* (22) cmd ::= SHOW CREATE DATABASE ids */ + { 189, -3 }, /* (23) cmd ::= SHOW dbPrefix TABLES */ + { 189, -5 }, /* (24) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + { 189, -3 }, /* (25) cmd ::= SHOW dbPrefix STABLES */ + { 189, -5 }, /* (26) cmd ::= SHOW dbPrefix STABLES 
LIKE ids */ + { 189, -3 }, /* (27) cmd ::= SHOW dbPrefix VGROUPS */ + { 189, -4 }, /* (28) cmd ::= SHOW dbPrefix VGROUPS ids */ + { 189, -5 }, /* (29) cmd ::= DROP TABLE ifexists ids cpxName */ + { 189, -5 }, /* (30) cmd ::= DROP STABLE ifexists ids cpxName */ + { 189, -4 }, /* (31) cmd ::= DROP DATABASE ifexists ids */ + { 189, -4 }, /* (32) cmd ::= DROP TOPIC ifexists ids */ + { 189, -3 }, /* (33) cmd ::= DROP DNODE ids */ + { 189, -3 }, /* (34) cmd ::= DROP USER ids */ + { 189, -3 }, /* (35) cmd ::= DROP ACCOUNT ids */ + { 189, -2 }, /* (36) cmd ::= USE ids */ + { 189, -3 }, /* (37) cmd ::= DESCRIBE ids cpxName */ + { 189, -5 }, /* (38) cmd ::= ALTER USER ids PASS ids */ + { 189, -5 }, /* (39) cmd ::= ALTER USER ids PRIVILEGE ids */ + { 189, -4 }, /* (40) cmd ::= ALTER DNODE ids ids */ + { 189, -5 }, /* (41) cmd ::= ALTER DNODE ids ids ids */ + { 189, -3 }, /* (42) cmd ::= ALTER LOCAL ids */ + { 189, -4 }, /* (43) cmd ::= ALTER LOCAL ids ids */ + { 189, -4 }, /* (44) cmd ::= ALTER DATABASE ids alter_db_optr */ + { 189, -4 }, /* (45) cmd ::= ALTER TOPIC ids alter_topic_optr */ + { 189, -4 }, /* (46) cmd ::= ALTER ACCOUNT ids acct_optr */ + { 189, -6 }, /* (47) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + { 191, -1 }, /* (48) ids ::= ID */ + { 191, -1 }, /* (49) ids ::= STRING */ + { 193, -2 }, /* (50) ifexists ::= IF EXISTS */ + { 193, 0 }, /* (51) ifexists ::= */ + { 197, -3 }, /* (52) ifnotexists ::= IF NOT EXISTS */ + { 197, 0 }, /* (53) ifnotexists ::= */ + { 189, -3 }, /* (54) cmd ::= CREATE DNODE ids */ + { 189, -6 }, /* (55) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + { 189, -5 }, /* (56) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + { 189, -5 }, /* (57) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ + { 189, -5 }, /* (58) cmd ::= CREATE USER ids PASS ids */ + { 200, 0 }, /* (59) pps ::= */ + { 200, -2 }, /* (60) pps ::= PPS INTEGER */ + { 201, 0 }, /* (61) tseries ::= */ + { 201, -2 }, /* (62) tseries ::= TSERIES INTEGER */ + { 202, 0 }, /* (63) dbs ::= */ + { 202, -2 }, /* (64) dbs ::= DBS INTEGER */ + { 203, 0 }, /* (65) streams ::= */ + { 203, -2 }, /* (66) streams ::= STREAMS INTEGER */ + { 204, 0 }, /* (67) storage ::= */ + { 204, -2 }, /* (68) storage ::= STORAGE INTEGER */ + { 205, 0 }, /* (69) qtime ::= */ + { 205, -2 }, /* (70) qtime ::= QTIME INTEGER */ + { 206, 0 }, /* (71) users ::= */ + { 206, -2 }, /* (72) users ::= USERS INTEGER */ + { 207, 0 }, /* (73) conns ::= */ + { 207, -2 }, /* (74) conns ::= CONNS INTEGER */ + { 208, 0 }, /* (75) state ::= */ + { 208, -2 }, /* (76) state ::= STATE ids */ + { 196, -9 }, /* (77) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + { 209, -2 }, /* (78) keep ::= KEEP tagitemlist */ + { 211, -2 }, /* (79) cache ::= CACHE INTEGER */ + { 212, -2 }, /* (80) replica ::= REPLICA INTEGER */ + { 213, -2 }, /* (81) quorum ::= QUORUM INTEGER */ + { 214, -2 }, /* (82) days ::= DAYS INTEGER */ + { 215, -2 }, /* (83) minrows ::= MINROWS INTEGER */ + { 216, -2 }, /* (84) maxrows ::= MAXROWS INTEGER */ + { 217, -2 }, /* (85) blocks ::= BLOCKS INTEGER */ + { 218, -2 }, /* (86) ctime ::= CTIME INTEGER */ + { 219, -2 }, /* (87) wal ::= WAL INTEGER */ + { 220, -2 }, /* (88) fsync ::= FSYNC INTEGER */ + { 221, -2 }, /* (89) comp ::= COMP INTEGER */ + { 222, -2 }, /* (90) prec ::= PRECISION STRING */ + { 223, -2 }, /* (91) update ::= UPDATE INTEGER */ + { 224, -2 }, /* (92) cachelast ::= CACHELAST INTEGER */ + { 225, -2 }, /* (93) partitions ::= PARTITIONS INTEGER */ + { 198, 0 }, /* (94) db_optr ::= */ + 
{ 198, -2 }, /* (95) db_optr ::= db_optr cache */ + { 198, -2 }, /* (96) db_optr ::= db_optr replica */ + { 198, -2 }, /* (97) db_optr ::= db_optr quorum */ + { 198, -2 }, /* (98) db_optr ::= db_optr days */ + { 198, -2 }, /* (99) db_optr ::= db_optr minrows */ + { 198, -2 }, /* (100) db_optr ::= db_optr maxrows */ + { 198, -2 }, /* (101) db_optr ::= db_optr blocks */ + { 198, -2 }, /* (102) db_optr ::= db_optr ctime */ + { 198, -2 }, /* (103) db_optr ::= db_optr wal */ + { 198, -2 }, /* (104) db_optr ::= db_optr fsync */ + { 198, -2 }, /* (105) db_optr ::= db_optr comp */ + { 198, -2 }, /* (106) db_optr ::= db_optr prec */ + { 198, -2 }, /* (107) db_optr ::= db_optr keep */ + { 198, -2 }, /* (108) db_optr ::= db_optr update */ + { 198, -2 }, /* (109) db_optr ::= db_optr cachelast */ + { 199, -1 }, /* (110) topic_optr ::= db_optr */ + { 199, -2 }, /* (111) topic_optr ::= topic_optr partitions */ + { 194, 0 }, /* (112) alter_db_optr ::= */ + { 194, -2 }, /* (113) alter_db_optr ::= alter_db_optr replica */ + { 194, -2 }, /* (114) alter_db_optr ::= alter_db_optr quorum */ + { 194, -2 }, /* (115) alter_db_optr ::= alter_db_optr keep */ + { 194, -2 }, /* (116) alter_db_optr ::= alter_db_optr blocks */ + { 194, -2 }, /* (117) alter_db_optr ::= alter_db_optr comp */ + { 194, -2 }, /* (118) alter_db_optr ::= alter_db_optr wal */ + { 194, -2 }, /* (119) alter_db_optr ::= alter_db_optr fsync */ + { 194, -2 }, /* (120) alter_db_optr ::= alter_db_optr update */ + { 194, -2 }, /* (121) alter_db_optr ::= alter_db_optr cachelast */ + { 195, -1 }, /* (122) alter_topic_optr ::= alter_db_optr */ + { 195, -2 }, /* (123) alter_topic_optr ::= alter_topic_optr partitions */ + { 226, -1 }, /* (124) typename ::= ids */ + { 226, -4 }, /* (125) typename ::= ids LP signed RP */ + { 226, -2 }, /* (126) typename ::= ids UNSIGNED */ + { 227, -1 }, /* (127) signed ::= INTEGER */ + { 227, -2 }, /* (128) signed ::= PLUS INTEGER */ + { 227, -2 }, /* (129) signed ::= MINUS INTEGER */ + { 189, -3 }, /* (130) cmd ::= CREATE TABLE create_table_args */ + { 189, -3 }, /* (131) cmd ::= CREATE TABLE create_stable_args */ + { 189, -3 }, /* (132) cmd ::= CREATE STABLE create_stable_args */ + { 189, -3 }, /* (133) cmd ::= CREATE TABLE create_table_list */ + { 230, -1 }, /* (134) create_table_list ::= create_from_stable */ + { 230, -2 }, /* (135) create_table_list ::= create_table_list create_from_stable */ + { 228, -6 }, /* (136) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + { 229, -10 }, /* (137) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + { 231, -10 }, /* (138) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + { 231, -13 }, /* (139) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + { 233, -3 }, /* (140) tagNamelist ::= tagNamelist COMMA ids */ + { 233, -1 }, /* (141) tagNamelist ::= ids */ + { 228, -5 }, /* (142) create_table_args ::= ifnotexists ids cpxName AS select */ + { 232, -3 }, /* (143) columnlist ::= columnlist COMMA column */ + { 232, -1 }, /* (144) columnlist ::= column */ + { 235, -2 }, /* (145) column ::= ids typename */ + { 210, -3 }, /* (146) tagitemlist ::= tagitemlist COMMA tagitem */ + { 210, -1 }, /* (147) tagitemlist ::= tagitem */ + { 236, -1 }, /* (148) tagitem ::= INTEGER */ + { 236, -1 }, /* (149) tagitem ::= FLOAT */ + { 236, -1 }, /* (150) tagitem ::= STRING */ + { 236, -1 }, /* (151) tagitem ::= BOOL */ + { 236, -1 }, /* (152) tagitem 
::= NULL */ + { 236, -2 }, /* (153) tagitem ::= MINUS INTEGER */ + { 236, -2 }, /* (154) tagitem ::= MINUS FLOAT */ + { 236, -2 }, /* (155) tagitem ::= PLUS INTEGER */ + { 236, -2 }, /* (156) tagitem ::= PLUS FLOAT */ + { 234, -13 }, /* (157) select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + { 234, -3 }, /* (158) select ::= LP select RP */ + { 249, -1 }, /* (159) union ::= select */ + { 249, -4 }, /* (160) union ::= union UNION ALL select */ + { 189, -1 }, /* (161) cmd ::= union */ + { 234, -2 }, /* (162) select ::= SELECT selcollist */ + { 250, -2 }, /* (163) sclp ::= selcollist COMMA */ + { 250, 0 }, /* (164) sclp ::= */ + { 237, -4 }, /* (165) selcollist ::= sclp distinct expr as */ + { 237, -2 }, /* (166) selcollist ::= sclp STAR */ + { 253, -2 }, /* (167) as ::= AS ids */ + { 253, -1 }, /* (168) as ::= ids */ + { 253, 0 }, /* (169) as ::= */ + { 251, -1 }, /* (170) distinct ::= DISTINCT */ + { 251, 0 }, /* (171) distinct ::= */ + { 238, -2 }, /* (172) from ::= FROM tablelist */ + { 238, -4 }, /* (173) from ::= FROM LP union RP */ + { 254, -2 }, /* (174) tablelist ::= ids cpxName */ + { 254, -3 }, /* (175) tablelist ::= ids cpxName ids */ + { 254, -4 }, /* (176) tablelist ::= tablelist COMMA ids cpxName */ + { 254, -5 }, /* (177) tablelist ::= tablelist COMMA ids cpxName ids */ + { 255, -1 }, /* (178) tmvar ::= VARIABLE */ + { 240, -4 }, /* (179) interval_opt ::= INTERVAL LP tmvar RP */ + { 240, -6 }, /* (180) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ + { 240, 0 }, /* (181) interval_opt ::= */ + { 241, 0 }, /* (182) session_option ::= */ + { 241, -7 }, /* (183) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + { 242, 0 }, /* (184) fill_opt ::= */ + { 242, -6 }, /* (185) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + { 242, -4 }, /* (186) fill_opt ::= FILL LP ID RP */ + { 243, -4 }, /* (187) sliding_opt ::= SLIDING LP tmvar RP */ + { 243, 0 }, /* (188) sliding_opt ::= */ + { 245, 0 }, /* (189) orderby_opt ::= */ + { 245, -3 }, /* (190) orderby_opt ::= ORDER BY sortlist */ + { 256, -4 }, /* (191) sortlist ::= sortlist COMMA item sortorder */ + { 256, -2 }, /* (192) sortlist ::= item sortorder */ + { 258, -2 }, /* (193) item ::= ids cpxName */ + { 259, -1 }, /* (194) sortorder ::= ASC */ + { 259, -1 }, /* (195) sortorder ::= DESC */ + { 259, 0 }, /* (196) sortorder ::= */ + { 244, 0 }, /* (197) groupby_opt ::= */ + { 244, -3 }, /* (198) groupby_opt ::= GROUP BY grouplist */ + { 260, -3 }, /* (199) grouplist ::= grouplist COMMA item */ + { 260, -1 }, /* (200) grouplist ::= item */ + { 246, 0 }, /* (201) having_opt ::= */ + { 246, -2 }, /* (202) having_opt ::= HAVING expr */ + { 248, 0 }, /* (203) limit_opt ::= */ + { 248, -2 }, /* (204) limit_opt ::= LIMIT signed */ + { 248, -4 }, /* (205) limit_opt ::= LIMIT signed OFFSET signed */ + { 248, -4 }, /* (206) limit_opt ::= LIMIT signed COMMA signed */ + { 247, 0 }, /* (207) slimit_opt ::= */ + { 247, -2 }, /* (208) slimit_opt ::= SLIMIT signed */ + { 247, -4 }, /* (209) slimit_opt ::= SLIMIT signed SOFFSET signed */ + { 247, -4 }, /* (210) slimit_opt ::= SLIMIT signed COMMA signed */ + { 239, 0 }, /* (211) where_opt ::= */ + { 239, -2 }, /* (212) where_opt ::= WHERE expr */ + { 252, -3 }, /* (213) expr ::= LP expr RP */ + { 252, -1 }, /* (214) expr ::= ID */ + { 252, -3 }, /* (215) expr ::= ID DOT ID */ + { 252, -3 }, /* (216) expr ::= ID DOT STAR */ + { 252, -1 }, /* (217) expr ::= INTEGER */ + { 252, -2 }, /* 
(218) expr ::= MINUS INTEGER */ + { 252, -2 }, /* (219) expr ::= PLUS INTEGER */ + { 252, -1 }, /* (220) expr ::= FLOAT */ + { 252, -2 }, /* (221) expr ::= MINUS FLOAT */ + { 252, -2 }, /* (222) expr ::= PLUS FLOAT */ + { 252, -1 }, /* (223) expr ::= STRING */ + { 252, -1 }, /* (224) expr ::= NOW */ + { 252, -1 }, /* (225) expr ::= VARIABLE */ + { 252, -2 }, /* (226) expr ::= PLUS VARIABLE */ + { 252, -2 }, /* (227) expr ::= MINUS VARIABLE */ + { 252, -1 }, /* (228) expr ::= BOOL */ + { 252, -1 }, /* (229) expr ::= NULL */ + { 252, -4 }, /* (230) expr ::= ID LP exprlist RP */ + { 252, -4 }, /* (231) expr ::= ID LP STAR RP */ + { 252, -3 }, /* (232) expr ::= expr IS NULL */ + { 252, -4 }, /* (233) expr ::= expr IS NOT NULL */ + { 252, -3 }, /* (234) expr ::= expr LT expr */ + { 252, -3 }, /* (235) expr ::= expr GT expr */ + { 252, -3 }, /* (236) expr ::= expr LE expr */ + { 252, -3 }, /* (237) expr ::= expr GE expr */ + { 252, -3 }, /* (238) expr ::= expr NE expr */ + { 252, -3 }, /* (239) expr ::= expr EQ expr */ + { 252, -5 }, /* (240) expr ::= expr BETWEEN expr AND expr */ + { 252, -3 }, /* (241) expr ::= expr AND expr */ + { 252, -3 }, /* (242) expr ::= expr OR expr */ + { 252, -3 }, /* (243) expr ::= expr PLUS expr */ + { 252, -3 }, /* (244) expr ::= expr MINUS expr */ + { 252, -3 }, /* (245) expr ::= expr STAR expr */ + { 252, -3 }, /* (246) expr ::= expr SLASH expr */ + { 252, -3 }, /* (247) expr ::= expr REM expr */ + { 252, -3 }, /* (248) expr ::= expr LIKE expr */ + { 252, -5 }, /* (249) expr ::= expr IN LP exprlist RP */ + { 261, -3 }, /* (250) exprlist ::= exprlist COMMA expritem */ + { 261, -1 }, /* (251) exprlist ::= expritem */ + { 262, -1 }, /* (252) expritem ::= expr */ + { 262, 0 }, /* (253) expritem ::= */ + { 189, -3 }, /* (254) cmd ::= RESET QUERY CACHE */ + { 189, -3 }, /* (255) cmd ::= SYNCDB ids REPLICA */ + { 189, -7 }, /* (256) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + { 189, -7 }, /* (257) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + { 189, -7 }, /* (258) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + { 189, -7 }, /* (259) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + { 189, -8 }, /* (260) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + { 189, -9 }, /* (261) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + { 189, -7 }, /* (262) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + { 189, -7 }, /* (263) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + { 189, -7 }, /* (264) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + { 189, -7 }, /* (265) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + { 189, -8 }, /* (266) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + { 189, -3 }, /* (267) cmd ::= KILL CONNECTION INTEGER */ + { 189, -5 }, /* (268) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + { 189, -5 }, /* (269) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2102,9 +2105,9 @@ static void yy_reduce( /********** Begin reduce actions **********************************************/ YYMINORTYPE yylhsminor; case 0: /* program ::= cmd */ - case 129: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==129); - case 130: /* cmd ::= CREATE TABLE create_stable_args */ yytestcase(yyruleno==130); - case 131: /* cmd ::= CREATE STABLE create_stable_args */ yytestcase(yyruleno==131); + case 130: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==130); + case 131: /* cmd ::= CREATE TABLE create_stable_args */ 
yytestcase(yyruleno==131); + case 132: /* cmd ::= CREATE STABLE create_stable_args */ yytestcase(yyruleno==132); {} break; case 1: /* cmd ::= SHOW DATABASES */ @@ -2171,163 +2174,169 @@ static void yy_reduce( setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_TABLE, 1, &yymsp[-1].minor.yy0); } break; - case 21: /* cmd ::= SHOW CREATE DATABASE ids */ + case 21: /* cmd ::= SHOW CREATE STABLE ids cpxName */ +{ + yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; + setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_STABLE, 1, &yymsp[-1].minor.yy0); +} + break; + case 22: /* cmd ::= SHOW CREATE DATABASE ids */ { setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_DATABASE, 1, &yymsp[0].minor.yy0); } break; - case 22: /* cmd ::= SHOW dbPrefix TABLES */ + case 23: /* cmd ::= SHOW dbPrefix TABLES */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-1].minor.yy0, 0); } break; - case 23: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */ + case 24: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0); } break; - case 24: /* cmd ::= SHOW dbPrefix STABLES */ + case 25: /* cmd ::= SHOW dbPrefix STABLES */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &yymsp[-1].minor.yy0, 0); } break; - case 25: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */ + case 26: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */ { SStrToken token; tSetDbName(&token, &yymsp[-3].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &yymsp[0].minor.yy0); } break; - case 26: /* cmd ::= SHOW dbPrefix VGROUPS */ + case 27: /* cmd ::= SHOW dbPrefix VGROUPS */ { SStrToken token; tSetDbName(&token, &yymsp[-1].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0); } break; - case 27: /* cmd ::= SHOW dbPrefix VGROUPS ids */ + case 28: /* cmd ::= SHOW dbPrefix VGROUPS ids */ { SStrToken token; tSetDbName(&token, &yymsp[-2].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &yymsp[0].minor.yy0); } break; - case 28: /* cmd ::= DROP TABLE ifexists ids cpxName */ + case 29: /* cmd ::= DROP TABLE ifexists ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, -1); } break; - case 29: /* cmd ::= DROP STABLE ifexists ids cpxName */ + case 30: /* cmd ::= DROP STABLE ifexists ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, TSDB_SUPER_TABLE); } break; - case 30: /* cmd ::= DROP DATABASE ifexists ids */ + case 31: /* cmd ::= DROP DATABASE ifexists ids */ { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_DEFAULT, -1); } break; - case 31: /* cmd ::= DROP TOPIC ifexists ids */ + case 32: /* cmd ::= DROP TOPIC ifexists ids */ { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_TOPIC, -1); } break; - case 32: /* cmd ::= DROP DNODE ids */ + case 33: /* cmd ::= DROP DNODE ids */ { setDCLSqlElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &yymsp[0].minor.yy0); } break; - case 33: /* cmd ::= DROP USER ids */ + case 34: /* cmd ::= DROP USER ids */ { setDCLSqlElems(pInfo, TSDB_SQL_DROP_USER, 1, &yymsp[0].minor.yy0); } break; - case 34: /* cmd ::= DROP ACCOUNT ids */ + case 35: /* cmd ::= DROP ACCOUNT ids */ { setDCLSqlElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &yymsp[0].minor.yy0); } break; - case 35: /* cmd ::= USE ids */ + case 36: /* cmd ::= USE ids */ { setDCLSqlElems(pInfo, 
TSDB_SQL_USE_DB, 1, &yymsp[0].minor.yy0);} break; - case 36: /* cmd ::= DESCRIBE ids cpxName */ + case 37: /* cmd ::= DESCRIBE ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0); } break; - case 37: /* cmd ::= ALTER USER ids PASS ids */ + case 38: /* cmd ::= ALTER USER ids PASS ids */ { setAlterUserSql(pInfo, TSDB_ALTER_USER_PASSWD, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, NULL); } break; - case 38: /* cmd ::= ALTER USER ids PRIVILEGE ids */ + case 39: /* cmd ::= ALTER USER ids PRIVILEGE ids */ { setAlterUserSql(pInfo, TSDB_ALTER_USER_PRIVILEGES, &yymsp[-2].minor.yy0, NULL, &yymsp[0].minor.yy0);} break; - case 39: /* cmd ::= ALTER DNODE ids ids */ + case 40: /* cmd ::= ALTER DNODE ids ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 40: /* cmd ::= ALTER DNODE ids ids ids */ + case 41: /* cmd ::= ALTER DNODE ids ids ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 41: /* cmd ::= ALTER LOCAL ids */ + case 42: /* cmd ::= ALTER LOCAL ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &yymsp[0].minor.yy0); } break; - case 42: /* cmd ::= ALTER LOCAL ids ids */ + case 43: /* cmd ::= ALTER LOCAL ids ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 43: /* cmd ::= ALTER DATABASE ids alter_db_optr */ - case 44: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==44); + case 44: /* cmd ::= ALTER DATABASE ids alter_db_optr */ + case 45: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==45); { SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy322, &t);} break; - case 45: /* cmd ::= ALTER ACCOUNT ids acct_optr */ + case 46: /* cmd ::= ALTER ACCOUNT ids acct_optr */ { setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy351);} break; - case 46: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + case 47: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ { setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy351);} break; - case 47: /* ids ::= ID */ - case 48: /* ids ::= STRING */ yytestcase(yyruleno==48); + case 48: /* ids ::= ID */ + case 49: /* ids ::= STRING */ yytestcase(yyruleno==49); {yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 49: /* ifexists ::= IF EXISTS */ + case 50: /* ifexists ::= IF EXISTS */ { yymsp[-1].minor.yy0.n = 1;} break; - case 50: /* ifexists ::= */ - case 52: /* ifnotexists ::= */ yytestcase(yyruleno==52); - case 170: /* distinct ::= */ yytestcase(yyruleno==170); + case 51: /* ifexists ::= */ + case 53: /* ifnotexists ::= */ yytestcase(yyruleno==53); + case 171: /* distinct ::= */ yytestcase(yyruleno==171); { yymsp[1].minor.yy0.n = 0;} break; - case 51: /* ifnotexists ::= IF NOT EXISTS */ + case 52: /* ifnotexists ::= IF NOT EXISTS */ { yymsp[-2].minor.yy0.n = 1;} break; - case 53: /* cmd ::= CREATE DNODE ids */ + case 54: /* cmd ::= CREATE DNODE ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} break; - case 54: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + case 55: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ { setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, 
&yymsp[0].minor.yy351);} break; - case 55: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - case 56: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==56); + case 56: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + case 57: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==57); { setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy322, &yymsp[-2].minor.yy0);} break; - case 57: /* cmd ::= CREATE USER ids PASS ids */ + case 58: /* cmd ::= CREATE USER ids PASS ids */ { setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} break; - case 58: /* pps ::= */ - case 60: /* tseries ::= */ yytestcase(yyruleno==60); - case 62: /* dbs ::= */ yytestcase(yyruleno==62); - case 64: /* streams ::= */ yytestcase(yyruleno==64); - case 66: /* storage ::= */ yytestcase(yyruleno==66); - case 68: /* qtime ::= */ yytestcase(yyruleno==68); - case 70: /* users ::= */ yytestcase(yyruleno==70); - case 72: /* conns ::= */ yytestcase(yyruleno==72); - case 74: /* state ::= */ yytestcase(yyruleno==74); + case 59: /* pps ::= */ + case 61: /* tseries ::= */ yytestcase(yyruleno==61); + case 63: /* dbs ::= */ yytestcase(yyruleno==63); + case 65: /* streams ::= */ yytestcase(yyruleno==65); + case 67: /* storage ::= */ yytestcase(yyruleno==67); + case 69: /* qtime ::= */ yytestcase(yyruleno==69); + case 71: /* users ::= */ yytestcase(yyruleno==71); + case 73: /* conns ::= */ yytestcase(yyruleno==73); + case 75: /* state ::= */ yytestcase(yyruleno==75); { yymsp[1].minor.yy0.n = 0; } break; - case 59: /* pps ::= PPS INTEGER */ - case 61: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==61); - case 63: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==63); - case 65: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==65); - case 67: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==67); - case 69: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==69); - case 71: /* users ::= USERS INTEGER */ yytestcase(yyruleno==71); - case 73: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==73); - case 75: /* state ::= STATE ids */ yytestcase(yyruleno==75); + case 60: /* pps ::= PPS INTEGER */ + case 62: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==62); + case 64: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==64); + case 66: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==66); + case 68: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==68); + case 70: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==70); + case 72: /* users ::= USERS INTEGER */ yytestcase(yyruleno==72); + case 74: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==74); + case 76: /* state ::= STATE ids */ yytestcase(yyruleno==76); { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 76: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + case 77: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ { yylhsminor.yy351.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; yylhsminor.yy351.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; @@ -2341,119 +2350,119 @@ static void yy_reduce( } yymsp[-8].minor.yy351 = yylhsminor.yy351; break; - case 77: /* keep ::= KEEP tagitemlist */ + case 78: /* keep ::= KEEP tagitemlist */ { yymsp[-1].minor.yy159 = yymsp[0].minor.yy159; } break; - case 78: /* cache ::= CACHE INTEGER */ - case 79: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==79); - case 80: /* quorum ::= QUORUM INTEGER */ 
yytestcase(yyruleno==80); - case 81: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==81); - case 82: /* minrows ::= MINROWS INTEGER */ yytestcase(yyruleno==82); - case 83: /* maxrows ::= MAXROWS INTEGER */ yytestcase(yyruleno==83); - case 84: /* blocks ::= BLOCKS INTEGER */ yytestcase(yyruleno==84); - case 85: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==85); - case 86: /* wal ::= WAL INTEGER */ yytestcase(yyruleno==86); - case 87: /* fsync ::= FSYNC INTEGER */ yytestcase(yyruleno==87); - case 88: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==88); - case 89: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==89); - case 90: /* update ::= UPDATE INTEGER */ yytestcase(yyruleno==90); - case 91: /* cachelast ::= CACHELAST INTEGER */ yytestcase(yyruleno==91); - case 92: /* partitions ::= PARTITIONS INTEGER */ yytestcase(yyruleno==92); + case 79: /* cache ::= CACHE INTEGER */ + case 80: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==80); + case 81: /* quorum ::= QUORUM INTEGER */ yytestcase(yyruleno==81); + case 82: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==82); + case 83: /* minrows ::= MINROWS INTEGER */ yytestcase(yyruleno==83); + case 84: /* maxrows ::= MAXROWS INTEGER */ yytestcase(yyruleno==84); + case 85: /* blocks ::= BLOCKS INTEGER */ yytestcase(yyruleno==85); + case 86: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==86); + case 87: /* wal ::= WAL INTEGER */ yytestcase(yyruleno==87); + case 88: /* fsync ::= FSYNC INTEGER */ yytestcase(yyruleno==88); + case 89: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==89); + case 90: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==90); + case 91: /* update ::= UPDATE INTEGER */ yytestcase(yyruleno==91); + case 92: /* cachelast ::= CACHELAST INTEGER */ yytestcase(yyruleno==92); + case 93: /* partitions ::= PARTITIONS INTEGER */ yytestcase(yyruleno==93); { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 93: /* db_optr ::= */ + case 94: /* db_optr ::= */ {setDefaultCreateDbOption(&yymsp[1].minor.yy322); yymsp[1].minor.yy322.dbType = TSDB_DB_TYPE_DEFAULT;} break; - case 94: /* db_optr ::= db_optr cache */ + case 95: /* db_optr ::= db_optr cache */ { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 95: /* db_optr ::= db_optr replica */ - case 112: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==112); + case 96: /* db_optr ::= db_optr replica */ + case 113: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==113); { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 96: /* db_optr ::= db_optr quorum */ - case 113: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==113); + case 97: /* db_optr ::= db_optr quorum */ + case 114: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==114); { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 97: /* db_optr ::= db_optr days */ + case 98: /* db_optr ::= db_optr days */ { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 98: /* db_optr ::= db_optr minrows */ + case 99: /* db_optr ::= db_optr minrows */ { yylhsminor.yy322 = 
yymsp[-1].minor.yy322; yylhsminor.yy322.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 99: /* db_optr ::= db_optr maxrows */ + case 100: /* db_optr ::= db_optr maxrows */ { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 100: /* db_optr ::= db_optr blocks */ - case 115: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==115); + case 101: /* db_optr ::= db_optr blocks */ + case 116: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==116); { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 101: /* db_optr ::= db_optr ctime */ + case 102: /* db_optr ::= db_optr ctime */ { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 102: /* db_optr ::= db_optr wal */ - case 117: /* alter_db_optr ::= alter_db_optr wal */ yytestcase(yyruleno==117); + case 103: /* db_optr ::= db_optr wal */ + case 118: /* alter_db_optr ::= alter_db_optr wal */ yytestcase(yyruleno==118); { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 103: /* db_optr ::= db_optr fsync */ - case 118: /* alter_db_optr ::= alter_db_optr fsync */ yytestcase(yyruleno==118); + case 104: /* db_optr ::= db_optr fsync */ + case 119: /* alter_db_optr ::= alter_db_optr fsync */ yytestcase(yyruleno==119); { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 104: /* db_optr ::= db_optr comp */ - case 116: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==116); + case 105: /* db_optr ::= db_optr comp */ + case 117: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==117); { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 105: /* db_optr ::= db_optr prec */ + case 106: /* db_optr ::= db_optr prec */ { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.precision = yymsp[0].minor.yy0; } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 106: /* db_optr ::= db_optr keep */ - case 114: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==114); + case 107: /* db_optr ::= db_optr keep */ + case 115: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==115); { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.keep = yymsp[0].minor.yy159; } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 107: /* db_optr ::= db_optr update */ - case 119: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==119); + case 108: /* db_optr ::= db_optr update */ + case 120: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==120); { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 108: /* db_optr ::= db_optr cachelast */ - case 120: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==120); + case 109: /* db_optr ::= db_optr cachelast */ + 
case 121: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==121); { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 109: /* topic_optr ::= db_optr */ - case 121: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==121); + case 110: /* topic_optr ::= db_optr */ + case 122: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==122); { yylhsminor.yy322 = yymsp[0].minor.yy322; yylhsminor.yy322.dbType = TSDB_DB_TYPE_TOPIC; } yymsp[0].minor.yy322 = yylhsminor.yy322; break; - case 110: /* topic_optr ::= topic_optr partitions */ - case 122: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==122); + case 111: /* topic_optr ::= topic_optr partitions */ + case 123: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==123); { yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy322 = yylhsminor.yy322; break; - case 111: /* alter_db_optr ::= */ + case 112: /* alter_db_optr ::= */ { setDefaultCreateDbOption(&yymsp[1].minor.yy322); yymsp[1].minor.yy322.dbType = TSDB_DB_TYPE_DEFAULT;} break; - case 123: /* typename ::= ids */ + case 124: /* typename ::= ids */ { yymsp[0].minor.yy0.type = 0; tSetColumnType (&yylhsminor.yy407, &yymsp[0].minor.yy0); } yymsp[0].minor.yy407 = yylhsminor.yy407; break; - case 124: /* typename ::= ids LP signed RP */ + case 125: /* typename ::= ids LP signed RP */ { if (yymsp[-1].minor.yy317 <= 0) { yymsp[-3].minor.yy0.type = 0; @@ -2465,7 +2474,7 @@ static void yy_reduce( } yymsp[-3].minor.yy407 = yylhsminor.yy407; break; - case 125: /* typename ::= ids UNSIGNED */ + case 126: /* typename ::= ids UNSIGNED */ { yymsp[-1].minor.yy0.type = 0; yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z); @@ -2473,20 +2482,20 @@ static void yy_reduce( } yymsp[-1].minor.yy407 = yylhsminor.yy407; break; - case 126: /* signed ::= INTEGER */ + case 127: /* signed ::= INTEGER */ { yylhsminor.yy317 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[0].minor.yy317 = yylhsminor.yy317; break; - case 127: /* signed ::= PLUS INTEGER */ + case 128: /* signed ::= PLUS INTEGER */ { yymsp[-1].minor.yy317 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 128: /* signed ::= MINUS INTEGER */ + case 129: /* signed ::= MINUS INTEGER */ { yymsp[-1].minor.yy317 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} break; - case 132: /* cmd ::= CREATE TABLE create_table_list */ + case 133: /* cmd ::= CREATE TABLE create_table_list */ { pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy14;} break; - case 133: /* create_table_list ::= create_from_stable */ + case 134: /* create_table_list ::= create_from_stable */ { SCreateTableSql* pCreateTable = calloc(1, sizeof(SCreateTableSql)); pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo)); @@ -2497,14 +2506,14 @@ static void yy_reduce( } yymsp[0].minor.yy14 = yylhsminor.yy14; break; - case 134: /* create_table_list ::= create_table_list create_from_stable */ + case 135: /* create_table_list ::= create_table_list create_from_stable */ { taosArrayPush(yymsp[-1].minor.yy14->childTableInfo, &yymsp[0].minor.yy206); yylhsminor.yy14 = yymsp[-1].minor.yy14; } yymsp[-1].minor.yy14 = yylhsminor.yy14; break; - case 135: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + case 136: /* 
create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ { yylhsminor.yy14 = tSetCreateTableInfo(yymsp[-1].minor.yy159, NULL, NULL, TSQL_CREATE_TABLE); setSqlInfo(pInfo, yylhsminor.yy14, NULL, TSDB_SQL_CREATE_TABLE); @@ -2514,7 +2523,7 @@ static void yy_reduce( } yymsp[-5].minor.yy14 = yylhsminor.yy14; break; - case 136: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + case 137: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ { yylhsminor.yy14 = tSetCreateTableInfo(yymsp[-5].minor.yy159, yymsp[-1].minor.yy159, NULL, TSQL_CREATE_STABLE); setSqlInfo(pInfo, yylhsminor.yy14, NULL, TSDB_SQL_CREATE_TABLE); @@ -2524,7 +2533,7 @@ static void yy_reduce( } yymsp[-9].minor.yy14 = yylhsminor.yy14; break; - case 137: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + case 138: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; @@ -2532,7 +2541,7 @@ static void yy_reduce( } yymsp[-9].minor.yy206 = yylhsminor.yy206; break; - case 138: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + case 139: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ { yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n; @@ -2540,15 +2549,15 @@ static void yy_reduce( } yymsp[-12].minor.yy206 = yylhsminor.yy206; break; - case 139: /* tagNamelist ::= tagNamelist COMMA ids */ + case 140: /* tagNamelist ::= tagNamelist COMMA ids */ {taosArrayPush(yymsp[-2].minor.yy159, &yymsp[0].minor.yy0); yylhsminor.yy159 = yymsp[-2].minor.yy159; } yymsp[-2].minor.yy159 = yylhsminor.yy159; break; - case 140: /* tagNamelist ::= ids */ + case 141: /* tagNamelist ::= ids */ {yylhsminor.yy159 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy159, &yymsp[0].minor.yy0);} yymsp[0].minor.yy159 = yylhsminor.yy159; break; - case 141: /* create_table_args ::= ifnotexists ids cpxName AS select */ + case 142: /* create_table_args ::= ifnotexists ids cpxName AS select */ { yylhsminor.yy14 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy116, TSQL_CREATE_STREAM); setSqlInfo(pInfo, yylhsminor.yy14, NULL, TSDB_SQL_CREATE_TABLE); @@ -2558,43 +2567,43 @@ static void yy_reduce( } yymsp[-4].minor.yy14 = yylhsminor.yy14; break; - case 142: /* columnlist ::= columnlist COMMA column */ + case 143: /* columnlist ::= columnlist COMMA column */ {taosArrayPush(yymsp[-2].minor.yy159, &yymsp[0].minor.yy407); yylhsminor.yy159 = yymsp[-2].minor.yy159; } yymsp[-2].minor.yy159 = yylhsminor.yy159; break; - case 143: /* columnlist ::= column */ + case 144: /* columnlist ::= column */ {yylhsminor.yy159 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy159, &yymsp[0].minor.yy407);} yymsp[0].minor.yy159 = yylhsminor.yy159; break; - case 144: /* column ::= ids typename */ + case 145: /* column ::= ids typename */ { tSetColumnInfo(&yylhsminor.yy407, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy407); } yymsp[-1].minor.yy407 = yylhsminor.yy407; break; - case 145: /* tagitemlist ::= tagitemlist COMMA tagitem */ + case 146: /* tagitemlist ::= tagitemlist COMMA tagitem */ { yylhsminor.yy159 = tVariantListAppend(yymsp[-2].minor.yy159, &yymsp[0].minor.yy488, -1); } yymsp[-2].minor.yy159 = yylhsminor.yy159; break; - 
case 146: /* tagitemlist ::= tagitem */ + case 147: /* tagitemlist ::= tagitem */ { yylhsminor.yy159 = tVariantListAppend(NULL, &yymsp[0].minor.yy488, -1); } yymsp[0].minor.yy159 = yylhsminor.yy159; break; - case 147: /* tagitem ::= INTEGER */ - case 148: /* tagitem ::= FLOAT */ yytestcase(yyruleno==148); - case 149: /* tagitem ::= STRING */ yytestcase(yyruleno==149); - case 150: /* tagitem ::= BOOL */ yytestcase(yyruleno==150); + case 148: /* tagitem ::= INTEGER */ + case 149: /* tagitem ::= FLOAT */ yytestcase(yyruleno==149); + case 150: /* tagitem ::= STRING */ yytestcase(yyruleno==150); + case 151: /* tagitem ::= BOOL */ yytestcase(yyruleno==151); { toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy488, &yymsp[0].minor.yy0); } yymsp[0].minor.yy488 = yylhsminor.yy488; break; - case 151: /* tagitem ::= NULL */ + case 152: /* tagitem ::= NULL */ { yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy488, &yymsp[0].minor.yy0); } yymsp[0].minor.yy488 = yylhsminor.yy488; break; - case 152: /* tagitem ::= MINUS INTEGER */ - case 153: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==153); - case 154: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==154); - case 155: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==155); + case 153: /* tagitem ::= MINUS INTEGER */ + case 154: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==154); + case 155: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==155); + case 156: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==156); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; @@ -2603,128 +2612,128 @@ static void yy_reduce( } yymsp[-1].minor.yy488 = yylhsminor.yy488; break; - case 156: /* select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + case 157: /* select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { yylhsminor.yy116 = tSetQuerySqlNode(&yymsp[-12].minor.yy0, yymsp[-11].minor.yy159, yymsp[-10].minor.yy236, yymsp[-9].minor.yy118, yymsp[-4].minor.yy159, yymsp[-3].minor.yy159, &yymsp[-8].minor.yy184, &yymsp[-7].minor.yy249, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy159, &yymsp[0].minor.yy440, &yymsp[-1].minor.yy440, yymsp[-2].minor.yy118); } yymsp[-12].minor.yy116 = yylhsminor.yy116; break; - case 157: /* select ::= LP select RP */ + case 158: /* select ::= LP select RP */ {yymsp[-2].minor.yy116 = yymsp[-1].minor.yy116;} break; - case 158: /* union ::= select */ + case 159: /* union ::= select */ { yylhsminor.yy159 = setSubclause(NULL, yymsp[0].minor.yy116); } yymsp[0].minor.yy159 = yylhsminor.yy159; break; - case 159: /* union ::= union UNION ALL select */ + case 160: /* union ::= union UNION ALL select */ { yylhsminor.yy159 = appendSelectClause(yymsp[-3].minor.yy159, yymsp[0].minor.yy116); } yymsp[-3].minor.yy159 = yylhsminor.yy159; break; - case 160: /* cmd ::= union */ + case 161: /* cmd ::= union */ { setSqlInfo(pInfo, yymsp[0].minor.yy159, NULL, TSDB_SQL_SELECT); } break; - case 161: /* select ::= SELECT selcollist */ + case 162: /* select ::= SELECT selcollist */ { yylhsminor.yy116 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy159, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } yymsp[-1].minor.yy116 = yylhsminor.yy116; break; - case 162: /* sclp ::= selcollist COMMA */ + case 163: /* sclp ::= selcollist COMMA */ {yylhsminor.yy159 = 
yymsp[-1].minor.yy159;} yymsp[-1].minor.yy159 = yylhsminor.yy159; break; - case 163: /* sclp ::= */ - case 188: /* orderby_opt ::= */ yytestcase(yyruleno==188); + case 164: /* sclp ::= */ + case 189: /* orderby_opt ::= */ yytestcase(yyruleno==189); {yymsp[1].minor.yy159 = 0;} break; - case 164: /* selcollist ::= sclp distinct expr as */ + case 165: /* selcollist ::= sclp distinct expr as */ { yylhsminor.yy159 = tSqlExprListAppend(yymsp[-3].minor.yy159, yymsp[-1].minor.yy118, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } yymsp[-3].minor.yy159 = yylhsminor.yy159; break; - case 165: /* selcollist ::= sclp STAR */ + case 166: /* selcollist ::= sclp STAR */ { tSqlExpr *pNode = tSqlExprCreateIdValue(NULL, TK_ALL); yylhsminor.yy159 = tSqlExprListAppend(yymsp[-1].minor.yy159, pNode, 0, 0); } yymsp[-1].minor.yy159 = yylhsminor.yy159; break; - case 166: /* as ::= AS ids */ + case 167: /* as ::= AS ids */ { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 167: /* as ::= ids */ + case 168: /* as ::= ids */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 168: /* as ::= */ + case 169: /* as ::= */ { yymsp[1].minor.yy0.n = 0; } break; - case 169: /* distinct ::= DISTINCT */ + case 170: /* distinct ::= DISTINCT */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 171: /* from ::= FROM tablelist */ + case 172: /* from ::= FROM tablelist */ {yymsp[-1].minor.yy236 = yymsp[0].minor.yy236;} break; - case 172: /* from ::= FROM LP union RP */ + case 173: /* from ::= FROM LP union RP */ {yymsp[-3].minor.yy236 = setSubquery(NULL, yymsp[-1].minor.yy159);} break; - case 173: /* tablelist ::= ids cpxName */ + case 174: /* tablelist ::= ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy236 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); } yymsp[-1].minor.yy236 = yylhsminor.yy236; break; - case 174: /* tablelist ::= ids cpxName ids */ + case 175: /* tablelist ::= ids cpxName ids */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; yylhsminor.yy236 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } yymsp[-2].minor.yy236 = yylhsminor.yy236; break; - case 175: /* tablelist ::= tablelist COMMA ids cpxName */ + case 176: /* tablelist ::= tablelist COMMA ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy236 = setTableNameList(yymsp[-3].minor.yy236, &yymsp[-1].minor.yy0, NULL); } yymsp[-3].minor.yy236 = yylhsminor.yy236; break; - case 176: /* tablelist ::= tablelist COMMA ids cpxName ids */ + case 177: /* tablelist ::= tablelist COMMA ids cpxName ids */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; yylhsminor.yy236 = setTableNameList(yymsp[-4].minor.yy236, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } yymsp[-4].minor.yy236 = yylhsminor.yy236; break; - case 177: /* tmvar ::= VARIABLE */ + case 178: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 178: /* interval_opt ::= INTERVAL LP tmvar RP */ + case 179: /* interval_opt ::= INTERVAL LP tmvar RP */ {yymsp[-3].minor.yy184.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy184.offset.n = 0;} break; - case 179: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ + case 180: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ {yymsp[-5].minor.yy184.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy184.offset = yymsp[-1].minor.yy0;} break; - case 180: /* interval_opt ::= */ + case 181: 
/* interval_opt ::= */ {memset(&yymsp[1].minor.yy184, 0, sizeof(yymsp[1].minor.yy184));} break; - case 181: /* session_option ::= */ + case 182: /* session_option ::= */ {yymsp[1].minor.yy249.col.n = 0; yymsp[1].minor.yy249.gap.n = 0;} break; - case 182: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + case 183: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; yymsp[-6].minor.yy249.col = yymsp[-4].minor.yy0; yymsp[-6].minor.yy249.gap = yymsp[-1].minor.yy0; } break; - case 183: /* fill_opt ::= */ + case 184: /* fill_opt ::= */ { yymsp[1].minor.yy159 = 0; } break; - case 184: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + case 185: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); @@ -2734,34 +2743,34 @@ static void yy_reduce( yymsp[-5].minor.yy159 = yymsp[-1].minor.yy159; } break; - case 185: /* fill_opt ::= FILL LP ID RP */ + case 186: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-3].minor.yy159 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; - case 186: /* sliding_opt ::= SLIDING LP tmvar RP */ + case 187: /* sliding_opt ::= SLIDING LP tmvar RP */ {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } break; - case 187: /* sliding_opt ::= */ + case 188: /* sliding_opt ::= */ {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; - case 189: /* orderby_opt ::= ORDER BY sortlist */ + case 190: /* orderby_opt ::= ORDER BY sortlist */ {yymsp[-2].minor.yy159 = yymsp[0].minor.yy159;} break; - case 190: /* sortlist ::= sortlist COMMA item sortorder */ + case 191: /* sortlist ::= sortlist COMMA item sortorder */ { yylhsminor.yy159 = tVariantListAppend(yymsp[-3].minor.yy159, &yymsp[-1].minor.yy488, yymsp[0].minor.yy20); } yymsp[-3].minor.yy159 = yylhsminor.yy159; break; - case 191: /* sortlist ::= item sortorder */ + case 192: /* sortlist ::= item sortorder */ { yylhsminor.yy159 = tVariantListAppend(NULL, &yymsp[-1].minor.yy488, yymsp[0].minor.yy20); } yymsp[-1].minor.yy159 = yylhsminor.yy159; break; - case 192: /* item ::= ids cpxName */ + case 193: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; @@ -2770,227 +2779,227 @@ static void yy_reduce( } yymsp[-1].minor.yy488 = yylhsminor.yy488; break; - case 193: /* sortorder ::= ASC */ + case 194: /* sortorder ::= ASC */ { yymsp[0].minor.yy20 = TSDB_ORDER_ASC; } break; - case 194: /* sortorder ::= DESC */ + case 195: /* sortorder ::= DESC */ { yymsp[0].minor.yy20 = TSDB_ORDER_DESC;} break; - case 195: /* sortorder ::= */ + case 196: /* sortorder ::= */ { yymsp[1].minor.yy20 = TSDB_ORDER_ASC; } break; - case 196: /* groupby_opt ::= */ + case 197: /* groupby_opt ::= */ { yymsp[1].minor.yy159 = 0;} break; - case 197: /* groupby_opt ::= GROUP BY grouplist */ + case 198: /* groupby_opt ::= GROUP BY grouplist */ { yymsp[-2].minor.yy159 = yymsp[0].minor.yy159;} break; - case 198: /* grouplist ::= grouplist COMMA item */ + case 199: /* grouplist ::= grouplist COMMA item */ { yylhsminor.yy159 = tVariantListAppend(yymsp[-2].minor.yy159, &yymsp[0].minor.yy488, -1); } yymsp[-2].minor.yy159 = yylhsminor.yy159; break; - case 199: /* grouplist ::= item */ + case 200: /* grouplist ::= item */ { yylhsminor.yy159 = tVariantListAppend(NULL, &yymsp[0].minor.yy488, -1); } yymsp[0].minor.yy159 = yylhsminor.yy159; break; - case 200: /* having_opt ::= */ - case 210: /* where_opt ::= */ 
yytestcase(yyruleno==210); - case 252: /* expritem ::= */ yytestcase(yyruleno==252); + case 201: /* having_opt ::= */ + case 211: /* where_opt ::= */ yytestcase(yyruleno==211); + case 253: /* expritem ::= */ yytestcase(yyruleno==253); {yymsp[1].minor.yy118 = 0;} break; - case 201: /* having_opt ::= HAVING expr */ - case 211: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==211); + case 202: /* having_opt ::= HAVING expr */ + case 212: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==212); {yymsp[-1].minor.yy118 = yymsp[0].minor.yy118;} break; - case 202: /* limit_opt ::= */ - case 206: /* slimit_opt ::= */ yytestcase(yyruleno==206); + case 203: /* limit_opt ::= */ + case 207: /* slimit_opt ::= */ yytestcase(yyruleno==207); {yymsp[1].minor.yy440.limit = -1; yymsp[1].minor.yy440.offset = 0;} break; - case 203: /* limit_opt ::= LIMIT signed */ - case 207: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==207); + case 204: /* limit_opt ::= LIMIT signed */ + case 208: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==208); {yymsp[-1].minor.yy440.limit = yymsp[0].minor.yy317; yymsp[-1].minor.yy440.offset = 0;} break; - case 204: /* limit_opt ::= LIMIT signed OFFSET signed */ + case 205: /* limit_opt ::= LIMIT signed OFFSET signed */ { yymsp[-3].minor.yy440.limit = yymsp[-2].minor.yy317; yymsp[-3].minor.yy440.offset = yymsp[0].minor.yy317;} break; - case 205: /* limit_opt ::= LIMIT signed COMMA signed */ + case 206: /* limit_opt ::= LIMIT signed COMMA signed */ { yymsp[-3].minor.yy440.limit = yymsp[0].minor.yy317; yymsp[-3].minor.yy440.offset = yymsp[-2].minor.yy317;} break; - case 208: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ + case 209: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ {yymsp[-3].minor.yy440.limit = yymsp[-2].minor.yy317; yymsp[-3].minor.yy440.offset = yymsp[0].minor.yy317;} break; - case 209: /* slimit_opt ::= SLIMIT signed COMMA signed */ + case 210: /* slimit_opt ::= SLIMIT signed COMMA signed */ {yymsp[-3].minor.yy440.limit = yymsp[0].minor.yy317; yymsp[-3].minor.yy440.offset = yymsp[-2].minor.yy317;} break; - case 212: /* expr ::= LP expr RP */ + case 213: /* expr ::= LP expr RP */ {yylhsminor.yy118 = yymsp[-1].minor.yy118; yylhsminor.yy118->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy118->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 213: /* expr ::= ID */ + case 214: /* expr ::= ID */ { yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);} yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 214: /* expr ::= ID DOT ID */ + case 215: /* expr ::= ID DOT ID */ { yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);} yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 215: /* expr ::= ID DOT STAR */ + case 216: /* expr ::= ID DOT STAR */ { yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);} yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 216: /* expr ::= INTEGER */ + case 217: /* expr ::= INTEGER */ { yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);} yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 217: /* expr ::= MINUS INTEGER */ - case 218: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==218); + case 218: /* expr ::= MINUS INTEGER */ + case 219: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==219); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type 
= TK_INTEGER; yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);} yymsp[-1].minor.yy118 = yylhsminor.yy118; break; - case 219: /* expr ::= FLOAT */ + case 220: /* expr ::= FLOAT */ { yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);} yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 220: /* expr ::= MINUS FLOAT */ - case 221: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==221); + case 221: /* expr ::= MINUS FLOAT */ + case 222: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==222); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);} yymsp[-1].minor.yy118 = yylhsminor.yy118; break; - case 222: /* expr ::= STRING */ + case 223: /* expr ::= STRING */ { yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);} yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 223: /* expr ::= NOW */ + case 224: /* expr ::= NOW */ { yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); } yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 224: /* expr ::= VARIABLE */ + case 225: /* expr ::= VARIABLE */ { yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);} yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 225: /* expr ::= PLUS VARIABLE */ - case 226: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==226); + case 226: /* expr ::= PLUS VARIABLE */ + case 227: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==227); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_VARIABLE);} yymsp[-1].minor.yy118 = yylhsminor.yy118; break; - case 227: /* expr ::= BOOL */ + case 228: /* expr ::= BOOL */ { yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);} yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 228: /* expr ::= NULL */ + case 229: /* expr ::= NULL */ { yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);} yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 229: /* expr ::= ID LP exprlist RP */ + case 230: /* expr ::= ID LP exprlist RP */ { yylhsminor.yy118 = tSqlExprCreateFunction(yymsp[-1].minor.yy159, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } yymsp[-3].minor.yy118 = yylhsminor.yy118; break; - case 230: /* expr ::= ID LP STAR RP */ + case 231: /* expr ::= ID LP STAR RP */ { yylhsminor.yy118 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } yymsp[-3].minor.yy118 = yylhsminor.yy118; break; - case 231: /* expr ::= expr IS NULL */ + case 232: /* expr ::= expr IS NULL */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, NULL, TK_ISNULL);} yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 232: /* expr ::= expr IS NOT NULL */ + case 233: /* expr ::= expr IS NOT NULL */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-3].minor.yy118, NULL, TK_NOTNULL);} yymsp[-3].minor.yy118 = yylhsminor.yy118; break; - case 233: /* expr ::= expr LT expr */ + case 234: /* expr ::= expr LT expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_LT);} yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 234: /* expr ::= expr GT expr */ + case 235: /* expr ::= expr GT expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_GT);} yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 235: /* expr ::= expr LE 
expr */ + case 236: /* expr ::= expr LE expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_LE);} yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 236: /* expr ::= expr GE expr */ + case 237: /* expr ::= expr GE expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_GE);} yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 237: /* expr ::= expr NE expr */ + case 238: /* expr ::= expr NE expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_NE);} yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 238: /* expr ::= expr EQ expr */ + case 239: /* expr ::= expr EQ expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_EQ);} yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 239: /* expr ::= expr BETWEEN expr AND expr */ + case 240: /* expr ::= expr BETWEEN expr AND expr */ { tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy118); yylhsminor.yy118 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy118, yymsp[-2].minor.yy118, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy118, TK_LE), TK_AND);} yymsp[-4].minor.yy118 = yylhsminor.yy118; break; - case 240: /* expr ::= expr AND expr */ + case 241: /* expr ::= expr AND expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_AND);} yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 241: /* expr ::= expr OR expr */ + case 242: /* expr ::= expr OR expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_OR); } yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 242: /* expr ::= expr PLUS expr */ + case 243: /* expr ::= expr PLUS expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_PLUS); } yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 243: /* expr ::= expr MINUS expr */ + case 244: /* expr ::= expr MINUS expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_MINUS); } yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 244: /* expr ::= expr STAR expr */ + case 245: /* expr ::= expr STAR expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_STAR); } yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 245: /* expr ::= expr SLASH expr */ + case 246: /* expr ::= expr SLASH expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_DIVIDE);} yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 246: /* expr ::= expr REM expr */ + case 247: /* expr ::= expr REM expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_REM); } yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 247: /* expr ::= expr LIKE expr */ + case 248: /* expr ::= expr LIKE expr */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_LIKE); } yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 248: /* expr ::= expr IN LP exprlist RP */ + case 249: /* expr ::= expr IN LP exprlist RP */ {yylhsminor.yy118 = tSqlExprCreate(yymsp[-4].minor.yy118, (tSqlExpr*)yymsp[-1].minor.yy159, TK_IN); } yymsp[-4].minor.yy118 = yylhsminor.yy118; break; - case 249: /* exprlist ::= exprlist COMMA expritem */ + case 250: /* exprlist ::= exprlist COMMA expritem */ {yylhsminor.yy159 = tSqlExprListAppend(yymsp[-2].minor.yy159,yymsp[0].minor.yy118,0, 0);} yymsp[-2].minor.yy159 = yylhsminor.yy159; break; - case 250: /* exprlist ::= expritem */ + case 
251: /* exprlist ::= expritem */ {yylhsminor.yy159 = tSqlExprListAppend(0,yymsp[0].minor.yy118,0, 0);} yymsp[0].minor.yy159 = yylhsminor.yy159; break; - case 251: /* expritem ::= expr */ + case 252: /* expritem ::= expr */ {yylhsminor.yy118 = yymsp[0].minor.yy118;} yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 253: /* cmd ::= RESET QUERY CACHE */ + case 254: /* cmd ::= RESET QUERY CACHE */ { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 254: /* cmd ::= SYNCDB ids REPLICA */ + case 255: /* cmd ::= SYNCDB ids REPLICA */ { setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &yymsp[-1].minor.yy0);} break; - case 255: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 256: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 256: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 257: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3001,14 +3010,14 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 257: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 258: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 258: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 259: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3019,7 +3028,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 259: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 260: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3033,7 +3042,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 260: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 261: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; @@ -3045,14 +3054,14 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 261: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + case 262: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 262: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + case 263: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3063,14 +3072,14 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 263: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + case 264: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* 
pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 264: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + case 265: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3081,7 +3090,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 265: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + case 266: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3095,13 +3104,13 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 266: /* cmd ::= KILL CONNECTION INTEGER */ + case 267: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 267: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 268: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 268: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 269: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: From 9cc8a36c1ffb38d8996ff2987c9d129a7fa8c957 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Sat, 8 May 2021 19:49:45 +0800 Subject: [PATCH 091/140] support bind single column --- src/client/src/tscPrepare.c | 62 ++++++++++++++++++++++++++++--------- src/inc/taos.h | 1 + 2 files changed, 49 insertions(+), 14 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 1c263487df..9af7b11615 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -762,7 +762,7 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU if (bind->is_null != NULL && bind->is_null[i]) { setNull(data + param->offset, param->type, param->bytes); - return TSDB_CODE_SUCCESS; + continue; } if (size > 0) { @@ -776,13 +776,15 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU } } else if (param->type == TSDB_DATA_TYPE_BINARY) { if (bind->length[i] > (uintptr_t)param->bytes) { + tscError("invalid binary length"); return TSDB_CODE_TSC_INVALID_VALUE; } - size = (short)bind->length[i]; - STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer + bind->buffer_length * i, size); + int16_t bsize = (short)bind->length[i]; + STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer + bind->buffer_length * i, bsize); } else if (param->type == TSDB_DATA_TYPE_NCHAR) { int32_t output = 0; if (!taosMbsToUcs4(bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { + tscError("convert failed"); return TSDB_CODE_TSC_INVALID_VALUE; } varDataSetLen(data + param->offset, output); @@ -857,7 +859,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { } -static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind) { +static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int colIdx) { SSqlCmd* pCmd = &stmt->pSql->cmd; STscStmt* pStmt = (STscStmt*)stmt; int rowNum = bind->num; @@ -877,6 +879,8 @@ 
static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind) { pBlock = *t1; + assert(colIdx == -1 || (colIdx >= 0 && colIdx < pBlock->numOfParams)); + uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + rowNum) * pBlock->rowSize; if (totalDataSize > pBlock->nAllocSize) { const double factor = 1.5; @@ -890,21 +894,35 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind) { pBlock->nAllocSize = (uint32_t)(totalDataSize * factor); } - for (uint32_t j = 0; j < pBlock->numOfParams; ++j) { - SParamInfo* param = &pBlock->params[j]; - if (bind[param->idx].num != rowNum) { - tscError("param %d: num[%d:%d] not match", param->idx, rowNum, bind[param->idx].num); - return TSDB_CODE_TSC_INVALID_VALUE; + if (colIdx == -1) { + for (uint32_t j = 0; j < pBlock->numOfParams; ++j) { + SParamInfo* param = &pBlock->params[j]; + if (bind[param->idx].num != rowNum) { + tscError("param %d: num[%d:%d] not match", param->idx, rowNum, bind[param->idx].num); + return TSDB_CODE_TSC_INVALID_VALUE; + } + + int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize); + if (code != TSDB_CODE_SUCCESS) { + tscError("param %d: type mismatch or invalid", param->idx); + return code; + } } - - int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize); + + pCmd->batchSize += rowNum - 1; + } else { + SParamInfo* param = &pBlock->params[colIdx]; + + int code = doBindBatchParam(pBlock, param, bind, pCmd->batchSize); if (code != TSDB_CODE_SUCCESS) { tscError("param %d: type mismatch or invalid", param->idx); return code; } - } - pCmd->batchSize += rowNum - 1; + if (colIdx == (pBlock->numOfParams - 1)) { + pCmd->batchSize += rowNum - 1; + } + } return TSDB_CODE_SUCCESS; } @@ -1386,9 +1404,25 @@ int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) { return TSDB_CODE_TSC_APP_ERROR; } - return insertStmtBindParamBatch(pStmt, bind); + return insertStmtBindParamBatch(pStmt, bind, -1); } +int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx) { + STscStmt* pStmt = (STscStmt*)stmt; + if (bind == NULL || bind->num <= 0) { + tscError("invalid parameter"); + return TSDB_CODE_TSC_APP_ERROR; + } + + if (!pStmt->isInsert || !pStmt->multiTbInsert || !pStmt->mtb.nameSet) { + tscError("not or invalid batch insert"); + return TSDB_CODE_TSC_APP_ERROR; + } + + return insertStmtBindParamBatch(pStmt, bind, colIdx); +} + + int taos_stmt_add_batch(TAOS_STMT* stmt) { STscStmt* pStmt = (STscStmt*)stmt; diff --git a/src/inc/taos.h b/src/inc/taos.h index ca87337800..788502b45a 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -119,6 +119,7 @@ int taos_stmt_num_params(TAOS_STMT *stmt, int *nums); int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes); int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind); int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind); +int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx); int taos_stmt_add_batch(TAOS_STMT *stmt); int taos_stmt_execute(TAOS_STMT *stmt); TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt); From 75172ef7bbe21d1a5634d4d562e16801e6b92538 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 8 May 2021 20:44:29 +0800 Subject: [PATCH 092/140] [TD-4096] --- src/client/src/tscSQLParser.c | 45 +++++++++++++---------------------- 1 file changed, 17 insertions(+), 28 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4cf1423c43..4251695d7b 100644 --- 
a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -64,7 +64,7 @@ static char* getAccountId(SSqlObj* pSql); static bool has(SArray* pFieldList, int32_t startIdx, const char* name); static char* cloneCurrentDBName(SSqlObj* pSql); -static bool hasSpecifyDB(SStrToken* pTableName); +static int32_t getDelimiterIndex(SStrToken* pTableName); static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd); static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd); @@ -426,17 +426,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { case TSDB_SQL_DESCRIBE_TABLE: { const char* msg1 = "invalid table name"; - const char* msg2 = "table name too long"; SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0); if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - - if (!tscValidateTableNameLength(pToken->n)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); - } - // additional msg has been attached already code = tscSetTableFullName(pTableMetaInfo, pToken, pSql); if (code != TSDB_CODE_SUCCESS) { @@ -447,17 +441,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } case TSDB_SQL_SHOW_CREATE_TABLE: { const char* msg1 = "invalid table name"; - const char* msg2 = "table name is too long"; SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0); if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (!tscValidateTableNameLength(pToken->n)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); - } - code = tscSetTableFullName(pTableMetaInfo, pToken, pSql); if (code != TSDB_CODE_SUCCESS) { return code; @@ -987,11 +976,13 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam const char* msg1 = "name too long"; const char* msg2 = "acctId too long"; const char* msg3 = "no acctId"; + const char* msg4 = "db name too long"; + const char* msg5 = "table name too long"; SSqlCmd* pCmd = &pSql->cmd; int32_t code = TSDB_CODE_SUCCESS; - - if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path + int32_t idx = getDelimiterIndex(pTableName); + if (idx != -1) { // db has been specified in sql string so we ignore current db path char* acctId = getAccountId(pSql); if (acctId == NULL || strlen(acctId) <= 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); @@ -1001,6 +992,13 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam if (code != 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } + if (idx >= TSDB_DB_NAME_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + if (pTableName->n - 1 - idx >= TSDB_TABLE_NAME_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + char name[TSDB_TABLE_FNAME_LEN] = {0}; strncpy(name, pTableName->z, pTableName->n); @@ -1345,14 +1343,13 @@ static char* cloneCurrentDBName(SSqlObj* pSql) { } /* length limitation, strstr cannot be applied */ -static bool hasSpecifyDB(SStrToken* pTableName) { - for (uint32_t i = 0; i < pTableName->n; ++i) { - if (pTableName->z[i] == TS_PATH_DELIMITER[0]) { - return true; +static int32_t getDelimiterIndex(SStrToken* pTableName) { + for (uint32_t i = 0; i < pTableName->n; ++i) { + if (pTableName->z[i] == TS_PATH_DELIMITER[0]) { + return i; } } - - return false; + return -1; } int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* 
tableName, int32_t* xlen) { @@ -7483,11 +7480,3 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) { return false; } - - - - - - - - From a1fe6660fbde06bede72f55ba2e10f3fdbf04d7e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 8 May 2021 22:12:44 +0800 Subject: [PATCH 093/140] [td-4030] --- .../jni/com_taosdata_jdbc_TSDBJNIConnector.h | 40 ++++- src/client/src/TSDBJNIConnector.c | 138 +++++++++++++- .../com/taosdata/jdbc/TSDBJNIConnector.java | 58 +++++- .../taosdata/jdbc/TSDBPreparedStatement.java | 169 ++++++++++++++++-- .../taosdata/jdbc/TSDBResultSetBlockData.java | 56 +----- .../jdbc/rs/RestfulPreparedStatement.java | 11 -- src/query/src/qTokenizer.c | 6 +- src/util/inc/tstoken.h | 2 +- 8 files changed, 393 insertions(+), 87 deletions(-) diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h index b3060e2c82..bcd0f63818 100644 --- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h +++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h @@ -100,7 +100,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp /* * Class: com_taosdata_jdbc_TSDBJNIConnector * Method: isUpdateQueryImp - * Signature: (J)J + * Signature: (JJ)I */ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp (JNIEnv *env, jobject jobj, jlong con, jlong tres); @@ -185,6 +185,44 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp (JNIEnv *, jobject, jlong, jbyteArray); +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: prepareStmtImp + * Signature: ([BJ)I + */ +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp + (JNIEnv *, jobject, jbyteArray, jlong); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: setBindTableNameImp + * Signature: (JLjava/lang/String;J)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp + (JNIEnv *, jobject, jlong, jstring, jlong); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: bindColDataImp + * Signature: (J[BIIIJ)J + */ +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp +(JNIEnv *, jobject, jlong, jbyteArray, jint, jint, jint, jlong); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: executeBatchImp + * Signature: (JJ)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: executeBatchImp + * Signature: (JJ)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con); + #ifdef __cplusplus } #endif diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 7447e36ac9..c28766b860 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -687,4 +687,140 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrec } return taos_result_precision(result); -} \ No newline at end of file +} + +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(JNIEnv *env, jobject jobj, jbyteArray jsql, jlong con) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection already closed", jobj); + return JNI_CONNECTION_NULL; + } + + if (jsql == NULL) { + jniError("jobj:%p, 
conn:%p, empty sql string", jobj, tscon); + return JNI_SQL_NULL; + } + + jsize len = (*env)->GetArrayLength(env, jsql); + + char *str = (char *) calloc(1, sizeof(char) * (len + 1)); + if (str == NULL) { + jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon); + return JNI_OUT_OF_MEMORY; + } + + (*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str); + if ((*env)->ExceptionCheck(env)) { + // todo handle error + } + + TAOS_STMT* pStmt = taos_stmt_init(tscon); + int32_t code = taos_stmt_prepare(pStmt, str, len); + if (code != TSDB_CODE_SUCCESS) { + jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); + return JNI_TDENGINE_ERROR; + } + + free(str); + return (jlong) pStmt; +} + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp(JNIEnv *env, jobject jobj, jlong stmt, jstring jname, jlong conn) { + TAOS *tsconn = (TAOS *)conn; + if (tsconn == NULL) { + jniError("jobj:%p, connection already closed", jobj); + return JNI_CONNECTION_NULL; + } + + TAOS_STMT* pStmt = (TAOS_STMT*) stmt; + if (pStmt == NULL) { + jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn); + return JNI_SQL_NULL; + } + + const char *name = (*env)->GetStringUTFChars(env, jname, NULL); + + int32_t code = taos_stmt_set_tbname((void*)stmt, name); + if (code != TSDB_CODE_SUCCESS) { + jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code)); + return JNI_TDENGINE_ERROR; + } + + jniDebug("jobj:%p, conn:%p, set stmt bind table name", jobj, tsconn); + + (*env)->ReleaseStringUTFChars(env, jname, name); + return JNI_SUCCESS; +} + +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(JNIEnv *env, jobject jobj, jlong stmt, + jbyteArray data, jint dataType, jint numOfRows, jint colIndex, jlong con) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection already closed", jobj); + return JNI_CONNECTION_NULL; + } + + TAOS_STMT* pStmt = (TAOS_STMT*) stmt; + if (pStmt == NULL) { + jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon); + return JNI_SQL_NULL; + } + +#if 0 + TAOS_BIND* b = malloc(20); + b.num= jrows; + int32_t code = taos_stmt_bind_param_batch(stmt, b, colInex); + if (code != TSDB_CODE_SUCCESS) { + jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); + return JNI_TDENGINE_ERROR; + } +#endif + + return JNI_SUCCESS; +} + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection already closed", jobj); + return JNI_CONNECTION_NULL; + } + + TAOS_STMT *pStmt = (TAOS_STMT*) stmt; + if (pStmt == NULL) { + jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon); + return JNI_SQL_NULL; + } + + int32_t code = taos_stmt_execute(pStmt); + if (code != TSDB_CODE_SUCCESS) { + jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); + return JNI_TDENGINE_ERROR; + } + + jniDebug("jobj:%p, conn:%p, batch execute", jobj, tscon); + return JNI_SUCCESS; +} + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection already closed", jobj); + return JNI_CONNECTION_NULL; + } + + TAOS_STMT *pStmt = (TAOS_STMT*) stmt; + if (pStmt == NULL) { + jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon); + return JNI_SQL_NULL; + } + + int32_t code = taos_stmt_close(pStmt); + if (code != 
TSDB_CODE_SUCCESS) { + jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); + return JNI_TDENGINE_ERROR; + } + + jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon); + return JNI_SUCCESS; +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 5e3ffffa4f..05e28578f1 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -18,6 +18,7 @@ package com.taosdata.jdbc; import com.taosdata.jdbc.utils.TaosInfo; +import java.nio.ByteBuffer; import java.sql.SQLException; import java.sql.SQLWarning; import java.util.List; @@ -75,7 +76,6 @@ public class TSDBJNIConnector { public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException { if (this.taos != TSDBConstants.JNI_NULL_POINTER) { -// this.closeConnectionImp(this.taos); closeConnection(); this.taos = TSDBConstants.JNI_NULL_POINTER; } @@ -97,12 +97,6 @@ public class TSDBJNIConnector { * @throws SQLException */ public long executeQuery(String sql) throws SQLException { - // close previous result set if the user forgets to invoke the - // free method to close previous result set. -// if (!this.isResultsetClosed) { -// freeResultSet(taosResultSetPointer); -// } - Long pSql = 0l; try { pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos); @@ -297,4 +291,54 @@ public class TSDBJNIConnector { } private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes); + + public long prepareStmt(String sql) throws SQLException { + Long stmt = 0L; + try { + stmt = prepareStmtImp(sql, this.taos); + } catch (Exception e) { + e.printStackTrace(); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING); + } + + if (stmt == TSDBConstants.JNI_CONNECTION_NULL) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); + } + + if (stmt == TSDBConstants.JNI_SQL_NULL) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL); + } + + if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY); + } + + return stmt; + } + + private native long prepareStmtImp(String sql, long con); + + public int setBindTableName(long stmt, String tableName) { + return setBindTableNameImp(stmt, tableName, this.taos); + } + + private native int setBindTableNameImp(long stmt, String name, long conn); + + public int bindColumnDataArray(long stmt, byte[] data, int type, int numOfRows, int columnIndex) { + return bindColDataImp(stmt, data, type, numOfRows, columnIndex, this.taos); + } + + private native int bindColDataImp(long stmt, byte[] data, int type, int numOfRows, int columnIndex, long conn); + + public int executeBatch(long stmt) { + return executeBatchImp(stmt, this.taos); + } + + private native int executeBatchImp(long stmt, long con); + + public int closeBatch(long stmt) { + return closeStmt(stmt, this.taos); + } + + private native int closeStmt(long stmt, long con); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index 56f971a35e..be6c2361a1 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -14,37 +14,47 @@ *****************************************************************************/ package com.taosdata.jdbc; +import com.sun.tools.javac.util.Assert; import com.taosdata.jdbc.utils.Utils; import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.DoubleBuffer; +import java.nio.IntBuffer; import java.sql.*; import java.util.ArrayList; import java.util.Calendar; +import java.util.Collections; import java.util.regex.Matcher; import java.util.regex.Pattern; /* - * TDengine only supports a subset of the standard SQL, thus this implemetation of the + * TDengine only supports a subset of the standard SQL, thus this implementation of the * standard JDBC API contains more or less some adjustments customized for certain * compatibility needs. */ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement { - private String rawSql; private Object[] parameters; private boolean isPrepared; - + + private ArrayList colData; + private int type; + + private String tableName; + private long nativeStmtPtr = 0; + private volatile TSDBParameterMetaData parameterMetaData; - TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) { - super(connection, connecter); + TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connector, String sql) { + super(connection, connector); init(sql); + int parameterCnt = 0; if (sql.contains("?")) { - int parameterCnt = 0; for (int i = 0; i < sql.length(); i++) { if ('?' == sql.charAt(i)) { parameterCnt++; @@ -53,6 +63,9 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat parameters = new Object[parameterCnt]; this.isPrepared = true; } + + this.colData = new ArrayList(parameterCnt); + this.colData.addAll(Collections.nCopies(parameterCnt, null)); } private void init(String sql) { @@ -260,10 +273,14 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setObject(int parameterIndex, Object x) throws SQLException { - if (isClosed()) + if (isClosed()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - if (parameterIndex < 1 && parameterIndex >= parameters.length) + } + + if (parameterIndex < 1 && parameterIndex >= parameters.length) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); + } + parameters[parameterIndex - 1] = x; } @@ -300,9 +317,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setRef(int parameterIndex, Ref x) throws SQLException { - if (isClosed()) + if (isClosed()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @@ -515,4 +533,135 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + + /////////////////////////////////////////////////////////////////////// + // NOTE: the following APIs are not JDBC compatible + // set the bind table name + private static class ColumnInfo { + @SuppressWarnings("rawtypes") + private ArrayList data; + private int type; + private boolean typeIsSet; + + public void ClumnInfo() { 
+ this.typeIsSet = false; + } + + public void setType(int type) { + Assert.check(!this.typeIsSet); + this.typeIsSet = true; + this.type = type; + } + + public boolean isTypeSet() { + return this.typeIsSet; + } + }; + + public void setTableName(String name) { + this.tableName = name; + } + + @SuppressWarnings("unchecked") + public void setInt(int columnIndex, ArrayList list) throws SQLException { + ColumnInfo col = (ColumnInfo) this.colData.get(columnIndex); + if (col == null) { + ColumnInfo p = new ColumnInfo(); + p.setType(TSDBConstants.TSDB_DATA_TYPE_INT); + p.data = (ArrayList) list.clone(); + this.colData.set(columnIndex, p); + } else { + if (col.type != TSDBConstants.TSDB_DATA_TYPE_INT) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + } + + col.data.addAll(list); + } + } + + @SuppressWarnings("unchecked") + public void setFloat(int columnIndex, ArrayList list) throws SQLException { + ColumnInfo col = (ColumnInfo) this.colData.get(columnIndex); + if (col == null) { + ColumnInfo p = new ColumnInfo(); + p.setType(TSDBConstants.TSDB_DATA_TYPE_INT); + p.data = (ArrayList) list.clone(); + this.colData.set(columnIndex, p); + } else { + if (col.type != TSDBConstants.TSDB_DATA_TYPE_INT) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + } + + col.data.addAll(list); + } + } + + public void addColumnDataBatch() { + // do nothing + } + + public void columnDataExecuteBatch() { + int size = this.colData.size(); + ColumnInfo col = (ColumnInfo) this.colData.get(0); + int rows = col.data.size(); + + // pass the data block to native code + TSDBJNIConnector conn = null; + try { + conn = (TSDBJNIConnector) this.getConnection(); + this.nativeStmtPtr = conn.prepareStmt(rawSql); + conn.setBindTableName(this.nativeStmtPtr, this.tableName); + } catch (SQLException e) { + e.printStackTrace(); + } + + for (int i = 0; i < size; ++i) { + ColumnInfo col1 = this.colData.get(i); + Assert.check(col.isTypeSet()); + ByteBuffer ib = ByteBuffer.allocate(rows); + + switch (col1.type) { + case TSDBConstants.TSDB_DATA_TYPE_INT: { + for (int j = 0; j < rows; ++j) { + Integer val = (Integer) col.data.get(j); + if (val == null) { + ib.putInt(Integer.MIN_VALUE); + } else { + ib.putInt((int) col.data.get(j)); + } + } + + break; + } + + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: { + for (int j = 0; j < rows; ++j) { + ib.putLong((long) col.data.get(j)); + } + break; + } + }; + + conn.bindColumnDataArray(this.nativeStmtPtr, ib.array(), col1.type, rows, i); + } + + conn.executeBatch(this.nativeStmtPtr); + } + + public void columnDataClearBatchClear() { + // TODO clear data in this.colData + } + + public void close() { + TSDBJNIConnector conn = null; + try { + conn = (TSDBJNIConnector) this.getConnection(); + this.nativeStmtPtr = conn.prepareStmt(rawSql); + conn.setBindTableName(this.nativeStmtPtr, this.tableName); + } catch (SQLException e) { + e.printStackTrace(); + } + + conn.closeBatch(this.nativeStmtPtr); + } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java index ce5290de66..814fd6c18d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java @@ -29,6 +29,8 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import com.taosdata.jdbc.utils.NullType; + public class TSDBResultSetBlockData { 
private int numOfRows = 0; private int rowIndex = 0; @@ -164,59 +166,7 @@ public class TSDBResultSetBlockData { } } - private static class NullType { - private static final byte NULL_BOOL_VAL = 0x2; - private static final String NULL_STR = "null"; - - public String toString() { - return NullType.NULL_STR; - } - - public static boolean isBooleanNull(byte val) { - return val == NullType.NULL_BOOL_VAL; - } - - private static boolean isTinyIntNull(byte val) { - return val == Byte.MIN_VALUE; - } - - private static boolean isSmallIntNull(short val) { - return val == Short.MIN_VALUE; - } - - private static boolean isIntNull(int val) { - return val == Integer.MIN_VALUE; - } - - private static boolean isBigIntNull(long val) { - return val == Long.MIN_VALUE; - } - - private static boolean isFloatNull(float val) { - return Float.isNaN(val); - } - - private static boolean isDoubleNull(double val) { - return Double.isNaN(val); - } - - private static boolean isBinaryNull(byte[] val, int length) { - if (length != Byte.BYTES) { - return false; - } - - return val[0] == 0xFF; - } - - private static boolean isNcharNull(byte[] val, int length) { - if (length != Integer.BYTES) { - return false; - } - - return (val[0] & val[1] & val[2] & val[3]) == 0xFF; - } - - } + /** * The original type may not be a string type, but will be converted to by diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java index f846a1162e..f58e3f8cd2 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java @@ -1,26 +1,15 @@ package com.taosdata.jdbc.rs; -import com.google.common.collect.Range; -import com.google.common.collect.RangeSet; -import com.google.common.collect.TreeRangeSet; import com.taosdata.jdbc.TSDBError; import com.taosdata.jdbc.TSDBErrorNumbers; -import com.taosdata.jdbc.utils.SqlSyntaxValidator; import com.taosdata.jdbc.utils.Utils; import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; import java.net.URL; -import java.nio.charset.Charset; import java.sql.*; import java.util.Calendar; -import java.util.HashMap; -import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.IntStream; public class RestfulPreparedStatement extends RestfulStatement implements PreparedStatement { diff --git a/src/query/src/qTokenizer.c b/src/query/src/qTokenizer.c index 00fcaf82f5..4a4897f5c2 100644 --- a/src/query/src/qTokenizer.c +++ b/src/query/src/qTokenizer.c @@ -560,9 +560,9 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenId) { return 0; } -SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* new) { +SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* newToken) { char *src = *str; - int32_t nsize = strlen(new); + int32_t nsize = strlen(newToken); int32_t size = strlen(*str) - token->n + nsize + 1; int32_t bsize = (uint64_t)token->z - (uint64_t)src; SStrToken ntoken; @@ -570,7 +570,7 @@ SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* new) { *str = calloc(1, size); strncpy(*str, src, bsize); - strcat(*str, new); + strcat(*str, newToken); strcat(*str, token->z + token->n); ntoken.n = nsize; diff --git a/src/util/inc/tstoken.h b/src/util/inc/tstoken.h index 93d48e01cb..550dbba06b 100644 --- 
a/src/util/inc/tstoken.h +++ b/src/util/inc/tstoken.h @@ -182,7 +182,7 @@ static FORCE_INLINE int32_t tGetNumericStringType(const SStrToken* pToken) { void taosCleanupKeywordsTable(); -SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* new); +SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* newToken); #ifdef __cplusplus From 1729bc75ab163c2e7a1192333b45338ea8f7dbbd Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 9 May 2021 00:34:48 +0800 Subject: [PATCH 094/140] [TD-4119] --- src/mnode/inc/mnodeDef.h | 2 +- src/mnode/src/mnodeShow.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h index ed1de1b87a..e052f34a33 100644 --- a/src/mnode/inc/mnodeDef.h +++ b/src/mnode/inc/mnodeDef.h @@ -249,7 +249,7 @@ typedef struct SAcctObj { } SAcctObj; typedef struct { - char db[TSDB_DB_NAME_LEN]; + char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]; int8_t type; int16_t numOfColumns; int32_t index; diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 03772f2724..c0fa6368f3 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -129,7 +129,7 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) { SShowObj *pShow = calloc(1, showObjSize); pShow->type = pShowMsg->type; pShow->payloadLen = htons(pShowMsg->payloadLen); - tstrncpy(pShow->db, pShowMsg->db, TSDB_DB_NAME_LEN); + tstrncpy(pShow->db, pShowMsg->db, TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN); memcpy(pShow->payload, pShowMsg->payload, pShow->payloadLen); pShow = mnodePutShowObj(pShow); From 22db3987a32450fbad50d1e85c8ba93f1eff7e1f Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Sun, 9 May 2021 10:04:42 +0800 Subject: [PATCH 095/140] fix bug --- src/client/src/tscPrepare.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 9af7b11615..79701841df 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1360,6 +1360,7 @@ int taos_stmt_close(TAOS_STMT* stmt) { STscStmt* pStmt = (STscStmt*)stmt; if (!pStmt->isInsert) { taosHashCleanup(pStmt->mtb.pTableHash); + taosHashCleanup(pStmt->mtb.pTableBlockHashList); SNormalStmt* normal = &pStmt->normal; if (normal->params != NULL) { From 7db07e73beb897092a76e5d317517e941f99ddea Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Sun, 9 May 2021 13:30:37 +0800 Subject: [PATCH 096/140] add test file --- tests/script/api/batchprepare.c | 2131 +++++++++++++++++++++++++++++++ tests/script/api/makefile | 17 + 2 files changed, 2148 insertions(+) create mode 100644 tests/script/api/batchprepare.c create mode 100644 tests/script/api/makefile diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c new file mode 100644 index 0000000000..48c668634d --- /dev/null +++ b/tests/script/api/batchprepare.c @@ -0,0 +1,2131 @@ +// TAOS standard API example. 
The same syntax as MySQL, but only a subet +// to compile: gcc -o prepare prepare.c -ltaos + +#include +#include +#include +#include "taos.h" +#include + + +void taosMsleep(int mseconds); + +unsigned long long getCurrentTime(){ + struct timeval tv; + if (gettimeofday(&tv, NULL) != 0) { + perror("Failed to get current time in ms"); + exit(EXIT_FAILURE); + } + + return (uint64_t)tv.tv_sec * 1000000ULL + (uint64_t)tv.tv_usec; +} + + + +int stmt_func1(TAOS_STMT *stmt) { + struct { + int64_t ts; + int8_t b; + int8_t v1; + int16_t v2; + int32_t v4; + int64_t v8; + float f4; + double f8; + char bin[40]; + char blob[80]; + } v = {0}; + + TAOS_BIND params[10]; + params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[0].buffer_length = sizeof(v.ts); + params[0].buffer = &v.ts; + params[0].length = ¶ms[0].buffer_length; + params[0].is_null = NULL; + + params[1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[1].buffer_length = sizeof(v.b); + params[1].buffer = &v.b; + params[1].length = ¶ms[1].buffer_length; + params[1].is_null = NULL; + + params[2].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[2].buffer_length = sizeof(v.v1); + params[2].buffer = &v.v1; + params[2].length = ¶ms[2].buffer_length; + params[2].is_null = NULL; + + params[3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[3].buffer_length = sizeof(v.v2); + params[3].buffer = &v.v2; + params[3].length = ¶ms[3].buffer_length; + params[3].is_null = NULL; + + params[4].buffer_type = TSDB_DATA_TYPE_INT; + params[4].buffer_length = sizeof(v.v4); + params[4].buffer = &v.v4; + params[4].length = ¶ms[4].buffer_length; + params[4].is_null = NULL; + + params[5].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[5].buffer_length = sizeof(v.v8); + params[5].buffer = &v.v8; + params[5].length = ¶ms[5].buffer_length; + params[5].is_null = NULL; + + params[6].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[6].buffer_length = sizeof(v.f4); + params[6].buffer = &v.f4; + params[6].length = ¶ms[6].buffer_length; + params[6].is_null = NULL; + + params[7].buffer_type = TSDB_DATA_TYPE_DOUBLE; + params[7].buffer_length = sizeof(v.f8); + params[7].buffer = &v.f8; + params[7].length = ¶ms[7].buffer_length; + params[7].is_null = NULL; + + params[8].buffer_type = TSDB_DATA_TYPE_BINARY; + params[8].buffer_length = sizeof(v.bin); + params[8].buffer = v.bin; + params[8].length = ¶ms[8].buffer_length; + params[8].is_null = NULL; + + params[9].buffer_type = TSDB_DATA_TYPE_BINARY; + params[9].buffer_length = sizeof(v.bin); + params[9].buffer = v.bin; + params[9].length = ¶ms[9].buffer_length; + params[9].is_null = NULL; + + int is_null = 1; + + char *sql = "insert into ? values(?,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + } + + for (int zz = 0; zz < 10; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code); + } + v.ts = 1591060628000 + zz * 10; + for (int i = 0; i < 10; ++i) { + v.ts += 1; + for (int j = 1; j < 10; ++j) { + params[j].is_null = ((i == j) ? 
&is_null : 0); + } + v.b = (int8_t)(i+zz*10) % 2; + v.v1 = (int8_t)(i+zz*10); + v.v2 = (int16_t)((i+zz*10) * 2); + v.v4 = (int32_t)((i+zz*10) * 4); + v.v8 = (int64_t)((i+zz*10) * 8); + v.f4 = (float)((i+zz*10) * 40); + v.f8 = (double)((i+zz*10) * 80); + for (int j = 0; j < sizeof(v.bin) - 1; ++j) { + v.bin[j] = (char)((i+zz)%10 + '0'); + } + + taos_stmt_bind_param(stmt, params); + taos_stmt_add_batch(stmt); + } + } + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + + return 0; +} + + +int stmt_func2(TAOS_STMT *stmt) { + struct { + int64_t ts; + int8_t b; + int8_t v1; + int16_t v2; + int32_t v4; + int64_t v8; + float f4; + double f8; + char bin[40]; + char blob[80]; + } v = {0}; + + TAOS_BIND params[10]; + params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[0].buffer_length = sizeof(v.ts); + params[0].buffer = &v.ts; + params[0].length = ¶ms[0].buffer_length; + params[0].is_null = NULL; + + params[1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[1].buffer_length = sizeof(v.b); + params[1].buffer = &v.b; + params[1].length = ¶ms[1].buffer_length; + params[1].is_null = NULL; + + params[2].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[2].buffer_length = sizeof(v.v1); + params[2].buffer = &v.v1; + params[2].length = ¶ms[2].buffer_length; + params[2].is_null = NULL; + + params[3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[3].buffer_length = sizeof(v.v2); + params[3].buffer = &v.v2; + params[3].length = ¶ms[3].buffer_length; + params[3].is_null = NULL; + + params[4].buffer_type = TSDB_DATA_TYPE_INT; + params[4].buffer_length = sizeof(v.v4); + params[4].buffer = &v.v4; + params[4].length = ¶ms[4].buffer_length; + params[4].is_null = NULL; + + params[5].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[5].buffer_length = sizeof(v.v8); + params[5].buffer = &v.v8; + params[5].length = ¶ms[5].buffer_length; + params[5].is_null = NULL; + + params[6].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[6].buffer_length = sizeof(v.f4); + params[6].buffer = &v.f4; + params[6].length = ¶ms[6].buffer_length; + params[6].is_null = NULL; + + params[7].buffer_type = TSDB_DATA_TYPE_DOUBLE; + params[7].buffer_length = sizeof(v.f8); + params[7].buffer = &v.f8; + params[7].length = ¶ms[7].buffer_length; + params[7].is_null = NULL; + + params[8].buffer_type = TSDB_DATA_TYPE_BINARY; + params[8].buffer_length = sizeof(v.bin); + params[8].buffer = v.bin; + params[8].length = ¶ms[8].buffer_length; + params[8].is_null = NULL; + + params[9].buffer_type = TSDB_DATA_TYPE_BINARY; + params[9].buffer_length = sizeof(v.bin); + params[9].buffer = v.bin; + params[9].length = ¶ms[9].buffer_length; + params[9].is_null = NULL; + + int is_null = 1; + + char *sql = "insert into ? values(?,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + } + + for (int l = 0; l < 100; l++) { + for (int zz = 0; zz < 10; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code); + } + v.ts = 1591060628000 + zz * 100 * l; + for (int i = 0; i < zz; ++i) { + v.ts += 1; + for (int j = 1; j < 10; ++j) { + params[j].is_null = ((i == j) ? 
&is_null : 0); + } + v.b = (int8_t)(i+zz*10) % 2; + v.v1 = (int8_t)(i+zz*10); + v.v2 = (int16_t)((i+zz*10) * 2); + v.v4 = (int32_t)((i+zz*10) * 4); + v.v8 = (int64_t)((i+zz*10) * 8); + v.f4 = (float)((i+zz*10) * 40); + v.f8 = (double)((i+zz*10) * 80); + for (int j = 0; j < sizeof(v.bin) - 1; ++j) { + v.bin[j] = (char)((i+zz)%10 + '0'); + } + + taos_stmt_bind_param(stmt, params); + taos_stmt_add_batch(stmt); + } + } + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + + } + + + return 0; +} + + + + +int stmt_func3(TAOS_STMT *stmt) { + struct { + int64_t ts; + int8_t b; + int8_t v1; + int16_t v2; + int32_t v4; + int64_t v8; + float f4; + double f8; + char bin[40]; + char blob[80]; + } v = {0}; + + TAOS_BIND params[10]; + params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[0].buffer_length = sizeof(v.ts); + params[0].buffer = &v.ts; + params[0].length = ¶ms[0].buffer_length; + params[0].is_null = NULL; + + params[1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[1].buffer_length = sizeof(v.b); + params[1].buffer = &v.b; + params[1].length = ¶ms[1].buffer_length; + params[1].is_null = NULL; + + params[2].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[2].buffer_length = sizeof(v.v1); + params[2].buffer = &v.v1; + params[2].length = ¶ms[2].buffer_length; + params[2].is_null = NULL; + + params[3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[3].buffer_length = sizeof(v.v2); + params[3].buffer = &v.v2; + params[3].length = ¶ms[3].buffer_length; + params[3].is_null = NULL; + + params[4].buffer_type = TSDB_DATA_TYPE_INT; + params[4].buffer_length = sizeof(v.v4); + params[4].buffer = &v.v4; + params[4].length = ¶ms[4].buffer_length; + params[4].is_null = NULL; + + params[5].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[5].buffer_length = sizeof(v.v8); + params[5].buffer = &v.v8; + params[5].length = ¶ms[5].buffer_length; + params[5].is_null = NULL; + + params[6].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[6].buffer_length = sizeof(v.f4); + params[6].buffer = &v.f4; + params[6].length = ¶ms[6].buffer_length; + params[6].is_null = NULL; + + params[7].buffer_type = TSDB_DATA_TYPE_DOUBLE; + params[7].buffer_length = sizeof(v.f8); + params[7].buffer = &v.f8; + params[7].length = ¶ms[7].buffer_length; + params[7].is_null = NULL; + + params[8].buffer_type = TSDB_DATA_TYPE_BINARY; + params[8].buffer_length = sizeof(v.bin); + params[8].buffer = v.bin; + params[8].length = ¶ms[8].buffer_length; + params[8].is_null = NULL; + + params[9].buffer_type = TSDB_DATA_TYPE_BINARY; + params[9].buffer_length = sizeof(v.bin); + params[9].buffer = v.bin; + params[9].length = ¶ms[9].buffer_length; + params[9].is_null = NULL; + + int is_null = 1; + + char *sql = "insert into ? values(?,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + } + + for (int l = 0; l < 100; l++) { + for (int zz = 0; zz < 10; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code); + } + v.ts = 1591060628000 + zz * 100 * l; + for (int i = 0; i < zz; ++i) { + v.ts += 1; + for (int j = 1; j < 10; ++j) { + params[j].is_null = ((i == j) ? 
&is_null : 0); + } + v.b = (int8_t)(i+zz*10) % 2; + v.v1 = (int8_t)(i+zz*10); + v.v2 = (int16_t)((i+zz*10) * 2); + v.v4 = (int32_t)((i+zz*10) * 4); + v.v8 = (int64_t)((i+zz*10) * 8); + v.f4 = (float)((i+zz*10) * 40); + v.f8 = (double)((i+zz*10) * 80); + for (int j = 0; j < sizeof(v.bin) - 1; ++j) { + v.bin[j] = (char)((i+zz)%10 + '0'); + } + + taos_stmt_bind_param(stmt, params); + taos_stmt_add_batch(stmt); + } + } + } + + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + + + return 0; +} + + +//300 tables 60 records +int stmt_funcb1(TAOS_STMT *stmt) { + struct { + int64_t *ts; + int8_t b[60]; + int8_t v1[60]; + int16_t v2[60]; + int32_t v4[60]; + int64_t v8[60]; + float f4[60]; + double f8[60]; + char bin[60][40]; + } v = {0}; + + v.ts = malloc(sizeof(int64_t) * 900000 * 60); + + uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 900000*10); + int* is_null = malloc(sizeof(int) * 60); + int* no_null = malloc(sizeof(int) * 60); + + for (int i = 0; i < 60; ++i) { + lb[i] = 40; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 1 : 0; + v.b[i] = (int8_t)(i % 2); + v.v1[i] = (int8_t)((i+1) % 2); + v.v2[i] = (int16_t)i; + v.v4[i] = (int32_t)(i+1); + v.v8[i] = (int64_t)(i+2); + v.f4[i] = (float)(i+3); + v.f8[i] = (double)(i+4); + memset(v.bin[i], '0'+i%10, 40); + } + + for (int i = 0; i < 9000000; i+=10) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v.ts[60*i/10]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = 60; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v.b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = 60; + + params[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[i+2].buffer_length = sizeof(int8_t); + params[i+2].buffer = v.v1; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = 60; + + params[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[i+3].buffer_length = sizeof(int16_t); + params[i+3].buffer = v.v2; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = 60; + + params[i+4].buffer_type = TSDB_DATA_TYPE_INT; + params[i+4].buffer_length = sizeof(int32_t); + params[i+4].buffer = v.v4; + params[i+4].length = NULL; + params[i+4].is_null = is_null; + params[i+4].num = 60; + + params[i+5].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[i+5].buffer_length = sizeof(int64_t); + params[i+5].buffer = v.v8; + params[i+5].length = NULL; + params[i+5].is_null = is_null; + params[i+5].num = 60; + + params[i+6].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+6].buffer_length = sizeof(float); + params[i+6].buffer = v.f4; + params[i+6].length = NULL; + params[i+6].is_null = is_null; + params[i+6].num = 60; + + params[i+7].buffer_type = TSDB_DATA_TYPE_DOUBLE; + params[i+7].buffer_length = sizeof(double); + params[i+7].buffer = v.f8; + params[i+7].length = NULL; + params[i+7].is_null = is_null; + params[i+7].num = 60; + + params[i+8].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+8].buffer_length = 40; + params[i+8].buffer = v.bin; + params[i+8].length = lb; + params[i+8].is_null = is_null; + params[i+8].num = 60; + + params[i+9].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+9].buffer_length = 40; + params[i+9].buffer = v.bin; + params[i+9].length = lb; + params[i+9].is_null = is_null; + 
params[i+9].num = 60; + + } + + int64_t tts = 1591060628000; + for (int i = 0; i < 54000000; ++i) { + v.ts[i] = tts + i; + } + + unsigned long long starttime = getCurrentTime(); + + char *sql = "insert into ? values(?,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + } + + int id = 0; + for (int l = 0; l < 3000; l++) { + for (int zz = 0; zz < 300; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code); + } + + taos_stmt_bind_param_batch(stmt, params + id * 10); + taos_stmt_add_batch(stmt); + } + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + + ++id; + } + + unsigned long long endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + + return 0; +} + + +//1table 18000 reocrds +int stmt_funcb2(TAOS_STMT *stmt) { + struct { + int64_t *ts; + int8_t b[18000]; + int8_t v1[18000]; + int16_t v2[18000]; + int32_t v4[18000]; + int64_t v8[18000]; + float f4[18000]; + double f8[18000]; + char bin[18000][40]; + } v = {0}; + + v.ts = malloc(sizeof(int64_t) * 900000 * 60); + + uintptr_t *lb = malloc(18000 * sizeof(uintptr_t)); + + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 3000*10); + int* is_null = malloc(sizeof(int) * 18000); + int* no_null = malloc(sizeof(int) * 18000); + + for (int i = 0; i < 18000; ++i) { + lb[i] = 40; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 1 : 0; + v.b[i] = (int8_t)(i % 2); + v.v1[i] = (int8_t)((i+1) % 2); + v.v2[i] = (int16_t)i; + v.v4[i] = (int32_t)(i+1); + v.v8[i] = (int64_t)(i+2); + v.f4[i] = (float)(i+3); + v.f8[i] = (double)(i+4); + memset(v.bin[i], '0'+i%10, 40); + } + + for (int i = 0; i < 30000; i+=10) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v.ts[60*i/10]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = 18000; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v.b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = 18000; + + params[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[i+2].buffer_length = sizeof(int8_t); + params[i+2].buffer = v.v1; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = 18000; + + params[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[i+3].buffer_length = sizeof(int16_t); + params[i+3].buffer = v.v2; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = 18000; + + params[i+4].buffer_type = TSDB_DATA_TYPE_INT; + params[i+4].buffer_length = sizeof(int32_t); + params[i+4].buffer = v.v4; + params[i+4].length = NULL; + params[i+4].is_null = is_null; + params[i+4].num = 18000; + + params[i+5].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[i+5].buffer_length = sizeof(int64_t); + params[i+5].buffer = v.v8; + params[i+5].length = NULL; + params[i+5].is_null = is_null; + params[i+5].num = 18000; + + params[i+6].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+6].buffer_length = sizeof(float); + params[i+6].buffer = v.f4; + params[i+6].length = NULL; + params[i+6].is_null = is_null; + params[i+6].num = 18000; + + params[i+7].buffer_type = 
TSDB_DATA_TYPE_DOUBLE; + params[i+7].buffer_length = sizeof(double); + params[i+7].buffer = v.f8; + params[i+7].length = NULL; + params[i+7].is_null = is_null; + params[i+7].num = 18000; + + params[i+8].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+8].buffer_length = 40; + params[i+8].buffer = v.bin; + params[i+8].length = lb; + params[i+8].is_null = is_null; + params[i+8].num = 18000; + + params[i+9].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+9].buffer_length = 40; + params[i+9].buffer = v.bin; + params[i+9].length = lb; + params[i+9].is_null = is_null; + params[i+9].num = 18000; + + } + + int64_t tts = 1591060628000; + for (int i = 0; i < 54000000; ++i) { + v.ts[i] = tts + i; + } + + unsigned long long starttime = getCurrentTime(); + + char *sql = "insert into ? values(?,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + } + + int id = 0; + for (int l = 0; l < 10; l++) { + for (int zz = 0; zz < 300; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code); + } + + taos_stmt_bind_param_batch(stmt, params + id * 10); + taos_stmt_add_batch(stmt); + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + ++id; + + } + + } + + unsigned long long endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + + return 0; +} + + +//disorder +int stmt_funcb3(TAOS_STMT *stmt) { + struct { + int64_t *ts; + int8_t b[60]; + int8_t v1[60]; + int16_t v2[60]; + int32_t v4[60]; + int64_t v8[60]; + float f4[60]; + double f8[60]; + char bin[60][40]; + } v = {0}; + + v.ts = malloc(sizeof(int64_t) * 900000 * 60); + + uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 900000*10); + int* is_null = malloc(sizeof(int) * 60); + int* no_null = malloc(sizeof(int) * 60); + + for (int i = 0; i < 60; ++i) { + lb[i] = 40; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 
1 : 0; + v.b[i] = (int8_t)(i % 2); + v.v1[i] = (int8_t)((i+1) % 2); + v.v2[i] = (int16_t)i; + v.v4[i] = (int32_t)(i+1); + v.v8[i] = (int64_t)(i+2); + v.f4[i] = (float)(i+3); + v.f8[i] = (double)(i+4); + memset(v.bin[i], '0'+i%10, 40); + } + + for (int i = 0; i < 9000000; i+=10) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v.ts[60*i/10]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = 60; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v.b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = 60; + + params[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[i+2].buffer_length = sizeof(int8_t); + params[i+2].buffer = v.v1; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = 60; + + params[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[i+3].buffer_length = sizeof(int16_t); + params[i+3].buffer = v.v2; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = 60; + + params[i+4].buffer_type = TSDB_DATA_TYPE_INT; + params[i+4].buffer_length = sizeof(int32_t); + params[i+4].buffer = v.v4; + params[i+4].length = NULL; + params[i+4].is_null = is_null; + params[i+4].num = 60; + + params[i+5].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[i+5].buffer_length = sizeof(int64_t); + params[i+5].buffer = v.v8; + params[i+5].length = NULL; + params[i+5].is_null = is_null; + params[i+5].num = 60; + + params[i+6].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+6].buffer_length = sizeof(float); + params[i+6].buffer = v.f4; + params[i+6].length = NULL; + params[i+6].is_null = is_null; + params[i+6].num = 60; + + params[i+7].buffer_type = TSDB_DATA_TYPE_DOUBLE; + params[i+7].buffer_length = sizeof(double); + params[i+7].buffer = v.f8; + params[i+7].length = NULL; + params[i+7].is_null = is_null; + params[i+7].num = 60; + + params[i+8].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+8].buffer_length = 40; + params[i+8].buffer = v.bin; + params[i+8].length = lb; + params[i+8].is_null = is_null; + params[i+8].num = 60; + + params[i+9].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+9].buffer_length = 40; + params[i+9].buffer = v.bin; + params[i+9].length = lb; + params[i+9].is_null = is_null; + params[i+9].num = 60; + + } + + int64_t tts = 1591060628000; + int64_t ttt = 0; + for (int i = 0; i < 54000000; ++i) { + v.ts[i] = tts + i; + if (i > 0 && i%60 == 0) { + ttt = v.ts[i-1]; + v.ts[i-1] = v.ts[i-60]; + v.ts[i-60] = ttt; + } + } + + unsigned long long starttime = getCurrentTime(); + + char *sql = "insert into ? values(?,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + } + + int id = 0; + for (int l = 0; l < 3000; l++) { + for (int zz = 0; zz < 300; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. 
code:0x%x\n", code); + } + + taos_stmt_bind_param_batch(stmt, params + id * 10); + taos_stmt_add_batch(stmt); + } + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + + ++id; + } + + unsigned long long endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + + return 0; +} + + + + +//samets +int stmt_funcb4(TAOS_STMT *stmt) { + struct { + int64_t *ts; + int8_t b[60]; + int8_t v1[60]; + int16_t v2[60]; + int32_t v4[60]; + int64_t v8[60]; + float f4[60]; + double f8[60]; + char bin[60][40]; + } v = {0}; + + v.ts = malloc(sizeof(int64_t) * 900000 * 60); + + uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 900000*10); + int* is_null = malloc(sizeof(int) * 60); + int* no_null = malloc(sizeof(int) * 60); + + for (int i = 0; i < 60; ++i) { + lb[i] = 40; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 1 : 0; + v.b[i] = (int8_t)(i % 2); + v.v1[i] = (int8_t)((i+1) % 2); + v.v2[i] = (int16_t)i; + v.v4[i] = (int32_t)(i+1); + v.v8[i] = (int64_t)(i+2); + v.f4[i] = (float)(i+3); + v.f8[i] = (double)(i+4); + memset(v.bin[i], '0'+i%10, 40); + } + + for (int i = 0; i < 9000000; i+=10) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v.ts[60*i/10]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = 60; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v.b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = 60; + + params[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[i+2].buffer_length = sizeof(int8_t); + params[i+2].buffer = v.v1; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = 60; + + params[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[i+3].buffer_length = sizeof(int16_t); + params[i+3].buffer = v.v2; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = 60; + + params[i+4].buffer_type = TSDB_DATA_TYPE_INT; + params[i+4].buffer_length = sizeof(int32_t); + params[i+4].buffer = v.v4; + params[i+4].length = NULL; + params[i+4].is_null = is_null; + params[i+4].num = 60; + + params[i+5].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[i+5].buffer_length = sizeof(int64_t); + params[i+5].buffer = v.v8; + params[i+5].length = NULL; + params[i+5].is_null = is_null; + params[i+5].num = 60; + + params[i+6].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+6].buffer_length = sizeof(float); + params[i+6].buffer = v.f4; + params[i+6].length = NULL; + params[i+6].is_null = is_null; + params[i+6].num = 60; + + params[i+7].buffer_type = TSDB_DATA_TYPE_DOUBLE; + params[i+7].buffer_length = sizeof(double); + params[i+7].buffer = v.f8; + params[i+7].length = NULL; + params[i+7].is_null = is_null; + params[i+7].num = 60; + + params[i+8].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+8].buffer_length = 40; + params[i+8].buffer = v.bin; + params[i+8].length = lb; + params[i+8].is_null = is_null; + params[i+8].num = 60; + + params[i+9].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+9].buffer_length = 40; + params[i+9].buffer = v.bin; + params[i+9].length = lb; + params[i+9].is_null = is_null; + params[i+9].num = 60; + + } + + int64_t tts = 1591060628000; + for (int i = 0; i < 54000000; ++i) { + v.ts[i] = tts; + } 
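+ // samets case: every generated row reuses the same timestamp tts, so once all batches are flushed each target table is expected to retain exactly one record (the later check_result calls for m0..m299 expect 1 row)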
+ + unsigned long long starttime = getCurrentTime(); + + char *sql = "insert into ? values(?,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + } + + int id = 0; + for (int l = 0; l < 3000; l++) { + for (int zz = 0; zz < 300; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code); + } + + taos_stmt_bind_param_batch(stmt, params + id * 10); + taos_stmt_add_batch(stmt); + } + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + + ++id; + } + + unsigned long long endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + + return 0; +} + + +//one table 60 records one time +int stmt_funcb_s1(TAOS_STMT *stmt) { + struct { + int64_t *ts; + int8_t b[60]; + int8_t v1[60]; + int16_t v2[60]; + int32_t v4[60]; + int64_t v8[60]; + float f4[60]; + double f8[60]; + char bin[60][40]; + } v = {0}; + + v.ts = malloc(sizeof(int64_t) * 900000 * 60); + + uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 900000*10); + int* is_null = malloc(sizeof(int) * 60); + int* no_null = malloc(sizeof(int) * 60); + + for (int i = 0; i < 60; ++i) { + lb[i] = 40; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 1 : 0; + v.b[i] = (int8_t)(i % 2); + v.v1[i] = (int8_t)((i+1) % 2); + v.v2[i] = (int16_t)i; + v.v4[i] = (int32_t)(i+1); + v.v8[i] = (int64_t)(i+2); + v.f4[i] = (float)(i+3); + v.f8[i] = (double)(i+4); + memset(v.bin[i], '0'+i%10, 40); + } + + for (int i = 0; i < 9000000; i+=10) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v.ts[60*i/10]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = 60; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v.b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = 60; + + params[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[i+2].buffer_length = sizeof(int8_t); + params[i+2].buffer = v.v1; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = 60; + + params[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[i+3].buffer_length = sizeof(int16_t); + params[i+3].buffer = v.v2; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = 60; + + params[i+4].buffer_type = TSDB_DATA_TYPE_INT; + params[i+4].buffer_length = sizeof(int32_t); + params[i+4].buffer = v.v4; + params[i+4].length = NULL; + params[i+4].is_null = is_null; + params[i+4].num = 60; + + params[i+5].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[i+5].buffer_length = sizeof(int64_t); + params[i+5].buffer = v.v8; + params[i+5].length = NULL; + params[i+5].is_null = is_null; + params[i+5].num = 60; + + params[i+6].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+6].buffer_length = sizeof(float); + params[i+6].buffer = v.f4; + params[i+6].length = NULL; + params[i+6].is_null = is_null; + params[i+6].num = 60; + + params[i+7].buffer_type = TSDB_DATA_TYPE_DOUBLE; + params[i+7].buffer_length = sizeof(double); + params[i+7].buffer = v.f8; + params[i+7].length = NULL; + params[i+7].is_null = is_null; + 
params[i+7].num = 60; + + params[i+8].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+8].buffer_length = 40; + params[i+8].buffer = v.bin; + params[i+8].length = lb; + params[i+8].is_null = is_null; + params[i+8].num = 60; + + params[i+9].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+9].buffer_length = 40; + params[i+9].buffer = v.bin; + params[i+9].length = lb; + params[i+9].is_null = is_null; + params[i+9].num = 60; + + } + + int64_t tts = 1591060628000; + for (int i = 0; i < 54000000; ++i) { + v.ts[i] = tts + i; + } + + unsigned long long starttime = getCurrentTime(); + + char *sql = "insert into ? values(?,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + } + + int id = 0; + for (int l = 0; l < 3000; l++) { + for (int zz = 0; zz < 300; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code); + } + + taos_stmt_bind_param_batch(stmt, params + id * 10); + taos_stmt_add_batch(stmt); + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + + ++id; + } + + } + + unsigned long long endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + + return 0; +} + + + + + + +//300 tables 60 records single column bind +int stmt_funcb_sc1(TAOS_STMT *stmt) { + struct { + int64_t *ts; + int8_t b[60]; + int8_t v1[60]; + int16_t v2[60]; + int32_t v4[60]; + int64_t v8[60]; + float f4[60]; + double f8[60]; + char bin[60][40]; + } v = {0}; + + v.ts = malloc(sizeof(int64_t) * 900000 * 60); + + uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 900000*10); + int* is_null = malloc(sizeof(int) * 60); + int* no_null = malloc(sizeof(int) * 60); + + for (int i = 0; i < 60; ++i) { + lb[i] = 40; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 
1 : 0; + v.b[i] = (int8_t)(i % 2); + v.v1[i] = (int8_t)((i+1) % 2); + v.v2[i] = (int16_t)i; + v.v4[i] = (int32_t)(i+1); + v.v8[i] = (int64_t)(i+2); + v.f4[i] = (float)(i+3); + v.f8[i] = (double)(i+4); + memset(v.bin[i], '0'+i%10, 40); + } + + for (int i = 0; i < 9000000; i+=10) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v.ts[60*i/10]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = 60; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v.b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = 60; + + params[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[i+2].buffer_length = sizeof(int8_t); + params[i+2].buffer = v.v1; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = 60; + + params[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[i+3].buffer_length = sizeof(int16_t); + params[i+3].buffer = v.v2; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = 60; + + params[i+4].buffer_type = TSDB_DATA_TYPE_INT; + params[i+4].buffer_length = sizeof(int32_t); + params[i+4].buffer = v.v4; + params[i+4].length = NULL; + params[i+4].is_null = is_null; + params[i+4].num = 60; + + params[i+5].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[i+5].buffer_length = sizeof(int64_t); + params[i+5].buffer = v.v8; + params[i+5].length = NULL; + params[i+5].is_null = is_null; + params[i+5].num = 60; + + params[i+6].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+6].buffer_length = sizeof(float); + params[i+6].buffer = v.f4; + params[i+6].length = NULL; + params[i+6].is_null = is_null; + params[i+6].num = 60; + + params[i+7].buffer_type = TSDB_DATA_TYPE_DOUBLE; + params[i+7].buffer_length = sizeof(double); + params[i+7].buffer = v.f8; + params[i+7].length = NULL; + params[i+7].is_null = is_null; + params[i+7].num = 60; + + params[i+8].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+8].buffer_length = 40; + params[i+8].buffer = v.bin; + params[i+8].length = lb; + params[i+8].is_null = is_null; + params[i+8].num = 60; + + params[i+9].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+9].buffer_length = 40; + params[i+9].buffer = v.bin; + params[i+9].length = lb; + params[i+9].is_null = is_null; + params[i+9].num = 60; + + } + + int64_t tts = 1591060628000; + for (int i = 0; i < 54000000; ++i) { + v.ts[i] = tts + i; + } + + unsigned long long starttime = getCurrentTime(); + + char *sql = "insert into ? values(?,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + } + + int id = 0; + for (int l = 0; l < 3000; l++) { + for (int zz = 0; zz < 300; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. 
code:0x%x\n", code); + } + + for (int col=0; col < 10; ++col) { + taos_stmt_bind_single_param_batch(stmt, params + id++, col); + } + + taos_stmt_add_batch(stmt); + } + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + } + + unsigned long long endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + + return 0; +} + + +//1 tables 60 records single column bind +int stmt_funcb_sc2(TAOS_STMT *stmt) { + struct { + int64_t *ts; + int8_t b[60]; + int8_t v1[60]; + int16_t v2[60]; + int32_t v4[60]; + int64_t v8[60]; + float f4[60]; + double f8[60]; + char bin[60][40]; + } v = {0}; + + v.ts = malloc(sizeof(int64_t) * 900000 * 60); + + uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 900000*10); + int* is_null = malloc(sizeof(int) * 60); + int* no_null = malloc(sizeof(int) * 60); + + for (int i = 0; i < 60; ++i) { + lb[i] = 40; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 1 : 0; + v.b[i] = (int8_t)(i % 2); + v.v1[i] = (int8_t)((i+1) % 2); + v.v2[i] = (int16_t)i; + v.v4[i] = (int32_t)(i+1); + v.v8[i] = (int64_t)(i+2); + v.f4[i] = (float)(i+3); + v.f8[i] = (double)(i+4); + memset(v.bin[i], '0'+i%10, 40); + } + + for (int i = 0; i < 9000000; i+=10) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v.ts[60*i/10]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = 60; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v.b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = 60; + + params[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[i+2].buffer_length = sizeof(int8_t); + params[i+2].buffer = v.v1; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = 60; + + params[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[i+3].buffer_length = sizeof(int16_t); + params[i+3].buffer = v.v2; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = 60; + + params[i+4].buffer_type = TSDB_DATA_TYPE_INT; + params[i+4].buffer_length = sizeof(int32_t); + params[i+4].buffer = v.v4; + params[i+4].length = NULL; + params[i+4].is_null = is_null; + params[i+4].num = 60; + + params[i+5].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[i+5].buffer_length = sizeof(int64_t); + params[i+5].buffer = v.v8; + params[i+5].length = NULL; + params[i+5].is_null = is_null; + params[i+5].num = 60; + + params[i+6].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+6].buffer_length = sizeof(float); + params[i+6].buffer = v.f4; + params[i+6].length = NULL; + params[i+6].is_null = is_null; + params[i+6].num = 60; + + params[i+7].buffer_type = TSDB_DATA_TYPE_DOUBLE; + params[i+7].buffer_length = sizeof(double); + params[i+7].buffer = v.f8; + params[i+7].length = NULL; + params[i+7].is_null = is_null; + params[i+7].num = 60; + + params[i+8].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+8].buffer_length = 40; + params[i+8].buffer = v.bin; + params[i+8].length = lb; + params[i+8].is_null = is_null; + params[i+8].num = 60; + + params[i+9].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+9].buffer_length = 40; + params[i+9].buffer = v.bin; + params[i+9].length = lb; + params[i+9].is_null = is_null; + params[i+9].num = 60; + + } + + int64_t tts = 
1591060628000; + for (int i = 0; i < 54000000; ++i) { + v.ts[i] = tts + i; + } + + unsigned long long starttime = getCurrentTime(); + + char *sql = "insert into ? values(?,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + } + + int id = 0; + for (int l = 0; l < 3000; l++) { + for (int zz = 0; zz < 300; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code); + } + + for (int col=0; col < 10; ++col) { + taos_stmt_bind_single_param_batch(stmt, params + id++, col); + } + + taos_stmt_add_batch(stmt); + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + + } + + } + + unsigned long long endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + + return 0; +} + + +//10 tables [1...10] records single column bind +int stmt_funcb_sc3(TAOS_STMT *stmt) { + struct { + int64_t *ts; + int8_t b[60]; + int8_t v1[60]; + int16_t v2[60]; + int32_t v4[60]; + int64_t v8[60]; + float f4[60]; + double f8[60]; + char bin[60][40]; + } v = {0}; + + v.ts = malloc(sizeof(int64_t) * 60); + + uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 60*10); + int* is_null = malloc(sizeof(int) * 60); + int* no_null = malloc(sizeof(int) * 60); + + for (int i = 0; i < 60; ++i) { + lb[i] = 40; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 1 : 0; + v.b[i] = (int8_t)(i % 2); + v.v1[i] = (int8_t)((i+1) % 2); + v.v2[i] = (int16_t)i; + v.v4[i] = (int32_t)(i+1); + v.v8[i] = (int64_t)(i+2); + v.f4[i] = (float)(i+3); + v.f8[i] = (double)(i+4); + memset(v.bin[i], '0'+i%10, 40); + } + + int g = 0; + for (int i = 0; i < 600; i+=10) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v.ts[i/10]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = g%10+1; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v.b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = g%10+1; + + params[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[i+2].buffer_length = sizeof(int8_t); + params[i+2].buffer = v.v1; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = g%10+1; + + params[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[i+3].buffer_length = sizeof(int16_t); + params[i+3].buffer = v.v2; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = g%10+1; + + params[i+4].buffer_type = TSDB_DATA_TYPE_INT; + params[i+4].buffer_length = sizeof(int32_t); + params[i+4].buffer = v.v4; + params[i+4].length = NULL; + params[i+4].is_null = is_null; + params[i+4].num = g%10+1; + + params[i+5].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[i+5].buffer_length = sizeof(int64_t); + params[i+5].buffer = v.v8; + params[i+5].length = NULL; + params[i+5].is_null = is_null; + params[i+5].num = g%10+1; + + params[i+6].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+6].buffer_length = sizeof(float); + params[i+6].buffer = v.f4; + params[i+6].length = NULL; + params[i+6].is_null = is_null; + params[i+6].num = g%10+1; + + params[i+7].buffer_type = 
TSDB_DATA_TYPE_DOUBLE; + params[i+7].buffer_length = sizeof(double); + params[i+7].buffer = v.f8; + params[i+7].length = NULL; + params[i+7].is_null = is_null; + params[i+7].num = g%10+1; + + params[i+8].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+8].buffer_length = 40; + params[i+8].buffer = v.bin; + params[i+8].length = lb; + params[i+8].is_null = is_null; + params[i+8].num = g%10+1; + + params[i+9].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+9].buffer_length = 40; + params[i+9].buffer = v.bin; + params[i+9].length = lb; + params[i+9].is_null = is_null; + params[i+9].num = g%10+1; + ++g; + } + + int64_t tts = 1591060628000; + for (int i = 0; i < 60; ++i) { + v.ts[i] = tts + i; + } + + unsigned long long starttime = getCurrentTime(); + + char *sql = "insert into ? values(?,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + } + + int id = 0; + for (int zz = 0; zz < 10; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code); + } + + for (int col=0; col < 10; ++col) { + taos_stmt_bind_single_param_batch(stmt, params + id++, col); + } + + taos_stmt_add_batch(stmt); + } + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + + unsigned long long endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + + return 0; +} + + +void check_result(TAOS *taos, char *tname, int printr, int expected) { + char sql[255] = "SELECT * FROM "; + TAOS_RES *result; + + strcat(sql, tname); + + result = taos_query(taos, sql); + int code = taos_errno(result); + if (code != 0) { + printf("failed to query table, reason:%s\n", taos_errstr(result)); + taos_free_result(result); + exit(1); + } + + + TAOS_ROW row; + int rows = 0; + int num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + char temp[256]; + + // fetch the records row by row + while ((row = taos_fetch_row(result))) { + rows++; + if (printr) { + memset(temp, 0, sizeof(temp)); + taos_print_row(temp, row, fields, num_fields); + printf("[%s]\n", temp); + } + } + + if (rows == expected) { + printf("%d rows are fetched as expectation\n", rows); + } else { + printf("!!!expect %d rows, but %d rows are fetched\n", expected, rows); + exit(1); + } + + taos_free_result(result); + +} + + + +//120table 60 record each table +int sql_perf1(TAOS *taos) { + char *sql[3000] = {0}; + TAOS_RES *result; + + for (int i = 0; i < 3000; i++) { + sql[i] = calloc(1, 1048576); + } + + int len = 0; + int tss = 0; + for (int l = 0; l < 3000; ++l) { + len = sprintf(sql[l], "insert into "); + for (int t = 0; t < 120; ++t) { + len += sprintf(sql[l] + len, "m%d values ", t); + for (int m = 0; m < 60; ++m) { + len += sprintf(sql[l] + len, "(%d, %d, %d, %d, %d, %d, %f, %f, \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\", \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\") ", tss++, m, m, m, m, m, m+1.0, m+1.0); + } + } + } + + + unsigned long long starttime = getCurrentTime(); + for (int i = 0; i < 3000; ++i) { + result = taos_query(taos, sql[i]); + int code = taos_errno(result); + if (code != 0) { + printf("failed to query table, reason:%s\n", taos_errstr(result)); + taos_free_result(result); + exit(1); + } + + taos_free_result(result); + } + unsigned long long 
endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%.1f useconds\n", 3000*120*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*120*60)); + + return 0; +} + + + + + +//one table 60 records one time +int sql_perf_s1(TAOS *taos) { + char **sql = calloc(1, sizeof(char*) * 360000); + TAOS_RES *result; + + for (int i = 0; i < 360000; i++) { + sql[i] = calloc(1, 9000); + } + + int len = 0; + int tss = 0; + int id = 0; + for (int t = 0; t < 120; ++t) { + for (int l = 0; l < 3000; ++l) { + len = sprintf(sql[id], "insert into m%d values ", t); + for (int m = 0; m < 60; ++m) { + len += sprintf(sql[id] + len, "(%d, %d, %d, %d, %d, %d, %f, %f, \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\", \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\") ", tss++, m, m, m, m, m, m+1.0, m+1.0); + } + if (len >= 9000) { + printf("sql:%s,len:%d\n", sql[id], len); + exit(1); + } + ++id; + } + } + + + unsigned long long starttime = getCurrentTime(); + for (int i = 0; i < 360000; ++i) { + result = taos_query(taos, sql[i]); + int code = taos_errno(result); + if (code != 0) { + printf("failed to query table, reason:%s\n", taos_errstr(result)); + taos_free_result(result); + exit(1); + } + + taos_free_result(result); + } + unsigned long long endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%.1f useconds\n", 3000*120*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*120*60)); + + return 0; +} + + +//small record size +int sql_s_perf1(TAOS *taos) { + char *sql[3000] = {0}; + TAOS_RES *result; + + for (int i = 0; i < 3000; i++) { + sql[i] = calloc(1, 1048576); + } + + int len = 0; + int tss = 0; + for (int l = 0; l < 3000; ++l) { + len = sprintf(sql[l], "insert into "); + for (int t = 0; t < 120; ++t) { + len += sprintf(sql[l] + len, "m%d values ", t); + for (int m = 0; m < 60; ++m) { + len += sprintf(sql[l] + len, "(%d, %d) ", tss++, m%2); + } + } + } + + + unsigned long long starttime = getCurrentTime(); + for (int i = 0; i < 3000; ++i) { + result = taos_query(taos, sql[i]); + int code = taos_errno(result); + if (code != 0) { + printf("failed to query table, reason:%s\n", taos_errstr(result)); + taos_free_result(result); + exit(1); + } + + taos_free_result(result); + } + unsigned long long endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%.1f useconds\n", 3000*120*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*120*60)); + + return 0; +} + + +void prepare(TAOS *taos) { + TAOS_RES *result; + int code; + + result = taos_query(taos, "drop database demo"); + taos_free_result(result); + + result = taos_query(taos, "create database demo"); + code = taos_errno(result); + if (code != 0) { + printf("failed to create database, reason:%s\n", taos_errstr(result)); + taos_free_result(result); + exit(1); + } + taos_free_result(result); + + result = taos_query(taos, "use demo"); + taos_free_result(result); + + // create table + for (int i = 0 ; i < 300; i++) { + char buf[1024]; + sprintf(buf, "create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), bin2 binary(40))", i) ; + result = taos_query(taos, buf); + code = taos_errno(result); + if (code != 0) { + printf("failed to create table, reason:%s\n", taos_errstr(result)); + taos_free_result(result); + exit(1); + } + taos_free_result(result); + } + +} + + +void runcase(TAOS *taos) { + TAOS_STMT *stmt; + + +#if 1 + prepare(taos); + + stmt = taos_stmt_init(taos); + + printf("10t+10records start\n"); + 
stmt_func1(stmt); + printf("10t+10records end\n"); + printf("check result start\n"); + check_result(taos, "m0", 1, 10); + check_result(taos, "m1", 1, 10); + check_result(taos, "m2", 1, 10); + check_result(taos, "m3", 1, 10); + check_result(taos, "m4", 1, 10); + check_result(taos, "m5", 1, 10); + check_result(taos, "m6", 1, 10); + check_result(taos, "m7", 1, 10); + check_result(taos, "m8", 1, 10); + check_result(taos, "m9", 1, 10); + printf("check result end\n"); + taos_stmt_close(stmt); + +#endif + +#if 1 + prepare(taos); + + stmt = taos_stmt_init(taos); + + printf("10t+[0,1,2...9]records start\n"); + stmt_func2(stmt); + printf("10t+[0,1,2...9]records end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 0); + check_result(taos, "m1", 0, 100); + check_result(taos, "m2", 0, 200); + check_result(taos, "m3", 0, 300); + check_result(taos, "m4", 0, 400); + check_result(taos, "m5", 0, 500); + check_result(taos, "m6", 0, 600); + check_result(taos, "m7", 0, 700); + check_result(taos, "m8", 0, 800); + check_result(taos, "m9", 0, 900); + printf("check result end\n"); + taos_stmt_close(stmt); + +#endif + +#if 1 + prepare(taos); + + stmt = taos_stmt_init(taos); + + printf("10t+[0,100,200...900]records start\n"); + stmt_func3(stmt); + printf("10t+[0,100,200...900]records end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 0); + check_result(taos, "m1", 0, 100); + check_result(taos, "m2", 0, 200); + check_result(taos, "m3", 0, 300); + check_result(taos, "m4", 0, 400); + check_result(taos, "m5", 0, 500); + check_result(taos, "m6", 0, 600); + check_result(taos, "m7", 0, 700); + check_result(taos, "m8", 0, 800); + check_result(taos, "m9", 0, 900); + printf("check result end\n"); + taos_stmt_close(stmt); + +#endif + + +#if 1 + prepare(taos); + + stmt = taos_stmt_init(taos); + + printf("300t+60r+bm start\n"); + stmt_funcb1(stmt); + printf("300t+60r+bm end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 180000); + check_result(taos, "m1", 0, 180000); + check_result(taos, "m111", 0, 180000); + check_result(taos, "m223", 0, 180000); + check_result(taos, "m299", 0, 180000); + printf("check result end\n"); + taos_stmt_close(stmt); +#endif + + +#if 1 + prepare(taos); + + stmt = taos_stmt_init(taos); + + printf("1t+9000r+bm start\n"); + stmt_funcb2(stmt); + printf("1t+9000r+bm end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 180000); + check_result(taos, "m1", 0, 180000); + check_result(taos, "m111", 0, 180000); + check_result(taos, "m223", 0, 180000); + check_result(taos, "m299", 0, 180000); + printf("check result end\n"); + taos_stmt_close(stmt); + + //stmt_perf1(stmt); +#endif + +#if 1 + prepare(taos); + + stmt = taos_stmt_init(taos); + + printf("300t+60r+disorder+bm start\n"); + stmt_funcb3(stmt); + printf("300t+60r+disorder+bm end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 180000); + check_result(taos, "m1", 0, 180000); + check_result(taos, "m111", 0, 180000); + check_result(taos, "m223", 0, 180000); + check_result(taos, "m299", 0, 180000); + printf("check result end\n"); + taos_stmt_close(stmt); + +#endif + + +#if 1 + prepare(taos); + + stmt = taos_stmt_init(taos); + + printf("300t+60r+samets+bm start\n"); + stmt_funcb4(stmt); + printf("300t+60r+samets+bm end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 1); + check_result(taos, "m1", 0, 1); + check_result(taos, "m111", 0, 1); + check_result(taos, "m223", 0, 1); + check_result(taos, "m299", 0, 1); + printf("check result 
end\n"); + taos_stmt_close(stmt); +#endif + + +#if 1 + prepare(taos); + + stmt = taos_stmt_init(taos); + + printf("300t+60r+bm+sc start\n"); + stmt_funcb_sc1(stmt); + printf("300t+60r+bm+sc end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 180000); + check_result(taos, "m1", 0, 180000); + check_result(taos, "m111", 0, 180000); + check_result(taos, "m223", 0, 180000); + check_result(taos, "m299", 0, 180000); + printf("check result end\n"); + taos_stmt_close(stmt); +#endif + +#if 1 + prepare(taos); + + stmt = taos_stmt_init(taos); + + printf("1t+60r+bm+sc start\n"); + stmt_funcb_sc2(stmt); + printf("1t+60r+bm+sc end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 180000); + check_result(taos, "m1", 0, 180000); + check_result(taos, "m111", 0, 180000); + check_result(taos, "m223", 0, 180000); + check_result(taos, "m299", 0, 180000); + printf("check result end\n"); + taos_stmt_close(stmt); + +#endif + +#if 1 + prepare(taos); + + stmt = taos_stmt_init(taos); + + printf("10t+[1...10]r+bm+sc start\n"); + stmt_funcb_sc3(stmt); + printf("10t+[1...10]r+bm+sc end\n"); + printf("check result start\n"); + check_result(taos, "m0", 1, 1); + check_result(taos, "m1", 1, 2); + check_result(taos, "m2", 1, 3); + check_result(taos, "m3", 1, 4); + check_result(taos, "m4", 1, 5); + check_result(taos, "m5", 1, 6); + check_result(taos, "m6", 1, 7); + check_result(taos, "m7", 1, 8); + check_result(taos, "m8", 1, 9); + check_result(taos, "m9", 1, 10); + printf("check result end\n"); + taos_stmt_close(stmt); + +#endif + + +#if 1 + prepare(taos); + + stmt = taos_stmt_init(taos); + + printf("1t+60r+bm start\n"); + stmt_funcb_s1(stmt); + printf("1t+60r+bm end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 180000); + check_result(taos, "m1", 0, 180000); + check_result(taos, "m111", 0, 180000); + check_result(taos, "m223", 0, 180000); + check_result(taos, "m299", 0, 180000); + printf("check result end\n"); + taos_stmt_close(stmt); + +#endif + + +#if 1 + prepare(taos); + + (void)stmt; + printf("120t+60r+sql start\n"); + sql_perf1(taos); + printf("120t+60r+sql end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 180000); + check_result(taos, "m1", 0, 180000); + check_result(taos, "m34", 0, 180000); + check_result(taos, "m67", 0, 180000); + check_result(taos, "m99", 0, 180000); + printf("check result end\n"); +#endif + +#if 1 + prepare(taos); + + (void)stmt; + printf("1t+60r+sql start\n"); + sql_perf_s1(taos); + printf("1t+60r+sql end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 180000); + check_result(taos, "m1", 0, 180000); + check_result(taos, "m34", 0, 180000); + check_result(taos, "m67", 0, 180000); + check_result(taos, "m99", 0, 180000); + printf("check result end\n"); +#endif + + + +} + +int main(int argc, char *argv[]) +{ + TAOS *taos; + + // connect to server + if (argc < 2) { + printf("please input server ip \n"); + return 0; + } + + taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); + if (taos == NULL) { + printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); + exit(1); + } + + prepare(taos); + + runcase(taos); + + + return 0; +} + diff --git a/tests/script/api/makefile b/tests/script/api/makefile new file mode 100644 index 0000000000..c5bbde0f0b --- /dev/null +++ b/tests/script/api/makefile @@ -0,0 +1,17 @@ +# Copyright (c) 2017 by TAOS Technologies, Inc. 
+# todo: library dependency, header file dependency + +ROOT=./ +TARGET=exe +LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt +CFLAGS = -O0 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \ + -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \ + -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 + +all: $(TARGET) + +exe: + gcc $(CFLAGS) ./batchprepare.c -o $(ROOT)batchprepare $(LFLAGS) + +clean: + rm $(ROOT)batchprepare From a415d77ead22b2131078ae42521a6237356cc650 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Sun, 9 May 2021 15:57:45 +0800 Subject: [PATCH 097/140] taoserror: fix invalid tsdb state error no --- src/inc/taoserror.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 855afe923d..ce6f7c4f22 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -218,7 +218,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended") #define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied") #define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing") -#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0513) //"Invalid tsdb state") +#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state") // tsdb #define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID") From 92f8459cc760b8c9a2d1dea743e30edfb1602ea1 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Sun, 9 May 2021 15:59:26 +0800 Subject: [PATCH 098/140] [TD-4081]: fix vnode confirm forward missing response & write msg freeing --- src/vnode/src/vnodeSync.c | 3 +-- src/vnode/src/vnodeWrite.c | 11 ++++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c index aa4cf0fc15..05af34a34f 100644 --- a/src/vnode/src/vnodeSync.c +++ b/src/vnode/src/vnodeSync.c @@ -119,7 +119,6 @@ void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code) { void *pVnode = vnodeAcquire(vgId); if (pVnode == NULL) { vError("vgId:%d, vnode not found while confirm forward", vgId); - return; } dnodeSendRpcVWriteRsp(pVnode, wparam, code); @@ -162,4 +161,4 @@ int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) { void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code, bool force) { SVnodeObj *pVnode = vparam; syncConfirmForward(pVnode->sync, version, code, force); -} \ No newline at end of file +} diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index def9cf3b32..09b41418d2 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -317,12 +317,13 @@ int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rpar void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) { SVnodeObj *pVnode = vparam; + if (pVnode) { + int32_t queued = atomic_sub_fetch_32(&pVnode->queuedWMsg, 1); + int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len); - int32_t queued = atomic_sub_fetch_32(&pVnode->queuedWMsg, 1); - int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len); - - vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d size:%" PRId64, pVnode->vgId, pWrite, - pWrite->rpcMsg.ahandle, queued, queuedSize); + vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d size:%" PRId64, pVnode->vgId, pWrite, + 
pWrite->rpcMsg.ahandle, queued, queuedSize); + } taosFreeQitem(pWrite); vnodeRelease(pVnode); From 53062af89d5cdd0df21fabfb0a93d402939c664c Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Sun, 9 May 2021 16:23:50 +0800 Subject: [PATCH 099/140] dummy commit to make drone CI progress --- src/vnode/src/vnodeWrite.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 09b41418d2..36516d81df 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -372,8 +372,8 @@ static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) { taosMsleep(ms); return 0; } else { - void *unUsed = NULL; - taosTmrReset(vnodeFlowCtrlMsgToWQueue, 100, pWrite, tsDnodeTmr, &unUsed); + void *unUsedTimerId = NULL; + taosTmrReset(vnodeFlowCtrlMsgToWQueue, 100, pWrite, tsDnodeTmr, &unUsedTimerId); vTrace("vgId:%d, msg:%p, app:%p, perform flowctrl, retry:%d", pVnode->vgId, pWrite, pWrite->rpcMsg.ahandle, pWrite->processedCount); From b7cec3072beec73d16d1436c3c44feb8896f2217 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Sun, 9 May 2021 16:45:02 +0800 Subject: [PATCH 100/140] fix --- src/client/src/tscPrepare.c | 87 ++++--- tests/script/api/batchprepare.c | 413 ++++++++++++++++++++++++++++++-- 2 files changed, 445 insertions(+), 55 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 79701841df..4d5fdc2287 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -148,7 +148,7 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { break; default: - tscDebug("param %d: type mismatch or invalid", i); + tscDebug("0x%"PRIx64" param %d: type mismatch or invalid", stmt->pSql->self, i); return TSDB_CODE_TSC_INVALID_VALUE; } } @@ -804,7 +804,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { if (pStmt->multiTbInsert) { if (pCmd->pTableBlockHashList == NULL) { - tscError("Table block hash list is empty"); + tscError("0x%"PRIx64" Table block hash list is empty", pStmt->pSql->self); return TSDB_CODE_TSC_APP_ERROR; } @@ -850,7 +850,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { int code = doBindParam(pBlock, data, param, &bind[param->idx], 1); if (code != TSDB_CODE_SUCCESS) { - tscDebug("param %d: type mismatch or invalid", param->idx); + tscDebug("0x%"PRIx64" param %d: type mismatch or invalid", pStmt->pSql->self, param->idx); return code; } } @@ -866,18 +866,34 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c STableDataBlocks* pBlock = NULL; - if (pCmd->pTableBlockHashList == NULL) { - tscError("Table block hash list is empty"); - return TSDB_CODE_TSC_APP_ERROR; - } + if (pStmt->multiTbInsert) { + if (pCmd->pTableBlockHashList == NULL) { + tscError("0x%"PRIx64" Table block hash list is empty", pStmt->pSql->self); + return TSDB_CODE_TSC_APP_ERROR; + } - STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid)); - if (t1 == NULL) { - tscError("no table data block in hash list, uid:%" PRId64 , pStmt->mtb.currentUid); - return TSDB_CODE_TSC_APP_ERROR; - } + STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid)); + if (t1 == NULL) { + tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pStmt->pSql->self, pStmt->mtb.currentUid); + return TSDB_CODE_TSC_APP_ERROR; + } - pBlock = 
*t1; + pBlock = *t1; + } else { + STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0); + + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + if (pCmd->pTableBlockHashList == NULL) { + pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); + } + + int32_t ret = + tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk), + pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL); + if (ret != 0) { + return ret; + } + } assert(colIdx == -1 || (colIdx >= 0 && colIdx < pBlock->numOfParams)); @@ -898,13 +914,13 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c for (uint32_t j = 0; j < pBlock->numOfParams; ++j) { SParamInfo* param = &pBlock->params[j]; if (bind[param->idx].num != rowNum) { - tscError("param %d: num[%d:%d] not match", param->idx, rowNum, bind[param->idx].num); + tscError("0x%"PRIx64" param %d: num[%d:%d] not match", pStmt->pSql->self, param->idx, rowNum, bind[param->idx].num); return TSDB_CODE_TSC_INVALID_VALUE; } int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize); if (code != TSDB_CODE_SUCCESS) { - tscError("param %d: type mismatch or invalid", param->idx); + tscError("0x%"PRIx64" param %d: type mismatch or invalid", pStmt->pSql->self, param->idx); return code; } } @@ -915,7 +931,7 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c int code = doBindBatchParam(pBlock, param, bind, pCmd->batchSize); if (code != TSDB_CODE_SUCCESS) { - tscError("param %d: type mismatch or invalid", param->idx); + tscError("0x%"PRIx64" param %d: type mismatch or invalid", pStmt->pSql->self, param->idx); return code; } @@ -940,7 +956,7 @@ static int insertStmtUpdateBatch(STscStmt* stmt) { STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&stmt->mtb.currentUid, sizeof(stmt->mtb.currentUid)); if (t1 == NULL) { - tscError("no table data block in hash list, uid:%" PRId64 , stmt->mtb.currentUid); + tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pSql->self, stmt->mtb.currentUid); return TSDB_CODE_TSC_APP_ERROR; } @@ -1110,7 +1126,7 @@ static int insertBatchStmtExecute(STscStmt* pStmt) { int32_t code = 0; if(pStmt->mtb.nameSet == false) { - tscError("no table name set"); + tscError("0x%"PRIx64" no table name set", pStmt->pSql->self); return TSDB_CODE_TSC_APP_ERROR; } @@ -1276,13 +1292,13 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { if (name == NULL) { terrno = TSDB_CODE_TSC_APP_ERROR; - tscError("name is NULL"); + tscError("0x%"PRIx64" name is NULL", pSql->self); return TSDB_CODE_TSC_APP_ERROR; } if (pStmt->multiTbInsert == false || !tscIsInsertData(pSql->sqlstr)) { terrno = TSDB_CODE_TSC_APP_ERROR; - tscError("not multi table insert"); + tscError("0x%"PRIx64" not multi table insert", pSql->self); return TSDB_CODE_TSC_APP_ERROR; } @@ -1292,7 +1308,7 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pStmt->mtb.pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid)); if (t1 == NULL) { - tscError("no table data block in hash list, uid:%" PRId64 , pStmt->mtb.currentUid); + tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pSql->self, pStmt->mtb.currentUid); return TSDB_CODE_TSC_APP_ERROR; } @@ -1301,7 +1317,7 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, 
const char* name) { taosHashPut(pCmd->pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)t1, POINTER_BYTES); - tscDebug("table:%s is already prepared, uid:%" PRIu64, name, pStmt->mtb.currentUid); + tscDebug("0x%"PRIx64" table:%s is already prepared, uid:%" PRIu64, pSql->self, name, pStmt->mtb.currentUid); return TSDB_CODE_SUCCESS; } @@ -1350,7 +1366,7 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid)); - tscDebug("table:%s is prepared, uid:%" PRIu64, name, pStmt->mtb.currentUid); + tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIu64, pSql->self, name, pStmt->mtb.currentUid); } return code; @@ -1396,12 +1412,17 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) { int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) { STscStmt* pStmt = (STscStmt*)stmt; if (bind == NULL || bind->num <= 0) { - tscError("invalid parameter"); + tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self); return TSDB_CODE_TSC_APP_ERROR; } - if (!pStmt->isInsert || !pStmt->multiTbInsert || !pStmt->mtb.nameSet) { - tscError("not or invalid batch insert"); + if (!pStmt->isInsert) { + tscError("0x%"PRIx64" not or invalid batch insert", pStmt->pSql->self); + return TSDB_CODE_TSC_APP_ERROR; + } + + if (pStmt->multiTbInsert && !pStmt->mtb.nameSet) { + tscError("0x%"PRIx64" no table name set", pStmt->pSql->self); return TSDB_CODE_TSC_APP_ERROR; } @@ -1411,14 +1432,20 @@ int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) { int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx) { STscStmt* pStmt = (STscStmt*)stmt; if (bind == NULL || bind->num <= 0) { - tscError("invalid parameter"); + tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self); return TSDB_CODE_TSC_APP_ERROR; } - if (!pStmt->isInsert || !pStmt->multiTbInsert || !pStmt->mtb.nameSet) { - tscError("not or invalid batch insert"); + if (!pStmt->isInsert) { + tscError("0x%"PRIx64" not or invalid batch insert", pStmt->pSql->self); return TSDB_CODE_TSC_APP_ERROR; } + + if (pStmt->multiTbInsert && !pStmt->mtb.nameSet) { + tscError("0x%"PRIx64" no table name set", pStmt->pSql->self); + return TSDB_CODE_TSC_APP_ERROR; + } + return insertStmtBindParamBatch(pStmt, bind, colIdx); } @@ -1544,7 +1571,7 @@ int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) { } if (idx<0 || idx>=pBlock->numOfParams) { - tscError("param %d: out of range", idx); + tscError("0x%"PRIx64" param %d: out of range", pStmt->pSql->self, idx); abort(); } diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c index 48c668634d..9b204c2f4a 100644 --- a/tests/script/api/batchprepare.c +++ b/tests/script/api/batchprepare.c @@ -6,7 +6,13 @@ #include #include "taos.h" #include +#include +#include +typedef struct { + TAOS *taos; + int idx; +}T_par; void taosMsleep(int mseconds); @@ -585,7 +591,7 @@ int stmt_funcb2(TAOS_STMT *stmt) { for (int i = 0; i < 30000; i+=10) { params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; params[i+0].buffer_length = sizeof(int64_t); - params[i+0].buffer = &v.ts[60*i/10]; + params[i+0].buffer = &v.ts[18000*i/10]; params[i+0].length = NULL; params[i+0].is_null = no_null; params[i+0].num = 18000; @@ -1008,6 +1014,232 @@ int stmt_funcb4(TAOS_STMT *stmt) { } + + +//1table 18000 reocrds +int stmt_funcb5(TAOS_STMT *stmt) { + struct { + int64_t *ts; + int8_t b[18000]; + int8_t v1[18000]; 
+ int16_t v2[18000]; + int32_t v4[18000]; + int64_t v8[18000]; + float f4[18000]; + double f8[18000]; + char bin[18000][40]; + } v = {0}; + + v.ts = malloc(sizeof(int64_t) * 900000 * 60); + + uintptr_t *lb = malloc(18000 * sizeof(uintptr_t)); + + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 3000*10); + int* is_null = malloc(sizeof(int) * 18000); + int* no_null = malloc(sizeof(int) * 18000); + + for (int i = 0; i < 18000; ++i) { + lb[i] = 40; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 1 : 0; + v.b[i] = (int8_t)(i % 2); + v.v1[i] = (int8_t)((i+1) % 2); + v.v2[i] = (int16_t)i; + v.v4[i] = (int32_t)(i+1); + v.v8[i] = (int64_t)(i+2); + v.f4[i] = (float)(i+3); + v.f8[i] = (double)(i+4); + memset(v.bin[i], '0'+i%10, 40); + } + + for (int i = 0; i < 30000; i+=10) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v.ts[18000*i/10]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = 18000; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v.b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = 18000; + + params[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[i+2].buffer_length = sizeof(int8_t); + params[i+2].buffer = v.v1; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = 18000; + + params[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[i+3].buffer_length = sizeof(int16_t); + params[i+3].buffer = v.v2; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = 18000; + + params[i+4].buffer_type = TSDB_DATA_TYPE_INT; + params[i+4].buffer_length = sizeof(int32_t); + params[i+4].buffer = v.v4; + params[i+4].length = NULL; + params[i+4].is_null = is_null; + params[i+4].num = 18000; + + params[i+5].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[i+5].buffer_length = sizeof(int64_t); + params[i+5].buffer = v.v8; + params[i+5].length = NULL; + params[i+5].is_null = is_null; + params[i+5].num = 18000; + + params[i+6].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+6].buffer_length = sizeof(float); + params[i+6].buffer = v.f4; + params[i+6].length = NULL; + params[i+6].is_null = is_null; + params[i+6].num = 18000; + + params[i+7].buffer_type = TSDB_DATA_TYPE_DOUBLE; + params[i+7].buffer_length = sizeof(double); + params[i+7].buffer = v.f8; + params[i+7].length = NULL; + params[i+7].is_null = is_null; + params[i+7].num = 18000; + + params[i+8].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+8].buffer_length = 40; + params[i+8].buffer = v.bin; + params[i+8].length = lb; + params[i+8].is_null = is_null; + params[i+8].num = 18000; + + params[i+9].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+9].buffer_length = 40; + params[i+9].buffer = v.bin; + params[i+9].length = lb; + params[i+9].is_null = is_null; + params[i+9].num = 18000; + + } + + int64_t tts = 1591060628000; + for (int i = 0; i < 54000000; ++i) { + v.ts[i] = tts + i; + } + + unsigned long long starttime = getCurrentTime(); + + char *sql = "insert into m0 values(?,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. 
code:0x%x\n", code); + } + + int id = 0; + for (int l = 0; l < 10; l++) { + for (int zz = 0; zz < 1; zz++) { + taos_stmt_bind_param_batch(stmt, params + id * 10); + taos_stmt_add_batch(stmt); + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + ++id; + + } + + } + + unsigned long long endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + + return 0; +} + + +//1table 200000 reocrds +int stmt_funcb_ssz1(TAOS_STMT *stmt) { + struct { + int64_t *ts; + int b[30000]; + } v = {0}; + + v.ts = malloc(sizeof(int64_t) * 30000 * 3000); + + uintptr_t *lb = malloc(30000 * sizeof(uintptr_t)); + + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 3000*10); + int* no_null = malloc(sizeof(int) * 200000); + + for (int i = 0; i < 30000; ++i) { + lb[i] = 40; + no_null[i] = 0; + v.b[i] = (int8_t)(i % 2); + } + + for (int i = 0; i < 30000; i+=10) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v.ts[30000*i/10]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = 30000; + + params[i+1].buffer_type = TSDB_DATA_TYPE_INT; + params[i+1].buffer_length = sizeof(int); + params[i+1].buffer = v.b; + params[i+1].length = NULL; + params[i+1].is_null = no_null; + params[i+1].num = 30000; + } + + int64_t tts = 0; + for (int64_t i = 0; i < 90000000LL; ++i) { + v.ts[i] = tts + i; + } + + unsigned long long starttime = getCurrentTime(); + + char *sql = "insert into ? values(?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + } + + int id = 0; + for (int l = 0; l < 10; l++) { + for (int zz = 0; zz < 300; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. 
code:0x%x\n", code); + } + + taos_stmt_bind_param_batch(stmt, params + id * 10); + taos_stmt_add_batch(stmt); + + if (taos_stmt_execute(stmt) != 0) { + printf("failed to execute insert statement.\n"); + exit(1); + } + ++id; + + } + + } + + unsigned long long endtime = getCurrentTime(); + printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + + return 0; +} + + //one table 60 records one time int stmt_funcb_s1(TAOS_STMT *stmt) { struct { @@ -1796,7 +2028,7 @@ int sql_s_perf1(TAOS *taos) { } -void prepare(TAOS *taos) { +void prepare(TAOS *taos, int bigsize) { TAOS_RES *result; int code; @@ -1818,7 +2050,11 @@ void prepare(TAOS *taos) { // create table for (int i = 0 ; i < 300; i++) { char buf[1024]; - sprintf(buf, "create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), bin2 binary(40))", i) ; + if (bigsize) { + sprintf(buf, "create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), bin2 binary(40))", i) ; + } else { + sprintf(buf, "create table m%d (ts timestamp, b int)", i) ; + } result = taos_query(taos, buf); code = taos_errno(result); if (code != 0) { @@ -1832,12 +2068,68 @@ void prepare(TAOS *taos) { } -void runcase(TAOS *taos) { + +void preparem(TAOS *taos, int bigsize, int idx) { + TAOS_RES *result; + int code; + char dbname[32],sql[255]; + + sprintf(dbname, "demo%d", idx); + sprintf(sql, "drop database %s", dbname); + + + result = taos_query(taos, sql); + taos_free_result(result); + + sprintf(sql, "create database %s", dbname); + result = taos_query(taos, sql); + code = taos_errno(result); + if (code != 0) { + printf("failed to create database, reason:%s\n", taos_errstr(result)); + taos_free_result(result); + exit(1); + } + taos_free_result(result); + + sprintf(sql, "use %s", dbname); + result = taos_query(taos, sql); + taos_free_result(result); + + // create table + for (int i = 0 ; i < 300; i++) { + char buf[1024]; + if (bigsize) { + sprintf(buf, "create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), bin2 binary(40))", i) ; + } else { + sprintf(buf, "create table m%d (ts timestamp, b int)", i) ; + } + result = taos_query(taos, buf); + code = taos_errno(result); + if (code != 0) { + printf("failed to create table, reason:%s\n", taos_errstr(result)); + taos_free_result(result); + exit(1); + } + taos_free_result(result); + } + +} + + + +//void runcase(TAOS *taos, int idx) { +void* runcase(void *par) { + T_par* tpar = (T_par *)par; + TAOS *taos = tpar->taos; + int idx = tpar->idx; + TAOS_STMT *stmt; + (void)idx; + #if 1 - prepare(taos); + prepare(taos, 1); stmt = taos_stmt_init(taos); @@ -1861,7 +2153,7 @@ void runcase(TAOS *taos) { #endif #if 1 - prepare(taos); + prepare(taos, 1); stmt = taos_stmt_init(taos); @@ -1885,7 +2177,7 @@ void runcase(TAOS *taos) { #endif #if 1 - prepare(taos); + prepare(taos, 1); stmt = taos_stmt_init(taos); @@ -1910,7 +2202,7 @@ void runcase(TAOS *taos) { #if 1 - prepare(taos); + prepare(taos, 1); stmt = taos_stmt_init(taos); @@ -1928,14 +2220,14 @@ void runcase(TAOS *taos) { #endif -#if 1 - prepare(taos); +#if 1 + prepare(taos, 1); stmt = taos_stmt_init(taos); - printf("1t+9000r+bm start\n"); + printf("1t+18000r+bm start\n"); stmt_funcb2(stmt); - printf("1t+9000r+bm end\n"); + printf("1t+18000r+bm end\n"); printf("check result start\n"); check_result(taos, "m0", 0, 
180000); check_result(taos, "m1", 0, 180000); @@ -1945,11 +2237,10 @@ void runcase(TAOS *taos) { printf("check result end\n"); taos_stmt_close(stmt); - //stmt_perf1(stmt); #endif #if 1 - prepare(taos); + prepare(taos, 1); stmt = taos_stmt_init(taos); @@ -1969,7 +2260,7 @@ void runcase(TAOS *taos) { #if 1 - prepare(taos); + prepare(taos, 1); stmt = taos_stmt_init(taos); @@ -1986,9 +2277,24 @@ void runcase(TAOS *taos) { taos_stmt_close(stmt); #endif +#if 1 + prepare(taos, 1); + + stmt = taos_stmt_init(taos); + + printf("1t+18000r+nodyntable+bm start\n"); + stmt_funcb5(stmt); + printf("1t+18000r+nodyntable+bm end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 180000); + printf("check result end\n"); + taos_stmt_close(stmt); + +#endif + #if 1 - prepare(taos); + prepare(taos, 1); stmt = taos_stmt_init(taos); @@ -2006,7 +2312,7 @@ void runcase(TAOS *taos) { #endif #if 1 - prepare(taos); + prepare(taos, 1); stmt = taos_stmt_init(taos); @@ -2025,7 +2331,7 @@ void runcase(TAOS *taos) { #endif #if 1 - prepare(taos); + prepare(taos, 1); stmt = taos_stmt_init(taos); @@ -2050,7 +2356,7 @@ void runcase(TAOS *taos) { #if 1 - prepare(taos); + prepare(taos, 1); stmt = taos_stmt_init(taos); @@ -2070,7 +2376,7 @@ void runcase(TAOS *taos) { #if 1 - prepare(taos); + prepare(taos, 1); (void)stmt; printf("120t+60r+sql start\n"); @@ -2086,7 +2392,7 @@ void runcase(TAOS *taos) { #endif #if 1 - prepare(taos); + prepare(taos, 1); (void)stmt; printf("1t+60r+sql start\n"); @@ -2102,12 +2408,32 @@ void runcase(TAOS *taos) { #endif +#if 1 + preparem(taos, 0, idx); + + stmt = taos_stmt_init(taos); + + printf("1t+30000r+bm start\n"); + stmt_funcb_ssz1(stmt); + printf("1t+30000r+bm end\n"); + printf("check result start\n"); + check_result(taos, "m0", 0, 300000); + check_result(taos, "m1", 0, 300000); + check_result(taos, "m111", 0, 300000); + check_result(taos, "m223", 0, 300000); + check_result(taos, "m299", 0, 300000); + printf("check result end\n"); + taos_stmt_close(stmt); + +#endif + + return NULL; } int main(int argc, char *argv[]) { - TAOS *taos; + TAOS *taos[4]; // connect to server if (argc < 2) { @@ -2115,17 +2441,54 @@ int main(int argc, char *argv[]) return 0; } - taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); + taos[0] = taos_connect(argv[1], "root", "taosdata", NULL, 0); if (taos == NULL) { printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); exit(1); } - prepare(taos); + taos[1] = taos_connect(argv[1], "root", "taosdata", NULL, 0); + if (taos == NULL) { + printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); + exit(1); + } - runcase(taos); + taos[2] = taos_connect(argv[1], "root", "taosdata", NULL, 0); + if (taos == NULL) { + printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); + exit(1); + } + taos[3] = taos_connect(argv[1], "root", "taosdata", NULL, 0); + if (taos == NULL) { + printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); + exit(1); + } + + pthread_t *pThreadList = (pthread_t *) calloc(sizeof(pthread_t), 4); + + pthread_attr_t thattr; + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + T_par par[4]; + + par[0].taos = taos[0]; + par[0].idx = 0; + par[1].taos = taos[1]; + par[1].idx = 1; + par[2].taos = taos[2]; + par[2].idx = 2; + par[3].taos = taos[3]; + par[3].idx = 3; + pthread_create(&(pThreadList[0]), &thattr, runcase, (void *)&par[0]); + //pthread_create(&(pThreadList[1]), &thattr, runcase, (void *)&par[1]); + //pthread_create(&(pThreadList[2]), &thattr, runcase, 
(void *)&par[2]); + //pthread_create(&(pThreadList[3]), &thattr, runcase, (void *)&par[3]); + + while(1) { + sleep(1); + } return 0; } From 511cdbeae0996869a899df8061bfcbd8b5b530f5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 9 May 2021 16:48:51 +0800 Subject: [PATCH 101/140] [td-4038] --- .../jni/com_taosdata_jdbc_TSDBJNIConnector.h | 4 +- src/client/src/TSDBJNIConnector.c | 50 ++++- src/client/src/tscPrepare.c | 5 +- src/client/src/tscUtil.c | 2 +- .../com/taosdata/jdbc/TSDBConnection.java | 14 +- .../com/taosdata/jdbc/TSDBJNIConnector.java | 44 ++--- .../taosdata/jdbc/TSDBPreparedStatement.java | 173 ++++++++++++------ .../java/com/taosdata/jdbc/TSDBStatement.java | 66 +++---- src/inc/taos.h | 9 +- 9 files changed, 223 insertions(+), 144 deletions(-) diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h index bcd0f63818..07fd46f859 100644 --- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h +++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h @@ -204,10 +204,10 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI /* * Class: com_taosdata_jdbc_TSDBJNIConnector * Method: bindColDataImp - * Signature: (J[BIIIJ)J + * Signature: (J[B[BIIIIJ)J */ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp -(JNIEnv *, jobject, jlong, jbyteArray, jint, jint, jint, jlong); +(JNIEnv *, jobject, jlong, jbyteArray, jbyteArray, jint, jint, jint, jint, jlong); /* * Class: com_taosdata_jdbc_TSDBJNIConnector diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index c28766b860..8ffd021e51 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -746,14 +746,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI return JNI_TDENGINE_ERROR; } - jniDebug("jobj:%p, conn:%p, set stmt bind table name", jobj, tsconn); + jniDebug("jobj:%p, conn:%p, set stmt bind table name:%s", jobj, tsconn, name); (*env)->ReleaseStringUTFChars(env, jname, name); return JNI_SUCCESS; } JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(JNIEnv *env, jobject jobj, jlong stmt, - jbyteArray data, jint dataType, jint numOfRows, jint colIndex, jlong con) { + jbyteArray data, jbyteArray length, jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { jniError("jobj:%p, connection already closed", jobj); @@ -766,15 +766,50 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(J return JNI_SQL_NULL; } -#if 0 - TAOS_BIND* b = malloc(20); - b.num= jrows; - int32_t code = taos_stmt_bind_param_batch(stmt, b, colInex); + // todo refactor + jsize len = (*env)->GetArrayLength(env, data); + char *colBuf = (char *)calloc(1, sizeof(char) * len); + (*env)->GetByteArrayRegion(env, data, 0, len, (jbyte *)colBuf); + if ((*env)->ExceptionCheck(env)) { + // todo handle error + } + + len = (*env)->GetArrayLength(env, length); + char *lengthArray = (char*) calloc(1, sizeof(char) * len); + (*env)->GetByteArrayRegion(env, length, 0, len, (jbyte*) lengthArray); + if ((*env)->ExceptionCheck(env)) { + } + + // bind multi-rows with only one invoke. 
+ TAOS_MULTI_BIND* b = calloc(1, sizeof(TAOS_MULTI_BIND)); + + b->num = numOfRows; + b->buffer_type = dataType; // todo check data type + b->buffer_length = tDataTypes[dataType].bytes; + b->is_null = calloc(numOfRows, sizeof(int32_t)); + b->buffer = colBuf; + b->length = (uintptr_t*)lengthArray; + + // set the length and is_null array + switch(dataType) { + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: { + int32_t bytes = tDataTypes[dataType].bytes; + for(int32_t i = 0; i < numOfRows; ++i) { + b->length[i] = bytes; + b->is_null[i] = isNull(colBuf + bytes * i, dataType); + } + } + } + + int32_t code = taos_stmt_bind_single_param_batch(pStmt, b, colIndex); if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); return JNI_TDENGINE_ERROR; } -#endif return JNI_SUCCESS; } @@ -792,6 +827,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(J return JNI_SQL_NULL; } + taos_stmt_add_batch(pStmt); int32_t code = taos_stmt_execute(pStmt); if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 9af7b11615..d251867eaa 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1306,10 +1306,9 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { } pStmt->mtb.tbname = tscReplaceStrToken(&pSql->sqlstr, &pStmt->mtb.tbname, name); - pStmt->mtb.nameSet = true; - tscDebug("sqlstr set to %s", pSql->sqlstr); + tscDebug("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); pSql->cmd.parseFinished = 0; pSql->cmd.numOfParams = 0; @@ -1350,7 +1349,7 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid)); - tscDebug("table:%s is prepared, uid:%" PRIu64, name, pStmt->mtb.currentUid); + tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid); } return code; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 7a6842869a..5f14236ded 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1319,7 +1319,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) { pBlocks->numOfRows = 0; }else { - tscDebug("table %s data block is empty", pOneTableBlock->tableName.tname); + tscDebug("0x%"PRIx64" table %s data block is empty", pSql->self, pOneTableBlock->tableName.tname); } p = taosHashIterate(pCmd->pTableBlockHashList, p); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index c8ab9fb15a..02fee74eb5 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -49,7 +49,7 @@ public class TSDBConnection extends AbstractConnection { this.databaseMetaData.setConnection(this); } - public TSDBJNIConnector getConnection() { + public TSDBJNIConnector getConnector() { return this.connector; } @@ -58,7 +58,7 @@ public class TSDBConnection extends AbstractConnection { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } - return new TSDBStatement(this, this.connector); + return new TSDBStatement(this); } public TSDBSubscribe subscribe(String topic, String sql, 
boolean restart) throws SQLException { @@ -74,14 +74,18 @@ public class TSDBConnection extends AbstractConnection { } public PreparedStatement prepareStatement(String sql) throws SQLException { - if (isClosed()) + if (isClosed()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - return new TSDBPreparedStatement(this, this.connector, sql); + } + + return new TSDBPreparedStatement(this, sql); } public void close() throws SQLException { - if (isClosed) + if (isClosed) { return; + } + this.connector.closeConnection(); this.isClosed = true; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 05e28578f1..62cd441b3a 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -30,10 +30,13 @@ public class TSDBJNIConnector { private static volatile Boolean isInitialized = false; private TaosInfo taosInfo = TaosInfo.getInstance(); + // Connection pointer used in C private long taos = TSDBConstants.JNI_NULL_POINTER; + // result set status in current connection private boolean isResultsetClosed = true; + private int affectedRows = -1; static { @@ -163,37 +166,14 @@ public class TSDBJNIConnector { private native long isUpdateQueryImp(long connection, long pSql); /** - * Free resultset operation from C to release resultset pointer by JNI + * Free result set operation from C to release result set pointer by JNI */ public int freeResultSet(long pSql) { - int res = TSDBConstants.JNI_SUCCESS; -// if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { -// throw new RuntimeException("Invalid result set pointer"); -// } - -// if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { - res = this.freeResultSetImp(this.taos, pSql); -// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; -// } - + int res = this.freeResultSetImp(this.taos, pSql); isResultsetClosed = true; return res; } - /** - * Close the open result set which is associated to the current connection. If the result set is already - * closed, return 0 for success. 
- */ -// public int freeResultSet() { -// int resCode = TSDBConstants.JNI_SUCCESS; -// if (!isResultsetClosed) { -// resCode = this.freeResultSetImp(this.taos, this.taosResultSetPointer); -// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; -// isResultsetClosed = true; -// } -// return resCode; -// } - private native int freeResultSetImp(long connection, long result); /** @@ -240,6 +220,7 @@ public class TSDBJNIConnector { */ public void closeConnection() throws SQLException { int code = this.closeConnectionImp(this.taos); + if (code < 0) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); } else if (code == 0) { @@ -247,6 +228,7 @@ public class TSDBJNIConnector { } else { throw new SQLException("Undefined error code returned by TDengine when closing a connection"); } + // invoke closeConnectionImpl only here taosInfo.connect_close_increment(); } @@ -283,7 +265,7 @@ public class TSDBJNIConnector { private native void unsubscribeImp(long subscription, boolean isKeep); /** - * Validate if a create table sql statement is correct without actually creating that table + * Validate if a create table SQL statement is correct without actually creating that table */ public boolean validateCreateTableSql(String sql) { int res = validateCreateTableSqlImp(taos, sql.getBytes()); @@ -295,7 +277,7 @@ public class TSDBJNIConnector { public long prepareStmt(String sql) throws SQLException { Long stmt = 0L; try { - stmt = prepareStmtImp(sql, this.taos); + stmt = prepareStmtImp(sql.getBytes(), this.taos); } catch (Exception e) { e.printStackTrace(); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING); @@ -316,7 +298,7 @@ public class TSDBJNIConnector { return stmt; } - private native long prepareStmtImp(String sql, long con); + private native long prepareStmtImp(byte[] sql, long con); public int setBindTableName(long stmt, String tableName) { return setBindTableNameImp(stmt, tableName, this.taos); @@ -324,11 +306,11 @@ public class TSDBJNIConnector { private native int setBindTableNameImp(long stmt, String name, long conn); - public int bindColumnDataArray(long stmt, byte[] data, int type, int numOfRows, int columnIndex) { - return bindColDataImp(stmt, data, type, numOfRows, columnIndex, this.taos); + public int bindColumnDataArray(long stmt, ByteBuffer colList, ByteBuffer lengthList, int type, int bytes, int numOfRows,int columnIndex) { + return bindColDataImp(stmt, colList.array(), lengthList.array(), type, bytes, numOfRows, columnIndex, this.taos); } - private native int bindColDataImp(long stmt, byte[] data, int type, int numOfRows, int columnIndex, long conn); + private native int bindColDataImp(long stmt, byte[] data, byte[] length, int type, int bytes, int numOfRows, int columnIndex, long conn); public int executeBatch(long stmt) { return executeBatchImp(stmt, this.taos); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index be6c2361a1..cc781d1613 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -22,8 +22,7 @@ import java.io.Reader; import java.math.BigDecimal; import java.net.URL; import java.nio.ByteBuffer; -import java.nio.DoubleBuffer; -import java.nio.IntBuffer; +import java.nio.ByteOrder; import java.sql.*; import java.util.ArrayList; import java.util.Calendar; @@ -45,12 +44,12 @@ 
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat private int type; private String tableName; - private long nativeStmtPtr = 0; + private long nativeStmtHandle = 0; private volatile TSDBParameterMetaData parameterMetaData; - TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connector, String sql) { - super(connection, connector); + TSDBPreparedStatement(TSDBConnection connection, String sql) { + super(connection); init(sql); int parameterCnt = 0; @@ -64,8 +63,9 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat this.isPrepared = true; } - this.colData = new ArrayList(parameterCnt); - this.colData.addAll(Collections.nCopies(parameterCnt, null)); + // the table name is also a parameter, so ignore it. + this.colData = new ArrayList(parameterCnt - 1); + this.colData.addAll(Collections.nCopies(parameterCnt - 1, null)); } private void init(String sql) { @@ -543,12 +543,15 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat private int type; private boolean typeIsSet; - public void ClumnInfo() { + public ColumnInfo() { this.typeIsSet = false; } - public void setType(int type) { - Assert.check(!this.typeIsSet); + public void setType(int type) throws SQLException { + if (this.isTypeSet()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type has been set"); + } + this.typeIsSet = true; this.type = type; } @@ -562,106 +565,160 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat this.tableName = name; } - @SuppressWarnings("unchecked") - public void setInt(int columnIndex, ArrayList list) throws SQLException { + public void setValueImpl(int columnIndex, ArrayList list, int type) throws SQLException { ColumnInfo col = (ColumnInfo) this.colData.get(columnIndex); if (col == null) { ColumnInfo p = new ColumnInfo(); - p.setType(TSDBConstants.TSDB_DATA_TYPE_INT); - p.data = (ArrayList) list.clone(); + p.setType(type); + p.data = (ArrayList) list.clone(); this.colData.set(columnIndex, p); } else { - if (col.type != TSDBConstants.TSDB_DATA_TYPE_INT) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (col.type != type) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type mismatch"); } col.data.addAll(list); } } - @SuppressWarnings("unchecked") - public void setFloat(int columnIndex, ArrayList list) throws SQLException { - ColumnInfo col = (ColumnInfo) this.colData.get(columnIndex); - if (col == null) { - ColumnInfo p = new ColumnInfo(); - p.setType(TSDBConstants.TSDB_DATA_TYPE_INT); - p.data = (ArrayList) list.clone(); - this.colData.set(columnIndex, p); - } else { - if (col.type != TSDBConstants.TSDB_DATA_TYPE_INT) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } - - col.data.addAll(list); - } + public void setInt(int columnIndex, ArrayList list) throws SQLException { + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_INT); } - public void addColumnDataBatch() { + public void setFloat(int columnIndex, ArrayList list) throws SQLException { + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_FLOAT); + } + + public void setTimestamp(int columnIndex, ArrayList list) throws SQLException { + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP); + } + + public void setLong(int columnIndex, ArrayList list) throws SQLException { + setValueImpl(columnIndex, list, 
TSDBConstants.TSDB_DATA_TYPE_BIGINT); + } + + public void setDouble(int columnIndex, ArrayList list) throws SQLException { + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_DOUBLE); + } + + public void setBoolean(int columnIndex, ArrayList list) throws SQLException { + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BOOL); + } + + public void setByte(int columnIndex, ArrayList list) throws SQLException { + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TINYINT); + } + + public void setShort(int columnIndex, ArrayList list) throws SQLException { + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_SMALLINT); + } + + public void setString(int columnIndex, ArrayList list) throws SQLException { + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BINARY); + } + + public void setNString(int columnIndex, ArrayList list) throws SQLException { + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_NCHAR); + } + + public void columnDataAddBatch() { // do nothing } - public void columnDataExecuteBatch() { - int size = this.colData.size(); - ColumnInfo col = (ColumnInfo) this.colData.get(0); - int rows = col.data.size(); + public void columnDataExecuteBatch() throws SQLException { + int numOfCols = this.colData.size(); + int rows = ((ColumnInfo) this.colData.get(0)).data.size(); // pass the data block to native code - TSDBJNIConnector conn = null; + TSDBJNIConnector connector = null; try { - conn = (TSDBJNIConnector) this.getConnection(); - this.nativeStmtPtr = conn.prepareStmt(rawSql); - conn.setBindTableName(this.nativeStmtPtr, this.tableName); + connector = ((TSDBConnection) this.getConnection()).getConnector(); + this.nativeStmtHandle = connector.prepareStmt(rawSql); + + // table name is not set yet, abort + if (this.tableName == null) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "table name not set yet"); + } + connector.setBindTableName(this.nativeStmtHandle, this.tableName); } catch (SQLException e) { e.printStackTrace(); } - for (int i = 0; i < size; ++i) { + int bytes = 0; + + for (int i = 0; i < numOfCols; ++i) { ColumnInfo col1 = this.colData.get(i); - Assert.check(col.isTypeSet()); - ByteBuffer ib = ByteBuffer.allocate(rows); - + if (!col1.isTypeSet()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind"); + } + + if (rows != col1.data.size()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "the rows in column data not identical"); + } + + ByteBuffer bbuf = null; + + ByteBuffer lengthBuf = ByteBuffer.allocate(rows * Integer.BYTES); + lengthBuf.order(ByteOrder.LITTLE_ENDIAN); + switch (col1.type) { case TSDBConstants.TSDB_DATA_TYPE_INT: { + bbuf = ByteBuffer.allocate(rows * Integer.BYTES); + bbuf.order(ByteOrder.LITTLE_ENDIAN); + for (int j = 0; j < rows; ++j) { - Integer val = (Integer) col.data.get(j); + Integer val = (Integer) col1.data.get(j); if (val == null) { - ib.putInt(Integer.MIN_VALUE); + bbuf.putInt(j * Integer.BYTES, Integer.MIN_VALUE); } else { - ib.putInt((int) col.data.get(j)); + bbuf.putInt(j * Integer.BYTES, val); } + + lengthBuf.putInt(j * Integer.BYTES, Integer.BYTES); } + bytes = Integer.BYTES; break; } case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: { + bbuf = ByteBuffer.allocate(rows * Long.BYTES); + bbuf.order(ByteOrder.LITTLE_ENDIAN); + for (int j = 0; j < rows; ++j) { - ib.putLong((long) col.data.get(j)); + Long val = (Long) col1.data.get(j); + if (val == null) { + bbuf.putLong(j * Long.BYTES, Long.MIN_VALUE); + 
} else { + bbuf.putLong(j * Long.BYTES, val); + } + lengthBuf.putInt(j * Integer.BYTES, Long.BYTES); } + + bytes = Long.BYTES; break; } }; - conn.bindColumnDataArray(this.nativeStmtPtr, ib.array(), col1.type, rows, i); + connector.bindColumnDataArray(this.nativeStmtHandle, bbuf, lengthBuf, col1.type, bytes, rows, i); } - conn.executeBatch(this.nativeStmtPtr); + connector.executeBatch(this.nativeStmtHandle); } - public void columnDataClearBatchClear() { + public void columnDataClearBatch() { // TODO clear data in this.colData } - public void close() { - TSDBJNIConnector conn = null; + public void columnDataCloseBatch() { + TSDBJNIConnector connector = null; try { - conn = (TSDBJNIConnector) this.getConnection(); - this.nativeStmtPtr = conn.prepareStmt(rawSql); - conn.setBindTableName(this.nativeStmtPtr, this.tableName); + connector = ((TSDBConnection) this.getConnection()).getConnector(); + connector.closeBatch(this.nativeStmtHandle); + this.nativeStmtHandle = 0L; + this.tableName = null; } catch (SQLException e) { e.printStackTrace(); } - - conn.closeBatch(this.nativeStmtPtr); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java index fb20a621b0..d8ba67576d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java @@ -19,8 +19,6 @@ import java.sql.ResultSet; import java.sql.SQLException; public class TSDBStatement extends AbstractStatement { - - private TSDBJNIConnector connector; /** * Status of current statement */ @@ -29,29 +27,26 @@ public class TSDBStatement extends AbstractStatement { private TSDBConnection connection; private TSDBResultSet resultSet; - public void setConnection(TSDBConnection connection) { + TSDBStatement(TSDBConnection connection) { this.connection = connection; } - TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) { - this.connection = connection; - this.connector = connector; - } - public ResultSet executeQuery(String sql) throws SQLException { // check if closed - if (isClosed()) + if (isClosed()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + } + //TODO: 如果在executeQuery方法中执行insert语句,那么先执行了SQL,再通过pSql来检查是否为一个insert语句,但这个insert SQL已经执行成功了 // execute query - long pSql = this.connector.executeQuery(sql); + long pSql = this.connection.getConnector().executeQuery(sql); // if pSql is create/insert/update/delete/alter SQL - if (this.connector.isUpdateQuery(pSql)) { - this.connector.freeResultSet(pSql); + if (this.connection.getConnector().isUpdateQuery(pSql)) { + this.connection.getConnector().freeResultSet(pSql); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY); } - TSDBResultSet res = new TSDBResultSet(this, this.connector, pSql); + TSDBResultSet res = new TSDBResultSet(this, this.connection.getConnector(), pSql); res.setBatchFetch(this.connection.getBatchFetch()); return res; } @@ -60,14 +55,14 @@ public class TSDBStatement extends AbstractStatement { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - long pSql = this.connector.executeQuery(sql); + long pSql = this.connection.getConnector().executeQuery(sql); // if pSql is create/insert/update/delete/alter SQL - if (!this.connector.isUpdateQuery(pSql)) { - this.connector.freeResultSet(pSql); + if (!this.connection.getConnector().isUpdateQuery(pSql)) { + 
this.connection.getConnector().freeResultSet(pSql); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE); } - int affectedRows = this.connector.getAffectedRows(pSql); - this.connector.freeResultSet(pSql); + int affectedRows = this.connection.getConnector().getAffectedRows(pSql); + this.connection.getConnector().freeResultSet(pSql); return affectedRows; } @@ -81,30 +76,29 @@ public class TSDBStatement extends AbstractStatement { public boolean execute(String sql) throws SQLException { // check if closed - if (isClosed()) + if (isClosed()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + } + // execute query - long pSql = this.connector.executeQuery(sql); + long pSql = this.connection.getConnector().executeQuery(sql); // if pSql is create/insert/update/delete/alter SQL - if (this.connector.isUpdateQuery(pSql)) { - this.affectedRows = this.connector.getAffectedRows(pSql); - this.connector.freeResultSet(pSql); + if (this.connection.getConnector().isUpdateQuery(pSql)) { + this.affectedRows = this.connection.getConnector().getAffectedRows(pSql); + this.connection.getConnector().freeResultSet(pSql); return false; } - this.resultSet = new TSDBResultSet(this, this.connector, pSql); + this.resultSet = new TSDBResultSet(this, this.connection.getConnector(), pSql); this.resultSet.setBatchFetch(this.connection.getBatchFetch()); return true; } public ResultSet getResultSet() throws SQLException { - if (isClosed()) + if (isClosed()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); -// long resultSetPointer = connector.getResultSet(); -// TSDBResultSet resSet = null; -// if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) { -// resSet = new TSDBResultSet(connector, resultSetPointer); -// } + } + return this.resultSet; } @@ -115,12 +109,20 @@ public class TSDBStatement extends AbstractStatement { } public Connection getConnection() throws SQLException { - if (isClosed()) + if (isClosed()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - if (this.connector == null) + } + + if (this.connection.getConnector() == null) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); + } + return this.connection; } + + public void setConnection(TSDBConnection connection) { + this.connection = connection; + } public boolean isClosed() throws SQLException { return isClosed; diff --git a/src/inc/taos.h b/src/inc/taos.h index 788502b45a..bb3cfa2744 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -102,11 +102,10 @@ typedef struct TAOS_BIND { typedef struct TAOS_MULTI_BIND { int buffer_type; - void * buffer; - uintptr_t buffer_length; // unused - uintptr_t *length; - int * is_null; - + void *buffer; + uintptr_t buffer_length; + uintptr_t *length; + int *is_null; int num; } TAOS_MULTI_BIND; From 6aba3f5eb1b6cd49a77ec84d204bee3be6f16785 Mon Sep 17 00:00:00 2001 From: root Date: Sun, 9 May 2021 09:00:35 +0000 Subject: [PATCH 102/140] fix bug --- .../taosdata/jdbc/TSDBPreparedStatement.java | 1 - .../com/taosdata/jdbc/utils/NullType.java | 55 +++++++++++++++++++ 2 files changed, 55 insertions(+), 1 deletion(-) create mode 100755 src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index cc781d1613..e8a75cb10b 100644 --- 
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -14,7 +14,6 @@ *****************************************************************************/ package com.taosdata.jdbc; -import com.sun.tools.javac.util.Assert; import com.taosdata.jdbc.utils.Utils; import java.io.InputStream; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java new file mode 100755 index 0000000000..3aceee79c3 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java @@ -0,0 +1,55 @@ +package com.taosdata.jdbc.utils; + +public class NullType { + private static final byte NULL_BOOL_VAL = 0x2; + private static final String NULL_STR = "null"; + + public String toString() { + return NullType.NULL_STR; + } + + public static boolean isBooleanNull(byte val) { + return val == NullType.NULL_BOOL_VAL; + } + + public static boolean isTinyIntNull(byte val) { + return val == Byte.MIN_VALUE; + } + + public static boolean isSmallIntNull(short val) { + return val == Short.MIN_VALUE; + } + + public static boolean isIntNull(int val) { + return val == Integer.MIN_VALUE; + } + + public static boolean isBigIntNull(long val) { + return val == Long.MIN_VALUE; + } + + public static boolean isFloatNull(float val) { + return Float.isNaN(val); + } + + public static boolean isDoubleNull(double val) { + return Double.isNaN(val); + } + + public static boolean isBinaryNull(byte[] val, int length) { + if (length != Byte.BYTES) { + return false; + } + + return val[0] == 0xFF; + } + + public static boolean isNcharNull(byte[] val, int length) { + if (length != Integer.BYTES) { + return false; + } + + return (val[0] & val[1] & val[2] & val[3]) == 0xFF; + } + +} From 0b91c2cfc6c5741338db5a2144b2d838d02d564f Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Sun, 9 May 2021 17:23:10 +0800 Subject: [PATCH 103/140] fix bug --- src/client/src/tscPrepare.c | 6 +-- tests/script/api/batchprepare.c | 71 +++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index d33bdbb588..77ab9e84c3 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1374,9 +1374,6 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { int taos_stmt_close(TAOS_STMT* stmt) { STscStmt* pStmt = (STscStmt*)stmt; if (!pStmt->isInsert) { - taosHashCleanup(pStmt->mtb.pTableHash); - taosHashCleanup(pStmt->mtb.pTableBlockHashList); - SNormalStmt* normal = &pStmt->normal; if (normal->params != NULL) { for (uint16_t i = 0; i < normal->numParams; i++) { @@ -1386,6 +1383,9 @@ int taos_stmt_close(TAOS_STMT* stmt) { } free(normal->parts); free(normal->sql); + } else { + taosHashCleanup(pStmt->mtb.pTableHash); + taosHashCleanup(pStmt->mtb.pTableBlockHashList); } taos_free_result(pStmt->pSql); diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c index 9b204c2f4a..5303b53329 100644 --- a/tests/script/api/batchprepare.c +++ b/tests/script/api/batchprepare.c @@ -548,6 +548,12 @@ int stmt_funcb1(TAOS_STMT *stmt) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + free(v.ts); + free(lb); + free(params); + free(is_null); + free(no_null); + return 0; 
} @@ -700,6 +706,12 @@ int stmt_funcb2(TAOS_STMT *stmt) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + free(v.ts); + free(lb); + free(params); + free(is_null); + free(no_null); + return 0; } @@ -857,6 +869,12 @@ int stmt_funcb3(TAOS_STMT *stmt) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + free(v.ts); + free(lb); + free(params); + free(is_null); + free(no_null); + return 0; } @@ -1010,6 +1028,12 @@ int stmt_funcb4(TAOS_STMT *stmt) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + free(v.ts); + free(lb); + free(params); + free(is_null); + free(no_null); + return 0; } @@ -1157,6 +1181,12 @@ int stmt_funcb5(TAOS_STMT *stmt) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + free(v.ts); + free(lb); + free(params); + free(is_null); + free(no_null); + return 0; } @@ -1236,6 +1266,11 @@ int stmt_funcb_ssz1(TAOS_STMT *stmt) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + free(v.ts); + free(lb); + free(params); + free(no_null); + return 0; } @@ -1388,6 +1423,12 @@ int stmt_funcb_s1(TAOS_STMT *stmt) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + free(v.ts); + free(lb); + free(params); + free(is_null); + free(no_null); + return 0; } @@ -1544,6 +1585,12 @@ int stmt_funcb_sc1(TAOS_STMT *stmt) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + free(v.ts); + free(lb); + free(params); + free(is_null); + free(no_null); + return 0; } @@ -1698,6 +1745,12 @@ int stmt_funcb_sc2(TAOS_STMT *stmt) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + free(v.ts); + free(lb); + free(params); + free(is_null); + free(no_null); + return 0; } @@ -1849,6 +1902,12 @@ int stmt_funcb_sc3(TAOS_STMT *stmt) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, used %u seconds, avg:%u useconds\n", 3000*300*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*300*60)); + free(v.ts); + free(lb); + free(params); + free(is_null); + free(no_null); + return 0; } @@ -1934,6 +1993,10 @@ int sql_perf1(TAOS *taos) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, used %u seconds, avg:%.1f useconds\n", 3000*120*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*120*60)); + for (int i = 0; i < 3000; i++) { + free(sql[i]); + } + return 0; } @@ -1983,6 +2046,10 @@ int sql_perf_s1(TAOS *taos) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, 
used %u seconds, avg:%.1f useconds\n", 3000*120*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*120*60)); + for (int i = 0; i < 360000; i++) { + free(sql[i]); + } + return 0; } @@ -2024,6 +2091,10 @@ int sql_s_perf1(TAOS *taos) { unsigned long long endtime = getCurrentTime(); printf("insert total %d records, used %u seconds, avg:%.1f useconds\n", 3000*120*60, (endtime-starttime)/1000000UL, (endtime-starttime)/(3000*120*60)); + for (int i = 0; i < 3000; i++) { + free(sql[i]); + } + return 0; } From 7e2345e371b7e1e2d428001c539d42f8a4f2c185 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Sun, 9 May 2021 17:31:18 +0800 Subject: [PATCH 104/140] fix --- src/client/src/tscPrepare.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 77ab9e84c3..bc333625ef 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -810,7 +810,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid)); if (t1 == NULL) { - tscError("no table data block in hash list, uid:%" PRId64 , pStmt->mtb.currentUid); + tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pStmt->pSql->self, pStmt->mtb.currentUid); return TSDB_CODE_TSC_APP_ERROR; } From fa3f970748bf3794781477cbb4ff12ea021320b7 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 10 May 2021 09:22:10 +0800 Subject: [PATCH 105/140] fix mem leak --- src/client/src/tscPrepare.c | 7 +++++-- tests/script/api/batchprepare.c | 2 ++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index bc333625ef..c8a3b963f0 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1384,8 +1384,11 @@ int taos_stmt_close(TAOS_STMT* stmt) { free(normal->parts); free(normal->sql); } else { - taosHashCleanup(pStmt->mtb.pTableHash); - taosHashCleanup(pStmt->mtb.pTableBlockHashList); + if (pStmt->multiTbInsert) { + taosHashCleanup(pStmt->mtb.pTableHash); + pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, true); + taosHashCleanup(pStmt->pSql->cmd.pTableBlockHashList); + } } taos_free_result(pStmt->pSql); diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c index 5303b53329..f3ea95abfa 100644 --- a/tests/script/api/batchprepare.c +++ b/tests/script/api/batchprepare.c @@ -2050,6 +2050,8 @@ int sql_perf_s1(TAOS *taos) { free(sql[i]); } + free(sql); + return 0; } From 2e08de2b45bdf99c320176c64f29a11452ab1647 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 10 May 2021 09:26:19 +0800 Subject: [PATCH 106/140] fix crash issue --- src/client/src/tscPrepare.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index c8a3b963f0..6307643c95 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1388,6 +1388,7 @@ int taos_stmt_close(TAOS_STMT* stmt) { taosHashCleanup(pStmt->mtb.pTableHash); pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, true); taosHashCleanup(pStmt->pSql->cmd.pTableBlockHashList); + pStmt->pSql->cmd.pTableBlockHashList = NULL; } } From 29307ce31ebcd99e6e40b00b46393db1b70eb697 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 10 May 2021 11:07:29 +0800 Subject: [PATCH 
107/140] [td-4038]: support column data batch bind. --- .../jni/com_taosdata_jdbc_TSDBJNIConnector.h | 4 +- src/client/src/TSDBJNIConnector.c | 33 ++-- src/client/src/tscPrepare.c | 13 +- .../com/taosdata/jdbc/TSDBJNIConnector.java | 6 +- .../taosdata/jdbc/TSDBPreparedStatement.java | 166 +++++++++++++----- .../taosdata/jdbc/TSDBResultSetBlockData.java | 4 +- .../com/taosdata/jdbc/utils/NullType.java | 50 +++++- src/inc/taos.h | 4 +- 8 files changed, 199 insertions(+), 81 deletions(-) diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h index 07fd46f859..04bccc1a4a 100644 --- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h +++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h @@ -204,10 +204,10 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI /* * Class: com_taosdata_jdbc_TSDBJNIConnector * Method: bindColDataImp - * Signature: (J[B[BIIIIJ)J + * Signature: (J[B[B[BIIIIJ)J */ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp -(JNIEnv *, jobject, jlong, jbyteArray, jbyteArray, jint, jint, jint, jint, jlong); +(JNIEnv *, jobject, jlong, jbyteArray, jbyteArray, jbyteArray, jint, jint, jint, jint, jlong); /* * Class: com_taosdata_jdbc_TSDBJNIConnector diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 8ffd021e51..4842bb2ed1 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -753,7 +753,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI } JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(JNIEnv *env, jobject jobj, jlong stmt, - jbyteArray data, jbyteArray length, jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) { + jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList, jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { jniError("jobj:%p, connection already closed", jobj); @@ -767,16 +767,22 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(J } // todo refactor - jsize len = (*env)->GetArrayLength(env, data); - char *colBuf = (char *)calloc(1, sizeof(char) * len); - (*env)->GetByteArrayRegion(env, data, 0, len, (jbyte *)colBuf); + jsize len = (*env)->GetArrayLength(env, colDataList); + char *colBuf = (char *)calloc(1, len); + (*env)->GetByteArrayRegion(env, colDataList, 0, len, (jbyte *)colBuf); if ((*env)->ExceptionCheck(env)) { // todo handle error } - len = (*env)->GetArrayLength(env, length); - char *lengthArray = (char*) calloc(1, sizeof(char) * len); - (*env)->GetByteArrayRegion(env, length, 0, len, (jbyte*) lengthArray); + len = (*env)->GetArrayLength(env, lengthList); + char *lengthArray = (char*) calloc(1, len); + (*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte*) lengthArray); + if ((*env)->ExceptionCheck(env)) { + } + + len = (*env)->GetArrayLength(env, nullList); + char *nullArray = (char*) calloc(1, len); + (*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte*) nullArray); if ((*env)->ExceptionCheck(env)) { } @@ -785,10 +791,10 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(J b->num = numOfRows; b->buffer_type = dataType; // todo check data type - b->buffer_length = tDataTypes[dataType].bytes; - b->is_null = calloc(numOfRows, sizeof(int32_t)); + b->buffer_length = IS_VAR_DATA_TYPE(dataType)? 
dataBytes:tDataTypes[dataType].bytes; + b->is_null = nullArray; b->buffer = colBuf; - b->length = (uintptr_t*)lengthArray; + b->length = (int32_t*)lengthArray; // set the length and is_null array switch(dataType) { @@ -800,8 +806,13 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(J int32_t bytes = tDataTypes[dataType].bytes; for(int32_t i = 0; i < numOfRows; ++i) { b->length[i] = bytes; - b->is_null[i] = isNull(colBuf + bytes * i, dataType); } + break; + } + + case TSDB_DATA_TYPE_NCHAR: + case TSDB_DATA_TYPE_BINARY: { + // do nothing } } diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 6307643c95..90023f9ded 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -148,7 +148,7 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { break; default: - tscDebug("0x%"PRIx64" param %d: type mismatch or invalid", stmt->pSql->self, i); + tscDebug("0x%"PRIx64" bind column%d: type mismatch or invalid", stmt->pSql->self, i); return TSDB_CODE_TSC_INVALID_VALUE; } } @@ -776,7 +776,7 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU } } else if (param->type == TSDB_DATA_TYPE_BINARY) { if (bind->length[i] > (uintptr_t)param->bytes) { - tscError("invalid binary length"); + tscError("binary length too long, ignore it, expect:%d, actual:%d", param->bytes, (int32_t)bind->length[i]); return TSDB_CODE_TSC_INVALID_VALUE; } int16_t bsize = (short)bind->length[i]; @@ -784,9 +784,10 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU } else if (param->type == TSDB_DATA_TYPE_NCHAR) { int32_t output = 0; if (!taosMbsToUcs4(bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { - tscError("convert failed"); + tscError("convert nchar string to UCS4_LE failed:%s", (char*)(bind->buffer + bind->buffer_length * i)); return TSDB_CODE_TSC_INVALID_VALUE; } + varDataSetLen(data + param->offset, output); } } @@ -850,7 +851,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { int code = doBindParam(pBlock, data, param, &bind[param->idx], 1); if (code != TSDB_CODE_SUCCESS) { - tscDebug("0x%"PRIx64" param %d: type mismatch or invalid", pStmt->pSql->self, param->idx); + tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx); return code; } } @@ -920,7 +921,7 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize); if (code != TSDB_CODE_SUCCESS) { - tscError("0x%"PRIx64" param %d: type mismatch or invalid", pStmt->pSql->self, param->idx); + tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx); return code; } } @@ -931,7 +932,7 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c int code = doBindBatchParam(pBlock, param, bind, pCmd->batchSize); if (code != TSDB_CODE_SUCCESS) { - tscError("0x%"PRIx64" param %d: type mismatch or invalid", pStmt->pSql->self, param->idx); + tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx); return code; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 62cd441b3a..016a898e3f 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -306,11 +306,11 @@ public class TSDBJNIConnector { private native int setBindTableNameImp(long stmt, String name, long conn); - public int bindColumnDataArray(long stmt, ByteBuffer colList, ByteBuffer lengthList, int type, int bytes, int numOfRows,int columnIndex) { - return bindColDataImp(stmt, colList.array(), lengthList.array(), type, bytes, numOfRows, columnIndex, this.taos); + public int bindColumnDataArray(long stmt, ByteBuffer colDataList, ByteBuffer lengthList, ByteBuffer isNullList, int type, int bytes, int numOfRows,int columnIndex) { + return bindColDataImp(stmt, colDataList.array(), lengthList.array(), isNullList.array(), type, bytes, numOfRows, columnIndex, this.taos); } - private native int bindColDataImp(long stmt, byte[] data, byte[] length, int type, int bytes, int numOfRows, int columnIndex, long conn); + private native int bindColDataImp(long stmt, byte[] colDataList, byte[] lengthList, byte[] isNullList, int type, int bytes, int numOfRows, int columnIndex, long conn); public int executeBatch(long stmt) { return executeBatchImp(stmt, this.taos); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index e8a75cb10b..357f225dd4 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -18,6 +18,7 @@ import com.taosdata.jdbc.utils.Utils; import java.io.InputStream; import java.io.Reader; +import java.io.UnsupportedEncodingException; import java.math.BigDecimal; import java.net.URL; import java.nio.ByteBuffer; @@ -40,8 +41,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat private boolean isPrepared; private ArrayList colData; - private int type; - private String tableName; private long nativeStmtHandle = 0; @@ -540,6 +539,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @SuppressWarnings("rawtypes") private ArrayList data; private int type; + private int bytes; private boolean typeIsSet; public ColumnInfo() { @@ -564,60 +564,61 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat this.tableName = name; } - public void setValueImpl(int columnIndex, ArrayList list, int type) throws SQLException { + public void setValueImpl(int columnIndex, ArrayList list, int type, int bytes) throws SQLException { ColumnInfo col = (ColumnInfo) this.colData.get(columnIndex); if (col == null) { ColumnInfo p = new ColumnInfo(); p.setType(type); + p.bytes = bytes; p.data = (ArrayList) list.clone(); this.colData.set(columnIndex, p); } else { if (col.type != type) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type mismatch"); } - col.data.addAll(list); } } public void setInt(int columnIndex, ArrayList list) throws SQLException { - setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_INT); + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_INT, Integer.BYTES); } public void setFloat(int columnIndex, ArrayList list) throws SQLException { - setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_FLOAT); + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_FLOAT, Float.BYTES); } public void setTimestamp(int columnIndex, ArrayList list) throws SQLException { - setValueImpl(columnIndex, list, 
TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP); + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP, Long.BYTES); } public void setLong(int columnIndex, ArrayList list) throws SQLException { - setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BIGINT); + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BIGINT, Long.BYTES); } public void setDouble(int columnIndex, ArrayList list) throws SQLException { - setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_DOUBLE); + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_DOUBLE, Double.BYTES); } public void setBoolean(int columnIndex, ArrayList list) throws SQLException { - setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BOOL); + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BOOL, Byte.BYTES); } public void setByte(int columnIndex, ArrayList list) throws SQLException { - setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TINYINT); + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TINYINT, Byte.BYTES); } public void setShort(int columnIndex, ArrayList list) throws SQLException { - setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_SMALLINT); + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_SMALLINT, Short.BYTES); } - public void setString(int columnIndex, ArrayList list) throws SQLException { - setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BINARY); + public void setString(int columnIndex, ArrayList list, int size) throws SQLException { + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BINARY, size); } - public void setNString(int columnIndex, ArrayList list) throws SQLException { - setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_NCHAR); + // note: expand the required space for each NChar character + public void setNString(int columnIndex, ArrayList list, int size) throws SQLException { + setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_NCHAR, size * Integer.BYTES); } public void columnDataAddBatch() { @@ -643,11 +644,9 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat e.printStackTrace(); } - int bytes = 0; - for (int i = 0; i < numOfCols; ++i) { ColumnInfo col1 = this.colData.get(i); - if (!col1.isTypeSet()) { + if (col1 == null || !col1.isTypeSet()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind"); } @@ -655,51 +654,122 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "the rows in column data not identical"); } - ByteBuffer bbuf = null; + ByteBuffer colDataList = ByteBuffer.allocate(rows * col1.bytes); + colDataList.order(ByteOrder.LITTLE_ENDIAN); - ByteBuffer lengthBuf = ByteBuffer.allocate(rows * Integer.BYTES); - lengthBuf.order(ByteOrder.LITTLE_ENDIAN); + ByteBuffer lengthList = ByteBuffer.allocate(rows * Integer.BYTES); + lengthList.order(ByteOrder.LITTLE_ENDIAN); + + ByteBuffer isNullList = ByteBuffer.allocate(rows * Byte.BYTES); + isNullList.order(ByteOrder.LITTLE_ENDIAN); switch (col1.type) { case TSDBConstants.TSDB_DATA_TYPE_INT: { - bbuf = ByteBuffer.allocate(rows * Integer.BYTES); - bbuf.order(ByteOrder.LITTLE_ENDIAN); - for (int j = 0; j < rows; ++j) { Integer val = (Integer) col1.data.get(j); - if (val == null) { - bbuf.putInt(j * Integer.BYTES, Integer.MIN_VALUE); - } else { - bbuf.putInt(j * Integer.BYTES, val); - } - - lengthBuf.putInt(j * Integer.BYTES, Integer.BYTES); + 
colDataList.putInt(val == null? Integer.MIN_VALUE:val); + isNullList.put((byte) (val == null? 1:0)); + } + break; + } + + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: { + for (int j = 0; j < rows; ++j) { + Byte val = (Byte) col1.data.get(j); + colDataList.put(val == null? 0:val); + isNullList.put((byte) (val == null? 1:0)); + } + break; + } + + case TSDBConstants.TSDB_DATA_TYPE_BOOL: { + for (int j = 0; j < rows; ++j) { + Byte val = (Byte) col1.data.get(j); + colDataList.put(val == null? 0:val); + isNullList.put((byte) (val == null? 1:0)); + } + break; + } + + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: { + for (int j = 0; j < rows; ++j) { + Short val = (Short) col1.data.get(j); + colDataList.putShort(val == null? 0:val); + isNullList.put((byte) (val == null? 1:0)); + } + break; + } + + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: { + for (int j = 0; j < rows; ++j) { + Long val = (Long) col1.data.get(j); + colDataList.putLong(val == null? 0:val); + isNullList.put((byte) (val == null? 1:0)); + } + break; + } + + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: { + for (int j = 0; j < rows; ++j) { + Float val = (Float) col1.data.get(j); + colDataList.putFloat(val == null? 0:val); + isNullList.put((byte) (val == null? 1:0)); } - - bytes = Integer.BYTES; break; } - case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: { - bbuf = ByteBuffer.allocate(rows * Long.BYTES); - bbuf.order(ByteOrder.LITTLE_ENDIAN); - + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: { for (int j = 0; j < rows; ++j) { - Long val = (Long) col1.data.get(j); - if (val == null) { - bbuf.putLong(j * Long.BYTES, Long.MIN_VALUE); - } else { - bbuf.putLong(j * Long.BYTES, val); - } - lengthBuf.putInt(j * Integer.BYTES, Long.BYTES); + Double val = (Double) col1.data.get(j); + colDataList.putDouble(val == null? 0:val); + isNullList.put((byte) (val == null? 
1:0)); } - - bytes = Long.BYTES; break; } + + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: + case TSDBConstants.TSDB_DATA_TYPE_BINARY: { + String charset = TaosGlobalConfig.getCharset(); + for (int j = 0; j < rows; ++j) { + String val = (String) col1.data.get(j); + if (val != null && val.length() > col1.bytes) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "string data too long"); + } + + colDataList.position(j * col1.bytes); // seek to the correct position + if (val != null) { + byte[] b = null; + try { + if (col1.type == TSDBConstants.TSDB_DATA_TYPE_BINARY) { + b = val.getBytes(); + } else { + b = val.getBytes(charset); + } + } catch (UnsupportedEncodingException e) { + e.printStackTrace(); + } + + colDataList.put(b); + lengthList.putInt(b.length); + isNullList.put((byte) 0); + } else { + lengthList.putInt(0); + isNullList.put((byte) 1); + } + } + break; + } + + case TSDBConstants.TSDB_DATA_TYPE_UTINYINT: + case TSDBConstants.TSDB_DATA_TYPE_USMALLINT: + case TSDBConstants.TSDB_DATA_TYPE_UINT: + case TSDBConstants.TSDB_DATA_TYPE_UBIGINT: { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "not support data types"); + } }; - connector.bindColumnDataArray(this.nativeStmtHandle, bbuf, lengthBuf, col1.type, bytes, rows, i); + connector.bindColumnDataArray(this.nativeStmtHandle, colDataList, lengthList, isNullList, col1.type, col1.bytes, rows, i); } connector.executeBatch(this.nativeStmtHandle); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java index 814fd6c18d..7b3be5d263 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java @@ -438,8 +438,8 @@ public class TSDBResultSetBlockData { } try { - String ss = TaosGlobalConfig.getCharset(); - return new String(dest, ss); + String charset = TaosGlobalConfig.getCharset(); + return new String(dest, charset); } catch (UnsupportedEncodingException e) { e.printStackTrace(); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java index 3aceee79c3..0e05aeeee7 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/NullType.java @@ -11,11 +11,11 @@ public class NullType { public static boolean isBooleanNull(byte val) { return val == NullType.NULL_BOOL_VAL; } - + public static boolean isTinyIntNull(byte val) { return val == Byte.MIN_VALUE; } - + public static boolean isSmallIntNull(short val) { return val == Short.MIN_VALUE; } @@ -23,19 +23,19 @@ public class NullType { public static boolean isIntNull(int val) { return val == Integer.MIN_VALUE; } - + public static boolean isBigIntNull(long val) { return val == Long.MIN_VALUE; } - + public static boolean isFloatNull(float val) { return Float.isNaN(val); } - + public static boolean isDoubleNull(double val) { return Double.isNaN(val); } - + public static boolean isBinaryNull(byte[] val, int length) { if (length != Byte.BYTES) { return false; @@ -43,7 +43,7 @@ public class NullType { return val[0] == 0xFF; } - + public static boolean isNcharNull(byte[] val, int length) { if (length != Integer.BYTES) { return false; @@ -51,5 +51,41 @@ public class NullType { return (val[0] & val[1] & val[2] & val[3]) == 0xFF; } + + public static byte 
getBooleanNull() { + return NullType.NULL_BOOL_VAL; + } + + public static byte getTinyintNull() { + return Byte.MIN_VALUE; + } + + public static int getIntNull() { + return Integer.MIN_VALUE; + } + + public static short getSmallIntNull() { + return Short.MIN_VALUE; + } + + public static long getBigIntNull() { + return Long.MIN_VALUE; + } + + public static int getFloatNull() { + return 0x7FF00000; + } + + public static long getDoubleNull() { + return 0x7FFFFF0000000000L; + } + + public static byte getBinaryNull() { + return (byte) 0xFF; + } + + public static byte[] getNcharNull() { + return new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF}; + } } diff --git a/src/inc/taos.h b/src/inc/taos.h index bb3cfa2744..6dd695b320 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -104,8 +104,8 @@ typedef struct TAOS_MULTI_BIND { int buffer_type; void *buffer; uintptr_t buffer_length; - uintptr_t *length; - int *is_null; + int32_t *length; + char *is_null; int num; } TAOS_MULTI_BIND; From 8223aa416f3af774cf1f93a6bc231680d1fc5178 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 10 May 2021 11:15:52 +0800 Subject: [PATCH 108/140] [td-4038]: support column data batch bind. --- src/client/inc/tsclient.h | 3 +-- src/client/src/tscPrepare.c | 52 +++++++++---------------------------- 2 files changed, 13 insertions(+), 42 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 73b0172e85..ce44d4ab83 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -154,13 +154,12 @@ typedef struct STagCond { typedef struct SParamInfo { int32_t idx; - char type; + uint8_t type; uint8_t timePrec; int16_t bytes; uint32_t offset; } SParamInfo; - typedef struct SBoundColumn { bool hasVal; // denote if current column has bound or not int32_t offset; // all column offset value diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 90023f9ded..23dcff276d 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -717,45 +717,14 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) { - if (bind->buffer_type != param->type) { + if (bind->buffer_type != param->type || !isValidDataType(param->type)) { return TSDB_CODE_TSC_INVALID_VALUE; } - short size = 0; - switch(param->type) { - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT: - size = 1; - break; - - case TSDB_DATA_TYPE_SMALLINT: - size = 2; - break; - - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_FLOAT: - size = 4; - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_DOUBLE: - case TSDB_DATA_TYPE_TIMESTAMP: - size = 8; - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - if (bind->length == NULL) { - tscError("BINARY/NCHAR no length"); - return TSDB_CODE_TSC_INVALID_VALUE; - } - break; - - default: - assert(false); - return TSDB_CODE_TSC_INVALID_VALUE; + if (IS_VAR_DATA_TYPE(param->type) && bind->length == NULL) { + tscError("BINARY/NCHAR no length"); + return TSDB_CODE_TSC_INVALID_VALUE; } - for (int i = 0; i < bind->num; ++i) { char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (rowNum + i); @@ -765,8 +734,8 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU continue; } - if (size > 0) { - memcpy(data + param->offset, bind->buffer + bind->buffer_length * i, size); + if (!IS_VAR_DATA_TYPE(param->type)) { + memcpy(data + param->offset, 
bind->buffer + bind->buffer_length * i, tDataTypes[param->type].bytes); if (param->offset == 0) { if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) { @@ -776,12 +745,17 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU } } else if (param->type == TSDB_DATA_TYPE_BINARY) { if (bind->length[i] > (uintptr_t)param->bytes) { - tscError("binary length too long, ignore it, expect:%d, actual:%d", param->bytes, (int32_t)bind->length[i]); + tscError("binary length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]); return TSDB_CODE_TSC_INVALID_VALUE; } int16_t bsize = (short)bind->length[i]; STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer + bind->buffer_length * i, bsize); } else if (param->type == TSDB_DATA_TYPE_NCHAR) { + if (bind->length[i] > (uintptr_t)param->bytes) { + tscError("nchar string length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]); + return TSDB_CODE_TSC_INVALID_VALUE; + } + int32_t output = 0; if (!taosMbsToUcs4(bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { tscError("convert nchar string to UCS4_LE failed:%s", (char*)(bind->buffer + bind->buffer_length * i)); @@ -795,8 +769,6 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU return TSDB_CODE_SUCCESS; } - - static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { SSqlCmd* pCmd = &stmt->pSql->cmd; STscStmt* pStmt = (STscStmt*)stmt; From ecb735deb75e1554e8bb3eab4614af9802e0a1f5 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 10 May 2021 11:21:57 +0800 Subject: [PATCH 109/140] add api check --- src/client/src/tscPrepare.c | 112 ++++++++++++++++++++++++++++++++---- 1 file changed, 102 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 6307643c95..f4e1aba009 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -53,10 +53,20 @@ typedef struct SMultiTbStmt { SHashObj *pTableBlockHashList; // data block for each table } SMultiTbStmt; +typedef enum { + STMT_INIT = 1, + STMT_PREPARE, + STMT_SETTBNAME, + STMT_BIND, + STMT_BIND_COL, + STMT_ADD_BATCH, + STMT_EXECUTE +} STMT_ST; + typedef struct STscStmt { bool isInsert; bool multiTbInsert; - int64_t prevTs; + int16_t last; STscObj* taos; SSqlObj* pSql; SMultiTbStmt mtb; @@ -1185,6 +1195,7 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) { pSql->maxRetry = TSDB_MAX_REPLICA; pSql->isBind = true; pStmt->pSql = pSql; + pStmt->last = STMT_INIT; return pStmt; } @@ -1197,6 +1208,13 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { return TSDB_CODE_TSC_DISCONNECTED; } + if (pStmt->last != STMT_INIT) { + tscError("prepare status error, last:%d", pStmt->last); + return TSDB_CODE_TSC_APP_ERROR; + } + + pStmt->last = STMT_PREPARE; + SSqlObj* pSql = pStmt->pSql; size_t sqlLen = strlen(sql); @@ -1302,6 +1320,13 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { return TSDB_CODE_TSC_APP_ERROR; } + if (pStmt->last == STMT_INIT && pStmt->last == STMT_BIND && pStmt->last == STMT_BIND_COL) { + tscError("0x%"PRIx64" settbname status error, last:%d", pSql->self, pStmt->last); + return TSDB_CODE_TSC_APP_ERROR; + } + + pStmt->last = STMT_SETTBNAME; + uint64_t* uid = (uint64_t*)taosHashGet(pStmt->mtb.pTableHash, name, strlen(name)); if (uid != NULL) { pStmt->mtb.currentUid = *uid; @@ -1399,11 +1424,25 @@ int 
taos_stmt_close(TAOS_STMT* stmt) { int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) { STscStmt* pStmt = (STscStmt*)stmt; + if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) { + terrno = TSDB_CODE_TSC_DISCONNECTED; + return TSDB_CODE_TSC_DISCONNECTED; + } + if (pStmt->isInsert) { - if (pStmt->multiTbInsert && pStmt->mtb.nameSet == false) { - tscError("no table name set"); - return TSDB_CODE_TSC_APP_ERROR; + if (pStmt->multiTbInsert) { + if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) { + tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last); + return TSDB_CODE_TSC_APP_ERROR; + } + } else { + if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH) { + tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last); + return TSDB_CODE_TSC_APP_ERROR; + } } + + pStmt->last = STMT_BIND; return insertStmtBindParam(pStmt, bind); } else { @@ -1414,6 +1453,12 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) { int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) { STscStmt* pStmt = (STscStmt*)stmt; + + if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) { + terrno = TSDB_CODE_TSC_DISCONNECTED; + return TSDB_CODE_TSC_DISCONNECTED; + } + if (bind == NULL || bind->num <= 0) { tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self); return TSDB_CODE_TSC_APP_ERROR; @@ -1424,16 +1469,30 @@ int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) { return TSDB_CODE_TSC_APP_ERROR; } - if (pStmt->multiTbInsert && !pStmt->mtb.nameSet) { - tscError("0x%"PRIx64" no table name set", pStmt->pSql->self); - return TSDB_CODE_TSC_APP_ERROR; + if (pStmt->multiTbInsert) { + if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) { + tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last); + return TSDB_CODE_TSC_APP_ERROR; + } + } else { + if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH) { + tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last); + return TSDB_CODE_TSC_APP_ERROR; + } } + + pStmt->last = STMT_BIND; return insertStmtBindParamBatch(pStmt, bind, -1); } int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx) { STscStmt* pStmt = (STscStmt*)stmt; + if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) { + terrno = TSDB_CODE_TSC_DISCONNECTED; + return TSDB_CODE_TSC_DISCONNECTED; + } + if (bind == NULL || bind->num <= 0) { tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self); return TSDB_CODE_TSC_APP_ERROR; @@ -1444,11 +1503,19 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in return TSDB_CODE_TSC_APP_ERROR; } - if (pStmt->multiTbInsert && !pStmt->mtb.nameSet) { - tscError("0x%"PRIx64" no table name set", pStmt->pSql->self); - return TSDB_CODE_TSC_APP_ERROR; + if (pStmt->multiTbInsert) { + if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL) { + tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last); + return TSDB_CODE_TSC_APP_ERROR; + } + } else { + if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL) { + tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last); + return TSDB_CODE_TSC_APP_ERROR; + } } + pStmt->last = STMT_BIND_COL; return insertStmtBindParamBatch(pStmt, bind, colIdx); } @@ -1457,9 +1524,22 
@@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in int taos_stmt_add_batch(TAOS_STMT* stmt) { STscStmt* pStmt = (STscStmt*)stmt; + if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) { + terrno = TSDB_CODE_TSC_DISCONNECTED; + return TSDB_CODE_TSC_DISCONNECTED; + } + if (pStmt->isInsert) { + if (pStmt->last != STMT_BIND && pStmt->last != STMT_BIND_COL) { + tscError("0x%"PRIx64" add batch status error, last:%d", pStmt->pSql->self, pStmt->last); + return TSDB_CODE_TSC_APP_ERROR; + } + + pStmt->last = STMT_ADD_BATCH; + return insertStmtAddBatch(pStmt); } + return TSDB_CODE_COM_OPS_NOT_SUPPORT; } @@ -1474,7 +1554,19 @@ int taos_stmt_reset(TAOS_STMT* stmt) { int taos_stmt_execute(TAOS_STMT* stmt) { int ret = 0; STscStmt* pStmt = (STscStmt*)stmt; + if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) { + terrno = TSDB_CODE_TSC_DISCONNECTED; + return TSDB_CODE_TSC_DISCONNECTED; + } + if (pStmt->isInsert) { + if (pStmt->last != STMT_ADD_BATCH) { + tscError("0x%"PRIx64" exec status error, last:%d", pStmt->pSql->self, pStmt->last); + return TSDB_CODE_TSC_APP_ERROR; + } + + pStmt->last = STMT_EXECUTE; + if (pStmt->multiTbInsert) { ret = insertBatchStmtExecute(pStmt); } else { From b3b0782ce2581fe1184337560d0e4de88e2bc3c6 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 10 May 2021 11:43:16 +0800 Subject: [PATCH 110/140] fix issue --- tests/script/api/batchprepare.c | 58 ++++++++++++++++----------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c index f3ea95abfa..8f1337486e 100644 --- a/tests/script/api/batchprepare.c +++ b/tests/script/api/batchprepare.c @@ -417,11 +417,11 @@ int stmt_funcb1(TAOS_STMT *stmt) { v.ts = malloc(sizeof(int64_t) * 900000 * 60); - uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + int *lb = malloc(60 * sizeof(int)); TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 900000*10); - int* is_null = malloc(sizeof(int) * 60); - int* no_null = malloc(sizeof(int) * 60); + char* is_null = malloc(sizeof(char) * 60); + char* no_null = malloc(sizeof(char) * 60); for (int i = 0; i < 60; ++i) { lb[i] = 40; @@ -574,11 +574,11 @@ int stmt_funcb2(TAOS_STMT *stmt) { v.ts = malloc(sizeof(int64_t) * 900000 * 60); - uintptr_t *lb = malloc(18000 * sizeof(uintptr_t)); + int *lb = malloc(18000 * sizeof(int)); TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 3000*10); - int* is_null = malloc(sizeof(int) * 18000); - int* no_null = malloc(sizeof(int) * 18000); + char* is_null = malloc(sizeof(char) * 18000); + char* no_null = malloc(sizeof(char) * 18000); for (int i = 0; i < 18000; ++i) { lb[i] = 40; @@ -732,11 +732,11 @@ int stmt_funcb3(TAOS_STMT *stmt) { v.ts = malloc(sizeof(int64_t) * 900000 * 60); - uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + int *lb = malloc(60 * sizeof(int)); TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 900000*10); - int* is_null = malloc(sizeof(int) * 60); - int* no_null = malloc(sizeof(int) * 60); + char* is_null = malloc(sizeof(char) * 60); + char* no_null = malloc(sizeof(char) * 60); for (int i = 0; i < 60; ++i) { lb[i] = 40; @@ -897,11 +897,11 @@ int stmt_funcb4(TAOS_STMT *stmt) { v.ts = malloc(sizeof(int64_t) * 900000 * 60); - uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + int *lb = malloc(60 * sizeof(int)); TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 900000*10); - int* is_null = malloc(sizeof(int) * 60); - int* no_null = malloc(sizeof(int) * 60); 
+ char* is_null = malloc(sizeof(char) * 60); + char* no_null = malloc(sizeof(char) * 60); for (int i = 0; i < 60; ++i) { lb[i] = 40; @@ -1056,11 +1056,11 @@ int stmt_funcb5(TAOS_STMT *stmt) { v.ts = malloc(sizeof(int64_t) * 900000 * 60); - uintptr_t *lb = malloc(18000 * sizeof(uintptr_t)); + int *lb = malloc(18000 * sizeof(int)); TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 3000*10); - int* is_null = malloc(sizeof(int) * 18000); - int* no_null = malloc(sizeof(int) * 18000); + char* is_null = malloc(sizeof(char) * 18000); + char* no_null = malloc(sizeof(char) * 18000); for (int i = 0; i < 18000; ++i) { lb[i] = 40; @@ -1200,10 +1200,10 @@ int stmt_funcb_ssz1(TAOS_STMT *stmt) { v.ts = malloc(sizeof(int64_t) * 30000 * 3000); - uintptr_t *lb = malloc(30000 * sizeof(uintptr_t)); + int *lb = malloc(30000 * sizeof(int)); TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 3000*10); - int* no_null = malloc(sizeof(int) * 200000); + char* no_null = malloc(sizeof(int) * 200000); for (int i = 0; i < 30000; ++i) { lb[i] = 40; @@ -1291,11 +1291,11 @@ int stmt_funcb_s1(TAOS_STMT *stmt) { v.ts = malloc(sizeof(int64_t) * 900000 * 60); - uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + int *lb = malloc(60 * sizeof(int)); TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 900000*10); - int* is_null = malloc(sizeof(int) * 60); - int* no_null = malloc(sizeof(int) * 60); + char* is_null = malloc(sizeof(char) * 60); + char* no_null = malloc(sizeof(char) * 60); for (int i = 0; i < 60; ++i) { lb[i] = 40; @@ -1453,11 +1453,11 @@ int stmt_funcb_sc1(TAOS_STMT *stmt) { v.ts = malloc(sizeof(int64_t) * 900000 * 60); - uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + int *lb = malloc(60 * sizeof(int)); TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 900000*10); - int* is_null = malloc(sizeof(int) * 60); - int* no_null = malloc(sizeof(int) * 60); + char* is_null = malloc(sizeof(char) * 60); + char* no_null = malloc(sizeof(char) * 60); for (int i = 0; i < 60; ++i) { lb[i] = 40; @@ -1611,11 +1611,11 @@ int stmt_funcb_sc2(TAOS_STMT *stmt) { v.ts = malloc(sizeof(int64_t) * 900000 * 60); - uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + int *lb = malloc(60 * sizeof(int)); TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 900000*10); - int* is_null = malloc(sizeof(int) * 60); - int* no_null = malloc(sizeof(int) * 60); + char* is_null = malloc(sizeof(char) * 60); + char* no_null = malloc(sizeof(char) * 60); for (int i = 0; i < 60; ++i) { lb[i] = 40; @@ -1771,11 +1771,11 @@ int stmt_funcb_sc3(TAOS_STMT *stmt) { v.ts = malloc(sizeof(int64_t) * 60); - uintptr_t *lb = malloc(60 * sizeof(uintptr_t)); + int *lb = malloc(60 * sizeof(int)); TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * 60*10); - int* is_null = malloc(sizeof(int) * 60); - int* no_null = malloc(sizeof(int) * 60); + char* is_null = malloc(sizeof(char) * 60); + char* no_null = malloc(sizeof(char) * 60); for (int i = 0; i < 60; ++i) { lb[i] = 40; From 7f3d0dd5ad41170625ddf969234706c21dcd28e5 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 10 May 2021 13:19:53 +0800 Subject: [PATCH 111/140] fix bug --- src/client/src/tscPrepare.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index e19d692094..10bf3f43e3 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1670,3 +1670,4 @@ const char *taos_data_type(int type) { } } + \ No newline at end of file From 
f02fd80a318ba4b150f680966d0fb00366618447 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 10 May 2021 13:24:28 +0800 Subject: [PATCH 112/140] fix bug --- src/client/src/tscPrepare.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 10bf3f43e3..ca887427e0 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1409,7 +1409,7 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) { return TSDB_CODE_TSC_APP_ERROR; } } else { - if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH) { + if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) { tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last); return TSDB_CODE_TSC_APP_ERROR; } @@ -1448,7 +1448,7 @@ int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) { return TSDB_CODE_TSC_APP_ERROR; } } else { - if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH) { + if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) { tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last); return TSDB_CODE_TSC_APP_ERROR; } @@ -1482,7 +1482,7 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in return TSDB_CODE_TSC_APP_ERROR; } } else { - if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL) { + if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL && pStmt->last != STMT_EXECUTE) { tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last); return TSDB_CODE_TSC_APP_ERROR; } @@ -1670,4 +1670,3 @@ const char *taos_data_type(int type) { } } - \ No newline at end of file From 7570894fbb68b43cfb583ae5c015894441fb3e35 Mon Sep 17 00:00:00 2001 From: wu champion Date: Mon, 10 May 2021 15:30:00 +0800 Subject: [PATCH 113/140] [TD-3960] add case for TD-3960 --- tests/pytest/fulltest.sh | 1 + .../functions/showOfflineThresholdIs864000.py | 36 +++++++++++++++++++ 2 files changed, 37 insertions(+) create mode 100644 tests/pytest/functions/showOfflineThresholdIs864000.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index b5aae6fcef..a748c9dd2d 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -305,6 +305,7 @@ python3 ./test.py -f functions/function_top.py -r 1 python3 ./test.py -f functions/function_twa.py -r 1 python3 ./test.py -f functions/function_twa_test2.py python3 ./test.py -f functions/function_stddev_td2555.py +python3 ./test.py -f functions/showOfflineThresholdIs864000.py python3 ./test.py -f insert/metadataUpdate.py python3 ./test.py -f query/last_cache.py python3 ./test.py -f query/last_row_cache.py diff --git a/tests/pytest/functions/showOfflineThresholdIs864000.py b/tests/pytest/functions/showOfflineThresholdIs864000.py new file mode 100644 index 0000000000..6cce869bf2 --- /dev/null +++ b/tests/pytest/functions/showOfflineThresholdIs864000.py @@ -0,0 +1,36 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.query("show variables") + tdSql.checkData(51, 1, 864000) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 94040c49979b72c2d167f17eb06ed8db7c80c11d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 10 May 2021 15:45:20 +0800 Subject: [PATCH 114/140] [td-225] --- src/inc/ttokendef.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index e9f95660f7..ef3f8ed1fb 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -79,12 +79,12 @@ #define TK_DOT 60 #define TK_CREATE 61 #define TK_TABLE 62 -#define TK_DATABASE 63 -#define TK_TABLES 64 -#define TK_STABLES 65 -#define TK_VGROUPS 66 -#define TK_DROP 67 -#define TK_STABLE 68 +#define TK_STABLE 63 +#define TK_DATABASE 64 +#define TK_TABLES 65 +#define TK_STABLES 66 +#define TK_VGROUPS 67 +#define TK_DROP 68 #define TK_TOPIC 69 #define TK_DNODE 70 #define TK_USER 71 From 79b422390f9174ea23c56584e0dbefc2c79345ee Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 10 May 2021 16:17:46 +0800 Subject: [PATCH 115/140] fix bug --- src/client/src/tscServer.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 2035d6261f..33325c7fd7 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2542,6 +2542,7 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } pTableMetaInfo->pTableMeta = (STableMeta *)tmp; + memset(pTableMetaInfo->pTableMeta, 0, size); pTableMetaInfo->tableMetaSize = size; } else { //uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta); From f2fa15156543728d68e3502970bcf4e2ee55381a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 10 May 2021 17:01:46 +0800 Subject: [PATCH 116/140] [td-3967]update the test case. 
--- .../taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java index 9014e82a9e..a3c46dc232 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java @@ -12,7 +12,7 @@ public class InsertSpecialCharacterJniTest { private static String tbname1 = "test"; private static String tbname2 = "weather"; private static String special_character_str_1 = "$asd$$fsfsf$"; - private static String special_character_str_2 = "\\asdfsfsf\\\\"; + private static String special_character_str_2 = "\\\\asdfsfsf\\\\"; private static String special_character_str_3 = "\\\\asdfsfsf\\"; private static String special_character_str_4 = "?asd??fsf?sf?"; private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$"; @@ -70,7 +70,7 @@ public class InsertSpecialCharacterJniTest { String f1 = new String(rs.getBytes(2)); //TODO: bug to be fixed // Assert.assertEquals(special_character_str_2, f1); - Assert.assertEquals(special_character_str_2.substring(0, special_character_str_1.length() - 2), f1); + Assert.assertEquals(special_character_str_2.substring(1, special_character_str_1.length() - 1), f1); String f2 = rs.getString(3); Assert.assertNull(f2); } From 420a83437f70391a3a003834e9a67ba423bae67f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 10 May 2021 19:04:43 +0800 Subject: [PATCH 117/140] [td-4038]refactor --- src/client/src/TSDBJNIConnector.c | 7 +++++ .../com/taosdata/jdbc/TSDBJNIConnector.java | 28 +++++++++++++------ .../taosdata/jdbc/TSDBPreparedStatement.java | 7 +++-- 3 files changed, 31 insertions(+), 11 deletions(-) diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 4842bb2ed1..da7da17aa3 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -742,6 +742,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI int32_t code = taos_stmt_set_tbname((void*)stmt, name); if (code != TSDB_CODE_SUCCESS) { + (*env)->ReleaseStringUTFChars(env, jname, name); + jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code)); return JNI_TDENGINE_ERROR; } @@ -817,6 +819,11 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(J } int32_t code = taos_stmt_bind_single_param_batch(pStmt, b, colIndex); + tfree(b->length); + tfree(b->buffer); + tfree(b->is_null); + tfree(b); + if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); return JNI_TDENGINE_ERROR; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 016a898e3f..2111ab2743 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -300,26 +300,38 @@ public class TSDBJNIConnector { private native long prepareStmtImp(byte[] sql, long con); - public int setBindTableName(long stmt, String tableName) { - return setBindTableNameImp(stmt, tableName, this.taos); + public void setBindTableName(long stmt, String tableName) throws 
SQLException { + int code = setBindTableNameImp(stmt, tableName, this.taos); + if (code != TSDBConstants.JNI_SUCCESS) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to set table name"); + } } private native int setBindTableNameImp(long stmt, String name, long conn); - public int bindColumnDataArray(long stmt, ByteBuffer colDataList, ByteBuffer lengthList, ByteBuffer isNullList, int type, int bytes, int numOfRows,int columnIndex) { - return bindColDataImp(stmt, colDataList.array(), lengthList.array(), isNullList.array(), type, bytes, numOfRows, columnIndex, this.taos); + public void bindColumnDataArray(long stmt, ByteBuffer colDataList, ByteBuffer lengthList, ByteBuffer isNullList, int type, int bytes, int numOfRows,int columnIndex) throws SQLException { + int code = bindColDataImp(stmt, colDataList.array(), lengthList.array(), isNullList.array(), type, bytes, numOfRows, columnIndex, this.taos); + if (code != TSDBConstants.JNI_SUCCESS) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to bind column data"); + } } private native int bindColDataImp(long stmt, byte[] colDataList, byte[] lengthList, byte[] isNullList, int type, int bytes, int numOfRows, int columnIndex, long conn); - public int executeBatch(long stmt) { - return executeBatchImp(stmt, this.taos); + public void executeBatch(long stmt) throws SQLException { + int code = executeBatchImp(stmt, this.taos); + if (code != TSDBConstants.JNI_SUCCESS) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to execute batch bind"); + } } private native int executeBatchImp(long stmt, long con); - public int closeBatch(long stmt) { - return closeStmt(stmt, this.taos); + public void closeBatch(long stmt) throws SQLException { + int code = closeStmt(stmt, this.taos); + if (code != TSDBConstants.JNI_SUCCESS) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to close batch bind"); + } } private native int closeStmt(long stmt, long con); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index 357f225dd4..9bdebe36b6 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -733,9 +733,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat String charset = TaosGlobalConfig.getCharset(); for (int j = 0; j < rows; ++j) { String val = (String) col1.data.get(j); - if (val != null && val.length() > col1.bytes) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "string data too long"); - } colDataList.position(j * col1.bytes); // seek to the correct position if (val != null) { @@ -750,6 +747,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat e.printStackTrace(); } + if (val.length() > col1.bytes) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "string data too long"); + } + colDataList.put(b); lengthList.putInt(b.length); isNullList.put((byte) 0); From 4f7a835ad6c92068e429f322a7ef8479cca109fb Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 10 May 2021 19:09:21 +0800 Subject: [PATCH 118/140] fix compile error --- src/client/src/tscSQLParser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 
facd10b8b4..74b5baf15a 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -650,7 +650,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (diffSize) { for (int32_t i = 1; i < pCmd->numOfClause; ++i) { - SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pCmd, i); + SQueryInfo* pQueryInfo2 = tscGetQueryInfo(pCmd, i); tscFieldInfoSetSize(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo); } } From a9c5957165255f58b8fa56a274249de482033b5c Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 11 May 2021 00:08:30 +0800 Subject: [PATCH 119/140] [TD-4095]: taosdump long tablename. (#6076) Co-authored-by: Shuduo Sang --- src/kit/taosdump/taosdump.c | 115 +++++++++++++++++++++--------------- 1 file changed, 69 insertions(+), 46 deletions(-) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index e706ddefd6..f80ac069a0 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -110,14 +110,14 @@ typedef struct { } SColDes; typedef struct { - char name[TSDB_COL_NAME_LEN + 1]; + char name[TSDB_TABLE_NAME_LEN]; SColDes cols[]; } STableDef; extern char version[]; typedef struct { - char name[TSDB_DB_NAME_LEN + 1]; + char name[TSDB_DB_NAME_LEN]; char create_time[32]; int32_t ntables; int32_t vgroups; @@ -142,8 +142,8 @@ typedef struct { } SDbInfo; typedef struct { - char name[TSDB_TABLE_NAME_LEN + 1]; - char metric[TSDB_TABLE_NAME_LEN + 1]; + char name[TSDB_TABLE_NAME_LEN]; + char metric[TSDB_TABLE_NAME_LEN]; } STableRecord; typedef struct { @@ -155,7 +155,7 @@ typedef struct { pthread_t threadID; int32_t threadIndex; int32_t totalThreads; - char dbName[TSDB_TABLE_NAME_LEN + 1]; + char dbName[TSDB_DB_NAME_LEN]; void *taosCon; int64_t rowsOfDumpOut; int64_t tablesOfDumpOut; @@ -214,13 +214,13 @@ static struct argp_option options[] = { {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, // dump unit options {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, - {"databases", 'B', 0, 0, "Dump assigned databases", 2}, + {"databases", 'D', 0, 0, "Dump assigned databases", 2}, // dump format options {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, - {"with-property", 'M', 0, 0, "Dump schema with properties.", 3}, + {"without-property", 'N', 0, 0, "Dump schema without properties.", 3}, {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3}, {"end-time", 'E', "END_TIME", 0, "End time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3}, - {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, + {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. 
Default is 5.", 3}, @@ -341,15 +341,15 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case 'A': arguments->all_databases = true; break; - case 'B': + case 'D': arguments->databases = true; break; // dump format option case 's': arguments->schemaonly = true; break; - case 'M': - arguments->with_property = true; + case 'N': + arguments->with_property = false; break; case 'S': // parse time here. @@ -358,7 +358,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case 'E': arguments->end_time = atol(arg); break; - case 'N': + case 'B': arguments->data_batch = atoi(arg); if (arguments->data_batch >= INT16_MAX) { arguments->data_batch = INT16_MAX - 1; @@ -402,17 +402,17 @@ static resultStatistics g_resultStatistics = {0}; static FILE *g_fpOfResult = NULL; static int g_numOfCores = 1; -int taosDumpOut(struct arguments *arguments); -int taosDumpIn(struct arguments *arguments); -void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); -int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon); -int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName); -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName); -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName); -int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName); -int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName); -int taosCheckParam(struct arguments *arguments); -void taosFreeDbInfos(); +static int taosDumpOut(struct arguments *arguments); +static int taosDumpIn(struct arguments *arguments); +static void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); +static int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon); +static int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName); +static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName); +static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName); +static int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName); +static int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName); +static int taosCheckParam(struct arguments *arguments); +static void taosFreeDbInfos(); static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName); struct arguments g_args = { @@ -436,8 +436,8 @@ struct arguments g_args = { false, false, // dump format option - false, - false, + false, // schemeonly + true, // with_property 0, INT64_MAX, 1, @@ -959,7 +959,8 @@ int taosDumpOut(struct arguments *arguments) { goto _exit_failure; } - strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); + strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes); if (arguments->with_property) { dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); @@ -967,7 +968,8 @@ int taosDumpOut(struct arguments *arguments) { dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); dbInfos[count]->days = 
*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], + fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); //dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); //dbInfos[count]->daysToKeep1; //dbInfos[count]->daysToKeep2; @@ -980,7 +982,8 @@ int taosDumpOut(struct arguments *arguments) { dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); + strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], + fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); //dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); } @@ -1095,7 +1098,9 @@ _exit_failure: return -1; } -int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) { +int taosGetTableDes( + char* dbName, char *table, + STableDef *tableDes, TAOS* taosCon, bool isSuperTable) { TAOS_ROW row = NULL; TAOS_RES* res = NULL; int count = 0; @@ -1113,7 +1118,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo TAOS_FIELD *fields = taos_fetch_fields(res); - tstrncpy(tableDes->name, table, TSDB_COL_NAME_LEN); + tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); while ((row = taos_fetch_row(res)) != NULL) { strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], @@ -1232,7 +1237,9 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo return count; } -int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName) { +int32_t taosDumpTable( + char *table, char *metric, struct arguments *arguments, + FILE *fp, TAOS* taosCon, char* dbName) { int count = 0; STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); @@ -1346,14 +1353,17 @@ void* taosDumpOutWorkThreadFp(void *arg) ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); if (readLen <= 0) break; - int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &g_args, fp, pThread->taosCon, pThread->dbName); + int ret = taosDumpTable( + tableRecord.name, tableRecord.metric, &g_args, + fp, pThread->taosCon, pThread->dbName); if (ret >= 0) { // TODO: sum table count and table rows by self pThread->tablesOfDumpOut++; pThread->rowsOfDumpOut += ret; if (pThread->rowsOfDumpOut >= lastRowsPrint) { - printf(" %"PRId64 " rows already be dumpout from database %s\n", pThread->rowsOfDumpOut, pThread->dbName); + printf(" %"PRId64 " rows already be dumpout from database %s\n", + pThread->rowsOfDumpOut, pThread->dbName); lastRowsPrint += 5000000; } @@ -1364,9 +1374,12 @@ void* taosDumpOutWorkThreadFp(void *arg) memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128); if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex); + sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", + g_args.outpath, pThread->dbName, + pThread->threadIndex, fileNameIndex); } else { - sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex); + sprintf(tmpBuf, "%s.tables.%d-%d.sql", 
+ pThread->dbName, pThread->threadIndex, fileNameIndex); } fileNameIndex++; @@ -1391,14 +1404,15 @@ void* taosDumpOutWorkThreadFp(void *arg) static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName) { pthread_attr_t thattr; - SThreadParaObj *threadObj = (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj)); + SThreadParaObj *threadObj = + (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj)); for (int t = 0; t < numOfThread; ++t) { SThreadParaObj *pThread = threadObj + t; pThread->rowsOfDumpOut = 0; pThread->tablesOfDumpOut = 0; pThread->threadIndex = t; pThread->totalThreads = numOfThread; - tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN); + tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN); pThread->taosCon = taosCon; pthread_attr_init(&thattr); @@ -1487,7 +1501,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); - strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); taosWrite(fd, &tableRecord, sizeof(STableRecord)); } @@ -1557,8 +1572,10 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao int32_t numOfTable = 0; while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); - tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); - tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], + fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); taosWrite(fd, &tableRecord, sizeof(STableRecord)); @@ -1643,15 +1660,18 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha char* pstr = sqlstr; - pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", dbName, tableDes->name); + pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", + dbName, tableDes->name); for (; counter < numOfCols; counter++) { if (tableDes->cols[counter].note[0] != '\0') break; if (counter == 0) { - pstr += sprintf(pstr, " (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + pstr += sprintf(pstr, " (%s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); } else { - pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + pstr += sprintf(pstr, ", %s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); } if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || @@ -1664,9 +1684,11 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha for (; counter < numOfCols; counter++) { if (counter == count_temp) { - pstr += sprintf(pstr, ") TAGS (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + pstr += sprintf(pstr, ") TAGS (%s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); } else { - pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + pstr += sprintf(pstr, ", %s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); } if 
(strcasecmp(tableDes->cols[counter].type, "binary") == 0 || @@ -1693,7 +1715,8 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols char *pstr = NULL; pstr = tmpBuf; - pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", dbName, tableDes->name, dbName, metric); + pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", + dbName, tableDes->name, dbName, metric); for (; counter < numOfCols; counter++) { if (tableDes->cols[counter].note[0] != '\0') break; From b6fa37eaedc6adff0e9b42f8b1f8c3465c4ba40a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 11 May 2021 09:52:27 +0800 Subject: [PATCH 120/140] [TD-4095]: taosdump long tablename. (#6077) Co-authored-by: Shuduo Sang --- src/kit/taosdump/taosdump.c | 115 +++++++++++++++++++++--------------- 1 file changed, 69 insertions(+), 46 deletions(-) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index e706ddefd6..f80ac069a0 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -110,14 +110,14 @@ typedef struct { } SColDes; typedef struct { - char name[TSDB_COL_NAME_LEN + 1]; + char name[TSDB_TABLE_NAME_LEN]; SColDes cols[]; } STableDef; extern char version[]; typedef struct { - char name[TSDB_DB_NAME_LEN + 1]; + char name[TSDB_DB_NAME_LEN]; char create_time[32]; int32_t ntables; int32_t vgroups; @@ -142,8 +142,8 @@ typedef struct { } SDbInfo; typedef struct { - char name[TSDB_TABLE_NAME_LEN + 1]; - char metric[TSDB_TABLE_NAME_LEN + 1]; + char name[TSDB_TABLE_NAME_LEN]; + char metric[TSDB_TABLE_NAME_LEN]; } STableRecord; typedef struct { @@ -155,7 +155,7 @@ typedef struct { pthread_t threadID; int32_t threadIndex; int32_t totalThreads; - char dbName[TSDB_TABLE_NAME_LEN + 1]; + char dbName[TSDB_DB_NAME_LEN]; void *taosCon; int64_t rowsOfDumpOut; int64_t tablesOfDumpOut; @@ -214,13 +214,13 @@ static struct argp_option options[] = { {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, // dump unit options {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, - {"databases", 'B', 0, 0, "Dump assigned databases", 2}, + {"databases", 'D', 0, 0, "Dump assigned databases", 2}, // dump format options {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, - {"with-property", 'M', 0, 0, "Dump schema with properties.", 3}, + {"without-property", 'N', 0, 0, "Dump schema without properties.", 3}, {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3}, {"end-time", 'E', "END_TIME", 0, "End time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3}, - {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, + {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. 
Default is 5.", 3}, @@ -341,15 +341,15 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case 'A': arguments->all_databases = true; break; - case 'B': + case 'D': arguments->databases = true; break; // dump format option case 's': arguments->schemaonly = true; break; - case 'M': - arguments->with_property = true; + case 'N': + arguments->with_property = false; break; case 'S': // parse time here. @@ -358,7 +358,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case 'E': arguments->end_time = atol(arg); break; - case 'N': + case 'B': arguments->data_batch = atoi(arg); if (arguments->data_batch >= INT16_MAX) { arguments->data_batch = INT16_MAX - 1; @@ -402,17 +402,17 @@ static resultStatistics g_resultStatistics = {0}; static FILE *g_fpOfResult = NULL; static int g_numOfCores = 1; -int taosDumpOut(struct arguments *arguments); -int taosDumpIn(struct arguments *arguments); -void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); -int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon); -int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName); -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName); -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName); -int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName); -int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName); -int taosCheckParam(struct arguments *arguments); -void taosFreeDbInfos(); +static int taosDumpOut(struct arguments *arguments); +static int taosDumpIn(struct arguments *arguments); +static void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); +static int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon); +static int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName); +static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName); +static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName); +static int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName); +static int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName); +static int taosCheckParam(struct arguments *arguments); +static void taosFreeDbInfos(); static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName); struct arguments g_args = { @@ -436,8 +436,8 @@ struct arguments g_args = { false, false, // dump format option - false, - false, + false, // schemeonly + true, // with_property 0, INT64_MAX, 1, @@ -959,7 +959,8 @@ int taosDumpOut(struct arguments *arguments) { goto _exit_failure; } - strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); + strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes); if (arguments->with_property) { dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); @@ -967,7 +968,8 @@ int taosDumpOut(struct arguments *arguments) { dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); dbInfos[count]->days = 
*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], + fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); //dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); //dbInfos[count]->daysToKeep1; //dbInfos[count]->daysToKeep2; @@ -980,7 +982,8 @@ int taosDumpOut(struct arguments *arguments) { dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); + strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], + fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); //dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); } @@ -1095,7 +1098,9 @@ _exit_failure: return -1; } -int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) { +int taosGetTableDes( + char* dbName, char *table, + STableDef *tableDes, TAOS* taosCon, bool isSuperTable) { TAOS_ROW row = NULL; TAOS_RES* res = NULL; int count = 0; @@ -1113,7 +1118,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo TAOS_FIELD *fields = taos_fetch_fields(res); - tstrncpy(tableDes->name, table, TSDB_COL_NAME_LEN); + tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); while ((row = taos_fetch_row(res)) != NULL) { strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], @@ -1232,7 +1237,9 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo return count; } -int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName) { +int32_t taosDumpTable( + char *table, char *metric, struct arguments *arguments, + FILE *fp, TAOS* taosCon, char* dbName) { int count = 0; STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); @@ -1346,14 +1353,17 @@ void* taosDumpOutWorkThreadFp(void *arg) ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); if (readLen <= 0) break; - int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &g_args, fp, pThread->taosCon, pThread->dbName); + int ret = taosDumpTable( + tableRecord.name, tableRecord.metric, &g_args, + fp, pThread->taosCon, pThread->dbName); if (ret >= 0) { // TODO: sum table count and table rows by self pThread->tablesOfDumpOut++; pThread->rowsOfDumpOut += ret; if (pThread->rowsOfDumpOut >= lastRowsPrint) { - printf(" %"PRId64 " rows already be dumpout from database %s\n", pThread->rowsOfDumpOut, pThread->dbName); + printf(" %"PRId64 " rows already be dumpout from database %s\n", + pThread->rowsOfDumpOut, pThread->dbName); lastRowsPrint += 5000000; } @@ -1364,9 +1374,12 @@ void* taosDumpOutWorkThreadFp(void *arg) memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128); if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex); + sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", + g_args.outpath, pThread->dbName, + pThread->threadIndex, fileNameIndex); } else { - sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex); + sprintf(tmpBuf, "%s.tables.%d-%d.sql", 
+ pThread->dbName, pThread->threadIndex, fileNameIndex); } fileNameIndex++; @@ -1391,14 +1404,15 @@ void* taosDumpOutWorkThreadFp(void *arg) static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName) { pthread_attr_t thattr; - SThreadParaObj *threadObj = (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj)); + SThreadParaObj *threadObj = + (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj)); for (int t = 0; t < numOfThread; ++t) { SThreadParaObj *pThread = threadObj + t; pThread->rowsOfDumpOut = 0; pThread->tablesOfDumpOut = 0; pThread->threadIndex = t; pThread->totalThreads = numOfThread; - tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN); + tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN); pThread->taosCon = taosCon; pthread_attr_init(&thattr); @@ -1487,7 +1501,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); - strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); taosWrite(fd, &tableRecord, sizeof(STableRecord)); } @@ -1557,8 +1572,10 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao int32_t numOfTable = 0; while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); - tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); - tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], + fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); taosWrite(fd, &tableRecord, sizeof(STableRecord)); @@ -1643,15 +1660,18 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha char* pstr = sqlstr; - pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", dbName, tableDes->name); + pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", + dbName, tableDes->name); for (; counter < numOfCols; counter++) { if (tableDes->cols[counter].note[0] != '\0') break; if (counter == 0) { - pstr += sprintf(pstr, " (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + pstr += sprintf(pstr, " (%s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); } else { - pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + pstr += sprintf(pstr, ", %s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); } if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || @@ -1664,9 +1684,11 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha for (; counter < numOfCols; counter++) { if (counter == count_temp) { - pstr += sprintf(pstr, ") TAGS (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + pstr += sprintf(pstr, ") TAGS (%s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); } else { - pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + pstr += sprintf(pstr, ", %s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); } if 
(strcasecmp(tableDes->cols[counter].type, "binary") == 0 || @@ -1693,7 +1715,8 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols char *pstr = NULL; pstr = tmpBuf; - pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", dbName, tableDes->name, dbName, metric); + pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", + dbName, tableDes->name, dbName, metric); for (; counter < numOfCols; counter++) { if (tableDes->cols[counter].note[0] != '\0') break; From 12228d0d9636608e17ded5257361adb75f139432 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 11 May 2021 09:54:03 +0800 Subject: [PATCH 121/140] [TD-4073]: taosdemo rest query output to result file. (#6072) * [TD-4073]: taosdemo rest query output to result file. * [TD-4073]: taosdemo rest query output to result file. fix insecure strcat(). * [TD-4073]: taosdemo rest query output to result file. fix postSql command mistake * [TD-4073]: taosdemo rest query output to result file. fix append function bug. * [TD-4073]: taosdemo rest query output to result file. prevent potential null file passed. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 154 ++++++++++++++++++++---------------- 1 file changed, 84 insertions(+), 70 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 99befbaf31..935aa358c5 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -76,6 +76,7 @@ enum QUERY_MODE { #define MAX_SQL_SIZE 65536 #define BUFFER_SIZE (65536*2) +#define COND_BUF_LEN BUFFER_SIZE - 30 #define MAX_USERNAME_SIZE 64 #define MAX_PASSWORD_SIZE 64 #define MAX_DB_NAME_SIZE 64 @@ -522,6 +523,8 @@ static int taosRandom() static int createDatabasesAndStables(); static void createChildTables(); static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); +static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port, + char* sqlstr, char *resultFile); /* ************ Global variables ************ */ @@ -1090,27 +1093,33 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { return 0; } -static void getResult(TAOS_RES *res, char* resultFileName) { +static void appendResultBufToFile(char *resultBuf, char *resultFile) +{ + FILE *fp = NULL; + if (resultFile[0] != 0) { + fp = fopen(resultFile, "at"); + if (fp == NULL) { + errorPrint( + "%s() LN%d, failed to open result file: %s, result will not save to file\n", + __func__, __LINE__, resultFile); + return; + } + } + + fprintf(fp, "%s", resultBuf); + tmfclose(fp); +} + +static void appendResultToFile(TAOS_RES *res, char* resultFile) { TAOS_ROW row = NULL; int num_rows = 0; int num_fields = taos_field_count(res); TAOS_FIELD *fields = taos_fetch_fields(res); - FILE *fp = NULL; - if (resultFileName[0] != 0) { - fp = fopen(resultFileName, "at"); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open result file: %s, result will not save to file\n", - __func__, __LINE__, resultFileName); - } - } - char* databuf = (char*) calloc(1, 100*1024*1024); if (databuf == NULL) { errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", __func__, __LINE__); - if (fp) - fclose(fp); return ; } @@ -1120,7 +1129,7 @@ static void getResult(TAOS_RES *res, char* resultFileName) { // fetch the records row by row while((row = taos_fetch_row(res))) { if (totalLen >= 100*1024*1024 - 32000) { - if (fp) fprintf(fp, "%s", databuf); + appendResultBufToFile(databuf, resultFile); totalLen = 0; 
memset(databuf, 0, 100*1024*1024); } @@ -1132,22 +1141,39 @@ static void getResult(TAOS_RES *res, char* resultFileName) { totalLen += len; } - if (fp) fprintf(fp, "%s", databuf); - tmfclose(fp); + appendResultBufToFile(databuf, resultFile); free(databuf); } -static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) { - TAOS_RES *res = taos_query(taos, command); - if (res == NULL || taos_errno(res) != 0) { - errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n", - __func__, __LINE__, command, taos_errstr(res)); - taos_free_result(res); - return; - } +static void selectAndGetResult(threadInfo *pThreadInfo, char *command, char* resultFile) +{ + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { + TAOS_RES *res = taos_query(pThreadInfo->taos, command); + if (res == NULL || taos_errno(res) != 0) { + errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n", + __func__, __LINE__, command, taos_errstr(res)); + taos_free_result(res); + return; + } - getResult(res, resultFileName); - taos_free_result(res); + if ((resultFile) && (strlen(resultFile))) { + appendResultToFile(res, resultFile); + } + taos_free_result(res); + + } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { + int retCode = postProceSql( + g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port, + command, + resultFile); + if (0 != retCode) { + printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); + } + + } else { + errorPrint("%s() LN%d, unknown query mode: %s\n", + __func__, __LINE__, g_queryInfo.queryMode); + } } static int32_t rand_bool(){ @@ -1940,13 +1966,13 @@ static void printfQuerySystemInfo(TAOS * taos) { // show variables res = taos_query(taos, "show variables;"); - //getResult(res, filename); + //appendResultToFile(res, filename); xDumpResultToFile(filename, res); // show dnodes res = taos_query(taos, "show dnodes;"); xDumpResultToFile(filename, res); - //getResult(res, filename); + //appendResultToFile(res, filename); // show databases res = taos_query(taos, "show databases;"); @@ -1981,7 +2007,8 @@ static void printfQuerySystemInfo(TAOS * taos) { free(dbInfos); } -static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port, char* sqlstr) +static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port, + char* sqlstr, char *resultFile) { char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s"; @@ -2117,6 +2144,10 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port response_buf[RESP_BUF_LEN - 1] = '\0'; printf("Response:\n%s\n", response_buf); + if (resultFile) { + appendResultBufToFile(response_buf, resultFile); + } + free(request_buf); #ifdef WINDOWS closesocket(sockfd); @@ -4688,7 +4719,8 @@ static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, uint64_t k) if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) { affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false); } else if (0 == strncasecmp(superTblInfo->insertMode, "rest", strlen("rest"))) { - if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port, buffer)) { + if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port, + buffer, NULL /* not set result file */)) { affectedRows = -1; printf("========restful return fail, threadID[%d]\n", pThreadInfo->threadID); @@ -5936,7 +5968,7 @@ static void *readMetric(void 
*sarg) { fprintf(fp, "Querying On %d records:\n", totalData); for (int j = 0; j < n; j++) { - char condition[BUFFER_SIZE - 30] = "\0"; + char condition[COND_BUF_LEN] = "\0"; char tempS[64] = "\0"; int m = 10 < num_of_tables ? 10 : num_of_tables; @@ -5947,7 +5979,7 @@ static void *readMetric(void *sarg) { } else { sprintf(tempS, " or t1 = %d ", i); } - strcat(condition, tempS); + strncat(condition, tempS, COND_BUF_LEN - 1); sprintf(command, "select %s from meters where %s", aggreFunc[j], condition); @@ -6125,43 +6157,24 @@ static void *specifiedTableQuery(void *sarg) { taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms } - st = taosGetTimestampMs(); - - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { - int64_t t1 = taosGetTimestampMs(); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { sprintf(tmpFile, "%s-%d", g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); - } - selectAndGetResult(pThreadInfo->taos, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile); - int64_t t2 = taosGetTimestampMs(); - printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %10.3f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000.0); - } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { - int64_t t1 = taosGetTimestampMs(); - int retCode = postProceSql(g_queryInfo.host, &(g_queryInfo.serv_addr), - g_queryInfo.port, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); - if (0 != retCode) { - printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); - return NULL; - } - int64_t t2 = taosGetTimestampMs(); - printf("=[restful] thread[%"PRId64"] complete one sql, Spent %10.3f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000.0); - - } else { - errorPrint("%s() LN%d, unknown query mode: %s\n", - __func__, __LINE__, g_queryInfo.queryMode); - return NULL; } - totalQueried ++; - g_queryInfo.specifiedQueryInfo.totalQueried ++; + + st = taosGetTimestampMs(); + + selectAndGetResult(pThreadInfo, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile); et = taosGetTimestampMs(); + printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n", + taosGetSelfPthreadId(), g_queryInfo.queryMode, (et - st)/1000.0); + + totalQueried ++; + g_queryInfo.specifiedQueryInfo.totalQueried ++; uint64_t currentPrintTime = taosGetTimestampMs(); uint64_t endTs = taosGetTimestampMs(); @@ -6194,14 +6207,14 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { tstrncpy(outSql, inSql, pos - inSql + 1); //printf("1: %s\n", outSql); - strcat(outSql, subTblName); + strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1); //printf("2: %s\n", outSql); - strcat(outSql, pos+strlen(sourceString)); + strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1); //printf("3: %s\n", outSql); } static void *superTableQuery(void *sarg) { - char sqlstr[1024]; + char sqlstr[MAX_QUERY_SQL_LENGTH]; threadInfo *pThreadInfo = (threadInfo *)sarg; if (pThreadInfo->taos == NULL) { @@ -6246,7 +6259,7 @@ static void *superTableQuery(void *sarg) { g_queryInfo.superQueryInfo.result[j], pThreadInfo->threadID); } - selectAndGetResult(pThreadInfo->taos, sqlstr, tmpFile); + selectAndGetResult(pThreadInfo, sqlstr, tmpFile); totalQueried++; g_queryInfo.superQueryInfo.totalQueried ++; @@ -6447,7 
+6460,8 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c return; } - getResult(res, (char*)param); + if (param) + appendResultToFile(res, (char*)param); // tao_unscribe() will free result. } @@ -6476,7 +6490,7 @@ static TAOS_SUB* subscribeImpl( static void *superSubscribe(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - char subSqlstr[1024]; + char subSqlstr[MAX_QUERY_SQL_LENGTH]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; if (g_queryInfo.superQueryInfo.sqlCount == 0) @@ -6551,8 +6565,8 @@ static void *superSubscribe(void *sarg) { sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], pThreadInfo->threadID); + appendResultToFile(res, tmpFile); } - getResult(res, tmpFile); } } } @@ -6639,8 +6653,8 @@ static void *specifiedSubscribe(void *sarg) { if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID); + appendResultToFile(res, tmpFile); } - getResult(res, tmpFile); } } } From edbc123e2e0b4265e2ac89d85f10bb82bb34c39e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 11 May 2021 10:43:53 +0800 Subject: [PATCH 122/140] [td-225]update the test case. --- .../jdbc/cases/InsertSpecialCharacterRestfulTest.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java index 0cbbe76716..ea0d1aec41 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java @@ -13,7 +13,7 @@ public class InsertSpecialCharacterRestfulTest { private static String tbname1 = "test"; private static String tbname2 = "weather"; private static String special_character_str_1 = "$asd$$fsfsf$"; - private static String special_character_str_2 = "\\asdfsfsf\\\\"; + private static String special_character_str_2 = "\\\\asdfsfsf\\\\"; private static String special_character_str_3 = "\\\\asdfsfsf\\"; private static String special_character_str_4 = "?asd??fsf?sf?"; private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$"; @@ -49,7 +49,7 @@ public class InsertSpecialCharacterRestfulTest { @Test public void testCase02() throws SQLException { //TODO: - // Expected :\asdfsfsf\\ + // Expected :\asdfsfsf\ // Actual :\asdfsfsf\ final long now = System.currentTimeMillis(); @@ -71,7 +71,7 @@ public class InsertSpecialCharacterRestfulTest { String f1 = new String(rs.getBytes(2)); //TODO: bug to be fixed // Assert.assertEquals(special_character_str_2, f1); - Assert.assertEquals(special_character_str_2.substring(0, special_character_str_1.length() - 2), f1); + Assert.assertEquals(special_character_str_2.substring(1, special_character_str_1.length() - 1), f1); String f2 = rs.getString(3); Assert.assertNull(f2); } From 4fa64be709f830b14670173b7ff5ba6105f9773f Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 11 May 2021 13:11:44 +0800 Subject: [PATCH 123/140] Hotfix/sangshuduo/td 4073 taosdemo restful resultfile for develop (#6078) * [TD-4073]: taosdemo rest query output to result file. * [TD-4073]: taosdemo rest query output to result file. fix insecure strcat(). * [TD-4073]: taosdemo rest query output to result file. fix append function bug. 
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 150 +++++++++++++++++++----------------- 1 file changed, 80 insertions(+), 70 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 72682c6a21..8dea4995ee 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -70,6 +70,7 @@ enum TEST_MODE { #define MAX_SQL_SIZE 65536 #define BUFFER_SIZE (65536*2) +#define COND_BUF_LEN BUFFER_SIZE - 30 #define MAX_USERNAME_SIZE 64 #define MAX_PASSWORD_SIZE 64 #define MAX_DB_NAME_SIZE 64 @@ -516,6 +517,8 @@ static int taosRandom() static int createDatabasesAndStables(); static void createChildTables(); static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); +static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port, + char* sqlstr, char *resultFile); /* ************ Global variables ************ */ @@ -1083,27 +1086,33 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { return 0; } -static void getResult(TAOS_RES *res, char* resultFileName) { +static void appendResultBufToFile(char *resultBuf, char *resultFile) +{ + FILE *fp = NULL; + if (resultFile[0] != 0) { + fp = fopen(resultFile, "at"); + if (fp == NULL) { + errorPrint( + "%s() LN%d, failed to open result file: %s, result will not save to file\n", + __func__, __LINE__, resultFile); + return; + } + } + + fprintf(fp, "%s", resultBuf); + tmfclose(fp); +} + +static void appendResultToFile(TAOS_RES *res, char* resultFile) { TAOS_ROW row = NULL; int num_rows = 0; int num_fields = taos_field_count(res); TAOS_FIELD *fields = taos_fetch_fields(res); - FILE *fp = NULL; - if (resultFileName[0] != 0) { - fp = fopen(resultFileName, "at"); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open result file: %s, result will not save to file\n", - __func__, __LINE__, resultFileName); - } - } - char* databuf = (char*) calloc(1, 100*1024*1024); if (databuf == NULL) { errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", __func__, __LINE__); - if (fp) - fclose(fp); return ; } @@ -1113,7 +1122,7 @@ static void getResult(TAOS_RES *res, char* resultFileName) { // fetch the records row by row while((row = taos_fetch_row(res))) { if (totalLen >= 100*1024*1024 - 32000) { - if (fp) fprintf(fp, "%s", databuf); + appendResultBufToFile(databuf, resultFile); totalLen = 0; memset(databuf, 0, 100*1024*1024); } @@ -1125,22 +1134,36 @@ static void getResult(TAOS_RES *res, char* resultFileName) { totalLen += len; } - if (fp) fprintf(fp, "%s", databuf); - tmfclose(fp); + appendResultBufToFile(databuf, resultFile); free(databuf); } -static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) { - TAOS_RES *res = taos_query(taos, command); - if (res == NULL || taos_errno(res) != 0) { - errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n", - __func__, __LINE__, command, taos_errstr(res)); - taos_free_result(res); - return; - } +static void selectAndGetResult(threadInfo *pThreadInfo, char *command, char* resultFileName) { + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { + TAOS_RES *res = taos_query(pThreadInfo->taos, command); + if (res == NULL || taos_errno(res) != 0) { + errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n", + __func__, __LINE__, command, taos_errstr(res)); + taos_free_result(res); + return; + } - getResult(res, resultFileName); - taos_free_result(res); + appendResultToFile(res, resultFileName); + taos_free_result(res); + + } else if 
(0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { + int retCode = postProceSql( + g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], + resultFileName); + if (0 != retCode) { + printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); + } + + } else { + errorPrint("%s() LN%d, unknown query mode: %s\n", + __func__, __LINE__, g_queryInfo.queryMode); + } } static int32_t rand_bool(){ @@ -1933,13 +1956,13 @@ static void printfQuerySystemInfo(TAOS * taos) { // show variables res = taos_query(taos, "show variables;"); - //getResult(res, filename); + //appendResultToFile(res, filename); xDumpResultToFile(filename, res); // show dnodes res = taos_query(taos, "show dnodes;"); xDumpResultToFile(filename, res); - //getResult(res, filename); + //appendResultToFile(res, filename); // show databases res = taos_query(taos, "show databases;"); @@ -1974,7 +1997,8 @@ static void printfQuerySystemInfo(TAOS * taos) { free(dbInfos); } -static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port, char* sqlstr) +static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port, + char* sqlstr, char *resultFile) { char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s"; @@ -2110,6 +2134,10 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port response_buf[RESP_BUF_LEN - 1] = '\0'; printf("Response:\n%s\n", response_buf); + if (resultFile) { + appendResultBufToFile(response_buf, resultFile); + } + free(request_buf); #ifdef WINDOWS closesocket(sockfd); @@ -4681,7 +4709,8 @@ static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, uint64_t k) if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) { affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false); } else if (0 == strncasecmp(superTblInfo->insertMode, "rest", strlen("rest"))) { - if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port, buffer)) { + if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port, + buffer, NULL /* not set result file */)) { affectedRows = -1; printf("========restful return fail, threadID[%d]\n", pThreadInfo->threadID); @@ -5922,7 +5951,7 @@ static void *readMetric(void *sarg) { fprintf(fp, "Querying On %d records:\n", totalData); for (int j = 0; j < n; j++) { - char condition[BUFFER_SIZE - 30] = "\0"; + char condition[COND_BUF_LEN] = "\0"; char tempS[64] = "\0"; int m = 10 < num_of_tables ? 
10 : num_of_tables; @@ -5933,7 +5962,7 @@ static void *readMetric(void *sarg) { } else { sprintf(tempS, " or t1 = %d ", i); } - strcat(condition, tempS); + strncat(condition, tempS, COND_BUF_LEN - 1); sprintf(command, "select %s from meters where %s", aggreFunc[j], condition); @@ -6111,43 +6140,24 @@ static void *specifiedTableQuery(void *sarg) { taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms } - st = taosGetTimestampMs(); - - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { - int64_t t1 = taosGetTimestampMs(); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { sprintf(tmpFile, "%s-%d", g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); - } - selectAndGetResult(pThreadInfo->taos, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile); - int64_t t2 = taosGetTimestampMs(); - printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %10.3f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000.0); - } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { - int64_t t1 = taosGetTimestampMs(); - int retCode = postProceSql(g_queryInfo.host, &(g_queryInfo.serv_addr), - g_queryInfo.port, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); - if (0 != retCode) { - printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); - return NULL; - } - int64_t t2 = taosGetTimestampMs(); - printf("=[restful] thread[%"PRId64"] complete one sql, Spent %10.3f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000.0); - - } else { - errorPrint("%s() LN%d, unknown query mode: %s\n", - __func__, __LINE__, g_queryInfo.queryMode); - return NULL; } - totalQueried ++; - g_queryInfo.specifiedQueryInfo.totalQueried ++; + + st = taosGetTimestampMs(); + + selectAndGetResult(pThreadInfo, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile); et = taosGetTimestampMs(); + printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n", + taosGetSelfPthreadId(), g_queryInfo.queryMode, (et - st)/1000.0); + + totalQueried ++; + g_queryInfo.specifiedQueryInfo.totalQueried ++; uint64_t currentPrintTime = taosGetTimestampMs(); uint64_t endTs = taosGetTimestampMs(); @@ -6180,14 +6190,14 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { tstrncpy(outSql, inSql, pos - inSql + 1); //printf("1: %s\n", outSql); - strcat(outSql, subTblName); + strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1); //printf("2: %s\n", outSql); - strcat(outSql, pos+strlen(sourceString)); + strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1); //printf("3: %s\n", outSql); } static void *superTableQuery(void *sarg) { - char sqlstr[1024]; + char sqlstr[MAX_QUERY_SQL_LENGTH]; threadInfo *pThreadInfo = (threadInfo *)sarg; if (pThreadInfo->taos == NULL) { @@ -6232,7 +6242,7 @@ static void *superTableQuery(void *sarg) { g_queryInfo.superQueryInfo.result[j], pThreadInfo->threadID); } - selectAndGetResult(pThreadInfo->taos, sqlstr, tmpFile); + selectAndGetResult(pThreadInfo, sqlstr, tmpFile); totalQueried++; g_queryInfo.superQueryInfo.totalQueried ++; @@ -6433,7 +6443,7 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c return; } - getResult(res, (char*)param); + appendResultToFile(res, (char*)param); // tao_unscribe() will free result. 
} @@ -6462,7 +6472,7 @@ static TAOS_SUB* subscribeImpl( static void *superSubscribe(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - char subSqlstr[1024]; + char subSqlstr[MAX_QUERY_SQL_LENGTH]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; if (g_queryInfo.superQueryInfo.sqlCount == 0) @@ -6538,7 +6548,7 @@ static void *superSubscribe(void *sarg) { g_queryInfo.superQueryInfo.result[i], pThreadInfo->threadID); } - getResult(res, tmpFile); + appendResultToFile(res, tmpFile); } } } @@ -6626,7 +6636,7 @@ static void *specifiedSubscribe(void *sarg) { sprintf(tmpFile, "%s-%d", g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID); } - getResult(res, tmpFile); + appendResultToFile(res, tmpFile); } } } From 1d6f4b2123bd22f3064d34092fd52ac4cfdec80a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 11 May 2021 13:48:23 +0800 Subject: [PATCH 124/140] [td-225]refactor. --- .../taosdata/jdbc/TSDBPreparedStatement.java | 84 +++++++++++-------- 1 file changed, 48 insertions(+), 36 deletions(-) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index 9bdebe36b6..b7e2a9abd3 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -621,29 +621,32 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_NCHAR, size * Integer.BYTES); } - public void columnDataAddBatch() { - // do nothing - } - - public void columnDataExecuteBatch() throws SQLException { - int numOfCols = this.colData.size(); - int rows = ((ColumnInfo) this.colData.get(0)).data.size(); - - // pass the data block to native code - TSDBJNIConnector connector = null; - try { - connector = ((TSDBConnection) this.getConnection()).getConnector(); - this.nativeStmtHandle = connector.prepareStmt(rawSql); - - // table name is not set yet, abort - if (this.tableName == null) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "table name not set yet"); - } - connector.setBindTableName(this.nativeStmtHandle, this.tableName); - } catch (SQLException e) { - e.printStackTrace(); + public void columnDataAddBatch() throws SQLException { + // pass the data block to native code + if (rawSql == null) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "sql statement not set yet"); } - + + // table name is not set yet, abort + if (this.tableName == null) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "table name not set yet"); + } + + int numOfCols = this.colData.size(); + if (numOfCols == 0) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind"); + } + + TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector(); + this.nativeStmtHandle = connector.prepareStmt(rawSql); + connector.setBindTableName(this.nativeStmtHandle, this.tableName); + + ColumnInfo colInfo = (ColumnInfo) this.colData.get(0); + if (colInfo == null) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind"); + } + + int rows = colInfo.data.size(); for (int i = 0; i < numOfCols; ++i) { ColumnInfo col1 = this.colData.get(i); if (col1 == null || !col1.isTypeSet()) { @@ -684,8 +687,13 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat case 
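
This refactor moves statement preparation, table-name binding and column binding into columnDataAddBatch(), while columnDataExecuteBatch() now only executes and then clears the pending block. A rough usage sketch of that path follows; it is not part of this change, and apart from the columnData* calls visible in the diff the method names (setTableName, the ArrayList-based setTimestamp/setInt) are assumptions that may differ between driver versions:

    // Sketch only: exercising the block-bind path after this refactor.
    // Assumes an open java.sql.Connection "conn" to TDengine, a table
    // created as (ts timestamp, c1 int), and this driver's extended
    // TSDBPreparedStatement setters (setTableName, setTimestamp/setInt
    // taking ArrayList column values) -- assumed names, adjust if needed.
    void bindOneBlock(java.sql.Connection conn) throws java.sql.SQLException {
        TSDBPreparedStatement stmt = (TSDBPreparedStatement) conn
                .prepareStatement("insert into ? values(?, ?)");
        try {
            stmt.setTableName("t1");                     // target child table (assumed setter)
            java.util.ArrayList<Long> ts = new java.util.ArrayList<>();
            java.util.ArrayList<Integer> c1 = new java.util.ArrayList<>();
            for (int i = 0; i < 10; i++) {
                ts.add(System.currentTimeMillis() + i);  // column 0: timestamps
                c1.add(i);                               // column 1: int values
            }
            stmt.setTimestamp(0, ts);
            stmt.setInt(1, c1);
            stmt.columnDataAddBatch();     // after this patch: prepares the stmt, binds table name and columns
            stmt.columnDataExecuteBatch(); // executes and clears the pending batch
        } finally {
            stmt.columnDataCloseBatch();   // releases the native statement handle
        }
    }
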
TSDBConstants.TSDB_DATA_TYPE_BOOL: { for (int j = 0; j < rows; ++j) { - Byte val = (Byte) col1.data.get(j); - colDataList.put(val == null? 0:val); + Boolean val = (Boolean) col1.data.get(j); + if (val == null) { + colDataList.put((byte) 0); + } else { + colDataList.put((byte) (val? 1:0)); + } + isNullList.put((byte) (val == null? 1:0)); } break; @@ -772,23 +780,27 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat connector.bindColumnDataArray(this.nativeStmtHandle, colDataList, lengthList, isNullList, col1.type, col1.bytes, rows, i); } - + } + + public void columnDataExecuteBatch() throws SQLException { + TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector(); connector.executeBatch(this.nativeStmtHandle); + this.columnDataClearBatch(); } public void columnDataClearBatch() { - // TODO clear data in this.colData + int size = this.colData.size(); + this.colData.clear(); + + this.colData.addAll(Collections.nCopies(size, null)); + this.tableName = null; // clear the table name } - public void columnDataCloseBatch() { - TSDBJNIConnector connector = null; - try { - connector = ((TSDBConnection) this.getConnection()).getConnector(); - connector.closeBatch(this.nativeStmtHandle); - this.nativeStmtHandle = 0L; - this.tableName = null; - } catch (SQLException e) { - e.printStackTrace(); - } + public void columnDataCloseBatch() throws SQLException { + TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector(); + connector.closeBatch(this.nativeStmtHandle); + + this.nativeStmtHandle = 0L; + this.tableName = null; } } From 98feeefd4e9f874daf1baa7791fdb82635b98e1f Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Tue, 11 May 2021 14:10:01 +0800 Subject: [PATCH 125/140] fix compile error --- src/client/src/tscPrepare.c | 10 +++++----- src/query/src/qTokenizer.c | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index ca887427e0..952a29eab4 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -745,7 +745,7 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU } if (!IS_VAR_DATA_TYPE(param->type)) { - memcpy(data + param->offset, bind->buffer + bind->buffer_length * i, tDataTypes[param->type].bytes); + memcpy(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, tDataTypes[param->type].bytes); if (param->offset == 0) { if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) { @@ -759,7 +759,7 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU return TSDB_CODE_TSC_INVALID_VALUE; } int16_t bsize = (short)bind->length[i]; - STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer + bind->buffer_length * i, bsize); + STR_WITH_SIZE_TO_VARSTR(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, bsize); } else if (param->type == TSDB_DATA_TYPE_NCHAR) { if (bind->length[i] > (uintptr_t)param->bytes) { tscError("nchar string length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]); @@ -767,8 +767,8 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU } int32_t output = 0; - if (!taosMbsToUcs4(bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { - tscError("convert nchar string to UCS4_LE failed:%s", (char*)(bind->buffer + bind->buffer_length * i)); + 
if (!taosMbsToUcs4((char *)bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { + tscError("convert nchar string to UCS4_LE failed:%s", (char*)((char *)bind->buffer + bind->buffer_length * i)); return TSDB_CODE_TSC_INVALID_VALUE; } @@ -1293,7 +1293,7 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { return TSDB_CODE_TSC_APP_ERROR; } - if (pStmt->last == STMT_INIT && pStmt->last == STMT_BIND && pStmt->last == STMT_BIND_COL) { + if (pStmt->last == STMT_INIT || pStmt->last == STMT_BIND || pStmt->last == STMT_BIND_COL) { tscError("0x%"PRIx64" settbname status error, last:%d", pSql->self, pStmt->last); return TSDB_CODE_TSC_APP_ERROR; } diff --git a/src/query/src/qTokenizer.c b/src/query/src/qTokenizer.c index 4a4897f5c2..a16bcd4fc9 100644 --- a/src/query/src/qTokenizer.c +++ b/src/query/src/qTokenizer.c @@ -562,9 +562,9 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenId) { SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* newToken) { char *src = *str; - int32_t nsize = strlen(newToken); - int32_t size = strlen(*str) - token->n + nsize + 1; - int32_t bsize = (uint64_t)token->z - (uint64_t)src; + size_t nsize = strlen(newToken); + int32_t size = (int32_t)strlen(*str) - token->n + (int32_t)nsize + 1; + int32_t bsize = (int32_t)((uint64_t)token->z - (uint64_t)src); SStrToken ntoken; *str = calloc(1, size); @@ -573,7 +573,7 @@ SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* newToken) strcat(*str, newToken); strcat(*str, token->z + token->n); - ntoken.n = nsize; + ntoken.n = (uint32_t)nsize; ntoken.z = *str + bsize; tfree(src); From b2d68568d886707c114aa8c13e5ecf2be8a86f72 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 11 May 2021 14:20:08 +0800 Subject: [PATCH 126/140] [td-4038]refactor. 
--- .../main/java/com/taosdata/jdbc/AbstractResultSet.java | 7 +++++-- .../java/com/taosdata/jdbc/TSDBResultSetRowData.java | 5 +++-- .../test/java/com/taosdata/jdbc/TSDBResultSetTest.java | 10 +++++----- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java index 4b5b88d93b..f8ea9af423 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java @@ -84,10 +84,12 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet } @Override + @Deprecated public InputStream getUnicodeStream(int columnIndex) throws SQLException { - if (isClosed()) + if (isClosed()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @@ -171,6 +173,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet } @Override + @Deprecated public InputStream getUnicodeStream(String columnLabel) throws SQLException { return getUnicodeStream(findColumn(columnLabel)); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java index 34470fbc4e..618e896a6d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java @@ -84,7 +84,8 @@ public class TSDBResultSetRowData { data.set(col, value); } - public int getInt(int col, int srcType) throws SQLException { + @SuppressWarnings("deprecation") + public int getInt(int col, int srcType) throws SQLException { Object obj = data.get(col); switch (srcType) { @@ -128,7 +129,7 @@ public class TSDBResultSetRowData { long value = (long) obj; if (value < 0) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE); - return new Long(value).intValue(); + return Long.valueOf(value).intValue(); } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java index c5c6f7bca5..f304fd6874 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java @@ -3,7 +3,6 @@ package com.taosdata.jdbc; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; import com.google.common.primitives.Shorts; -import com.taosdata.jdbc.rs.RestfulResultSet; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -177,7 +176,8 @@ public class TSDBResultSetTest { rs.getAsciiStream("f1"); } - @Test(expected = SQLFeatureNotSupportedException.class) + @SuppressWarnings("deprecation") + @Test(expected = SQLFeatureNotSupportedException.class) public void getUnicodeStream() throws SQLException { rs.getUnicodeStream("f1"); } @@ -326,7 +326,7 @@ public class TSDBResultSetTest { @Test(expected = SQLFeatureNotSupportedException.class) public void getRow() throws SQLException { - int row = rs.getRow(); + rs.getRow(); } @Test(expected = SQLFeatureNotSupportedException.class) @@ -405,12 +405,12 @@ public class TSDBResultSetTest { @Test(expected = SQLFeatureNotSupportedException.class) public void 
updateByte() throws SQLException { - rs.updateByte(1, new Byte("0")); + rs.updateByte(1, (byte) 0); } @Test(expected = SQLFeatureNotSupportedException.class) public void updateShort() throws SQLException { - rs.updateShort(1, new Short("0")); + rs.updateShort(1, (short) 0); } @Test(expected = SQLFeatureNotSupportedException.class) From fd500aba599ba2fe478116325c955fff196ce322 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Tue, 11 May 2021 15:32:28 +0800 Subject: [PATCH 127/140] add records num check --- src/client/src/tscPrepare.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 952a29eab4..611cb604c4 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -932,6 +932,11 @@ static int insertStmtUpdateBatch(STscStmt* stmt) { SSqlCmd* pCmd = &pSql->cmd; STableDataBlocks* pBlock = NULL; + if (pCmd->batchSize > INT16_MAX) { + tscError("too many record:%d", pCmd->batchSize); + return TSDB_CODE_TSC_APP_ERROR; + } + assert(pCmd->numOfClause == 1); if (taosHashGetSize(pCmd->pTableBlockHashList) == 0) { return TSDB_CODE_SUCCESS; @@ -1432,7 +1437,7 @@ int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) { return TSDB_CODE_TSC_DISCONNECTED; } - if (bind == NULL || bind->num <= 0) { + if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) { tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self); return TSDB_CODE_TSC_APP_ERROR; } @@ -1466,7 +1471,7 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in return TSDB_CODE_TSC_DISCONNECTED; } - if (bind == NULL || bind->num <= 0) { + if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) { tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self); return TSDB_CODE_TSC_APP_ERROR; } From 055919a7a71f233bbc77517ded850f197061993c Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 11 May 2021 15:38:05 +0800 Subject: [PATCH 128/140] Hotfix/sangshuduo/td 3992 taosdemo subscribe (#6062) * [TD-3902]: taosdemo subscribe. * [TD-3992]: taosdemo subscribe. refactor sync/async mode. 
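(Illustrative note, not part of this patch: the SYNC_MODE/ASYNC_MODE split introduced above maps onto the two call patterns of the TDengine C subscription API that taosdemo already uses -- in sync mode the application polls taos_consume() itself, while in async mode it hands a callback to taos_subscribe() and does not call taos_consume(). A minimal sketch of both shapes follows; the connection parameters, topic names, SQL and row handling are placeholders chosen for the example, not values taken from taosdemo.)

    #include <stdio.h>
    #include <unistd.h>   /* sleep(), POSIX */
    #include <taos.h>     /* TDengine C client header */

    /* async mode: the client library invokes this callback at each interval */
    static void sub_callback(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code) {
        (void)tsub; (void)param;          /* unused in this sketch */
        TAOS_ROW row;
        int nrows = 0;
        while ((row = taos_fetch_row(res)) != NULL) {
            nrows++;                      /* row[] holds pointers to the column values */
        }
        printf("async: %d new rows (code=%d)\n", nrows, code);
    }

    int main(void) {
        TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
        if (taos == NULL) return 1;

        int restart  = 1;      /* do not resume from previously saved progress */
        int interval = 1000;   /* ms between consume cycles */

        /* SYNC_MODE: no callback, the caller drives consumption with taos_consume() */
        TAOS_SUB *sync_sub = taos_subscribe(taos, restart, "demo-sync",
                                            "select * from test.meters;", NULL, NULL, interval);
        for (int i = 0; sync_sub != NULL && i < 10; i++) {
            TAOS_RES *res = taos_consume(sync_sub);   /* rows added since the last call */
            TAOS_ROW row;
            int nrows = 0;
            while (res != NULL && (row = taos_fetch_row(res)) != NULL) {
                nrows++;                  /* handle one row */
            }
            printf("sync: %d new rows\n", nrows);
            sleep(1);
        }

        /* ASYNC_MODE: register the callback; the application does not call taos_consume() */
        TAOS_SUB *async_sub = taos_subscribe(taos, restart, "demo-async",
                                             "select * from test.meters;", sub_callback, NULL, interval);
        sleep(10);             /* let the callback run for a while */

        if (sync_sub)  taos_unsubscribe(sync_sub, 0);    /* 0: discard subscription progress */
        if (async_sub) taos_unsubscribe(async_sub, 0);
        taos_close(taos);
        return 0;
    }

(taosdemo's subscribeImpl() makes the same choice: it passes subscribe_callback only when asyncMode is ASYNC_MODE and NULL otherwise, and its consume loops skip subscriptions that were opened in async mode.)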
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 128 ++++++++++++++++-------------------- 1 file changed, 57 insertions(+), 71 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 935aa358c5..3c83cb7a5e 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -68,12 +68,6 @@ enum TEST_MODE { INVAID_TEST }; -enum QUERY_MODE { - SYNC_QUERY_MODE, // 0 - ASYNC_QUERY_MODE, // 1 - INVALID_MODE -}; - #define MAX_SQL_SIZE 65536 #define BUFFER_SIZE (65536*2) #define COND_BUF_LEN BUFFER_SIZE - 30 @@ -119,8 +113,8 @@ typedef enum TALBE_EXISTS_EN { } TALBE_EXISTS_EN; enum MODE { - SYNC, - ASYNC, + SYNC_MODE, + ASYNC_MODE, MODE_BUT }; @@ -206,7 +200,7 @@ typedef struct SArguments_S { bool verbose_print; bool performance_print; char * output_file; - uint32_t query_mode; + bool async_mode; char * datatype[MAX_NUM_DATATYPE + 1]; uint32_t len_of_binary; uint32_t num_of_CPR; @@ -344,7 +338,7 @@ typedef struct SDbs_S { bool use_metric; bool insert_only; bool do_aggreFunc; - bool queryMode; + bool asyncMode; uint32_t threadCount; uint32_t threadCountByCreateTbl; @@ -361,7 +355,7 @@ typedef struct SpecifiedQueryInfo_S { uint64_t queryInterval; // 0: unlimit > 0 loop/s uint64_t concurrent; uint64_t sqlCount; - uint32_t mode; // 0: sync, 1: async + uint32_t asyncMode; // 0: sync, 1: async uint64_t subscribeInterval; // ms uint64_t queryTimes; int subscribeRestart; @@ -376,7 +370,7 @@ typedef struct SuperQueryInfo_S { char sTblName[MAX_TB_NAME_SIZE+1]; uint64_t queryInterval; // 0: unlimit > 0 loop/s uint32_t threadCnt; - uint32_t mode; // 0: sync, 1: async + uint32_t asyncMode; // 0: sync, 1: async uint64_t subscribeInterval; // ms int subscribeRestart; int subscribeKeepProgress; @@ -774,49 +768,48 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->sqlFile = argv[++i]; } else if (strcmp(argv[i], "-q") == 0) { - if ((argc == i+1) - || (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, 1: ASYNC. 
Default is SYNC.\n"); exit(EXIT_FAILURE); } - arguments->query_mode = atoi(argv[++i]); + arguments->async_mode = atoi(argv[++i]); } else if (strcmp(argv[i], "-T") == 0) { - if ((argc == i+1) - || (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-T need a number following!\n"); exit(EXIT_FAILURE); } arguments->num_of_threads = atoi(argv[++i]); } else if (strcmp(argv[i], "-i") == 0) { - if ((argc == i+1) - || (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-i need a number following!\n"); exit(EXIT_FAILURE); } arguments->insert_interval = atoi(argv[++i]); } else if (strcmp(argv[i], "-qt") == 0) { - if ((argc == i+1) - || (!isStringNumber(argv[i+1])) - || (atoi(argv[i+1]) <= 0)) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { printHelp(); - errorPrint("%s", "\n\t-qt need a valid (>0) number following!\n"); + errorPrint("%s", "\n\t-qt need a number following!\n"); exit(EXIT_FAILURE); } arguments->query_times = atoi(argv[++i]); } else if (strcmp(argv[i], "-B") == 0) { - if ((argc == i+1) - || (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-B need a number following!\n"); exit(EXIT_FAILURE); } arguments->interlace_rows = atoi(argv[++i]); } else if (strcmp(argv[i], "-r") == 0) { - if ((argc == i+1) - || (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-r need a number following!\n"); exit(EXIT_FAILURE); @@ -1076,7 +1069,7 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { if (code != 0) { if (!quiet) { debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); - errorPrint("Failed to execute %s, reason: %s\n", command, taos_errstr(res)); + errorPrint("Failed to run %s, reason: %s\n", command, taos_errstr(res)); } taos_free_result(res); //taos_close(taos); @@ -1672,7 +1665,7 @@ static void printfQueryMeta() { printf("concurrent: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.specifiedQueryInfo.concurrent); printf("mod: \033[33m%s\033[0m\n", - (g_queryInfo.specifiedQueryInfo.mode)?"async":"sync"); + (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync"); printf("interval: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeInterval); printf("restart: \033[33m%d\033[0m\n", @@ -1704,7 +1697,7 @@ static void printfQueryMeta() { g_queryInfo.superQueryInfo.queryTimes); printf("mod: \033[33m%s\033[0m\n", - (g_queryInfo.superQueryInfo.mode)?"async":"sync"); + (g_queryInfo.superQueryInfo.asyncMode)?"async":"sync"); printf("interval: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval); printf("restart: \033[33m%d\033[0m\n", @@ -4072,9 +4065,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); if (gQueryTimes && gQueryTimes->type == cJSON_Number) { - if (gQueryTimes->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - __func__, __LINE__, gQueryTimes->valueint); + if (gQueryTimes->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } g_args.query_times = gQueryTimes->valueint; @@ -4123,9 +4116,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* specifiedQueryTimes = 
cJSON_GetObjectItem(specifiedQuery, "query_times"); if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { - if (specifiedQueryTimes->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - __func__, __LINE__, specifiedQueryTimes->valueint); + if (specifiedQueryTimes->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -4152,20 +4145,20 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.concurrent = 1; } - cJSON* mode = cJSON_GetObjectItem(specifiedQuery, "mode"); - if (mode && mode->type == cJSON_String - && mode->valuestring != NULL) { - if (0 == strcmp("sync", mode->valuestring)) { - g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE; - } else if (0 == strcmp("async", mode->valuestring)) { - g_queryInfo.specifiedQueryInfo.mode = ASYNC_QUERY_MODE; + cJSON* specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode"); + if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String + && specifiedAsyncMode->valuestring != NULL) { + if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; + } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; } else { - errorPrint("%s() LN%d, failed to read json, query mode input error\n", + errorPrint("%s() LN%d, failed to read json, async mode input error\n", __func__, __LINE__); goto PARSE_OVER; } } else { - g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE; + g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; } cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval"); @@ -4267,9 +4260,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); if (superQueryTimes && superQueryTimes->type == cJSON_Number) { - if (superQueryTimes->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - __func__, __LINE__, superQueryTimes->valueint); + if (superQueryTimes->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; @@ -4312,20 +4305,20 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { goto PARSE_OVER; } - cJSON* submode = cJSON_GetObjectItem(superQuery, "mode"); - if (submode && submode->type == cJSON_String - && submode->valuestring != NULL) { - if (0 == strcmp("sync", submode->valuestring)) { - g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE; - } else if (0 == strcmp("async", submode->valuestring)) { - g_queryInfo.superQueryInfo.mode = ASYNC_QUERY_MODE; + cJSON* superAsyncMode = cJSON_GetObjectItem(superQuery, "mode"); + if (superAsyncMode && superAsyncMode->type == cJSON_String + && superAsyncMode->valuestring != NULL) { + if (0 == strcmp("sync", superAsyncMode->valuestring)) { + g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; + } else if (0 == strcmp("async", superAsyncMode->valuestring)) { + g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; } else { - errorPrint("%s() LN%d, failed to read json, query mode input error\n", + errorPrint("%s() LN%d, failed to read json, async mode input error\n", __func__, __LINE__); goto PARSE_OVER; } } else { - g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE; + 
g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; } cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval"); @@ -5233,13 +5226,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { startTs = taosGetTimestampMs(); - if (recOfBatch == 0) { - errorPrint("[%d] %s() LN%d try inserting records of batch is %"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, - recOfBatch); - errorPrint("%s\n", "\tPlease check if the batch or the buffer length is proper value!\n"); - goto free_of_interlace; - } int64_t affectedRows = execInsert(pThreadInfo, buffer, recOfBatch); endTs = taosGetTimestampMs(); @@ -5780,10 +5766,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, } */ tsem_init(&(t_info->lock_sem), 0, 0); - if (SYNC == g_Dbs.queryMode) { - pthread_create(pids + i, NULL, syncWrite, t_info); - } else { + if (ASYNC_MODE == g_Dbs.asyncMode) { pthread_create(pids + i, NULL, asyncWrite, t_info); + } else { + pthread_create(pids + i, NULL, syncWrite, t_info); } } @@ -6469,7 +6455,7 @@ static TAOS_SUB* subscribeImpl( TAOS *taos, char *sql, char* topic, char* resultFileName) { TAOS_SUB* tsub = NULL; - if (ASYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) { + if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { tsub = taos_subscribe(taos, g_queryInfo.specifiedQueryInfo.subscribeRestart, topic, sql, subscribe_callback, (void*)resultFileName, @@ -6554,7 +6540,7 @@ static void *superSubscribe(void *sarg) { TAOS_RES* res = NULL; while(1) { for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - if (ASYNC_QUERY_MODE == g_queryInfo.superQueryInfo.mode) { + if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { continue; } @@ -6643,7 +6629,7 @@ static void *specifiedSubscribe(void *sarg) { TAOS_RES* res = NULL; while(1) { for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - if (ASYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) { + if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { continue; } @@ -6866,7 +6852,7 @@ static void setParaFromArg(){ g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; g_Dbs.threadCount = g_args.num_of_threads; g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; - g_Dbs.queryMode = g_args.query_mode; + g_Dbs.asyncMode = g_args.async_mode; g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; From eab8e5e8f2f1c008a942dc9672ff618cf0d4580f Mon Sep 17 00:00:00 2001 From: dapan1121 <72057773+dapan1121@users.noreply.github.com> Date: Tue, 11 May 2021 17:07:35 +0800 Subject: [PATCH 129/140] Update makefile --- tests/examples/c/makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile index 09682d35ef..304623c27a 100644 --- a/tests/examples/c/makefile +++ b/tests/examples/c/makefile @@ -14,7 +14,6 @@ exe: gcc $(CFLAGS) ./asyncdemo.c -o $(ROOT)asyncdemo $(LFLAGS) gcc $(CFLAGS) ./demo.c -o $(ROOT)demo $(LFLAGS) gcc $(CFLAGS) ./prepare.c -o $(ROOT)prepare $(LFLAGS) - gcc $(CFLAGS) ./batchprepare.c -o $(ROOT)batchprepare $(LFLAGS) gcc $(CFLAGS) ./stream.c -o $(ROOT)stream $(LFLAGS) gcc $(CFLAGS) ./subscribe.c -o $(ROOT)subscribe $(LFLAGS) gcc $(CFLAGS) ./apitest.c -o $(ROOT)apitest $(LFLAGS) From ace482e293c8c0ddccd52fcc878cca6dd8209071 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Tue, 11 May 2021 18:59:51 +0800 Subject: [PATCH 130/140] fix bug --- .../java/com/taosdata/jdbc/TSDBPreparedStatement.java | 10 ++++++---- 1 file changed, 6 
insertions(+), 4 deletions(-) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index b7e2a9abd3..71e07252a3 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -60,10 +60,12 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat parameters = new Object[parameterCnt]; this.isPrepared = true; } - - // the table name is also a parameter, so ignore it. - this.colData = new ArrayList(parameterCnt - 1); - this.colData.addAll(Collections.nCopies(parameterCnt - 1, null)); + + if (parameterCnt > 1) { + // the table name is also a parameter, so ignore it. + this.colData = new ArrayList(parameterCnt - 1); + this.colData.addAll(Collections.nCopies(parameterCnt - 1, null)); + } } private void init(String sql) { From af33e1184063a047de7eb26805fd4bd314dce579 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 11 May 2021 19:06:02 +0800 Subject: [PATCH 131/140] [TD-4136]: taosdemo max records per req < 32767 (#6082) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 12 +++++++++++- .../tools/taosdemoAllTest/speciQueryRestful.json | 4 ++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 3c83cb7a5e..a3a8968017 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -68,6 +68,14 @@ enum TEST_MODE { INVAID_TEST }; +enum QUERY_MODE { + SYNC_QUERY_MODE, // 0 + ASYNC_QUERY_MODE, // 1 + INVALID_MODE +}; + +#define MAX_RECORDS_PER_REQ 32766 + #define MAX_SQL_SIZE 65536 #define BUFFER_SIZE (65536*2) #define COND_BUF_LEN BUFFER_SIZE - 30 @@ -3436,10 +3444,12 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n", __func__, __LINE__); goto PARSE_OVER; + } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) { + numRecPerReq->valueint = MAX_RECORDS_PER_REQ; } g_args.num_of_RPR = numRecPerReq->valueint; } else if (!numRecPerReq) { - g_args.num_of_RPR = UINT64_MAX; + g_args.num_of_RPR = MAX_RECORDS_PER_REQ; } else { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json b/tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json index 98e9b7a4e8..bc3a66f43c 100644 --- a/tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json +++ b/tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json @@ -8,7 +8,7 @@ "confirm_parameter_prompt": "no", "databases": "db", "query_times": 2, - "query_mode": "restful", + "query_mode": "rest", "specified_table_query": { "query_interval": 1, "concurrent": 3, @@ -35,4 +35,4 @@ ] } } - \ No newline at end of file + From 95139cf4a1c03cd6f9bb60fe0daca48bbd84e4e9 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 11 May 2021 21:36:48 +0800 Subject: [PATCH 132/140] [TD-4130]: taosdemo subscribe super table. 
(#6086) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 134 +++++++++++++++++------------------- 1 file changed, 63 insertions(+), 71 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index a3a8968017..8866bf2607 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -68,12 +68,6 @@ enum TEST_MODE { INVAID_TEST }; -enum QUERY_MODE { - SYNC_QUERY_MODE, // 0 - ASYNC_QUERY_MODE, // 1 - INVALID_MODE -}; - #define MAX_RECORDS_PER_REQ 32766 #define MAX_SQL_SIZE 65536 @@ -1107,6 +1101,7 @@ static void appendResultBufToFile(char *resultBuf, char *resultFile) } } + fprintf(fp, "%s", resultBuf); tmfclose(fp); } @@ -1142,6 +1137,7 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) { totalLen += len; } + verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", __func__, __LINE__, databuf, resultFile); appendResultBufToFile(databuf, resultFile); free(databuf); } @@ -6517,59 +6513,63 @@ static void *superSubscribe(void *sarg) { return NULL; } - //int64_t st = 0; - //int64_t et = 0; - do { - //if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < g_queryInfo.specifiedQueryInfo.queryInterval) { - // taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval- (et - st)); // ms - // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); - //} - - //st = taosGetTimestampMs(); - char topic[32] = {0}; - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - sprintf(topic, "taosdemo-subscribe-%d", i); + char topic[32] = {0}; + for (uint64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { + for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { + sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%d", i, j); memset(subSqlstr,0,sizeof(subSqlstr)); - replaceChildTblName(g_queryInfo.superQueryInfo.sql[i], subSqlstr, i); + replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], subSqlstr, i); char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.superQueryInfo.result[i][0] != 0) { + if (g_queryInfo.superQueryInfo.result[j][0] != 0) { sprintf(tmpFile, "%s-%d", - g_queryInfo.superQueryInfo.result[i], pThreadInfo->threadID); + g_queryInfo.superQueryInfo.result[j], pThreadInfo->threadID); } - tsub[i] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, tmpFile); - if (NULL == tsub[i]) { + + uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j; + debugPrint("%s() LN%d, subSeq=%"PRIu64" subSqlstr: %s\n", + __func__, __LINE__, subSeq, subSqlstr); + tsub[subSeq] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, tmpFile); + if (NULL == tsub[subSeq]) { taos_close(pThreadInfo->taos); return NULL; } } - //et = taosGetTimestampMs(); - //printf("========thread[%"PRIu64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); - } while(0); + } // start loop to consume result TAOS_RES* res = NULL; while(1) { - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { - continue; - } + for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { + for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { + if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { + continue; + } - res = taos_consume(tsub[i]); - if (res) { - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.superQueryInfo.result[i][0] != 
0) { - sprintf(tmpFile, "%s-%d", - g_queryInfo.superQueryInfo.result[i], + uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j; + taosMsleep(100); // ms + res = taos_consume(tsub[subSeq]); + if (res) { + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.superQueryInfo.result[j][0] != 0) { + sprintf(tmpFile, "%s-%d", + g_queryInfo.superQueryInfo.result[j], pThreadInfo->threadID); - appendResultToFile(res, tmpFile); + appendResultToFile(res, tmpFile); + } } } } } taos_free_result(res); - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - taos_unsubscribe(tsub[i], g_queryInfo.superQueryInfo.subscribeKeepProgress); + for (uint64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { + for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { + uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j; + taos_unsubscribe(tsub[subSeq], + g_queryInfo.superQueryInfo.subscribeKeepProgress); + } } taos_close(pThreadInfo->taos); @@ -6607,17 +6607,8 @@ static void *specifiedSubscribe(void *sarg) { return NULL; } - //int64_t st = 0; - //int64_t et = 0; - do { - //if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < g_queryInfo.specifiedQueryInfo.queryInterval) { - // taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval- (et - st)); // ms - // //printf("========sleep duration:%"PRIu64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); - //} - - //st = taosGetTimestampMs(); - char topic[32] = {0}; - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + char topic[32] = {0}; + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { sprintf(topic, "taosdemo-subscribe-%d", i); char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { @@ -6630,11 +6621,7 @@ static void *specifiedSubscribe(void *sarg) { taos_close(pThreadInfo->taos); return NULL; } - } - //et = taosGetTimestampMs(); - //printf("========thread[%"PRIu64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); - } while(0); - + } // start loop to consume result TAOS_RES* res = NULL; while(1) { @@ -6643,6 +6630,7 @@ static void *specifiedSubscribe(void *sarg) { continue; } + taosMsleep(1000); // ms res = taos_consume(tsub[i]); if (res) { char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; @@ -6699,31 +6687,35 @@ static int subscribeTestProcess() { pthread_t *pids = NULL; threadInfo *infos = NULL; - //==== create sub threads for query from super table - if ((g_queryInfo.specifiedQueryInfo.sqlCount <= 0) || - (g_queryInfo.specifiedQueryInfo.concurrent <= 0)) { - errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n", + //==== create sub threads for query for specified table + if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { + printf("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", __func__, __LINE__, - g_queryInfo.specifiedQueryInfo.sqlCount, - g_queryInfo.specifiedQueryInfo.concurrent); - exit(-1); - } + g_queryInfo.specifiedQueryInfo.sqlCount); + } else { + if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { + errorPrint("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", + __func__, __LINE__, + g_queryInfo.specifiedQueryInfo.sqlCount); + exit(-1); + } - pids = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t)); - infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo)); - if ((NULL == 
pids) || (NULL == infos)) { - errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); - exit(-1); - } + pids = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t)); + infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo)); + if ((NULL == pids) || (NULL == infos)) { + errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); + exit(-1); + } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { threadInfo *t_info = infos + i; t_info->threadID = i; t_info->taos = NULL; // TODO: workaround to use separate taos connection; pthread_create(pids + i, NULL, specifiedSubscribe, t_info); + } } - //==== create sub threads for query from sub table + //==== create sub threads for super table query pthread_t *pidsOfSub = NULL; threadInfo *infosOfSub = NULL; if ((g_queryInfo.superQueryInfo.sqlCount > 0) From e8190f17c8f96904a180c5ad14446d2c47874020 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 11 May 2021 21:38:18 +0800 Subject: [PATCH 133/140] [TD-4136]: taosdemo records per req more than 32767. (#6087) for develop branch. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index fc717aa5a9..0e468347ee 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -68,6 +68,8 @@ enum TEST_MODE { INVAID_TEST }; +#define MAX_RECORDS_PER_REQ 32766 + #define MAX_SQL_SIZE 65536 #define BUFFER_SIZE (65536*2) #define COND_BUF_LEN BUFFER_SIZE - 30 @@ -3434,10 +3436,12 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n", __func__, __LINE__); goto PARSE_OVER; + } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) { + numRecPerReq->valueint = MAX_RECORDS_PER_REQ; } g_args.num_of_RPR = numRecPerReq->valueint; } else if (!numRecPerReq) { - g_args.num_of_RPR = UINT64_MAX; + g_args.num_of_RPR = MAX_RECORDS_PER_REQ; } else { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); From 5131e7b12a664ddcdfc48a1d11125d71f92cf64a Mon Sep 17 00:00:00 2001 From: zyyang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Wed, 12 May 2021 09:41:03 +0800 Subject: [PATCH 134/140] Fix/td 3986 (#6088) * [TD-3986]: fix subscribe test case error * change jdbc version number --- cmake/install.inc | 2 +- src/connector/jdbc/CMakeLists.txt | 2 +- src/connector/jdbc/deploy-pom.xml | 2 +- src/connector/jdbc/pom.xml | 2 +- .../com/taosdata/jdbc/TSDBJNIConnector.java | 15 ++----- .../java/com/taosdata/jdbc/TSDBResultSet.java | 13 +++--- .../java/com/taosdata/jdbc/SubscribeTest.java | 44 +++++++++++++------ 7 files changed, 45 insertions(+), 35 deletions(-) diff --git a/cmake/install.inc b/cmake/install.inc index 9e325531d5..f8b3b7c3c6 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) 
IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.28-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.29.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index de4b8f6bfb..61e976cb18 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.28-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.29.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index a31796ffde..968a9bf470 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.28 + 2.0.29 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 3400a82e73..d94d28d9fa 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.28 + 2.0.29 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 5e3ffffa4f..7f0cf7de8d 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -32,7 +32,7 @@ public class TSDBJNIConnector { // Connection pointer used in C private long taos = TSDBConstants.JNI_NULL_POINTER; // result set status in current connection - private boolean isResultsetClosed = true; + private boolean isResultsetClosed; private int affectedRows = -1; static { @@ -135,6 +135,7 @@ public class TSDBJNIConnector { // Try retrieving result set for the executed SQL using the current connection pointer. 
pSql = this.getResultSetImp(this.taos, pSql); + // if pSql == 0L that means resultset is closed isResultsetClosed = (pSql == TSDBConstants.JNI_NULL_POINTER); return pSql; @@ -172,16 +173,7 @@ public class TSDBJNIConnector { * Free resultset operation from C to release resultset pointer by JNI */ public int freeResultSet(long pSql) { - int res = TSDBConstants.JNI_SUCCESS; -// if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { -// throw new RuntimeException("Invalid result set pointer"); -// } - -// if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { - res = this.freeResultSetImp(this.taos, pSql); -// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; -// } - + int res = this.freeResultSetImp(this.taos, pSql); isResultsetClosed = true; return res; } @@ -199,7 +191,6 @@ public class TSDBJNIConnector { // } // return resCode; // } - private native int freeResultSetImp(long connection, long result); /** diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java index 2576a25f0d..aba29d602b 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java @@ -109,6 +109,8 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { public void close() throws SQLException { if (isClosed) return; + if (this.statement == null) + return; if (this.jniConnector != null) { int code = this.jniConnector.freeResultSet(this.resultSetPointer); if (code == TSDBConstants.JNI_CONNECTION_NULL) { @@ -461,12 +463,13 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { } public boolean isClosed() throws SQLException { - if (isClosed) - return true; - if (jniConnector != null) { - isClosed = jniConnector.isResultsetClosed(); - } return isClosed; +// if (isClosed) +// return true; +// if (jniConnector != null) { +// isClosed = jniConnector.isResultsetClosed(); +// } +// return isClosed; } public String getNString(int columnIndex) throws SQLException { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java index 3a223ed981..24c73fdd5c 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java @@ -1,6 +1,7 @@ package com.taosdata.jdbc; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -19,6 +20,7 @@ public class SubscribeTest { String tName = "t0"; String host = "127.0.0.1"; String topic = "test"; + private long ts; @Test public void subscribe() { @@ -27,26 +29,40 @@ public class SubscribeTest { TSDBConnection conn = connection.unwrap(TSDBConnection.class); TSDBSubscribe subscribe = conn.subscribe(topic, rawSql, false); - int a = 0; - while (true) { - TimeUnit.MILLISECONDS.sleep(1000); + for (int j = 0; j < 10; j++) { + TimeUnit.SECONDS.sleep(1); TSDBResultSet resSet = subscribe.consume(); + + int rowCnt = 0; while (resSet.next()) { - for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) { - System.out.printf(i + ": " + resSet.getString(i) + "\t"); + if (rowCnt == 0) { + long cur_ts = resSet.getTimestamp(1).getTime(); + int k = resSet.getInt(2); + int v = resSet.getInt(3); + Assert.assertEquals(ts, cur_ts); + Assert.assertEquals(100, k); + Assert.assertEquals(1, v); } - 
System.out.println("\n======" + a + "=========="); - } - a++; - if (a >= 2) { - break; + if (rowCnt == 1) { + long cur_ts = resSet.getTimestamp(1).getTime(); + int k = resSet.getInt(2); + int v = resSet.getInt(3); + Assert.assertEquals(ts + 1, cur_ts); + Assert.assertEquals(101, k); + Assert.assertEquals(2, v); + + } + rowCnt++; } + if (j == 0) + Assert.assertEquals(2, rowCnt); resSet.close(); } - subscribe.close(true); - } catch (Exception e) { - e.printStackTrace(); + + + } catch (SQLException | InterruptedException throwables) { + throwables.printStackTrace(); } } @@ -62,7 +78,7 @@ public class SubscribeTest { statement.execute("drop database if exists " + dbName); statement.execute("create database if not exists " + dbName); statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)"); - long ts = System.currentTimeMillis(); + ts = System.currentTimeMillis(); statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 100, 1)"); statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + (ts + 1) + ", 101, 2)"); } From 39f8213080600f91f5309479ecfac707eebaa3ff Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Wed, 12 May 2021 09:49:26 +0800 Subject: [PATCH 135/140] [TD-182]: use single repo for python connector (#6036) * [TD-182]: use single repo for python connector Remove code for each platform and build up one single python code base for windows/osx/linux platforms and python2/python3 runtime. * [TD-182]: remove redundant code in python connector * [TD-4149] : fix python connection config error --- src/connector/python/.gitignore | 154 +++++ .../python/{linux/python2 => }/LICENSE | 0 src/connector/python/README.md | 17 + src/connector/python/examples/demo.py | 12 + src/connector/python/linux/python2 | 1 + src/connector/python/linux/python2/README.md | 1 - src/connector/python/linux/python2/setup.py | 20 - .../python/linux/python2/taos/cinterface.py | 648 ------------------ .../python/linux/python2/taos/cursor.py | 278 -------- src/connector/python/linux/python3 | 1 + src/connector/python/linux/python3/LICENSE | 12 - src/connector/python/linux/python3/README.md | 1 - src/connector/python/linux/python3/setup.py | 20 - .../python/linux/python3/taos/__init__.py | 24 - .../python/linux/python3/taos/connection.py | 95 --- .../python/linux/python3/taos/constants.py | 42 -- .../python/linux/python3/taos/dbapi.py | 44 -- .../python/linux/python3/taos/error.py | 66 -- .../python/linux/python3/taos/subscription.py | 57 -- src/connector/python/osx/python3 | 1 + src/connector/python/osx/python3/LICENSE | 12 - src/connector/python/osx/python3/README.md | 1 - src/connector/python/osx/python3/setup.py | 20 - .../python/osx/python3/taos/__init__.py | 24 - .../python/osx/python3/taos/cinterface.py | 648 ------------------ .../python/osx/python3/taos/connection.py | 95 --- .../python/osx/python3/taos/constants.py | 42 -- .../python/osx/python3/taos/cursor.py | 280 -------- .../python/osx/python3/taos/dbapi.py | 44 -- .../python/osx/python3/taos/error.py | 66 -- .../python/osx/python3/taos/subscription.py | 57 -- src/connector/python/setup.py | 35 + .../{linux/python2 => }/taos/__init__.py | 0 .../{linux/python3 => }/taos/cinterface.py | 253 +++---- .../{linux/python2 => }/taos/connection.py | 0 .../{linux/python2 => }/taos/constants.py | 0 .../python/{linux/python3 => }/taos/cursor.py | 6 + .../python/{linux/python2 => }/taos/dbapi.py | 0 .../python/{linux/python2 => }/taos/error.py | 0 .../{linux/python2 => 
}/taos/subscription.py | 0 src/connector/python/windows/python2 | 1 + src/connector/python/windows/python2/LICENSE | 12 - .../python/windows/python2/README.md | 1 - src/connector/python/windows/python2/setup.py | 20 - .../python/windows/python2/taos/__init__.py | 24 - .../python/windows/python2/taos/cinterface.py | 648 ------------------ .../python/windows/python2/taos/connection.py | 96 --- .../python/windows/python2/taos/constants.py | 42 -- .../python/windows/python2/taos/cursor.py | 220 ------ .../python/windows/python2/taos/dbapi.py | 44 -- .../python/windows/python2/taos/error.py | 66 -- .../windows/python2/taos/subscription.py | 57 -- src/connector/python/windows/python3 | 1 + src/connector/python/windows/python3/LICENSE | 12 - .../python/windows/python3/README.md | 1 - src/connector/python/windows/python3/setup.py | 20 - .../python/windows/python3/taos/__init__.py | 24 - .../python/windows/python3/taos/cinterface.py | 648 ------------------ .../python/windows/python3/taos/connection.py | 96 --- .../python/windows/python3/taos/constants.py | 42 -- .../python/windows/python3/taos/cursor.py | 220 ------ .../python/windows/python3/taos/dbapi.py | 44 -- .../python/windows/python3/taos/error.py | 66 -- .../windows/python3/taos/subscription.py | 57 -- 64 files changed, 321 insertions(+), 5218 deletions(-) create mode 100644 src/connector/python/.gitignore rename src/connector/python/{linux/python2 => }/LICENSE (100%) create mode 100644 src/connector/python/README.md create mode 100644 src/connector/python/examples/demo.py create mode 120000 src/connector/python/linux/python2 delete mode 100644 src/connector/python/linux/python2/README.md delete mode 100644 src/connector/python/linux/python2/setup.py delete mode 100644 src/connector/python/linux/python2/taos/cinterface.py delete mode 100644 src/connector/python/linux/python2/taos/cursor.py create mode 120000 src/connector/python/linux/python3 delete mode 100644 src/connector/python/linux/python3/LICENSE delete mode 100644 src/connector/python/linux/python3/README.md delete mode 100644 src/connector/python/linux/python3/setup.py delete mode 100644 src/connector/python/linux/python3/taos/__init__.py delete mode 100644 src/connector/python/linux/python3/taos/connection.py delete mode 100644 src/connector/python/linux/python3/taos/constants.py delete mode 100644 src/connector/python/linux/python3/taos/dbapi.py delete mode 100644 src/connector/python/linux/python3/taos/error.py delete mode 100644 src/connector/python/linux/python3/taos/subscription.py create mode 120000 src/connector/python/osx/python3 delete mode 100644 src/connector/python/osx/python3/LICENSE delete mode 100644 src/connector/python/osx/python3/README.md delete mode 100644 src/connector/python/osx/python3/setup.py delete mode 100644 src/connector/python/osx/python3/taos/__init__.py delete mode 100644 src/connector/python/osx/python3/taos/cinterface.py delete mode 100644 src/connector/python/osx/python3/taos/connection.py delete mode 100644 src/connector/python/osx/python3/taos/constants.py delete mode 100644 src/connector/python/osx/python3/taos/cursor.py delete mode 100644 src/connector/python/osx/python3/taos/dbapi.py delete mode 100644 src/connector/python/osx/python3/taos/error.py delete mode 100644 src/connector/python/osx/python3/taos/subscription.py create mode 100644 src/connector/python/setup.py rename src/connector/python/{linux/python2 => }/taos/__init__.py (100%) rename src/connector/python/{linux/python3 => }/taos/cinterface.py (70%) rename 
src/connector/python/{linux/python2 => }/taos/connection.py (100%) rename src/connector/python/{linux/python2 => }/taos/constants.py (100%) rename src/connector/python/{linux/python3 => }/taos/cursor.py (98%) rename src/connector/python/{linux/python2 => }/taos/dbapi.py (100%) rename src/connector/python/{linux/python2 => }/taos/error.py (100%) rename src/connector/python/{linux/python2 => }/taos/subscription.py (100%) create mode 120000 src/connector/python/windows/python2 delete mode 100644 src/connector/python/windows/python2/LICENSE delete mode 100644 src/connector/python/windows/python2/README.md delete mode 100644 src/connector/python/windows/python2/setup.py delete mode 100644 src/connector/python/windows/python2/taos/__init__.py delete mode 100644 src/connector/python/windows/python2/taos/cinterface.py delete mode 100644 src/connector/python/windows/python2/taos/connection.py delete mode 100644 src/connector/python/windows/python2/taos/constants.py delete mode 100644 src/connector/python/windows/python2/taos/cursor.py delete mode 100644 src/connector/python/windows/python2/taos/dbapi.py delete mode 100644 src/connector/python/windows/python2/taos/error.py delete mode 100644 src/connector/python/windows/python2/taos/subscription.py create mode 120000 src/connector/python/windows/python3 delete mode 100644 src/connector/python/windows/python3/LICENSE delete mode 100644 src/connector/python/windows/python3/README.md delete mode 100644 src/connector/python/windows/python3/setup.py delete mode 100644 src/connector/python/windows/python3/taos/__init__.py delete mode 100644 src/connector/python/windows/python3/taos/cinterface.py delete mode 100644 src/connector/python/windows/python3/taos/connection.py delete mode 100644 src/connector/python/windows/python3/taos/constants.py delete mode 100644 src/connector/python/windows/python3/taos/cursor.py delete mode 100644 src/connector/python/windows/python3/taos/dbapi.py delete mode 100644 src/connector/python/windows/python3/taos/error.py delete mode 100644 src/connector/python/windows/python3/taos/subscription.py diff --git a/src/connector/python/.gitignore b/src/connector/python/.gitignore new file mode 100644 index 0000000000..228a0b4530 --- /dev/null +++ b/src/connector/python/.gitignore @@ -0,0 +1,154 @@ + +# Created by https://www.toptal.com/developers/gitignore/api/python +# Edit at https://www.toptal.com/developers/gitignore?templates=python + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +pytestdebug.log + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ +doc/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +#poetry.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +# .env +.env/ +.venv/ +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +pythonenv* + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# operating system-related files +# file properties cache/storage on macOS +*.DS_Store +# thumbnail cache on Windows +Thumbs.db + +# profiling data +.prof + + +# End of https://www.toptal.com/developers/gitignore/api/python diff --git a/src/connector/python/linux/python2/LICENSE b/src/connector/python/LICENSE similarity index 100% rename from src/connector/python/linux/python2/LICENSE rename to src/connector/python/LICENSE diff --git a/src/connector/python/README.md b/src/connector/python/README.md new file mode 100644 index 0000000000..9151e9b8f0 --- /dev/null +++ b/src/connector/python/README.md @@ -0,0 +1,17 @@ +# TDengine Connector for Python + +[TDengine] connector for Python enables python programs to access TDengine, using an API which is compliant with the Python DB API 2.0 (PEP-249). It uses TDengine C client library for client server communications. + +## Install + +```sh +pip install git+https://github.com/taosdata/TDengine-connector-python +``` + +## Source Code + +[TDengine] connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine-connector-python). + +## License - AGPL + +Keep same with [TDengine](https://github.com/taosdata/TDengine). 
diff --git a/src/connector/python/examples/demo.py b/src/connector/python/examples/demo.py new file mode 100644 index 0000000000..6c7c03f3e2 --- /dev/null +++ b/src/connector/python/examples/demo.py @@ -0,0 +1,12 @@ +import taos + +conn = taos.connect(host='127.0.0.1', + user='root', + passworkd='taodata', + database='log') +cursor = conn.cursor() + +sql = "select * from log.log limit 10" +cursor.execute(sql) +for row in cursor: + print(row) diff --git a/src/connector/python/linux/python2 b/src/connector/python/linux/python2 new file mode 120000 index 0000000000..b870225aa0 --- /dev/null +++ b/src/connector/python/linux/python2 @@ -0,0 +1 @@ +../ \ No newline at end of file diff --git a/src/connector/python/linux/python2/README.md b/src/connector/python/linux/python2/README.md deleted file mode 100644 index 70db6bba13..0000000000 --- a/src/connector/python/linux/python2/README.md +++ /dev/null @@ -1 +0,0 @@ -# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/linux/python2/setup.py b/src/connector/python/linux/python2/setup.py deleted file mode 100644 index 3f065e0348..0000000000 --- a/src/connector/python/linux/python2/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -import setuptools - -with open("README.md", "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="taos", - version="2.0.9", - author="Taosdata Inc.", - author_email="support@taosdata.com", - description="TDengine python client package", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/pypa/sampleproject", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 2", - "Operating System :: Linux", - ], -) diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py deleted file mode 100644 index 3d0ecd2901..0000000000 --- a/src/connector/python/linux/python2/taos/cinterface.py +++ /dev/null @@ -1,648 +0,0 @@ -import ctypes -from .constants import FieldType -from .error import * -import math -import datetime - - -def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli / 1000.0) - - -def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro / 1000000.0) - - -def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - _timestamp_converter = _convert_millisecond_to_datetime - if micro: - _timestamp_converter = _convert_microsecond_to_datetime - - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - - -def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_byte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_bool))[ - :abs(num_of_rows)]] - - -def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C 
tinyint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - - -def _crow_tinyint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - - -def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - - -def _crow_smallint_unsigned_to_python( - data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - - -def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - - -def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - - -def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - - -def _crow_bigint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - else: - 
return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - - -def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C float row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - - -def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C double row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - - -def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - if num_of_rows > 0: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - else: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - - -def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - for i in range(abs(num_of_rows)): - try: - if num_of_rows >= 0: - tmpstr = ctypes.c_char_p(data) - res.append(tmpstr.value.decode()) - else: - res.append((ctypes.cast(data + nbytes * i, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - - return res - - -def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows > 0: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - return res - - -def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows >= 0: - for i in range(abs(num_of_rows)): - try: - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - res.append((ctypes.cast(data + nbytes * i + 2, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - return res - - -_CONVERT_FUNC = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: 
_crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -_CONVERT_FUNC_BLOCK = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python_block, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -# Corresponding TAOS_FIELD structure in C - - -class TaosField(ctypes.Structure): - _fields_ = [('name', ctypes.c_char * 65), - ('type', ctypes.c_char), - ('bytes', ctypes.c_short)] - -# C interface class - - -class CTaosInterface(object): - - libtaos = ctypes.CDLL('libtaos.so') - - libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) - libtaos.taos_init.restype = None - libtaos.taos_connect.restype = ctypes.c_void_p - #libtaos.taos_use_result.restype = ctypes.c_void_p - libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) - libtaos.taos_errstr.restype = ctypes.c_char_p - libtaos.taos_subscribe.restype = ctypes.c_void_p - libtaos.taos_consume.restype = ctypes.c_void_p - libtaos.taos_fetch_lengths.restype = ctypes.c_void_p - libtaos.taos_free_result.restype = None - libtaos.taos_errno.restype = ctypes.c_int - libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) - - def __init__(self, config=None): - ''' - Function to initialize the class - @host : str, hostname to connect - @user : str, username to connect to server - @password : str, password to connect to server - @db : str, default db to use when log in - @config : str, config directory - - @rtype : None - ''' - if config is None: - self._config = ctypes.c_char_p(None) - else: - try: - self._config = ctypes.c_char_p(config.encode('utf-8')) - except AttributeError: - raise AttributeError("config is expected as a str") - - if config is not None: - CTaosInterface.libtaos.taos_options(3, self._config) - - CTaosInterface.libtaos.taos_init() - - @property - def config(self): - """ Get current config - """ - return self._config - - def connect( - self, - host=None, - user="root", - password="taosdata", - db=None, - port=0): - ''' - Function to connect to server - - @rtype: c_void_p, TDengine handle - ''' - # host - try: - _host = ctypes.c_char_p(host.encode( - "utf-8")) if host is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("host is expected as a str") - - # user - try: - _user = ctypes.c_char_p(user.encode("utf-8")) - except AttributeError: - raise AttributeError("user is expected as a str") - - # password - try: - _password = 
ctypes.c_char_p(password.encode("utf-8")) - except AttributeError: - raise AttributeError("password is expected as a str") - - # db - try: - _db = ctypes.c_char_p( - db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("db is expected as a str") - - # port - try: - _port = ctypes.c_int(port) - except TypeError: - raise TypeError("port is expected as an int") - - connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( - _host, _user, _password, _db, _port)) - - if connection.value is None: - print('connect to TDengine failed') - raise ConnectionError("connect to TDengine failed") - # sys.exit(1) - # else: - # print('connect to TDengine success') - - return connection - - @staticmethod - def close(connection): - '''Close the TDengine handle - ''' - CTaosInterface.libtaos.taos_close(connection) - #print('connection is closed') - - @staticmethod - def query(connection, sql): - '''Run SQL - - @sql: str, sql string to run - - @rtype: 0 on success and -1 on failure - ''' - try: - return CTaosInterface.libtaos.taos_query( - connection, ctypes.c_char_p(sql.encode('utf-8'))) - except AttributeError: - raise AttributeError("sql is expected as a string") - # finally: - # CTaosInterface.libtaos.close(connection) - - @staticmethod - def affectedRows(result): - """The affected rows after runing query - """ - return CTaosInterface.libtaos.taos_affected_rows(result) - - @staticmethod - def subscribe(connection, restart, topic, sql, interval): - """Create a subscription - @restart boolean, - @sql string, sql statement for data query, must be a 'select' statement. - @topic string, name of this subscription - """ - return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( - connection, - 1 if restart else 0, - ctypes.c_char_p(topic.encode('utf-8')), - ctypes.c_char_p(sql.encode('utf-8')), - None, - None, - interval)) - - @staticmethod - def consume(sub): - """Consume data of a subscription - """ - result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.libtaos.taos_num_fields(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - return result, fields - - @staticmethod - def unsubscribe(sub, keepProgress): - """Cancel a subscription - """ - CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) - - @staticmethod - def useResult(result): - '''Use result after calling self.query - ''' - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.fieldsCount(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - - return fields - - @staticmethod - def fetchBlock(result, fields): - pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - if num_of_rows == 0: - return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: - raise DatabaseError("Invalid data type returned from database") - 
blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - - return blocks, abs(num_of_rows) - - @staticmethod - def fetchRow(result, fields): - pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock: - num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError( - "Invalid data type returned from database") - if data is None: - blocks[i] = [None] - else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - else: - return None, 0 - return blocks, abs(num_of_rows) - - @staticmethod - def freeResult(result): - CTaosInterface.libtaos.taos_free_result(result) - result.value = None - - @staticmethod - def fieldsCount(result): - return CTaosInterface.libtaos.taos_field_count(result) - - @staticmethod - def fetchFields(result): - return CTaosInterface.libtaos.taos_fetch_fields(result) - - # @staticmethod - # def fetchRow(result, fields): - # l = [] - # row = CTaosInterface.libtaos.taos_fetch_row(result) - # if not row: - # return None - - # for i in range(len(fields)): - # l.append(CTaosInterface.getDataValue( - # row[i], fields[i]['type'], fields[i]['bytes'])) - - # return tuple(l) - - # @staticmethod - # def getDataValue(data, dtype, byte): - # ''' - # ''' - # if not data: - # return None - - # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): - # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): - # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') - - @staticmethod - def errno(result): - """Return the error number. 
- """ - return CTaosInterface.libtaos.taos_errno(result) - - @staticmethod - def errStr(result): - """Return the error styring - """ - return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8') - - -if __name__ == '__main__': - cinter = CTaosInterface() - conn = cinter.connect() - result = cinter.query(conn, 'show databases') - - print('Query Affected rows: {}'.format(cinter.affectedRows(result))) - - fields = CTaosInterface.useResult(result) - - data, num_of_rows = CTaosInterface.fetchBlock(result, fields) - - print(data) - - cinter.freeResult(result) - cinter.close(conn) diff --git a/src/connector/python/linux/python2/taos/cursor.py b/src/connector/python/linux/python2/taos/cursor.py deleted file mode 100644 index 4c0456b503..0000000000 --- a/src/connector/python/linux/python2/taos/cursor.py +++ /dev/null @@ -1,278 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * -from .constants import FieldType - - -class TDengineCursor(object): - """Database cursor which is used to manage the context of a fetch operation. - - Attributes: - .description: Read-only attribute consists of 7-item sequences: - - > name (mondatory) - > type_code (mondatory) - > display_size - > internal_size - > precision - > scale - > null_ok - - This attribute will be None for operations that do not return rows or - if the cursor has not had an operation invoked via the .execute*() method yet. - - .rowcount:This read-only attribute specifies the number of rows that the last - .execute*() produced (for DQL statements like SELECT) or affected - """ - - def __init__(self, connection=None): - self._description = [] - self._rowcount = -1 - self._connection = None - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - self._logfile = "" - - if connection is not None: - self._connection = connection - - def __iter__(self): - return self - - def next(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetch iterator") - - if self._block_rows <= self._block_iter: - block, self._block_rows = CTaosInterface.fetchRow( - self._result, self._fields) - if self._block_rows == 0: - raise StopIteration - self._block = list(map(tuple, zip(*block))) - self._block_iter = 0 - - data = self._block[self._block_iter] - self._block_iter += 1 - - return data - - @property - def description(self): - """Return the description of the object. - """ - return self._description - - @property - def rowcount(self): - """Return the rowcount of the object - """ - return self._rowcount - - @property - def affected_rows(self): - """Return the affected_rows of the object - """ - return self._affected_rows - - def callproc(self, procname, *args): - """Call a stored database procedure with the given name. - - Void functionality since no stored procedures. - """ - pass - - def log(self, logfile): - self._logfile = logfile - - def close(self): - """Close the cursor. - """ - if self._connection is None: - return False - - self._reset_result() - self._connection = None - - return True - - def execute(self, operation, params=None): - """Prepare and execute a database operation (query or command). 
- """ - if not operation: - return None - - if not self._connection: - # TODO : change the exception raised here - raise ProgrammingError("Cursor is not connected") - - self._reset_result() - - stmt = operation - if params is not None: - pass - - # global querySeqNum - # querySeqNum += 1 - # localSeqNum = querySeqNum # avoid raice condition - # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt))) - self._result = CTaosInterface.query(self._connection._conn, stmt) - # print(" << Query ({}) Exec Done".format(localSeqNum)) - if (self._logfile): - with open(self._logfile, "a") as logfile: - logfile.write("%s;\n" % operation) - - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno == 0: - if CTaosInterface.fieldsCount(self._result) == 0: - self._affected_rows += CTaosInterface.affectedRows( - self._result) - return CTaosInterface.affectedRows(self._result) - else: - self._fields = CTaosInterface.useResult( - self._result) - return self._handle_result() - else: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - - def executemany(self, operation, seq_of_parameters): - """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. - """ - pass - - def fetchone(self): - """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. - """ - pass - - def fetchmany(self): - pass - - def istype(self, col, dataType): - if (dataType.upper() == "BOOL"): - if (self._description[col][1] == FieldType.C_BOOL): - return True - if (dataType.upper() == "TINYINT"): - if (self._description[col][1] == FieldType.C_TINYINT): - return True - if (dataType.upper() == "TINYINT UNSIGNED"): - if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED): - return True - if (dataType.upper() == "SMALLINT"): - if (self._description[col][1] == FieldType.C_SMALLINT): - return True - if (dataType.upper() == "SMALLINT UNSIGNED"): - if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED): - return True - if (dataType.upper() == "INT"): - if (self._description[col][1] == FieldType.C_INT): - return True - if (dataType.upper() == "INT UNSIGNED"): - if (self._description[col][1] == FieldType.C_INT_UNSIGNED): - return True - if (dataType.upper() == "BIGINT"): - if (self._description[col][1] == FieldType.C_BIGINT): - return True - if (dataType.upper() == "BIGINT UNSIGNED"): - if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED): - return True - if (dataType.upper() == "FLOAT"): - if (self._description[col][1] == FieldType.C_FLOAT): - return True - if (dataType.upper() == "DOUBLE"): - if (self._description[col][1] == FieldType.C_DOUBLE): - return True - if (dataType.upper() == "BINARY"): - if (self._description[col][1] == FieldType.C_BINARY): - return True - if (dataType.upper() == "TIMESTAMP"): - if (self._description[col][1] == FieldType.C_TIMESTAMP): - return True - if (dataType.upper() == "NCHAR"): - if (self._description[col][1] == FieldType.C_NCHAR): - return True - - return False - - def fetchall_row(self): - """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. 
- """ - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchRow( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def fetchall(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchBlock( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def nextset(self): - """ - """ - pass - - def setinputsize(self, sizes): - pass - - def setutputsize(self, size, column=None): - pass - - def _reset_result(self): - """Reset the result to unused version. - """ - self._description = [] - self._rowcount = -1 - if self._result is not None: - CTaosInterface.freeResult(self._result) - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - - def _handle_result(self): - """Handle the return result from query. - """ - self._description = [] - for ele in self._fields: - self._description.append( - (ele['name'], ele['type'], None, None, None, None, False)) - - return self._result diff --git a/src/connector/python/linux/python3 b/src/connector/python/linux/python3 new file mode 120000 index 0000000000..b870225aa0 --- /dev/null +++ b/src/connector/python/linux/python3 @@ -0,0 +1 @@ +../ \ No newline at end of file diff --git a/src/connector/python/linux/python3/LICENSE b/src/connector/python/linux/python3/LICENSE deleted file mode 100644 index 79a9d73086..0000000000 --- a/src/connector/python/linux/python3/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ - Copyright (c) 2019 TAOS Data, Inc. - -This program is free software: you can use, redistribute, and/or modify -it under the terms of the GNU Affero General Public License, version 3 -or later ("AGPL"), as published by the Free Software Foundation. - -This program is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
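The per-platform trees removed above are being replaced by symlinks to one shared implementation, so the DB-API surface the deleted modules provided (connect, cursor, execute, fetchall) should remain reachable from the top-level package. Below is a minimal usage sketch, assuming the unified `taos` package keeps that surface; the host, credentials, database, and `log.log` table are illustrative only.

```python
import taos

# Illustrative connection parameters; "taosdata" is the connector's default password.
conn = taos.connect(host='127.0.0.1', user='root', password='taosdata',
                    database='log')
cursor = conn.cursor()

# execute() returns the affected row count for DML, or a result handle for queries;
# fetchall() then drains the result set.
cursor.execute("select * from log.log limit 10")
for row in cursor.fetchall():
    print(row)

cursor.close()
conn.close()
```

fetchall() pulls rows a block at a time through taos_fetch_block, while the cursor's iterator and fetchall_row() go row by row.
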
diff --git a/src/connector/python/linux/python3/README.md b/src/connector/python/linux/python3/README.md deleted file mode 100644 index 70db6bba13..0000000000 --- a/src/connector/python/linux/python3/README.md +++ /dev/null @@ -1 +0,0 @@ -# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/linux/python3/setup.py b/src/connector/python/linux/python3/setup.py deleted file mode 100644 index 0bd7d51b6a..0000000000 --- a/src/connector/python/linux/python3/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -import setuptools - -with open("README.md", "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="taos", - version="2.0.9", - author="Taosdata Inc.", - author_email="support@taosdata.com", - description="TDengine python client package", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/pypa/sampleproject", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 3", - "Operating System :: Linux", - ], -) diff --git a/src/connector/python/linux/python3/taos/__init__.py b/src/connector/python/linux/python3/taos/__init__.py deleted file mode 100644 index 9732635738..0000000000 --- a/src/connector/python/linux/python3/taos/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ - -from .connection import TDengineConnection -from .cursor import TDengineCursor - -# Globals -threadsafety = 0 -paramstyle = 'pyformat' - -__all__ = ['connection', 'cursor'] - - -def connect(*args, **kwargs): - """ Function to return a TDengine connector object - - Current supporting keyword parameters: - @dsn: Data source name as string - @user: Username as string(optional) - @password: Password as string(optional) - @host: Hostname(optional) - @database: Database name(optional) - - @rtype: TDengineConnector - """ - return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/linux/python3/taos/connection.py b/src/connector/python/linux/python3/taos/connection.py deleted file mode 100644 index f6c395342c..0000000000 --- a/src/connector/python/linux/python3/taos/connection.py +++ /dev/null @@ -1,95 +0,0 @@ -from .cursor import TDengineCursor -from .subscription import TDengineSubscription -from .cinterface import CTaosInterface - - -class TDengineConnection(object): - """ TDengine connection object - """ - - def __init__(self, *args, **kwargs): - self._conn = None - self._host = None - self._user = "root" - self._password = "taosdata" - self._database = None - self._port = 0 - self._config = None - self._chandle = None - - self.config(**kwargs) - - def config(self, **kwargs): - # host - if 'host' in kwargs: - self._host = kwargs['host'] - - # user - if 'user' in kwargs: - self._user = kwargs['user'] - - # password - if 'password' in kwargs: - self._password = kwargs['password'] - - # database - if 'database' in kwargs: - self._database = kwargs['database'] - - # port - if 'port' in kwargs: - self._port = kwargs['port'] - - # config - if 'config' in kwargs: - self._config = kwargs['config'] - - self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect( - self._host, - self._user, - self._password, - self._database, - self._port) - - def close(self): - """Close current connection. - """ - return CTaosInterface.close(self._conn) - - def subscribe(self, restart, topic, sql, interval): - """Create a subscription. 
- """ - if self._conn is None: - return None - sub = CTaosInterface.subscribe( - self._conn, restart, topic, sql, interval) - return TDengineSubscription(sub) - - def cursor(self): - """Return a new Cursor object using the connection. - """ - return TDengineCursor(self) - - def commit(self): - """Commit any pending transaction to the database. - - Since TDengine do not support transactions, the implement is void functionality. - """ - pass - - def rollback(self): - """Void functionality - """ - pass - - def clear_result_set(self): - """Clear unused result set on this connection. - """ - pass - - -if __name__ == "__main__": - conn = TDengineConnection(host='192.168.1.107') - conn.close() - print("Hello world") diff --git a/src/connector/python/linux/python3/taos/constants.py b/src/connector/python/linux/python3/taos/constants.py deleted file mode 100644 index 93466f5184..0000000000 --- a/src/connector/python/linux/python3/taos/constants.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Constants in TDengine python -""" - -from .dbapi import * - - -class FieldType(object): - """TDengine Field Types - """ - # type_code - C_NULL = 0 - C_BOOL = 1 - C_TINYINT = 2 - C_SMALLINT = 3 - C_INT = 4 - C_BIGINT = 5 - C_FLOAT = 6 - C_DOUBLE = 7 - C_BINARY = 8 - C_TIMESTAMP = 9 - C_NCHAR = 10 - C_TINYINT_UNSIGNED = 11 - C_SMALLINT_UNSIGNED = 12 - C_INT_UNSIGNED = 13 - C_BIGINT_UNSIGNED = 14 - # NULL value definition - # NOTE: These values should change according to C definition in tsdb.h - C_BOOL_NULL = 0x02 - C_TINYINT_NULL = -128 - C_TINYINT_UNSIGNED_NULL = 255 - C_SMALLINT_NULL = -32768 - C_SMALLINT_UNSIGNED_NULL = 65535 - C_INT_NULL = -2147483648 - C_INT_UNSIGNED_NULL = 4294967295 - C_BIGINT_NULL = -9223372036854775808 - C_BIGINT_UNSIGNED_NULL = 18446744073709551615 - C_FLOAT_NULL = float('nan') - C_DOUBLE_NULL = float('nan') - C_BINARY_NULL = bytearray([int('0xff', 16)]) - # Timestamp precision definition - C_TIMESTAMP_MILLI = 0 - C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/linux/python3/taos/dbapi.py b/src/connector/python/linux/python3/taos/dbapi.py deleted file mode 100644 index 594681ada9..0000000000 --- a/src/connector/python/linux/python3/taos/dbapi.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Type Objects and Constructors. 
-""" - -import time -import datetime - - -class DBAPITypeObject(object): - def __init__(self, *values): - self.values = values - - def __com__(self, other): - if other in self.values: - return 0 - if other < self.values: - return 1 - else: - return -1 - - -Date = datetime.date -Time = datetime.time -Timestamp = datetime.datetime - - -def DataFromTicks(ticks): - return Date(*time.localtime(ticks)[:3]) - - -def TimeFromTicks(ticks): - return Time(*time.localtime(ticks)[3:6]) - - -def TimestampFromTicks(ticks): - return Timestamp(*time.localtime(ticks)[:6]) - - -Binary = bytes - -# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) -# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) -# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) -# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() diff --git a/src/connector/python/linux/python3/taos/error.py b/src/connector/python/linux/python3/taos/error.py deleted file mode 100644 index c584badce8..0000000000 --- a/src/connector/python/linux/python3/taos/error.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Python exceptions -""" - - -class Error(Exception): - def __init__(self, msg=None, errno=None): - self.msg = msg - self._full_msg = self.msg - self.errno = errno - - def __str__(self): - return self._full_msg - - -class Warning(Exception): - """Exception raised for important warnings like data truncations while inserting. - """ - pass - - -class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. - """ - pass - - -class DatabaseError(Error): - """Exception raised for errors that are related to the database. - """ - pass - - -class DataError(DatabaseError): - """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. - """ - pass - - -class OperationalError(DatabaseError): - """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer - """ - pass - - -class IntegrityError(DatabaseError): - """Exception raised when the relational integrity of the database is affected. - """ - pass - - -class InternalError(DatabaseError): - """Exception raised when the database encounters an internal error. - """ - pass - - -class ProgrammingError(DatabaseError): - """Exception raised for programming errors. - """ - pass - - -class NotSupportedError(DatabaseError): - """Exception raised in case a method or database API was used which is not supported by the database,. 
- """ - pass diff --git a/src/connector/python/linux/python3/taos/subscription.py b/src/connector/python/linux/python3/taos/subscription.py deleted file mode 100644 index 270d9de092..0000000000 --- a/src/connector/python/linux/python3/taos/subscription.py +++ /dev/null @@ -1,57 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * - - -class TDengineSubscription(object): - """TDengine subscription object - """ - - def __init__(self, sub): - self._sub = sub - - def consume(self): - """Consume rows of a subscription - """ - if self._sub is None: - raise OperationalError("Invalid use of consume") - - result, fields = CTaosInterface.consume(self._sub) - buffer = [[] for i in range(len(fields))] - while True: - block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: - break - for i in range(len(fields)): - buffer[i].extend(block[i]) - - self.fields = fields - return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress=True): - """Close the Subscription. - """ - if self._sub is None: - return False - - CTaosInterface.unsubscribe(self._sub, keepProgress) - return True - - -if __name__ == '__main__': - from .connection import TDengineConnection - conn = TDengineConnection( - host="127.0.0.1", - user="root", - password="taosdata", - database="test") - - # Generate a cursor object to run SQL commands - sub = conn.subscribe(True, "test", "select * from meters;", 1000) - - for i in range(0, 10): - data = sub.consume() - for d in data: - print(d) - - sub.close() - conn.close() diff --git a/src/connector/python/osx/python3 b/src/connector/python/osx/python3 new file mode 120000 index 0000000000..b870225aa0 --- /dev/null +++ b/src/connector/python/osx/python3 @@ -0,0 +1 @@ +../ \ No newline at end of file diff --git a/src/connector/python/osx/python3/LICENSE b/src/connector/python/osx/python3/LICENSE deleted file mode 100644 index 79a9d73086..0000000000 --- a/src/connector/python/osx/python3/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ - Copyright (c) 2019 TAOS Data, Inc. - -This program is free software: you can use, redistribute, and/or modify -it under the terms of the GNU Affero General Public License, version 3 -or later ("AGPL"), as published by the Free Software Foundation. - -This program is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
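The error classes deleted here are what the cursor raises when the underlying C client reports a failure: execute() wraps the taos_errstr/taos_errno pair in a ProgrammingError. A minimal handling sketch follows, assuming the unified package keeps the same `taos.error` module layout; the malformed statement and table name are illustrative only.

```python
import taos
from taos.error import ProgrammingError, OperationalError

conn = taos.connect(host='127.0.0.1', user='root', password='taosdata')
cursor = conn.cursor()

try:
    # A malformed statement makes execute() raise ProgrammingError,
    # carrying the server's error string (msg) and error code (errno).
    cursor.execute("selcet * from no_such_table")
except ProgrammingError as err:
    print("query failed: errno=%s msg=%s" % (err.errno, err.msg))
except OperationalError as err:
    # Raised, for example, when the fetch iterator is used without a prior query.
    print("operational error: %s" % err)
finally:
    cursor.close()
    conn.close()
```

Both exception types derive from DatabaseError, so a broader `except taos.error.DatabaseError` works as well.
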
diff --git a/src/connector/python/osx/python3/README.md b/src/connector/python/osx/python3/README.md deleted file mode 100644 index 70db6bba13..0000000000 --- a/src/connector/python/osx/python3/README.md +++ /dev/null @@ -1 +0,0 @@ -# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/osx/python3/setup.py b/src/connector/python/osx/python3/setup.py deleted file mode 100644 index 4c865676c9..0000000000 --- a/src/connector/python/osx/python3/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -import setuptools - -with open("README.md", "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="taos", - version="2.0.9", - author="Taosdata Inc.", - author_email="support@taosdata.com", - description="TDengine python client package", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/pypa/sampleproject", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 3", - "Operating System :: MacOS X", - ], -) diff --git a/src/connector/python/osx/python3/taos/__init__.py b/src/connector/python/osx/python3/taos/__init__.py deleted file mode 100644 index 9732635738..0000000000 --- a/src/connector/python/osx/python3/taos/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ - -from .connection import TDengineConnection -from .cursor import TDengineCursor - -# Globals -threadsafety = 0 -paramstyle = 'pyformat' - -__all__ = ['connection', 'cursor'] - - -def connect(*args, **kwargs): - """ Function to return a TDengine connector object - - Current supporting keyword parameters: - @dsn: Data source name as string - @user: Username as string(optional) - @password: Password as string(optional) - @host: Hostname(optional) - @database: Database name(optional) - - @rtype: TDengineConnector - """ - return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/osx/python3/taos/cinterface.py b/src/connector/python/osx/python3/taos/cinterface.py deleted file mode 100644 index 720fbef6f5..0000000000 --- a/src/connector/python/osx/python3/taos/cinterface.py +++ /dev/null @@ -1,648 +0,0 @@ -import ctypes -from .constants import FieldType -from .error import * -import math -import datetime - - -def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli / 1000.0) - - -def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro / 1000000.0) - - -def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - _timestamp_converter = _convert_millisecond_to_datetime - if micro: - _timestamp_converter = _convert_microsecond_to_datetime - - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - - -def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_byte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_bool))[ - 
:abs(num_of_rows)]] - - -def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - - -def _crow_tinyint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - - -def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - - -def _crow_smallint_unsigned_to_python( - data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - - -def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - - -def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - - -def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - - -def _crow_bigint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == 
FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - - -def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C float row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - - -def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C double row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - - -def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - if num_of_rows > 0: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - else: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - - -def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - for i in range(abs(num_of_rows)): - try: - if num_of_rows >= 0: - tmpstr = ctypes.c_char_p(data) - res.append(tmpstr.value.decode()) - else: - res.append((ctypes.cast(data + nbytes * i, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - - return res - - -def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows > 0: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - return res - - -def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows >= 0: - for i in range(abs(num_of_rows)): - try: - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - res.append((ctypes.cast(data + nbytes * i + 2, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - return res - - 
-_CONVERT_FUNC = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -_CONVERT_FUNC_BLOCK = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python_block, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -# Corresponding TAOS_FIELD structure in C - - -class TaosField(ctypes.Structure): - _fields_ = [('name', ctypes.c_char * 65), - ('type', ctypes.c_char), - ('bytes', ctypes.c_short)] - -# C interface class - - -class CTaosInterface(object): - - libtaos = ctypes.CDLL('libtaos.dylib') - - libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) - libtaos.taos_init.restype = None - libtaos.taos_connect.restype = ctypes.c_void_p - #libtaos.taos_use_result.restype = ctypes.c_void_p - libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) - libtaos.taos_errstr.restype = ctypes.c_char_p - libtaos.taos_subscribe.restype = ctypes.c_void_p - libtaos.taos_consume.restype = ctypes.c_void_p - libtaos.taos_fetch_lengths.restype = ctypes.c_void_p - libtaos.taos_free_result.restype = None - libtaos.taos_errno.restype = ctypes.c_int - libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) - - def __init__(self, config=None): - ''' - Function to initialize the class - @host : str, hostname to connect - @user : str, username to connect to server - @password : str, password to connect to server - @db : str, default db to use when log in - @config : str, config directory - - @rtype : None - ''' - if config is None: - self._config = ctypes.c_char_p(None) - else: - try: - self._config = ctypes.c_char_p(config.encode('utf-8')) - except AttributeError: - raise AttributeError("config is expected as a str") - - if config is not None: - CTaosInterface.libtaos.taos_options(3, self._config) - - CTaosInterface.libtaos.taos_init() - - @property - def config(self): - """ Get current config - """ - return self._config - - def connect( - self, - host=None, - user="root", - password="taosdata", - db=None, - port=0): - ''' - Function to connect to server - - @rtype: c_void_p, TDengine handle - ''' - # host - try: - _host = ctypes.c_char_p(host.encode( - "utf-8")) if host is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("host is expected as a str") - - # user - try: - _user = 
ctypes.c_char_p(user.encode("utf-8")) - except AttributeError: - raise AttributeError("user is expected as a str") - - # password - try: - _password = ctypes.c_char_p(password.encode("utf-8")) - except AttributeError: - raise AttributeError("password is expected as a str") - - # db - try: - _db = ctypes.c_char_p( - db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("db is expected as a str") - - # port - try: - _port = ctypes.c_int(port) - except TypeError: - raise TypeError("port is expected as an int") - - connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( - _host, _user, _password, _db, _port)) - - if connection.value is None: - print('connect to TDengine failed') - raise ConnectionError("connect to TDengine failed") - # sys.exit(1) - # else: - # print('connect to TDengine success') - - return connection - - @staticmethod - def close(connection): - '''Close the TDengine handle - ''' - CTaosInterface.libtaos.taos_close(connection) - #print('connection is closed') - - @staticmethod - def query(connection, sql): - '''Run SQL - - @sql: str, sql string to run - - @rtype: 0 on success and -1 on failure - ''' - try: - return CTaosInterface.libtaos.taos_query( - connection, ctypes.c_char_p(sql.encode('utf-8'))) - except AttributeError: - raise AttributeError("sql is expected as a string") - # finally: - # CTaosInterface.libtaos.close(connection) - - @staticmethod - def affectedRows(result): - """The affected rows after runing query - """ - return CTaosInterface.libtaos.taos_affected_rows(result) - - @staticmethod - def subscribe(connection, restart, topic, sql, interval): - """Create a subscription - @restart boolean, - @sql string, sql statement for data query, must be a 'select' statement. 
- @topic string, name of this subscription - """ - return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( - connection, - 1 if restart else 0, - ctypes.c_char_p(topic.encode('utf-8')), - ctypes.c_char_p(sql.encode('utf-8')), - None, - None, - interval)) - - @staticmethod - def consume(sub): - """Consume data of a subscription - """ - result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.libtaos.taos_num_fields(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - return result, fields - - @staticmethod - def unsubscribe(sub, keepProgress): - """Cancel a subscription - """ - CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) - - @staticmethod - def useResult(result): - '''Use result after calling self.query - ''' - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.fieldsCount(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - - return fields - - @staticmethod - def fetchBlock(result, fields): - pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - if num_of_rows == 0: - return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: - raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - - return blocks, abs(num_of_rows) - - @staticmethod - def fetchRow(result, fields): - pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock: - num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError( - "Invalid data type returned from database") - if data is None: - blocks[i] = [None] - else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - else: - return None, 0 - return blocks, abs(num_of_rows) - - @staticmethod - def freeResult(result): - CTaosInterface.libtaos.taos_free_result(result) - result.value = None - - @staticmethod - def fieldsCount(result): - return CTaosInterface.libtaos.taos_field_count(result) - - @staticmethod - def fetchFields(result): - return CTaosInterface.libtaos.taos_fetch_fields(result) - - # @staticmethod - # def fetchRow(result, fields): - # l = [] - # row = CTaosInterface.libtaos.taos_fetch_row(result) - # if not row: - # return None - - # for i in range(len(fields)): - # l.append(CTaosInterface.getDataValue( - # row[i], fields[i]['type'], fields[i]['bytes'])) - - # return tuple(l) - - # 
@staticmethod - # def getDataValue(data, dtype, byte): - # ''' - # ''' - # if not data: - # return None - - # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): - # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): - # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') - - @staticmethod - def errno(result): - """Return the error number. - """ - return CTaosInterface.libtaos.taos_errno(result) - - @staticmethod - def errStr(result): - """Return the error styring - """ - return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8') - - -if __name__ == '__main__': - cinter = CTaosInterface() - conn = cinter.connect() - result = cinter.query(conn, 'show databases') - - print('Query Affected rows: {}'.format(cinter.affectedRows(result))) - - fields = CTaosInterface.useResult(result) - - data, num_of_rows = CTaosInterface.fetchBlock(result, fields) - - print(data) - - cinter.freeResult(result) - cinter.close(conn) diff --git a/src/connector/python/osx/python3/taos/connection.py b/src/connector/python/osx/python3/taos/connection.py deleted file mode 100644 index f6c395342c..0000000000 --- a/src/connector/python/osx/python3/taos/connection.py +++ /dev/null @@ -1,95 +0,0 @@ -from .cursor import TDengineCursor -from .subscription import TDengineSubscription -from .cinterface import CTaosInterface - - -class TDengineConnection(object): - """ TDengine connection object - """ - - def __init__(self, *args, **kwargs): - self._conn = None - self._host = None - self._user = "root" - self._password = "taosdata" - self._database = None - self._port = 0 - self._config = None - self._chandle = None - - self.config(**kwargs) - - def config(self, **kwargs): - # host - if 'host' in kwargs: - self._host = kwargs['host'] - - # user - if 'user' in kwargs: - self._user = kwargs['user'] - - # password - if 'password' in kwargs: - self._password = kwargs['password'] - - # database - if 'database' in kwargs: - self._database = kwargs['database'] - - # port - if 'port' in kwargs: - self._port = kwargs['port'] - - # config - if 'config' in kwargs: - self._config = kwargs['config'] - - self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect( - self._host, - self._user, - self._password, - self._database, - self._port) - - def close(self): - """Close current connection. - """ - return CTaosInterface.close(self._conn) - - def subscribe(self, restart, topic, sql, interval): - """Create a subscription. 
- """ - if self._conn is None: - return None - sub = CTaosInterface.subscribe( - self._conn, restart, topic, sql, interval) - return TDengineSubscription(sub) - - def cursor(self): - """Return a new Cursor object using the connection. - """ - return TDengineCursor(self) - - def commit(self): - """Commit any pending transaction to the database. - - Since TDengine do not support transactions, the implement is void functionality. - """ - pass - - def rollback(self): - """Void functionality - """ - pass - - def clear_result_set(self): - """Clear unused result set on this connection. - """ - pass - - -if __name__ == "__main__": - conn = TDengineConnection(host='192.168.1.107') - conn.close() - print("Hello world") diff --git a/src/connector/python/osx/python3/taos/constants.py b/src/connector/python/osx/python3/taos/constants.py deleted file mode 100644 index 93466f5184..0000000000 --- a/src/connector/python/osx/python3/taos/constants.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Constants in TDengine python -""" - -from .dbapi import * - - -class FieldType(object): - """TDengine Field Types - """ - # type_code - C_NULL = 0 - C_BOOL = 1 - C_TINYINT = 2 - C_SMALLINT = 3 - C_INT = 4 - C_BIGINT = 5 - C_FLOAT = 6 - C_DOUBLE = 7 - C_BINARY = 8 - C_TIMESTAMP = 9 - C_NCHAR = 10 - C_TINYINT_UNSIGNED = 11 - C_SMALLINT_UNSIGNED = 12 - C_INT_UNSIGNED = 13 - C_BIGINT_UNSIGNED = 14 - # NULL value definition - # NOTE: These values should change according to C definition in tsdb.h - C_BOOL_NULL = 0x02 - C_TINYINT_NULL = -128 - C_TINYINT_UNSIGNED_NULL = 255 - C_SMALLINT_NULL = -32768 - C_SMALLINT_UNSIGNED_NULL = 65535 - C_INT_NULL = -2147483648 - C_INT_UNSIGNED_NULL = 4294967295 - C_BIGINT_NULL = -9223372036854775808 - C_BIGINT_UNSIGNED_NULL = 18446744073709551615 - C_FLOAT_NULL = float('nan') - C_DOUBLE_NULL = float('nan') - C_BINARY_NULL = bytearray([int('0xff', 16)]) - # Timestamp precision definition - C_TIMESTAMP_MILLI = 0 - C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/osx/python3/taos/cursor.py b/src/connector/python/osx/python3/taos/cursor.py deleted file mode 100644 index 32dc0ea3c3..0000000000 --- a/src/connector/python/osx/python3/taos/cursor.py +++ /dev/null @@ -1,280 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * -from .constants import FieldType - -# querySeqNum = 0 - - -class TDengineCursor(object): - """Database cursor which is used to manage the context of a fetch operation. - - Attributes: - .description: Read-only attribute consists of 7-item sequences: - - > name (mondatory) - > type_code (mondatory) - > display_size - > internal_size - > precision - > scale - > null_ok - - This attribute will be None for operations that do not return rows or - if the cursor has not had an operation invoked via the .execute*() method yet. 
- - .rowcount:This read-only attribute specifies the number of rows that the last - .execute*() produced (for DQL statements like SELECT) or affected - """ - - def __init__(self, connection=None): - self._description = [] - self._rowcount = -1 - self._connection = None - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - self._logfile = "" - - if connection is not None: - self._connection = connection - - def __iter__(self): - return self - - def __next__(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetch iterator") - - if self._block_rows <= self._block_iter: - block, self._block_rows = CTaosInterface.fetchRow( - self._result, self._fields) - if self._block_rows == 0: - raise StopIteration - self._block = list(map(tuple, zip(*block))) - self._block_iter = 0 - - data = self._block[self._block_iter] - self._block_iter += 1 - - return data - - @property - def description(self): - """Return the description of the object. - """ - return self._description - - @property - def rowcount(self): - """Return the rowcount of the object - """ - return self._rowcount - - @property - def affected_rows(self): - """Return the rowcount of insertion - """ - return self._affected_rows - - def callproc(self, procname, *args): - """Call a stored database procedure with the given name. - - Void functionality since no stored procedures. - """ - pass - - def log(self, logfile): - self._logfile = logfile - - def close(self): - """Close the cursor. - """ - if self._connection is None: - return False - - self._reset_result() - self._connection = None - - return True - - def execute(self, operation, params=None): - """Prepare and execute a database operation (query or command). - """ - if not operation: - return None - - if not self._connection: - # TODO : change the exception raised here - raise ProgrammingError("Cursor is not connected") - - self._reset_result() - - stmt = operation - if params is not None: - pass - - # global querySeqNum - # querySeqNum += 1 - # localSeqNum = querySeqNum # avoid raice condition - # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt))) - self._result = CTaosInterface.query(self._connection._conn, stmt) - # print(" << Query ({}) Exec Done".format(localSeqNum)) - if (self._logfile): - with open(self._logfile, "a") as logfile: - logfile.write("%s;\n" % operation) - - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno == 0: - if CTaosInterface.fieldsCount(self._result) == 0: - self._affected_rows += CTaosInterface.affectedRows( - self._result) - return CTaosInterface.affectedRows(self._result) - else: - self._fields = CTaosInterface.useResult( - self._result) - return self._handle_result() - else: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - - def executemany(self, operation, seq_of_parameters): - """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. - """ - pass - - def fetchone(self): - """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. 
- """ - pass - - def fetchmany(self): - pass - - def istype(self, col, dataType): - if (dataType.upper() == "BOOL"): - if (self._description[col][1] == FieldType.C_BOOL): - return True - if (dataType.upper() == "TINYINT"): - if (self._description[col][1] == FieldType.C_TINYINT): - return True - if (dataType.upper() == "TINYINT UNSIGNED"): - if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED): - return True - if (dataType.upper() == "SMALLINT"): - if (self._description[col][1] == FieldType.C_SMALLINT): - return True - if (dataType.upper() == "SMALLINT UNSIGNED"): - if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED): - return True - if (dataType.upper() == "INT"): - if (self._description[col][1] == FieldType.C_INT): - return True - if (dataType.upper() == "INT UNSIGNED"): - if (self._description[col][1] == FieldType.C_INT_UNSIGNED): - return True - if (dataType.upper() == "BIGINT"): - if (self._description[col][1] == FieldType.C_BIGINT): - return True - if (dataType.upper() == "BIGINT UNSIGNED"): - if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED): - return True - if (dataType.upper() == "FLOAT"): - if (self._description[col][1] == FieldType.C_FLOAT): - return True - if (dataType.upper() == "DOUBLE"): - if (self._description[col][1] == FieldType.C_DOUBLE): - return True - if (dataType.upper() == "BINARY"): - if (self._description[col][1] == FieldType.C_BINARY): - return True - if (dataType.upper() == "TIMESTAMP"): - if (self._description[col][1] == FieldType.C_TIMESTAMP): - return True - if (dataType.upper() == "NCHAR"): - if (self._description[col][1] == FieldType.C_NCHAR): - return True - - return False - - def fetchall_row(self): - """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. - """ - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchRow( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def fetchall(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchBlock( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def nextset(self): - """ - """ - pass - - def setinputsize(self, sizes): - pass - - def setutputsize(self, size, column=None): - pass - - def _reset_result(self): - """Reset the result to unused version. 
- """ - self._description = [] - self._rowcount = -1 - if self._result is not None: - CTaosInterface.freeResult(self._result) - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - - def _handle_result(self): - """Handle the return result from query. - """ - self._description = [] - for ele in self._fields: - self._description.append( - (ele['name'], ele['type'], None, None, None, None, False)) - - return self._result diff --git a/src/connector/python/osx/python3/taos/dbapi.py b/src/connector/python/osx/python3/taos/dbapi.py deleted file mode 100644 index 594681ada9..0000000000 --- a/src/connector/python/osx/python3/taos/dbapi.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Type Objects and Constructors. -""" - -import time -import datetime - - -class DBAPITypeObject(object): - def __init__(self, *values): - self.values = values - - def __com__(self, other): - if other in self.values: - return 0 - if other < self.values: - return 1 - else: - return -1 - - -Date = datetime.date -Time = datetime.time -Timestamp = datetime.datetime - - -def DataFromTicks(ticks): - return Date(*time.localtime(ticks)[:3]) - - -def TimeFromTicks(ticks): - return Time(*time.localtime(ticks)[3:6]) - - -def TimestampFromTicks(ticks): - return Timestamp(*time.localtime(ticks)[:6]) - - -Binary = bytes - -# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) -# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) -# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) -# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() diff --git a/src/connector/python/osx/python3/taos/error.py b/src/connector/python/osx/python3/taos/error.py deleted file mode 100644 index c584badce8..0000000000 --- a/src/connector/python/osx/python3/taos/error.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Python exceptions -""" - - -class Error(Exception): - def __init__(self, msg=None, errno=None): - self.msg = msg - self._full_msg = self.msg - self.errno = errno - - def __str__(self): - return self._full_msg - - -class Warning(Exception): - """Exception raised for important warnings like data truncations while inserting. - """ - pass - - -class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. - """ - pass - - -class DatabaseError(Error): - """Exception raised for errors that are related to the database. - """ - pass - - -class DataError(DatabaseError): - """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. - """ - pass - - -class OperationalError(DatabaseError): - """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer - """ - pass - - -class IntegrityError(DatabaseError): - """Exception raised when the relational integrity of the database is affected. - """ - pass - - -class InternalError(DatabaseError): - """Exception raised when the database encounters an internal error. - """ - pass - - -class ProgrammingError(DatabaseError): - """Exception raised for programming errors. - """ - pass - - -class NotSupportedError(DatabaseError): - """Exception raised in case a method or database API was used which is not supported by the database,. 
- """ - pass diff --git a/src/connector/python/osx/python3/taos/subscription.py b/src/connector/python/osx/python3/taos/subscription.py deleted file mode 100644 index 270d9de092..0000000000 --- a/src/connector/python/osx/python3/taos/subscription.py +++ /dev/null @@ -1,57 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * - - -class TDengineSubscription(object): - """TDengine subscription object - """ - - def __init__(self, sub): - self._sub = sub - - def consume(self): - """Consume rows of a subscription - """ - if self._sub is None: - raise OperationalError("Invalid use of consume") - - result, fields = CTaosInterface.consume(self._sub) - buffer = [[] for i in range(len(fields))] - while True: - block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: - break - for i in range(len(fields)): - buffer[i].extend(block[i]) - - self.fields = fields - return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress=True): - """Close the Subscription. - """ - if self._sub is None: - return False - - CTaosInterface.unsubscribe(self._sub, keepProgress) - return True - - -if __name__ == '__main__': - from .connection import TDengineConnection - conn = TDengineConnection( - host="127.0.0.1", - user="root", - password="taosdata", - database="test") - - # Generate a cursor object to run SQL commands - sub = conn.subscribe(True, "test", "select * from meters;", 1000) - - for i in range(0, 10): - data = sub.consume() - for d in data: - print(d) - - sub.close() - conn.close() diff --git a/src/connector/python/setup.py b/src/connector/python/setup.py new file mode 100644 index 0000000000..4d083d7ddb --- /dev/null +++ b/src/connector/python/setup.py @@ -0,0 +1,35 @@ +import setuptools + +with open("README.md", "r") as fh: + long_description = fh.read() + +setuptools.setup( + name="taos", + version="2.0.9", + author="Taosdata Inc.", + author_email="support@taosdata.com", + description="TDengine python client package", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/pypa/sampleproject", + packages=setuptools.find_packages(), + classifiers=[ + + "Environment :: Console", + "Environment :: MacOS X", + "Environment :: Win32 (MS Windows)", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)", + "Operating System :: MacOS", + "Programming Language :: Python :: 2.7", + "Operating System :: Linux", + "Operating System :: POSIX :: Linux", + "Operating System :: Microsoft :: Windows", + "Operating System :: Microsoft :: Windows :: Windows 10", + ], +) diff --git a/src/connector/python/linux/python2/taos/__init__.py b/src/connector/python/taos/__init__.py similarity index 100% rename from src/connector/python/linux/python2/taos/__init__.py rename to src/connector/python/taos/__init__.py diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/taos/cinterface.py similarity index 70% rename from src/connector/python/linux/python3/taos/cinterface.py rename to src/connector/python/taos/cinterface.py index 3d0ecd2901..b8824327b0 100644 --- a/src/connector/python/linux/python3/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -3,6 +3,7 @@ from .constants import FieldType from 
.error import * import math import datetime +import platform def _convert_millisecond_to_datetime(milli): @@ -20,46 +21,28 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): if micro: _timestamp_converter = _convert_microsecond_to_datetime - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_int64))[ + :abs(num_of_rows)]] def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_byte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_bool))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_byte))[ + :abs(num_of_rows)]] def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ - if num_of_rows > 0: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] def _crow_tinyint_unsigned_to_python( @@ -69,92 +52,56 @@ def _crow_tinyint_unsigned_to_python( micro=False): """Function to convert C tinyint row to python row """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] def _crow_smallint_unsigned_to_python( data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for 
ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ - if num_of_rows > 0: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ - if num_of_rows > 0: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] def _crow_bigint_unsigned_to_python( @@ -164,52 +111,33 @@ def _crow_bigint_unsigned_to_python( micro=False): """Function to convert C bigint row to python row """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint64))[ + :abs(num_of_rows)]] def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ - if num_of_rows > 
0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) - if num_of_rows > 0: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - else: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): @@ -236,30 +164,17 @@ def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): """ assert(nbytes is not None) res = [] - if num_of_rows > 0: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) + for i in range(abs(num_of_rows)): + try: + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) + except ValueError: + res.append(None) return res @@ -268,20 +183,12 @@ def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): """ assert(nbytes is not None) res = [] - if num_of_rows >= 0: - for i in range(abs(num_of_rows)): - try: - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - res.append((ctypes.cast(data + nbytes * i + 2, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) + for i in range(abs(num_of_rows)): + try: + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()) + except ValueError: + res.append(None) return res @@ -330,14 +237,38 @@ class TaosField(ctypes.Structure): # C interface class +def _load_taos_linux(): + return ctypes.CDLL('libtaos.so') + + +def _load_taos_darwin(): + return ctypes.cDLL('libtaos.dylib') + + +def _load_taos_windows(): + return ctypes.windll.LoadLibrary('taos') + + +def _load_taos(): + load_func = { + 'Linux': _load_taos_linux, + 'Darwin': _load_taos_darwin, + 'Windows': _load_taos_windows, + } + try: + return load_func[platform.system()]() + except: + sys.exit('unsupported platform to TDengine connector') + + class CTaosInterface(object): - libtaos = ctypes.CDLL('libtaos.so') + libtaos = _load_taos() 
libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) libtaos.taos_init.restype = None libtaos.taos_connect.restype = ctypes.c_void_p - #libtaos.taos_use_result.restype = ctypes.c_void_p + # libtaos.taos_use_result.restype = ctypes.c_void_p libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) libtaos.taos_errstr.restype = ctypes.c_char_p libtaos.taos_subscribe.restype = ctypes.c_void_p @@ -438,7 +369,7 @@ class CTaosInterface(object): '''Close the TDengine handle ''' CTaosInterface.libtaos.taos_close(connection) - #print('connection is closed') + # print('connection is closed') @staticmethod def query(connection, sql): diff --git a/src/connector/python/linux/python2/taos/connection.py b/src/connector/python/taos/connection.py similarity index 100% rename from src/connector/python/linux/python2/taos/connection.py rename to src/connector/python/taos/connection.py diff --git a/src/connector/python/linux/python2/taos/constants.py b/src/connector/python/taos/constants.py similarity index 100% rename from src/connector/python/linux/python2/taos/constants.py rename to src/connector/python/taos/constants.py diff --git a/src/connector/python/linux/python3/taos/cursor.py b/src/connector/python/taos/cursor.py similarity index 98% rename from src/connector/python/linux/python3/taos/cursor.py rename to src/connector/python/taos/cursor.py index 32dc0ea3c3..d443ec95d0 100644 --- a/src/connector/python/linux/python3/taos/cursor.py +++ b/src/connector/python/taos/cursor.py @@ -45,6 +45,12 @@ class TDengineCursor(object): return self def __next__(self): + return self._taos_next() + + def next(self): + return self._taos_next() + + def _taos_next(self): if self._result is None or self._fields is None: raise OperationalError("Invalid use of fetch iterator") diff --git a/src/connector/python/linux/python2/taos/dbapi.py b/src/connector/python/taos/dbapi.py similarity index 100% rename from src/connector/python/linux/python2/taos/dbapi.py rename to src/connector/python/taos/dbapi.py diff --git a/src/connector/python/linux/python2/taos/error.py b/src/connector/python/taos/error.py similarity index 100% rename from src/connector/python/linux/python2/taos/error.py rename to src/connector/python/taos/error.py diff --git a/src/connector/python/linux/python2/taos/subscription.py b/src/connector/python/taos/subscription.py similarity index 100% rename from src/connector/python/linux/python2/taos/subscription.py rename to src/connector/python/taos/subscription.py diff --git a/src/connector/python/windows/python2 b/src/connector/python/windows/python2 new file mode 120000 index 0000000000..b870225aa0 --- /dev/null +++ b/src/connector/python/windows/python2 @@ -0,0 +1 @@ +../ \ No newline at end of file diff --git a/src/connector/python/windows/python2/LICENSE b/src/connector/python/windows/python2/LICENSE deleted file mode 100644 index 79a9d73086..0000000000 --- a/src/connector/python/windows/python2/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ - Copyright (c) 2019 TAOS Data, Inc. - -This program is free software: you can use, redistribute, and/or modify -it under the terms of the GNU Affero General Public License, version 3 -or later ("AGPL"), as published by the Free Software Foundation. - -This program is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
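The patches above collapse the per-platform connector trees into a single src/connector/python/taos package, with _load_taos() in cinterface.py picking libtaos.so, libtaos.dylib, or taos.dll at import time. A minimal usage sketch of that consolidated package, mirroring the demo code already present in connection.py and subscription.py; the local server, the test database, and the meters table are assumptions for illustration only:

    import taos

    # assumes a running TDengine server with database `test` and table `meters`
    # connect() forwards its keyword arguments to TDengineConnection
    conn = taos.connect(host="127.0.0.1", user="root",
                        password="taosdata", database="test")

    # plain query through the DB-API style cursor
    cursor = conn.cursor()
    cursor.execute("show databases")
    for row in cursor.fetchall():
        print(row)
    cursor.close()

    # continuous subscription: re-evaluate the query every 1000 ms
    sub = conn.subscribe(True, "test", "select * from meters;", 1000)
    for _ in range(10):
        for row in sub.consume():
            print(row)
    sub.close()

    conn.close()

Because the import path no longer depends on the platform or the Python major version, the intent of this restructuring is that the same script works unchanged on Linux, macOS, and Windows once the package is installed through the new src/connector/python/setup.py.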
diff --git a/src/connector/python/windows/python2/README.md b/src/connector/python/windows/python2/README.md deleted file mode 100644 index 70db6bba13..0000000000 --- a/src/connector/python/windows/python2/README.md +++ /dev/null @@ -1 +0,0 @@ -# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/windows/python2/setup.py b/src/connector/python/windows/python2/setup.py deleted file mode 100644 index 24d75f937c..0000000000 --- a/src/connector/python/windows/python2/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -import setuptools - -with open("README.md", "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="taos", - version="2.0.9", - author="Taosdata Inc.", - author_email="support@taosdata.com", - description="TDengine python client package", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/pypa/sampleproject", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 2", - "Operating System :: Windows", - ], -) diff --git a/src/connector/python/windows/python2/taos/__init__.py b/src/connector/python/windows/python2/taos/__init__.py deleted file mode 100644 index 9732635738..0000000000 --- a/src/connector/python/windows/python2/taos/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ - -from .connection import TDengineConnection -from .cursor import TDengineCursor - -# Globals -threadsafety = 0 -paramstyle = 'pyformat' - -__all__ = ['connection', 'cursor'] - - -def connect(*args, **kwargs): - """ Function to return a TDengine connector object - - Current supporting keyword parameters: - @dsn: Data source name as string - @user: Username as string(optional) - @password: Password as string(optional) - @host: Hostname(optional) - @database: Database name(optional) - - @rtype: TDengineConnector - """ - return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py deleted file mode 100644 index 65cb183f26..0000000000 --- a/src/connector/python/windows/python2/taos/cinterface.py +++ /dev/null @@ -1,648 +0,0 @@ -import ctypes -from .constants import FieldType -from .error import * -import math -import datetime - - -def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli / 1000.0) - - -def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro / 1000000.0) - - -def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - _timestamp_converter = _convert_millisecond_to_datetime - if micro: - _timestamp_converter = _convert_microsecond_to_datetime - - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - - -def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_byte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( 
- data, ctypes.POINTER( - ctypes.c_bool))[ - :abs(num_of_rows)]] - - -def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - - -def _crow_tinyint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - - -def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - - -def _crow_smallint_unsigned_to_python( - data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - - -def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - - -def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - - -def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - - -def _crow_bigint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - 
return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - - -def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C float row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - - -def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C double row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - - -def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - if num_of_rows > 0: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - else: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - - -def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - for i in range(abs(num_of_rows)): - try: - if num_of_rows >= 0: - tmpstr = ctypes.c_char_p(data) - res.append(tmpstr.value.decode()) - else: - res.append((ctypes.cast(data + nbytes * i, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - - return res - - -def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows > 0: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - return res - - -def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows >= 0: - for i in range(abs(num_of_rows)): - try: - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - res.append((ctypes.cast(data + nbytes * i + 2, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - 
return res - - -_CONVERT_FUNC = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -_CONVERT_FUNC_BLOCK = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python_block, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -# Corresponding TAOS_FIELD structure in C - - -class TaosField(ctypes.Structure): - _fields_ = [('name', ctypes.c_char * 65), - ('type', ctypes.c_char), - ('bytes', ctypes.c_short)] - -# C interface class - - -class CTaosInterface(object): - - libtaos = ctypes.windll.LoadLibrary('taos') - - libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) - libtaos.taos_init.restype = None - libtaos.taos_connect.restype = ctypes.c_void_p - #libtaos.taos_use_result.restype = ctypes.c_void_p - libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) - libtaos.taos_errstr.restype = ctypes.c_char_p - libtaos.taos_subscribe.restype = ctypes.c_void_p - libtaos.taos_consume.restype = ctypes.c_void_p - libtaos.taos_fetch_lengths.restype = ctypes.c_void_p - libtaos.taos_free_result.restype = None - libtaos.taos_errno.restype = ctypes.c_int - libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) - - def __init__(self, config=None): - ''' - Function to initialize the class - @host : str, hostname to connect - @user : str, username to connect to server - @password : str, password to connect to server - @db : str, default db to use when log in - @config : str, config directory - - @rtype : None - ''' - if config is None: - self._config = ctypes.c_char_p(None) - else: - try: - self._config = ctypes.c_char_p(config.encode('utf-8')) - except AttributeError: - raise AttributeError("config is expected as a str") - - if config is not None: - CTaosInterface.libtaos.taos_options(3, self._config) - - CTaosInterface.libtaos.taos_init() - - @property - def config(self): - """ Get current config - """ - return self._config - - def connect( - self, - host=None, - user="root", - password="taosdata", - db=None, - port=0): - ''' - Function to connect to server - - @rtype: c_void_p, TDengine handle - ''' - # host - try: - _host = ctypes.c_char_p(host.encode( - "utf-8")) if host is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("host is expected as a str") - - # user - try: - _user = 
ctypes.c_char_p(user.encode("utf-8")) - except AttributeError: - raise AttributeError("user is expected as a str") - - # password - try: - _password = ctypes.c_char_p(password.encode("utf-8")) - except AttributeError: - raise AttributeError("password is expected as a str") - - # db - try: - _db = ctypes.c_char_p( - db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("db is expected as a str") - - # port - try: - _port = ctypes.c_int(port) - except TypeError: - raise TypeError("port is expected as an int") - - connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( - _host, _user, _password, _db, _port)) - - if connection.value is None: - print('connect to TDengine failed') - raise ConnectionError("connect to TDengine failed") - # sys.exit(1) - # else: - # print('connect to TDengine success') - - return connection - - @staticmethod - def close(connection): - '''Close the TDengine handle - ''' - CTaosInterface.libtaos.taos_close(connection) - #print('connection is closed') - - @staticmethod - def query(connection, sql): - '''Run SQL - - @sql: str, sql string to run - - @rtype: 0 on success and -1 on failure - ''' - try: - return CTaosInterface.libtaos.taos_query( - connection, ctypes.c_char_p(sql.encode('utf-8'))) - except AttributeError: - raise AttributeError("sql is expected as a string") - # finally: - # CTaosInterface.libtaos.close(connection) - - @staticmethod - def affectedRows(result): - """The affected rows after runing query - """ - return CTaosInterface.libtaos.taos_affected_rows(result) - - @staticmethod - def subscribe(connection, restart, topic, sql, interval): - """Create a subscription - @restart boolean, - @sql string, sql statement for data query, must be a 'select' statement. 
- @topic string, name of this subscription - """ - return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( - connection, - 1 if restart else 0, - ctypes.c_char_p(topic.encode('utf-8')), - ctypes.c_char_p(sql.encode('utf-8')), - None, - None, - interval)) - - @staticmethod - def consume(sub): - """Consume data of a subscription - """ - result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.libtaos.taos_num_fields(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - return result, fields - - @staticmethod - def unsubscribe(sub, keepProgress): - """Cancel a subscription - """ - CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) - - @staticmethod - def useResult(result): - '''Use result after calling self.query - ''' - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.fieldsCount(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - - return fields - - @staticmethod - def fetchBlock(result, fields): - pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - if num_of_rows == 0: - return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: - raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - - return blocks, abs(num_of_rows) - - @staticmethod - def fetchRow(result, fields): - pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock: - num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError( - "Invalid data type returned from database") - if data is None: - blocks[i] = [None] - else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - else: - return None, 0 - return blocks, abs(num_of_rows) - - @staticmethod - def freeResult(result): - CTaosInterface.libtaos.taos_free_result(result) - result.value = None - - @staticmethod - def fieldsCount(result): - return CTaosInterface.libtaos.taos_field_count(result) - - @staticmethod - def fetchFields(result): - return CTaosInterface.libtaos.taos_fetch_fields(result) - - # @staticmethod - # def fetchRow(result, fields): - # l = [] - # row = CTaosInterface.libtaos.taos_fetch_row(result) - # if not row: - # return None - - # for i in range(len(fields)): - # l.append(CTaosInterface.getDataValue( - # row[i], fields[i]['type'], fields[i]['bytes'])) - - # return tuple(l) - - # 
@staticmethod - # def getDataValue(data, dtype, byte): - # ''' - # ''' - # if not data: - # return None - - # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): - # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): - # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') - - @staticmethod - def errno(result): - """Return the error number. - """ - return CTaosInterface.libtaos.taos_errno(result) - - @staticmethod - def errStr(result): - """Return the error styring - """ - return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8') - - -if __name__ == '__main__': - cinter = CTaosInterface() - conn = cinter.connect() - result = cinter.query(conn, 'show databases') - - print('Query Affected rows: {}'.format(cinter.affectedRows(result))) - - fields = CTaosInterface.useResult(result) - - data, num_of_rows = CTaosInterface.fetchBlock(result, fields) - - print(data) - - cinter.freeResult(result) - cinter.close(conn) diff --git a/src/connector/python/windows/python2/taos/connection.py b/src/connector/python/windows/python2/taos/connection.py deleted file mode 100644 index 5729d01c6d..0000000000 --- a/src/connector/python/windows/python2/taos/connection.py +++ /dev/null @@ -1,96 +0,0 @@ -from .cursor import TDengineCursor -from .subscription import TDengineSubscription -from .cinterface import CTaosInterface - - -class TDengineConnection(object): - """ TDengine connection object - """ - - def __init__(self, *args, **kwargs): - self._conn = None - self._host = None - self._user = "root" - self._password = "taosdata" - self._database = None - self._port = 0 - self._config = None - self._chandle = None - - if len(kwargs) > 0: - self.config(**kwargs) - - def config(self, **kwargs): - # host - if 'host' in kwargs: - self._host = kwargs['host'] - - # user - if 'user' in kwargs: - self._user = kwargs['user'] - - # password - if 'password' in kwargs: - self._password = kwargs['password'] - - # database - if 'database' in kwargs: - self._database = kwargs['database'] - - # port - if 'port' in kwargs: - self._port = kwargs['port'] - - # config - if 'config' in kwargs: - self._config = kwargs['config'] - - self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect( - self._host, - self._user, - self._password, - self._database, - self._port) - - def close(self): - """Close current connection. - """ - return CTaosInterface.close(self._conn) - - def subscribe(self, restart, topic, sql, interval): - """Create a subscription. 
- """ - if self._conn is None: - return None - sub = CTaosInterface.subscribe( - self._conn, restart, topic, sql, interval) - return TDengineSubscription(sub) - - def cursor(self): - """Return a new Cursor object using the connection. - """ - return TDengineCursor(self) - - def commit(self): - """Commit any pending transaction to the database. - - Since TDengine do not support transactions, the implement is void functionality. - """ - pass - - def rollback(self): - """Void functionality - """ - pass - - def clear_result_set(self): - """Clear unused result set on this connection. - """ - pass - - -if __name__ == "__main__": - conn = TDengineConnection(host='192.168.1.107') - conn.close() - print("Hello world") diff --git a/src/connector/python/windows/python2/taos/constants.py b/src/connector/python/windows/python2/taos/constants.py deleted file mode 100644 index 8a8011c3e3..0000000000 --- a/src/connector/python/windows/python2/taos/constants.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Constants in TDengine python -""" - -from .dbapi import * - - -class FieldType(object): - """TDengine Field Types - """ - # type_code - C_NULL = 0 - C_BOOL = 1 - C_TINYINT = 2 - C_SMALLINT = 3 - C_INT = 4 - C_BIGINT = 5 - C_FLOAT = 6 - C_DOUBLE = 7 - C_BINARY = 8 - C_TIMESTAMP = 9 - C_NCHAR = 10 - C_TINYINT_UNSIGNED = 11 - C_SMALLINT_UNSIGNED = 12 - C_INT_UNSIGNED = 13 - C_BIGINT_UNSIGNED = 14 - # NULL value definition - # NOTE: These values should change according to C definition in tsdb.h - C_BOOL_NULL = 0x02 - C_TINYINT_NULL = -128 - C_TINYINT_UNSIGNED_NULL = 255 - C_SMALLINT_NULL = -32768 - C_SMALLINT_UNSIGNED_NULL = 65535 - C_INT_NULL = -2147483648 - C_INT_UNSIGNED_NULL = 4294967295 - C_BIGINT_NULL = -9223372036854775808 - C_BIGINT_UNSIGNED_NULL = 18446744073709551615 - C_FLOAT_NULL = float('nan') - C_DOUBLE_NULL = float('nan') - C_BINARY_NULL = bytearray([int('0xff', 16)]) - # Time precision definition - C_TIMESTAMP_MILLI = 0 - C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/windows/python2/taos/cursor.py b/src/connector/python/windows/python2/taos/cursor.py deleted file mode 100644 index 5f4666b593..0000000000 --- a/src/connector/python/windows/python2/taos/cursor.py +++ /dev/null @@ -1,220 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * -from .constants import FieldType - -# querySeqNum = 0 - - -class TDengineCursor(object): - """Database cursor which is used to manage the context of a fetch operation. - - Attributes: - .description: Read-only attribute consists of 7-item sequences: - - > name (mondatory) - > type_code (mondatory) - > display_size - > internal_size - > precision - > scale - > null_ok - - This attribute will be None for operations that do not return rows or - if the cursor has not had an operation invoked via the .execute*() method yet. 
- - .rowcount:This read-only attribute specifies the number of rows that the last - .execute*() produced (for DQL statements like SELECT) or affected - """ - - def __init__(self, connection=None): - self._description = [] - self._rowcount = -1 - self._connection = None - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - self._logfile = "" - - if connection is not None: - self._connection = connection - - def __iter__(self): - return self - - def __next__(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetch iterator") - - if self._block_rows <= self._block_iter: - block, self._block_rows = CTaosInterface.fetchRow( - self._result, self._fields) - if self._block_rows == 0: - raise StopIteration - self._block = list(map(tuple, zip(*block))) - self._block_iter = 0 - - data = self._block[self._block_iter] - self._block_iter += 1 - - return data - - @property - def description(self): - """Return the description of the object. - """ - return self._description - - @property - def rowcount(self): - """Return the rowcount of the object - """ - return self._rowcount - - @property - def affected_rows(self): - """Return the affected_rows of the object - """ - return self._affected_rows - - def callproc(self, procname, *args): - """Call a stored database procedure with the given name. - - Void functionality since no stored procedures. - """ - pass - - def close(self): - """Close the cursor. - """ - if self._connection is None: - return False - - self._reset_result() - self._connection = None - - return True - - def execute(self, operation, params=None): - """Prepare and execute a database operation (query or command). - """ - if not operation: - return None - - if not self._connection: - # TODO : change the exception raised here - raise ProgrammingError("Cursor is not connected") - - self._reset_result() - - stmt = operation - if params is not None: - pass - - self._result = CTaosInterface.query(self._connection._conn, stmt) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno == 0: - if CTaosInterface.fieldsCount(self._result) == 0: - self._affected_rows += CTaosInterface.affectedRows( - self._result) - return CTaosInterface.affectedRows(self._result) - else: - self._fields = CTaosInterface.useResult(self._result) - return self._handle_result() - else: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) - - def executemany(self, operation, seq_of_parameters): - """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. - """ - pass - - def fetchone(self): - """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. - """ - pass - - def fetchmany(self): - pass - - def fetchall_row(self): - """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. 
- """ - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchRow( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def fetchall(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchBlock( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - - return list(map(tuple, zip(*buffer))) - - def nextset(self): - """ - """ - pass - - def setinputsize(self, sizes): - pass - - def setutputsize(self, size, column=None): - pass - - def _reset_result(self): - """Reset the result to unused version. - """ - self._description = [] - self._rowcount = -1 - if self._result is not None: - CTaosInterface.freeResult(self._result) - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - - def _handle_result(self): - """Handle the return result from query. - """ - self._description = [] - for ele in self._fields: - self._description.append( - (ele['name'], ele['type'], None, None, None, None, False)) - - return self._result diff --git a/src/connector/python/windows/python2/taos/dbapi.py b/src/connector/python/windows/python2/taos/dbapi.py deleted file mode 100644 index 594681ada9..0000000000 --- a/src/connector/python/windows/python2/taos/dbapi.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Type Objects and Constructors. 
-""" - -import time -import datetime - - -class DBAPITypeObject(object): - def __init__(self, *values): - self.values = values - - def __com__(self, other): - if other in self.values: - return 0 - if other < self.values: - return 1 - else: - return -1 - - -Date = datetime.date -Time = datetime.time -Timestamp = datetime.datetime - - -def DataFromTicks(ticks): - return Date(*time.localtime(ticks)[:3]) - - -def TimeFromTicks(ticks): - return Time(*time.localtime(ticks)[3:6]) - - -def TimestampFromTicks(ticks): - return Timestamp(*time.localtime(ticks)[:6]) - - -Binary = bytes - -# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) -# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) -# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) -# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() diff --git a/src/connector/python/windows/python2/taos/error.py b/src/connector/python/windows/python2/taos/error.py deleted file mode 100644 index c584badce8..0000000000 --- a/src/connector/python/windows/python2/taos/error.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Python exceptions -""" - - -class Error(Exception): - def __init__(self, msg=None, errno=None): - self.msg = msg - self._full_msg = self.msg - self.errno = errno - - def __str__(self): - return self._full_msg - - -class Warning(Exception): - """Exception raised for important warnings like data truncations while inserting. - """ - pass - - -class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. - """ - pass - - -class DatabaseError(Error): - """Exception raised for errors that are related to the database. - """ - pass - - -class DataError(DatabaseError): - """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. - """ - pass - - -class OperationalError(DatabaseError): - """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer - """ - pass - - -class IntegrityError(DatabaseError): - """Exception raised when the relational integrity of the database is affected. - """ - pass - - -class InternalError(DatabaseError): - """Exception raised when the database encounters an internal error. - """ - pass - - -class ProgrammingError(DatabaseError): - """Exception raised for programming errors. - """ - pass - - -class NotSupportedError(DatabaseError): - """Exception raised in case a method or database API was used which is not supported by the database,. 
- """ - pass diff --git a/src/connector/python/windows/python2/taos/subscription.py b/src/connector/python/windows/python2/taos/subscription.py deleted file mode 100644 index 270d9de092..0000000000 --- a/src/connector/python/windows/python2/taos/subscription.py +++ /dev/null @@ -1,57 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * - - -class TDengineSubscription(object): - """TDengine subscription object - """ - - def __init__(self, sub): - self._sub = sub - - def consume(self): - """Consume rows of a subscription - """ - if self._sub is None: - raise OperationalError("Invalid use of consume") - - result, fields = CTaosInterface.consume(self._sub) - buffer = [[] for i in range(len(fields))] - while True: - block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: - break - for i in range(len(fields)): - buffer[i].extend(block[i]) - - self.fields = fields - return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress=True): - """Close the Subscription. - """ - if self._sub is None: - return False - - CTaosInterface.unsubscribe(self._sub, keepProgress) - return True - - -if __name__ == '__main__': - from .connection import TDengineConnection - conn = TDengineConnection( - host="127.0.0.1", - user="root", - password="taosdata", - database="test") - - # Generate a cursor object to run SQL commands - sub = conn.subscribe(True, "test", "select * from meters;", 1000) - - for i in range(0, 10): - data = sub.consume() - for d in data: - print(d) - - sub.close() - conn.close() diff --git a/src/connector/python/windows/python3 b/src/connector/python/windows/python3 new file mode 120000 index 0000000000..b870225aa0 --- /dev/null +++ b/src/connector/python/windows/python3 @@ -0,0 +1 @@ +../ \ No newline at end of file diff --git a/src/connector/python/windows/python3/LICENSE b/src/connector/python/windows/python3/LICENSE deleted file mode 100644 index 2d032e65d8..0000000000 --- a/src/connector/python/windows/python3/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ - Copyright (c) 2019 TAOS Data, Inc. - -This program is free software: you can use, redistribute, and/or modify -it under the terms of the GNU Affero General Public License, version 3 -or later ("AGPL"), as published by the Free Software Foundation. - -This program is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
diff --git a/src/connector/python/windows/python3/README.md b/src/connector/python/windows/python3/README.md deleted file mode 100644 index 70db6bba13..0000000000 --- a/src/connector/python/windows/python3/README.md +++ /dev/null @@ -1 +0,0 @@ -# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/windows/python3/setup.py b/src/connector/python/windows/python3/setup.py deleted file mode 100644 index 2659c493aa..0000000000 --- a/src/connector/python/windows/python3/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -import setuptools - -with open("README.md", "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="taos", - version="2.0.9", - author="Taosdata Inc.", - author_email="support@taosdata.com", - description="TDengine python client package", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/pypa/sampleproject", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 3", - "Operating System :: Windows", - ], -) diff --git a/src/connector/python/windows/python3/taos/__init__.py b/src/connector/python/windows/python3/taos/__init__.py deleted file mode 100644 index b57e25fd2c..0000000000 --- a/src/connector/python/windows/python3/taos/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ - -from .connection import TDengineConnection -from .cursor import TDengineCursor - -# Globals -threadsafety = 0 -paramstyle = 'pyformat' - -__all__ = ['connection', 'cursor'] - - -def connect(*args, **kwargs): - """ Function to return a TDengine connector object - - Current supporting keyword parameters: - @dsn: Data source name as string - @user: Username as string(optional) - @password: Password as string(optional) - @host: Hostname(optional) - @database: Database name(optional) - - @rtype: TDengineConnector - """ - return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py deleted file mode 100644 index 65cb183f26..0000000000 --- a/src/connector/python/windows/python3/taos/cinterface.py +++ /dev/null @@ -1,648 +0,0 @@ -import ctypes -from .constants import FieldType -from .error import * -import math -import datetime - - -def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli / 1000.0) - - -def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro / 1000000.0) - - -def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - _timestamp_converter = _convert_millisecond_to_datetime - if micro: - _timestamp_converter = _convert_microsecond_to_datetime - - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - - -def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_byte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( 
- data, ctypes.POINTER( - ctypes.c_bool))[ - :abs(num_of_rows)]] - - -def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - - -def _crow_tinyint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - - -def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - - -def _crow_smallint_unsigned_to_python( - data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - - -def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - - -def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - - -def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - - -def _crow_bigint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - 
return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - - -def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C float row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - - -def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C double row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - - -def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - if num_of_rows > 0: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - else: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - - -def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - for i in range(abs(num_of_rows)): - try: - if num_of_rows >= 0: - tmpstr = ctypes.c_char_p(data) - res.append(tmpstr.value.decode()) - else: - res.append((ctypes.cast(data + nbytes * i, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - - return res - - -def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows > 0: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - return res - - -def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows >= 0: - for i in range(abs(num_of_rows)): - try: - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - res.append((ctypes.cast(data + nbytes * i + 2, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - 
return res - - -_CONVERT_FUNC = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -_CONVERT_FUNC_BLOCK = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python_block, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -# Corresponding TAOS_FIELD structure in C - - -class TaosField(ctypes.Structure): - _fields_ = [('name', ctypes.c_char * 65), - ('type', ctypes.c_char), - ('bytes', ctypes.c_short)] - -# C interface class - - -class CTaosInterface(object): - - libtaos = ctypes.windll.LoadLibrary('taos') - - libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) - libtaos.taos_init.restype = None - libtaos.taos_connect.restype = ctypes.c_void_p - #libtaos.taos_use_result.restype = ctypes.c_void_p - libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) - libtaos.taos_errstr.restype = ctypes.c_char_p - libtaos.taos_subscribe.restype = ctypes.c_void_p - libtaos.taos_consume.restype = ctypes.c_void_p - libtaos.taos_fetch_lengths.restype = ctypes.c_void_p - libtaos.taos_free_result.restype = None - libtaos.taos_errno.restype = ctypes.c_int - libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) - - def __init__(self, config=None): - ''' - Function to initialize the class - @host : str, hostname to connect - @user : str, username to connect to server - @password : str, password to connect to server - @db : str, default db to use when log in - @config : str, config directory - - @rtype : None - ''' - if config is None: - self._config = ctypes.c_char_p(None) - else: - try: - self._config = ctypes.c_char_p(config.encode('utf-8')) - except AttributeError: - raise AttributeError("config is expected as a str") - - if config is not None: - CTaosInterface.libtaos.taos_options(3, self._config) - - CTaosInterface.libtaos.taos_init() - - @property - def config(self): - """ Get current config - """ - return self._config - - def connect( - self, - host=None, - user="root", - password="taosdata", - db=None, - port=0): - ''' - Function to connect to server - - @rtype: c_void_p, TDengine handle - ''' - # host - try: - _host = ctypes.c_char_p(host.encode( - "utf-8")) if host is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("host is expected as a str") - - # user - try: - _user = 
ctypes.c_char_p(user.encode("utf-8")) - except AttributeError: - raise AttributeError("user is expected as a str") - - # password - try: - _password = ctypes.c_char_p(password.encode("utf-8")) - except AttributeError: - raise AttributeError("password is expected as a str") - - # db - try: - _db = ctypes.c_char_p( - db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("db is expected as a str") - - # port - try: - _port = ctypes.c_int(port) - except TypeError: - raise TypeError("port is expected as an int") - - connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( - _host, _user, _password, _db, _port)) - - if connection.value is None: - print('connect to TDengine failed') - raise ConnectionError("connect to TDengine failed") - # sys.exit(1) - # else: - # print('connect to TDengine success') - - return connection - - @staticmethod - def close(connection): - '''Close the TDengine handle - ''' - CTaosInterface.libtaos.taos_close(connection) - #print('connection is closed') - - @staticmethod - def query(connection, sql): - '''Run SQL - - @sql: str, sql string to run - - @rtype: 0 on success and -1 on failure - ''' - try: - return CTaosInterface.libtaos.taos_query( - connection, ctypes.c_char_p(sql.encode('utf-8'))) - except AttributeError: - raise AttributeError("sql is expected as a string") - # finally: - # CTaosInterface.libtaos.close(connection) - - @staticmethod - def affectedRows(result): - """The affected rows after runing query - """ - return CTaosInterface.libtaos.taos_affected_rows(result) - - @staticmethod - def subscribe(connection, restart, topic, sql, interval): - """Create a subscription - @restart boolean, - @sql string, sql statement for data query, must be a 'select' statement. 
- @topic string, name of this subscription - """ - return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( - connection, - 1 if restart else 0, - ctypes.c_char_p(topic.encode('utf-8')), - ctypes.c_char_p(sql.encode('utf-8')), - None, - None, - interval)) - - @staticmethod - def consume(sub): - """Consume data of a subscription - """ - result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.libtaos.taos_num_fields(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - return result, fields - - @staticmethod - def unsubscribe(sub, keepProgress): - """Cancel a subscription - """ - CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) - - @staticmethod - def useResult(result): - '''Use result after calling self.query - ''' - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.fieldsCount(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - - return fields - - @staticmethod - def fetchBlock(result, fields): - pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - if num_of_rows == 0: - return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: - raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - - return blocks, abs(num_of_rows) - - @staticmethod - def fetchRow(result, fields): - pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock: - num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError( - "Invalid data type returned from database") - if data is None: - blocks[i] = [None] - else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - else: - return None, 0 - return blocks, abs(num_of_rows) - - @staticmethod - def freeResult(result): - CTaosInterface.libtaos.taos_free_result(result) - result.value = None - - @staticmethod - def fieldsCount(result): - return CTaosInterface.libtaos.taos_field_count(result) - - @staticmethod - def fetchFields(result): - return CTaosInterface.libtaos.taos_fetch_fields(result) - - # @staticmethod - # def fetchRow(result, fields): - # l = [] - # row = CTaosInterface.libtaos.taos_fetch_row(result) - # if not row: - # return None - - # for i in range(len(fields)): - # l.append(CTaosInterface.getDataValue( - # row[i], fields[i]['type'], fields[i]['bytes'])) - - # return tuple(l) - - # 
@staticmethod - # def getDataValue(data, dtype, byte): - # ''' - # ''' - # if not data: - # return None - - # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): - # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): - # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') - - @staticmethod - def errno(result): - """Return the error number. - """ - return CTaosInterface.libtaos.taos_errno(result) - - @staticmethod - def errStr(result): - """Return the error styring - """ - return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8') - - -if __name__ == '__main__': - cinter = CTaosInterface() - conn = cinter.connect() - result = cinter.query(conn, 'show databases') - - print('Query Affected rows: {}'.format(cinter.affectedRows(result))) - - fields = CTaosInterface.useResult(result) - - data, num_of_rows = CTaosInterface.fetchBlock(result, fields) - - print(data) - - cinter.freeResult(result) - cinter.close(conn) diff --git a/src/connector/python/windows/python3/taos/connection.py b/src/connector/python/windows/python3/taos/connection.py deleted file mode 100644 index 5729d01c6d..0000000000 --- a/src/connector/python/windows/python3/taos/connection.py +++ /dev/null @@ -1,96 +0,0 @@ -from .cursor import TDengineCursor -from .subscription import TDengineSubscription -from .cinterface import CTaosInterface - - -class TDengineConnection(object): - """ TDengine connection object - """ - - def __init__(self, *args, **kwargs): - self._conn = None - self._host = None - self._user = "root" - self._password = "taosdata" - self._database = None - self._port = 0 - self._config = None - self._chandle = None - - if len(kwargs) > 0: - self.config(**kwargs) - - def config(self, **kwargs): - # host - if 'host' in kwargs: - self._host = kwargs['host'] - - # user - if 'user' in kwargs: - self._user = kwargs['user'] - - # password - if 'password' in kwargs: - self._password = kwargs['password'] - - # database - if 'database' in kwargs: - self._database = kwargs['database'] - - # port - if 'port' in kwargs: - self._port = kwargs['port'] - - # config - if 'config' in kwargs: - self._config = kwargs['config'] - - self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect( - self._host, - self._user, - self._password, - self._database, - self._port) - - def close(self): - """Close current connection. - """ - return CTaosInterface.close(self._conn) - - def subscribe(self, restart, topic, sql, interval): - """Create a subscription. 
- """ - if self._conn is None: - return None - sub = CTaosInterface.subscribe( - self._conn, restart, topic, sql, interval) - return TDengineSubscription(sub) - - def cursor(self): - """Return a new Cursor object using the connection. - """ - return TDengineCursor(self) - - def commit(self): - """Commit any pending transaction to the database. - - Since TDengine do not support transactions, the implement is void functionality. - """ - pass - - def rollback(self): - """Void functionality - """ - pass - - def clear_result_set(self): - """Clear unused result set on this connection. - """ - pass - - -if __name__ == "__main__": - conn = TDengineConnection(host='192.168.1.107') - conn.close() - print("Hello world") diff --git a/src/connector/python/windows/python3/taos/constants.py b/src/connector/python/windows/python3/taos/constants.py deleted file mode 100644 index 49fc17b2fb..0000000000 --- a/src/connector/python/windows/python3/taos/constants.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Constants in TDengine python -""" - -from .dbapi import * - - -class FieldType(object): - """TDengine Field Types - """ - # type_code - C_NULL = 0 - C_BOOL = 1 - C_TINYINT = 2 - C_SMALLINT = 3 - C_INT = 4 - C_BIGINT = 5 - C_FLOAT = 6 - C_DOUBLE = 7 - C_BINARY = 8 - C_TIMESTAMP = 9 - C_NCHAR = 10 - C_TINYINT_UNSIGNED = 11 - C_SMALLINT_UNSIGNED = 12 - C_INT_UNSIGNED = 13 - C_BIGINT_UNSIGNED = 14 - # NULL value definition - # NOTE: These values should change according to C definition in tsdb.h - C_BOOL_NULL = 0x02 - C_TINYINT_NULL = -128 - C_TINYINT_UNSIGNED_NULL = 255 - C_SMALLINT_NULL = -32768 - C_SMALLINT_UNSIGNED_NULL = 65535 - C_INT_NULL = -2147483648 - C_INT_UNSIGNED_NULL = 4294967295 - C_BIGINT_NULL = -9223372036854775808 - C_BIGINT_UNSIGNED_NULL = 18446744073709551615 - C_FLOAT_NULL = float('nan') - C_DOUBLE_NULL = float('nan') - C_BINARY_NULL = bytearray([int('0xff', 16)]) - # Timestamp precision definition - C_TIMESTAMP_MILLI = 0 - C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/windows/python3/taos/cursor.py b/src/connector/python/windows/python3/taos/cursor.py deleted file mode 100644 index 136cd42fe4..0000000000 --- a/src/connector/python/windows/python3/taos/cursor.py +++ /dev/null @@ -1,220 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * -from .constants import FieldType - -# querySeqNum = 0 - - -class TDengineCursor(object): - """Database cursor which is used to manage the context of a fetch operation. - - Attributes: - .description: Read-only attribute consists of 7-item sequences: - - > name (mondatory) - > type_code (mondatory) - > display_size - > internal_size - > precision - > scale - > null_ok - - This attribute will be None for operations that do not return rows or - if the cursor has not had an operation invoked via the .execute*() method yet. 
- - .rowcount:This read-only attribute specifies the number of rows that the last - .execute*() produced (for DQL statements like SELECT) or affected - """ - - def __init__(self, connection=None): - self._description = [] - self._rowcount = -1 - self._connection = None - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - self._logfile = "" - - if connection is not None: - self._connection = connection - - def __iter__(self): - return self - - def __next__(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetch iterator") - - if self._block_rows <= self._block_iter: - block, self._block_rows = CTaosInterface.fetchRow( - self._result, self._fields) - if self._block_rows == 0: - raise StopIteration - self._block = list(map(tuple, zip(*block))) - self._block_iter = 0 - - data = self._block[self._block_iter] - self._block_iter += 1 - - return data - - @property - def description(self): - """Return the description of the object. - """ - return self._description - - @property - def rowcount(self): - """Return the rowcount of the object - """ - return self._rowcount - - @property - def affected_rows(self): - """Return the affected_rows of the object - """ - return self._affected_rows - - def callproc(self, procname, *args): - """Call a stored database procedure with the given name. - - Void functionality since no stored procedures. - """ - pass - - def close(self): - """Close the cursor. - """ - if self._connection is None: - return False - - self._reset_result() - self._connection = None - - return True - - def execute(self, operation, params=None): - """Prepare and execute a database operation (query or command). - """ - if not operation: - return None - - if not self._connection: - # TODO : change the exception raised here - raise ProgrammingError("Cursor is not connected") - - self._reset_result() - - stmt = operation - if params is not None: - pass - - self._result = CTaosInterface.query(self._connection._conn, stmt) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno == 0: - if CTaosInterface.fieldsCount(self._result) == 0: - self._affected_rows += CTaosInterface.affectedRows( - self._result) - return CTaosInterface.affectedRows(self._result) - else: - self._fields = CTaosInterface.useResult(self._result) - return self._handle_result() - else: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) - - def executemany(self, operation, seq_of_parameters): - """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. - """ - pass - - def fetchone(self): - """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. - """ - pass - - def fetchmany(self): - pass - - def fetchall_row(self): - """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. 
- """ - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchRow( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def fetchall(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchBlock( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - - return list(map(tuple, zip(*buffer))) - - def nextset(self): - """ - """ - pass - - def setinputsize(self, sizes): - pass - - def setutputsize(self, size, column=None): - pass - - def _reset_result(self): - """Reset the result to unused version. - """ - self._description = [] - self._rowcount = -1 - if self._result is not None: - CTaosInterface.freeResult(self._result) - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - - def _handle_result(self): - """Handle the return result from query. - """ - self._description = [] - for ele in self._fields: - self._description.append( - (ele['name'], ele['type'], None, None, None, None, False)) - - return self._result diff --git a/src/connector/python/windows/python3/taos/dbapi.py b/src/connector/python/windows/python3/taos/dbapi.py deleted file mode 100644 index a29621f7a3..0000000000 --- a/src/connector/python/windows/python3/taos/dbapi.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Type Objects and Constructors. 
-""" - -import time -import datetime - - -class DBAPITypeObject(object): - def __init__(self, *values): - self.values = values - - def __com__(self, other): - if other in self.values: - return 0 - if other < self.values: - return 1 - else: - return -1 - - -Date = datetime.date -Time = datetime.time -Timestamp = datetime.datetime - - -def DataFromTicks(ticks): - return Date(*time.localtime(ticks)[:3]) - - -def TimeFromTicks(ticks): - return Time(*time.localtime(ticks)[3:6]) - - -def TimestampFromTicks(ticks): - return Timestamp(*time.localtime(ticks)[:6]) - - -Binary = bytes - -# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) -# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) -# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) -# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() diff --git a/src/connector/python/windows/python3/taos/error.py b/src/connector/python/windows/python3/taos/error.py deleted file mode 100644 index 238b293a0b..0000000000 --- a/src/connector/python/windows/python3/taos/error.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Python exceptions -""" - - -class Error(Exception): - def __init__(self, msg=None, errno=None): - self.msg = msg - self._full_msg = self.msg - self.errno = errno - - def __str__(self): - return self._full_msg - - -class Warning(Exception): - """Exception raised for important warnings like data truncations while inserting. - """ - pass - - -class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. - """ - pass - - -class DatabaseError(Error): - """Exception raised for errors that are related to the database. - """ - pass - - -class DataError(DatabaseError): - """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. - """ - pass - - -class OperationalError(DatabaseError): - """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer - """ - pass - - -class IntegrityError(DatabaseError): - """Exception raised when the relational integrity of the database is affected. - """ - pass - - -class InternalError(DatabaseError): - """Exception raised when the database encounters an internal error. - """ - pass - - -class ProgrammingError(DatabaseError): - """Exception raised for programming errors. - """ - pass - - -class NotSupportedError(DatabaseError): - """Exception raised in case a method or database API was used which is not supported by the database,. 
- """ - pass diff --git a/src/connector/python/windows/python3/taos/subscription.py b/src/connector/python/windows/python3/taos/subscription.py deleted file mode 100644 index 270d9de092..0000000000 --- a/src/connector/python/windows/python3/taos/subscription.py +++ /dev/null @@ -1,57 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * - - -class TDengineSubscription(object): - """TDengine subscription object - """ - - def __init__(self, sub): - self._sub = sub - - def consume(self): - """Consume rows of a subscription - """ - if self._sub is None: - raise OperationalError("Invalid use of consume") - - result, fields = CTaosInterface.consume(self._sub) - buffer = [[] for i in range(len(fields))] - while True: - block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: - break - for i in range(len(fields)): - buffer[i].extend(block[i]) - - self.fields = fields - return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress=True): - """Close the Subscription. - """ - if self._sub is None: - return False - - CTaosInterface.unsubscribe(self._sub, keepProgress) - return True - - -if __name__ == '__main__': - from .connection import TDengineConnection - conn = TDengineConnection( - host="127.0.0.1", - user="root", - password="taosdata", - database="test") - - # Generate a cursor object to run SQL commands - sub = conn.subscribe(True, "test", "select * from meters;", 1000) - - for i in range(0, 10): - data = sub.consume() - for d in data: - print(d) - - sub.close() - conn.close() From 15a7d1431d49395ae28dacd5bf36c874bec94bed Mon Sep 17 00:00:00 2001 From: lichuang Date: Wed, 12 May 2021 10:32:10 +0800 Subject: [PATCH 136/140] [TD-4145]fix compile in ningsi60 --- deps/rmonotonic/src/monotonic.c | 9 ++++ src/client/src/tscSubquery.c | 6 +++ src/common/inc/texpr.h | 3 -- src/plugins/http/src/httpServer.c | 4 ++ src/query/src/qAggMain.c | 72 ++++++++++++++++++++++++++++--- 5 files changed, 85 insertions(+), 9 deletions(-) diff --git a/deps/rmonotonic/src/monotonic.c b/deps/rmonotonic/src/monotonic.c index 1470f91b56..c6d2df9097 100644 --- a/deps/rmonotonic/src/monotonic.c +++ b/deps/rmonotonic/src/monotonic.c @@ -36,6 +36,15 @@ static char monotonic_info_string[32]; static long mono_ticksPerMicrosecond = 0; +#ifdef _TD_NINGSI_60 +// implement __rdtsc in ningsi60 +uint64_t __rdtsc(){ + unsigned int lo,hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return ((uint64_t)hi << 32) | lo; +} +#endif + static monotime getMonotonicUs_x86() { return __rdtsc() / mono_ticksPerMicrosecond; } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 67eea432e6..e9f87b3115 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -637,7 +637,13 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); // set the tag column id for executor to extract correct tag value +#ifndef _TD_NINGSI_60 pExpr->base.param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)}; +#else + pExpr->base.param[0].i64 = colId; + pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT; + pExpr->base.param[0].nLen = sizeof(int64_t); +#endif pExpr->base.numOfParams = 1; } diff --git a/src/common/inc/texpr.h b/src/common/inc/texpr.h index a0854ce81b..d67de9ff69 100644 --- a/src/common/inc/texpr.h +++ b/src/common/inc/texpr.h @@ -89,9 +89,6 @@ tExprNode* exprdup(tExprNode* pTree); bool 
exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param); -typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight, - int32_t rightType, void *output, int32_t order); - void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order, char *(*cb)(void *, const char*, int32_t)); diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index 4dcf3d5501..9d98d3f113 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -269,7 +269,11 @@ static void *httpAcceptHttpConnection(void *arg) { sprintf(pContext->ipstr, "%s:%u", taosInetNtoa(clientAddr.sin_addr), htons(clientAddr.sin_port)); struct epoll_event event; +#ifndef _TD_NINGSI_60 event.events = EPOLLIN | EPOLLPRI | EPOLLWAKEUP | EPOLLERR | EPOLLHUP | EPOLLRDHUP; +#else + event.events = EPOLLIN | EPOLLPRI | EPOLLERR | EPOLLHUP | EPOLLRDHUP; +#endif event.data.ptr = pContext; if (epoll_ctl(pThread->pollFd, EPOLL_CTL_ADD, connFd, &event) < 0) { httpError("context:%p, fd:%d, ip:%s, thread:%s, failed to add http fd for epoll, error:%s", pContext, connFd, diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 3b1ffa46d9..7b656d473a 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -4009,7 +4009,13 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si continue; } +#ifndef _TD_NINGSI_60 SPoint1 st = {.key = tsList[i], .val = val[i]}; +#else + SPoint1 st; + st.key = tsList[i]; + st.val = val[i]; +#endif pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -4021,8 +4027,14 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si if (pCtx->hasNull && isNull((const char*) &val[i], pCtx->inputType)) { continue; } - + +#ifndef _TD_NINGSI_60 SPoint1 st = {.key = tsList[i], .val = val[i]}; +#else + SPoint1 st; + st.key = tsList[i]; + st.val = val[i]; +#endif pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -4034,8 +4046,14 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si if (pCtx->hasNull && isNull((const char*) &val[i], pCtx->inputType)) { continue; } - + +#ifndef _TD_NINGSI_60 SPoint1 st = {.key = tsList[i], .val = val[i]}; +#else + SPoint1 st; + st.key = tsList[i]; + st.val = val[i]; +#endif pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -4047,8 +4065,14 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si if (pCtx->hasNull && isNull((const char*) &val[i], pCtx->inputType)) { continue; } - + +#ifndef _TD_NINGSI_60 SPoint1 st = {.key = tsList[i], .val = (double) val[i]}; +#else + SPoint1 st; + st.key = tsList[i]; + st.val = (double)val[i]; +#endif pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -4060,8 +4084,14 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si if (pCtx->hasNull && isNull((const char*) &val[i], pCtx->inputType)) { continue; } - + +#ifndef _TD_NINGSI_60 SPoint1 st = {.key = tsList[i], .val = val[i]}; +#else + SPoint1 st; + st.key = tsList[i]; + st.val = (double)val[i]; +#endif pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -4073,8 +4103,14 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si if (pCtx->hasNull && isNull((const char*) &val[i], pCtx->inputType)) { continue; } - + +#ifndef _TD_NINGSI_60 SPoint1 st = {.key = tsList[i], .val = 
val[i]}; +#else + SPoint1 st; + st.key = tsList[i]; + st.val = val[i]; +#endif pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -4087,7 +4123,13 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si continue; } +#ifndef _TD_NINGSI_60 SPoint1 st = {.key = tsList[i], .val = val[i]}; +#else + SPoint1 st; + st.key = tsList[i]; + st.val = val[i]; +#endif pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -4100,7 +4142,13 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si continue; } +#ifndef _TD_NINGSI_60 SPoint1 st = {.key = tsList[i], .val = val[i]}; +#else + SPoint1 st; + st.key = tsList[i]; + st.val = val[i]; +#endif pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -4113,7 +4161,13 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si continue; } +#ifndef _TD_NINGSI_60 SPoint1 st = {.key = tsList[i], .val = val[i]}; +#else + SPoint1 st; + st.key = tsList[i]; + st.val = val[i]; +#endif pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -4125,8 +4179,14 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si if (pCtx->hasNull && isNull((const char*) &val[i], pCtx->inputType)) { continue; } - + +#ifndef _TD_NINGSI_60 SPoint1 st = {.key = tsList[i], .val = (double) val[i]}; +#else + SPoint1 st; + st.key = tsList[i]; + st.val = (double) val[i]; +#endif pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } From 00204198aa99024b894cd00ae647d80286c105c4 Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Wed, 12 May 2021 18:50:53 +0800 Subject: [PATCH 137/140] [TD-182]: bump python connector version to v2.0.10 (#6091) Also fix url error in setup.py --- src/connector/python/setup.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/connector/python/setup.py b/src/connector/python/setup.py index 4d083d7ddb..901e8396c0 100644 --- a/src/connector/python/setup.py +++ b/src/connector/python/setup.py @@ -5,16 +5,15 @@ with open("README.md", "r") as fh: setuptools.setup( name="taos", - version="2.0.9", + version="2.0.10", author="Taosdata Inc.", author_email="support@taosdata.com", description="TDengine python client package", long_description=long_description, long_description_content_type="text/markdown", - url="https://github.com/pypa/sampleproject", + url="https://github.com/taosdata/TDengine/tree/develop/src/connector/python", packages=setuptools.find_packages(), classifiers=[ - "Environment :: Console", "Environment :: MacOS X", "Environment :: Win32 (MS Windows)", From 15ad9bd137ed11b251ba8066193d5eb795e74323 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 12 May 2021 19:43:44 +0800 Subject: [PATCH 138/140] [TD-4130]: taosdemo subscribe stable. (#6090) * [TD-4130]: taosdemo subscribe stable. for develop branch. * [TD-4130]: taosdemo subscribe stable. fix append result to file. Co-authored-by: Shuduo Sang From c4ac43af993550ca505799935c81c5938cf469f7 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 13 May 2021 00:19:13 +0800 Subject: [PATCH 139/140] [TD-4158]: taosdemo cmake failed with low version git. 
(#6107) --- src/kit/taosdemo/CMakeLists.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt index 4e38a8842e..5f75be0e19 100644 --- a/src/kit/taosdemo/CMakeLists.txt +++ b/src/kit/taosdemo/CMakeLists.txt @@ -10,7 +10,11 @@ IF (GIT_FOUND) COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c RESULT_VARIABLE RESULT OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1) - STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1) + IF ("${TAOSDEMO_COMMIT_SHA1}" STREQUAL "") + MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1}) + ELSE () + STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1) + ENDIF () EXECUTE_PROCESS( COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c RESULT_VARIABLE RESULT From d7a11fb8b847a61ece56b84873e0d5ceb12c7b24 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 13 May 2021 04:25:10 +0800 Subject: [PATCH 140/140] Hotfix/sangshuduo/td 3992 taosdemo subscribe for develop (#6109) * [TD-3992]: taosdemo subscribe. * [TD-3992]: taosdemo subscribe. refactor sync/async mode. * [TD-3992]: taosdemo subscribe stable. add missed code lines Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 43 +++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 8866bf2607..f584448e63 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -770,16 +770,16 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->sqlFile = argv[++i]; } else if (strcmp(argv[i], "-q") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, 1: ASYNC. 
Default is SYNC.\n"); exit(EXIT_FAILURE); } arguments->async_mode = atoi(argv[++i]); } else if (strcmp(argv[i], "-T") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-T need a number following!\n"); exit(EXIT_FAILURE); @@ -794,24 +794,24 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->insert_interval = atoi(argv[++i]); } else if (strcmp(argv[i], "-qt") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-qt need a number following!\n"); exit(EXIT_FAILURE); } arguments->query_times = atoi(argv[++i]); } else if (strcmp(argv[i], "-B") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-B need a number following!\n"); exit(EXIT_FAILURE); } arguments->interlace_rows = atoi(argv[++i]); } else if (strcmp(argv[i], "-r") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-r need a number following!\n"); exit(EXIT_FAILURE); @@ -1071,7 +1071,7 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { if (code != 0) { if (!quiet) { debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); - errorPrint("Failed to run %s, reason: %s\n", command, taos_errstr(res)); + errorPrint("Failed to execute %s, reason: %s\n", command, taos_errstr(res)); } taos_free_result(res); //taos_close(taos); @@ -4071,7 +4071,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); if (gQueryTimes && gQueryTimes->type == cJSON_Number) { - if (gQueryTimes->valueint < 0) { + if (gQueryTimes->valueint <= 0) { errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__); goto PARSE_OVER; @@ -4122,9 +4122,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, "query_times"); if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { - if (specifiedQueryTimes->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + if (specifiedQueryTimes->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + __func__, __LINE__, specifiedQueryTimes->valueint); goto PARSE_OVER; } @@ -4266,9 +4266,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); if (superQueryTimes && superQueryTimes->type == cJSON_Number) { - if (superQueryTimes->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + if (superQueryTimes->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + __func__, __LINE__, superQueryTimes->valueint); goto PARSE_OVER; } g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; @@ -5232,6 +5232,13 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { startTs = taosGetTimestampMs(); + if (recOfBatch == 0) { + errorPrint("[%d] %s() LN%d try inserting records of batch is %"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + 
recOfBatch); + errorPrint("%s\n", "\tPlease check if the batch or the buffer length is proper value!\n"); + goto free_of_interlace; + } int64_t affectedRows = execInsert(pThreadInfo, buffer, recOfBatch); endTs = taosGetTimestampMs();