Merge remote-tracking branch 'origin/master' into feature/crash_gen2

Steven Li 2021-05-11 01:49:14 +00:00
commit ba51543551
120 changed files with 5402 additions and 1723 deletions

View File

@ -7,27 +7,19 @@ platform:
arch: amd64
steps:
- name: build
image: gcc
- name: smoke_test
image: python:3.8
commands:
- apt-get update
- apt-get install -y cmake build-essential git
- apt-get install -y cmake build-essential gcc
- pip3 install psutil
- pip3 install guppy3
- pip3 install src/connector/python/linux/python3/
- mkdir debug
- cd debug
- cmake ..
- make
when:
branch:
- develop
- master
- name: smoke_test
image: python:3.8
commands:
- pip3 install psutil
- pip3 install guppy3
- pip3 install src/connector/python/linux/python3/
- cd tests
- cd ../tests
- ./test-all.sh smoke
when:
branch:
@ -82,7 +74,7 @@ platform:
steps:
- name: build
image: gcc
image: arm32v7/ubuntu:bionic
commands:
- apt-get update
- apt-get install -y cmake build-essential

View File

@ -1,296 +0,0 @@
#
# Configuration
#
#
# Build Matrix
#
branches:
only:
- master
- develop
- coverity_scan
- /^.*ci-.*$/
matrix:
- os: linux
dist: focal
language: c
git:
- depth: 1
compiler: gcc
env: DESC="linux/gcc build and test"
addons:
apt:
packages:
- build-essential
- cmake
- net-tools
- python3-pip
- python3-setuptools
- valgrind
- psmisc
- unixodbc
- unixodbc-dev
- mono-complete
before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
script:
- cmake .. > /dev/null
- make > /dev/null
after_success:
- travis_wait 20
- |-
case $TRAVIS_OS_NAME in
linux)
cd ${TRAVIS_BUILD_DIR}/debug
make install > /dev/null || travis_terminate $?
py3ver=`python3 --version|awk '{print $2}'|cut -d "." -f 1,2` && apt install python$py3ver-dev
pip3 install psutil
pip3 install guppy3
pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
cd ${TRAVIS_BUILD_DIR}/tests/examples/C#/taosdemo
mcs -out:taosdemo *.cs || travis_terminate $?
pkill -TERM -x taosd
fuser -k -n tcp 6030
sleep 1
${TRAVIS_BUILD_DIR}/debug/build/bin/taosd -c ${TRAVIS_BUILD_DIR}/debug/test/cfg > /dev/null &
sleep 5
mono taosdemo -Q DEFAULT -y || travis_terminate $?
pkill -KILL -x taosd
fuser -k -n tcp 6030
sleep 1
cd ${TRAVIS_BUILD_DIR}/tests
./test-all.sh smoke || travis_terminate $?
sleep 1
cd ${TRAVIS_BUILD_DIR}/tests/pytest
pkill -TERM -x taosd
fuser -k -n tcp 6030
sleep 1
./crash_gen.sh -a -p -t 4 -s 2000|| travis_terminate $?
sleep 1
cd ${TRAVIS_BUILD_DIR}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log
sleep 1
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
grep 'start to execute\|ERROR SUMMARY' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-mem-error-out.log
for memError in `grep 'ERROR SUMMARY' uniq-mem-error-out.log | awk '{print $4}'`
do
if [ -n "$memError" ]; then
if [ "$memError" -gt 12 ]; then
echo -e "${RED} ## Memory errors number valgrind reports is $memError.\
More than our threshold! ## ${NC}"
travis_terminate $memError
fi
fi
done
grep 'start to execute\|definitely lost:' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-definitely-lost-out.log
for defiMemError in `grep 'definitely lost:' uniq-definitely-lost-out.log | awk '{print $7}'`
do
if [ -n "$defiMemError" ]; then
if [ "$defiMemError" -gt 13 ]; then
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
travis_terminate $defiMemError
fi
fi
done
;;
esac
- os: linux
dist: bionic
language: c
compiler: gcc
env: COVERITY_SCAN=true
git:
- depth: 1
script:
- echo "this job is for coverity scan"
addons:
coverity_scan:
# GitHub project metadata
# ** specific to your project **
project:
name: TDengine
version: 2.x
description: TDengine
# Where email notification of build analysis results will be sent
notification_email: sdsang@taosdata.com, slguan@taosdata.com
# Commands to prepare for build_command
# ** likely specific to your build **
build_command_prepend: cmake . > /dev/null
# The command that will be added as an argument to "cov-build" to compile your project for analysis,
# ** likely specific to your build **
build_command: make
# Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'.
# Take care in resource usage, and consider the build frequency allowances per
# https://scan.coverity.com/faq#frequency
branch_pattern: coverity_scan
- os: linux
dist: trusty
language: c
git:
- depth: 1
addons:
apt:
packages:
- build-essential
- cmake
- binutils-2.26
- unixodbc
- unixodbc-dev
env:
- DESC="trusty/gcc-4.8/bintuils-2.26 build"
before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
script:
- cmake .. > /dev/null
- export PATH=/usr/lib/binutils-2.26/bin:$PATH && make
- os: linux
dist: bionic
language: c
compiler: clang
env: DESC="linux/clang build"
git:
- depth: 1
addons:
apt:
packages:
- build-essential
- cmake
- unixodbc
- unixodbc-dev
before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
script:
- cmake .. > /dev/null
- make > /dev/null
- os: linux
arch: arm64
dist: bionic
language: c
compiler: clang
env: DESC="arm64 linux/clang build"
git:
- depth: 1
addons:
apt:
packages:
- build-essential
- cmake
before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
script:
- if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
cmake .. -DCPUTYPE=aarch64 > /dev/null;
else
cmake .. > /dev/null;
fi
- make > /dev/null
- os: linux
arch: arm64
dist: xenial
language: c
git:
- depth: 1
addons:
apt:
packages:
- build-essential
- cmake
- unixodbc
- unixodbc-dev
env:
- DESC="arm64 xenial build"
before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
script:
- if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
cmake .. -DCPUTYPE=aarch64 > /dev/null;
else
cmake .. > /dev/null;
fi
- make > /dev/null
- os: osx
osx_image: xcode11.4
language: c
compiler: clang
env: DESC="mac/clang build"
git:
- depth: 1
addons:
homebrew:
- cmake
- unixodbc
script:
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
- cmake .. > /dev/null
- make > /dev/null

Jenkinsfile vendored
View File

@ -94,6 +94,7 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
pip3 install ${WKC}/src/connector/python/linux/python3/
'''
return 1
}

View File

@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.25-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.28-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")

View File

@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.20.0")
SET(TD_VER_NUMBER "2.0.20.2")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.0.20.0'
version: '2.0.20.2'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.20.0
- usr/lib/libtaos.so.2.0.20.2
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so

View File

@ -168,7 +168,8 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo);
static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize);
int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
@ -297,7 +298,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);

View File

@ -83,6 +83,7 @@ typedef struct STableMeta {
typedef struct STableMetaInfo {
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
uint32_t tableMetaSize;
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray<SVgroupTableInfo>

View File

@ -705,19 +705,11 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
}
code = TSDB_CODE_TSC_INVALID_SQL;
char *tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \"
if (NULL == tmpTokenBuf) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
char tmpTokenBuf[16*1024] = {0}; // used for deleting Escape character: \\, \', \"
int32_t numOfRows = 0;
code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);
free(tmpTokenBuf);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
SParamInfo *param = dataBuf->params + i;
if (param->idx == -1) {

View File

@ -64,7 +64,7 @@ static char* getAccountId(SSqlObj* pSql);
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
static char* cloneCurrentDBName(SSqlObj* pSql);
static bool hasSpecifyDB(SStrToken* pTableName);
static int32_t getDelimiterIndex(SStrToken* pTableName);
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
@ -426,17 +426,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_DESCRIBE_TABLE: {
const char* msg1 = "invalid table name";
const char* msg2 = "table name too long";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (!tscValidateTableNameLength(pToken->n)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// additional msg has been attached already
code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
@ -447,17 +441,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_SHOW_CREATE_TABLE: {
const char* msg1 = "invalid table name";
const char* msg2 = "table name is too long";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (!tscValidateTableNameLength(pToken->n)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -642,17 +631,25 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// set the command/global limit parameters from the first subclause to the sqlcmd object
SQueryInfo* pQueryInfo1 = tscGetQueryInfoDetail(pCmd, 0);
pCmd->command = pQueryInfo1->command;
int32_t diffSize = 0;
// if there is only one element, the limit of clause is the limit of global result.
for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pCmd, i);
int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo);
int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo, &diffSize);
if (ret != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
if (diffSize) {
for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pCmd, i);
tscFieldInfoSetSize(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo);
}
}
pCmd->parseFinished = 1;
return TSDB_CODE_SUCCESS; // do not build query message here
}
@ -979,11 +976,13 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam
const char* msg1 = "name too long";
const char* msg2 = "acctId too long";
const char* msg3 = "no acctId";
const char* msg4 = "db name too long";
const char* msg5 = "table name too long";
SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS;
if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path
int32_t idx = getDelimiterIndex(pTableName);
if (idx != -1) { // db has been specified in sql string so we ignore current db path
char* acctId = getAccountId(pSql);
if (acctId == NULL || strlen(acctId) <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
@ -993,6 +992,13 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam
if (code != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (idx >= TSDB_DB_NAME_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
if (pTableName->n - 1 - idx >= TSDB_TABLE_NAME_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
char name[TSDB_TABLE_FNAME_LEN] = {0};
strncpy(name, pTableName->z, pTableName->n);
@ -1337,14 +1343,13 @@ static char* cloneCurrentDBName(SSqlObj* pSql) {
}
/* length limitation, strstr cannot be applied */
static bool hasSpecifyDB(SStrToken* pTableName) {
for (uint32_t i = 0; i < pTableName->n; ++i) {
if (pTableName->z[i] == TS_PATH_DELIMITER[0]) {
return true;
static int32_t getDelimiterIndex(SStrToken* pTableName) {
for (uint32_t i = 0; i < pTableName->n; ++i) {
if (pTableName->z[i] == TS_PATH_DELIMITER[0]) {
return i;
}
}
return false;
return -1;
}
int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* xlen) {
@ -1606,6 +1611,22 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
return false;
}
static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) {
size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) {
continue;
}
return true;
}
return false;
}
int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectList, bool isSTable, bool joinQuery, bool timeWindowQuery) {
assert(pSelectList != NULL && pCmd != NULL);
const char* msg1 = "too many columns in selection clause";
@ -1670,7 +1691,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectLis
// there is only one user-defined column in the final result field, add the timestamp column.
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
if ((numOfSrcCols <= 0 || !hasNoneUserDefineExpr(pQueryInfo)) && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
addPrimaryTsColIntoResult(pQueryInfo);
}
@ -7459,11 +7480,3 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
return false;
}

View File

@ -1973,6 +1973,8 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
assert(pTableMeta->tableType == TSDB_SUPER_TABLE || pTableMeta->tableType == TSDB_CHILD_TABLE || pTableMeta->tableType == TSDB_NORMAL_TABLE || pTableMeta->tableType == TSDB_STREAM_TABLE);
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
// check whether the super table is in the hashmap or not
int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
@ -2530,10 +2532,23 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
assert(tIsValidName(&pTableMetaInfo->name));
tfree(pTableMetaInfo->pTableMeta);
uint32_t size = tscGetTableMetaMaxSize();
pTableMetaInfo->pTableMeta = calloc(1, size);
if (pTableMetaInfo->pTableMeta == NULL) {
pTableMetaInfo->pTableMeta = calloc(1, size);
pTableMetaInfo->tableMetaSize = size;
} else if (pTableMetaInfo->tableMetaSize < size) {
char *tmp = realloc(pTableMetaInfo->pTableMeta, size);
if (tmp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pTableMetaInfo->pTableMeta = (STableMeta *)tmp;
memset(pTableMetaInfo->pTableMeta, 0, size);
pTableMetaInfo->tableMetaSize = size;
} else {
//uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta);
memset(pTableMetaInfo->pTableMeta, 0, size);
pTableMetaInfo->tableMetaSize = size;
}
pTableMetaInfo->pTableMeta->tableType = -1;
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
@ -2545,10 +2560,13 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
// TODO resize the tableMeta
char buf[80*1024] = {0};
assert(size < 80*1024);
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
if (pMeta->tableType == TSDB_CHILD_TABLE) {
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name);
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name, buf);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}

View File

@ -693,7 +693,8 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
tfree(pTableMetaInfo->pTableMeta);
}
pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pDataBlock->pTableMeta);
}
/*
@ -1098,7 +1099,7 @@ int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) {
return pInfo->pSqlExpr->offset;
}
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) {
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize) {
assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
if (pFieldInfo1->numOfOutput != pFieldInfo2->numOfOutput) {
@ -1110,15 +1111,36 @@ int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFi
TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
if (pField1->type != pField2->type ||
pField1->bytes != pField2->bytes ||
strcasecmp(pField1->name, pField2->name) != 0) {
return 1;
}
if (pField1->bytes != pField2->bytes) {
*diffSize = 1;
if (pField2->bytes > pField1->bytes) {
pField1->bytes = pField2->bytes;
}
}
}
return 0;
}
int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) {
assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
for (int32_t i = 0; i < pFieldInfo1->numOfOutput; ++i) {
TAOS_FIELD* pField1 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo1, i);
TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
pField2->bytes = pField1->bytes;
}
return 0;
}
int32_t tscGetResRowLength(SArray* pExprList) {
size_t num = taosArrayGetSize(pExprList);
if (num == 0) {
@ -2057,6 +2079,11 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
}
pTableMetaInfo->pTableMeta = pTableMeta;
if (pTableMetaInfo->pTableMeta == NULL) {
pTableMetaInfo->tableMetaSize = 0;
} else {
pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta);
}
if (vgroupList != NULL) {
pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList);
@ -2331,6 +2358,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) {
@ -2682,7 +2710,13 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) {
//backup the total number of result first
int64_t num = pRes->numOfTotal + pRes->numOfClauseTotal;
// Don't free 'final' since it may be recorded and used later in the APP
TAOS_FIELD* finalBk = pRes->final;
pRes->final = NULL;
tscFreeSqlResult(pSql);
pRes->final = finalBk;
pRes->numOfTotal = num;
@ -2915,11 +2949,11 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
return cMeta;
}
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
assert(pChild != NULL);
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf) {
assert(pChild != NULL && buf != NULL);
uint32_t size = tscGetTableMetaMaxSize();
STableMeta* p = calloc(1, size);
// uint32_t size = tscGetTableMetaMaxSize();
STableMeta* p = buf;//calloc(1, size);
taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1);
if (p->id.uid > 0) { // tableMeta exists, build child table meta and return
@ -2931,12 +2965,12 @@ int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
memcpy(pChild->schema, p->schema, sizeof(SSchema) *total);
tfree(p);
// tfree(p);
return TSDB_CODE_SUCCESS;
} else { // super table has been removed, current tableMeta is also expired. remove it here
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tfree(p);
// tfree(p);
return -1;
}
}

View File

@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.25-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.28-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})

View File

@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.25</version>
<version>2.0.28</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>

View File

@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.25</version>
<version>2.0.28</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>

View File

@ -4,13 +4,23 @@ import java.sql.*;
import java.util.Enumeration;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.*;
public abstract class AbstractConnection extends WrapperImpl implements Connection {
protected volatile boolean isClosed;
protected volatile String catalog;
protected volatile Properties clientInfoProps = new Properties();
protected final Properties clientInfoProps = new Properties();
protected AbstractConnection(Properties properties) {
Set<String> propNames = properties.stringPropertyNames();
for (String propName : propNames) {
clientInfoProps.setProperty(propName, properties.getProperty(propName));
}
String timestampFormat = properties.getProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "STRING");
clientInfoProps.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, timestampFormat);
}
@Override
public abstract Statement createStatement() throws SQLException;
@ -35,7 +45,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
}
@Override
public void setAutoCommit(boolean autoCommit) throws SQLException {
if (isClosed())
@ -441,9 +450,8 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
if (isClosed)
throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED);
if (clientInfoProps == null)
clientInfoProps = new Properties();
clientInfoProps.setProperty(name, value);
if (clientInfoProps != null)
clientInfoProps.setProperty(name, value);
}
@Override
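
A minimal usage sketch of the constructor change above (not part of this commit; host, port and property value are illustrative): because AbstractConnection now copies every connect-time property into clientInfoProps, values such as the timestamp format become readable through Connection.getClientInfo(). Requires java.sql.*, java.util.Properties and com.taosdata.jdbc.TSDBDriver.

// Sketch: properties passed at connect time are visible via getClientInfo()
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties props = new Properties();
props.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "UTC");
Connection conn = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:0/", props);
String fmt = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT); // "UTC"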

View File

@ -10,6 +10,7 @@ import java.util.Map;
public abstract class AbstractResultSet extends WrapperImpl implements ResultSet {
private int fetchSize;
protected boolean wasNull;
protected void checkAvailability(int columnIndex, int bounds) throws SQLException {
if (isClosed())
@ -28,7 +29,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
@Override
public boolean wasNull() throws SQLException {
return false;
return wasNull;
}
@Override
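
With the wasNull field added above, each typed getter can record whether the last column read was SQL NULL, matching the JDBC contract. A minimal usage sketch (conn is an open Connection; table and column names are illustrative, not part of this commit):

// Sketch: distinguishing SQL NULL from a genuine zero after this change
try (Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("select f1 from test.weather")) {
    while (rs.next()) {
        int v = rs.getInt(1);   // returns 0 when the column is NULL
        if (rs.wasNull()) {
            // the column really was NULL, not 0
        }
    }
}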

View File

@ -28,6 +28,7 @@ public class TSDBConnection extends AbstractConnection {
}
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
super(info);
this.databaseMetaData = meta;
connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),

View File

@ -95,11 +95,16 @@ public class TSDBDriver extends AbstractDriver {
*/
public static final String PROPERTY_KEY_BATCH_LOAD = "batchfetch";
/**
* timestamp format for JDBC-RESTful; should be one of the options: string, timestamp or utc
*/
public static final String PROPERTY_KEY_TIMESTAMP_FORMAT = "timestampFormat";
private TSDBDatabaseMetaData dbMetaData = null;
static {
try {
java.sql.DriverManager.registerDriver(new TSDBDriver());
DriverManager.registerDriver(new TSDBDriver());
} catch (SQLException e) {
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_CANNOT_REGISTER_JNI_DRIVER, e);
}

View File

@ -14,11 +14,12 @@
*****************************************************************************/
package com.taosdata.jdbc;
import com.taosdata.jdbc.utils.Utils;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.nio.charset.Charset;
import java.sql.*;
import java.util.ArrayList;
import java.util.Calendar;
@ -33,17 +34,9 @@ import java.util.regex.Pattern;
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
private String rawSql;
private String sql;
// private ArrayList<Object> parameters = new ArrayList<>();
private Object[] parameters;
private boolean isPrepared;
//start with insert or import and is case-insensitive
private static Pattern savePattern = Pattern.compile("(?i)^\\s*(insert|import)");
// is insert or import
private boolean isSaved;
// private SavedPreparedStatement savedPreparedStatement;
private volatile TSDBParameterMetaData parameterMetaData;
TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) {
@ -65,35 +58,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
private void init(String sql) {
this.rawSql = sql;
preprocessSql();
// this.isSaved = isSavedSql(this.rawSql);
// if (this.isSaved) {
// try {
// this.savedPreparedStatement = new SavedPreparedStatement(this.rawSql, this);
// } catch (SQLException e) {
// e.printStackTrace();
// }
// }
}
/**
* if the precompiled sql is insert or import
*
* @param sql
* @return
*/
private boolean isSavedSql(String sql) {
Matcher matcher = savePattern.matcher(sql);
return matcher.find();
}
@Override
public int[] executeBatch() throws SQLException {
// if (isSaved) {
// return this.savedPreparedStatement.executeBatch();
// } else {
return super.executeBatch();
// }
}
/*
@ -157,152 +126,64 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
*
* @return a string of the native sql statement for TSDB
*/
// private String getNativeSql(String rawSql) {
// for (int i = 0; i < parameters.length; i++) {
// Object para = parameters[i];
// if (para != null) {
// String paraStr = para.toString();
// if (para instanceof Timestamp || para instanceof String) {
// paraStr = "'" + paraStr + "'";
// }
// this.sql = this.sql.replaceFirst("[?]", paraStr);
// } else {
// this.sql = this.sql.replaceFirst("[?]", "NULL");
// }
// }
// parameters = new Object[parameters.length];
// return sql;
// }
private String getNativeSql(String rawSql) throws SQLException {
String sql = rawSql;
for (int i = 0; i < parameters.length; ++i) {
Object para = parameters[i];
if (para != null) {
String paraStr;
if (para instanceof byte[]) {
paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
} else {
paraStr = para.toString();
}
// if para is timestamp or String or byte[] need to translate ' character
if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
paraStr = paraStr.replaceAll("'", "\\\\\\\\'");
paraStr = "'" + paraStr + "'";
}
sql = sql.replaceFirst("[?]", paraStr);
} else {
sql = sql.replaceFirst("[?]", "NULL");
}
}
clearParameters();
return sql;
return Utils.getNativeSql(rawSql, this.parameters);
}
@Override
public ResultSet executeQuery() throws SQLException {
// if (isSaved) {
// this.savedPreparedStatement.executeBatchInternal();
// return null;
// } else {
if (!isPrepared)
return executeQuery(this.rawSql);
final String sql = getNativeSql(this.rawSql);
return executeQuery(sql);
// }
}
@Override
public int executeUpdate() throws SQLException {
// if (isSaved) {
// return this.savedPreparedStatement.executeBatchInternal();
// } else {
if (!isPrepared)
return executeUpdate(this.rawSql);
String sql = getNativeSql(this.rawSql);
return executeUpdate(sql);
// }
}
private boolean isSupportedSQLType(int sqlType) {
switch (sqlType) {
case Types.TIMESTAMP:
case Types.INTEGER:
case Types.BIGINT:
case Types.FLOAT:
case Types.DOUBLE:
case Types.SMALLINT:
case Types.TINYINT:
case Types.BOOLEAN:
case Types.BINARY:
case Types.NCHAR:
return true;
default:
return false;
}
}
@Override
public void setNull(int parameterIndex, int sqlType) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
if (!isSupportedSQLType(sqlType) || parameterIndex < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
// if (parameterIndex >= parameters.size())
// throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_BOUNDARY);
setObject(parameterIndex, "NULL");
setObject(parameterIndex, null);
}
@Override
public void setBoolean(int parameterIndex, boolean x) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
setObject(parameterIndex, x);
}
@Override
public void setByte(int parameterIndex, byte x) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
setObject(parameterIndex,x);
setObject(parameterIndex, x);
}
@Override
public void setShort(int parameterIndex, short x) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
setObject(parameterIndex, x);
}
@Override
public void setInt(int parameterIndex, int x) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
setObject(parameterIndex, x);
}
@Override
public void setLong(int parameterIndex, long x) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
setObject(parameterIndex, x);
}
@Override
public void setFloat(int parameterIndex, float x) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
setObject(parameterIndex, x);
}
@Override
public void setDouble(int parameterIndex, double x) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
setObject(parameterIndex, x);
}
@ -315,17 +196,12 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override
public void setString(int parameterIndex, String x) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
setObject(parameterIndex, x);
}
@Override
public void setBytes(int parameterIndex, byte[] x) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
setObject(parameterIndex,x);
setObject(parameterIndex, x);
}
@Override
@ -344,8 +220,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override
public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
setObject(parameterIndex, x);
}
@ -360,7 +234,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@ -375,8 +248,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void clearParameters() throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
// parameters.clear();
parameters = new Object[parameters.length];
}
@ -384,43 +255,29 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
setObject(parameterIndex, x);
}
@Override
public void setObject(int parameterIndex, Object x) throws SQLException {
// if (isSaved) {
// this.savedPreparedStatement.setParam(parameterIndex, x);
// } else {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
if (parameterIndex < 1 && parameterIndex >= parameters.length)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
parameters[parameterIndex - 1] = x;
// parameters.add(x);
// }
}
@Override
public boolean execute() throws SQLException {
// if (isSaved) {
// int result = this.savedPreparedStatement.executeBatchInternal();
// return result > 0;
// } else {
if (!isPrepared)
return execute(this.rawSql);
final String sql = getNativeSql(this.rawSql);
return execute(sql);
// }
}
@Override
public void addBatch() throws SQLException {
// if (isSaved) {
// this.savedPreparedStatement.addBatch();
// } else {
if (this.batchedArgs == null) {
batchedArgs = new ArrayList<>();
}
@ -431,7 +288,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
String sql = this.getConnection().nativeSQL(this.rawSql);
addBatch(sql);
}
// }
}
@Override
@ -475,7 +331,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public ResultSetMetaData getMetaData() throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
// return this.getResultSet().getMetaData();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}

View File

@ -203,7 +203,11 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
if (!lastWasNull) {
res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
Object value = this.rowData.get(columnIndex - 1);
if (value instanceof Timestamp)
res = ((Timestamp) value).getTime();
else
res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
}
return res;
}
@ -273,7 +277,6 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, this.columnMetaDataList.size());
Timestamp res = null;
if (this.getBatchFetch())
return this.blockData.getTimestamp(columnIndex - 1);

View File

@ -17,6 +17,7 @@ package com.taosdata.jdbc;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
@ -299,7 +300,19 @@ public class TSDBResultSetRowData {
}
public void setTimestamp(int col, long ts) {
data.set(col, new Timestamp(ts));
//TODO: this implementation contains a logical error:
// when the precision is us, (long ts) is a 16-digit number;
// when the precision is ms, (long ts) is a 13-digit number.
// We need a JNI function like this:
// public void setTimestamp(int col, long epochSecond, long nanoAdjustment)
if (ts < 1_0000_0000_0000_0L) {
data.set(col, new Timestamp(ts));
} else {
long epochSec = ts / 1000_000l;
long nanoAdjustment = ts % 1000_000l * 1000l;
Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
data.set(col, timestamp);
}
}
public Timestamp getTimestamp(int col) {
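
The TODO above describes the heuristic applied here: an epoch value in the 13-digit range is treated as milliseconds and a 16-digit value as microseconds. A standalone sketch of the same conversion (method name is illustrative, not part of this commit; requires java.sql.Timestamp and java.time.Instant):

// Sketch of the ms-vs-us digit-count heuristic used in setTimestamp above
static Timestamp fromEpoch(long ts) {
    if (ts < 1_0000_0000_0000_0L) {             // 13 digits or fewer: milliseconds
        return new Timestamp(ts);
    }
    long epochSec = ts / 1_000_000L;            // 16 digits: microseconds
    long nanoAdjustment = ts % 1_000_000L * 1_000L;
    return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
}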

View File

@ -22,6 +22,7 @@ public class RestfulConnection extends AbstractConnection {
private final DatabaseMetaData metadata;
public RestfulConnection(String host, String port, Properties props, String database, String url) {
super(props);
this.host = host;
this.port = Integer.parseInt(port);
this.database = database;

View File

@ -17,7 +17,7 @@ public class RestfulDriver extends AbstractDriver {
static {
try {
java.sql.DriverManager.registerDriver(new RestfulDriver());
DriverManager.registerDriver(new RestfulDriver());
} catch (SQLException e) {
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_URL_NOT_SET, e);
}

View File

@ -2,12 +2,12 @@ package com.taosdata.jdbc.rs;
import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;
import com.taosdata.jdbc.utils.Utils;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.nio.charset.Charset;
import java.sql.*;
import java.util.Calendar;
@ -21,6 +21,7 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
public RestfulPreparedStatement(RestfulConnection conn, String database, String sql) {
super(conn, database);
this.rawSql = sql;
if (sql.contains("?")) {
int parameterCnt = 0;
for (int i = 0; i < sql.length(); i++) {
@ -58,29 +59,14 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
return executeUpdate(sql);
}
private String getNativeSql(String rawSql) throws SQLException {
String sql = rawSql;
for (int i = 0; i < parameters.length; ++i) {
Object para = parameters[i];
if (para != null) {
String paraStr;
if (para instanceof byte[]) {
paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
} else {
paraStr = para.toString();
}
// if para is timestamp or String or byte[] need to translate ' character
if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
paraStr = paraStr.replaceAll("'", "\\\\\\\\'");
paraStr = "'" + paraStr + "'";
}
sql = sql.replaceFirst("[?]", paraStr);
} else {
sql = sql.replaceFirst("[?]", "NULL");
}
}
clearParameters();
return sql;
/****
* Convert rawSql into an executable SQL statement by substituting the values held in the parameters field.
* e.g. insert into ?.? (?,?,?) using ?.? (?,?,?) tags(?, ?, ?) values(?, ?, ?)
* @param rawSql may be an insert, a select, or any other statement that uses ? as a placeholder
* @return
*/
private String getNativeSql(String rawSql) {
return Utils.getNativeSql(rawSql, this.parameters);
}
@Override
@ -220,8 +206,8 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
setObject(parameterIndex,x);
setObject(parameterIndex, x);
}
@Override

View File

@ -5,13 +5,12 @@ import com.alibaba.fastjson.JSONObject;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import com.google.common.primitives.Shorts;
import com.taosdata.jdbc.AbstractResultSet;
import com.taosdata.jdbc.TSDBConstants;
import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;
import com.taosdata.jdbc.*;
import java.math.BigDecimal;
import java.sql.*;
import java.time.Instant;
import java.time.ZoneOffset;
import java.util.ArrayList;
import java.util.Calendar;
@ -19,6 +18,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
private volatile boolean isClosed;
private int pos = -1;
private final String database;
private final Statement statement;
// data
@ -65,7 +65,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
}
}
private Object parseColumnData(JSONArray row, int colIndex, int taosType) {
private Object parseColumnData(JSONArray row, int colIndex, int taosType) throws SQLException {
switch (taosType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
return row.getBoolean(colIndex);
@ -81,8 +81,44 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return row.getFloat(colIndex);
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
return row.getDouble(colIndex);
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
return new Timestamp(row.getDate(colIndex).getTime());
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
if (row.get(colIndex) == null)
return null;
String timestampFormat = this.statement.getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT);
if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) {
Long value = row.getLong(colIndex);
//TODO: this implementation has a bug if the timestamp is bigger than 9999_9999_9999_9
if (value < 1_0000_0000_0000_0L)
return new Timestamp(value);
long epochSec = value / 1000_000l;
long nanoAdjustment = value % 1000_000l * 1000l;
return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
}
if ("UTC".equalsIgnoreCase(timestampFormat)) {
String value = row.getString(colIndex);
long epochSec = Timestamp.valueOf(value.substring(0, 19).replace("T", " ")).getTime() / 1000;
int fractionalSec = Integer.parseInt(value.substring(20, value.length() - 5));
long nanoAdjustment = 0;
if (value.length() > 28) {
// us timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSS+0x00
nanoAdjustment = fractionalSec * 1000l;
} else {
// ms timestamp: yyyy-MM-ddTHH:mm:ss.SSS+0x00
nanoAdjustment = fractionalSec * 1000_000l;
}
ZoneOffset zoneOffset = ZoneOffset.of(value.substring(value.length() - 5));
Instant instant = Instant.ofEpochSecond(epochSec, nanoAdjustment).atOffset(zoneOffset).toInstant();
return Timestamp.from(instant);
}
String value = row.getString(colIndex);
if (value.length() <= 23) // ms timestamp: yyyy-MM-dd HH:mm:ss.SSS
return row.getTimestamp(colIndex);
// us timestamp: yyyy-MM-dd HH:mm:ss.SSSSSS
long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000;
long nanoAdjustment = Integer.parseInt(value.substring(20)) * 1000l;
Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
return timestamp;
}
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
return row.getString(colIndex) == null ? null : row.getString(colIndex).getBytes();
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
@ -126,12 +162,12 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
}
}
@Override
public boolean wasNull() throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
return resultSet.isEmpty();
}
// @Override
// public boolean wasNull() throws SQLException {
// if (isClosed())
// throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
// return resultSet.isEmpty();
// }
@Override
public String getString(int columnIndex) throws SQLException {
@ -150,8 +186,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
if (value == null)
if (value == null) {
wasNull = true;
return false;
}
wasNull = false;
if (value instanceof Boolean)
return (boolean) value;
return Boolean.valueOf(value.toString());
@ -162,8 +201,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
if (value == null)
if (value == null) {
wasNull = true;
return 0;
}
wasNull = false;
long valueAsLong = Long.parseLong(value.toString());
if (valueAsLong == Byte.MIN_VALUE)
return 0;
@ -183,8 +225,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
if (value == null)
if (value == null) {
wasNull = true;
return 0;
}
wasNull = false;
long valueAsLong = Long.parseLong(value.toString());
if (valueAsLong == Short.MIN_VALUE)
return 0;
@ -198,8 +243,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
if (value == null)
if (value == null) {
wasNull = true;
return 0;
}
wasNull = false;
long valueAsLong = Long.parseLong(value.toString());
if (valueAsLong == Integer.MIN_VALUE)
return 0;
@ -213,9 +261,14 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
if (value == null)
if (value == null) {
wasNull = true;
return 0;
}
wasNull = false;
if (value instanceof Timestamp) {
return ((Timestamp) value).getTime();
}
long valueAsLong = 0;
try {
valueAsLong = Long.parseLong(value.toString());
@ -232,8 +285,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
if (value == null)
if (value == null) {
wasNull = true;
return 0;
}
wasNull = false;
if (value instanceof Float || value instanceof Double)
return (float) value;
return Float.parseFloat(value.toString());
@ -244,8 +300,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
if (value == null)
if (value == null) {
wasNull = true;
return 0;
}
wasNull = false;
if (value instanceof Double || value instanceof Float)
return (double) value;
return Double.parseDouble(value.toString());
@ -307,6 +366,13 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return null;
if (value instanceof Timestamp)
return (Timestamp) value;
// if (value instanceof Long) {
// if (1_0000_0000_0000_0L > (long) value)
// return Timestamp.from(Instant.ofEpochMilli((long) value));
// long epochSec = (long) value / 1000_000L;
// long nanoAdjustment = (long) ((long) value % 1000_000L * 1000);
// return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
// }
return Timestamp.valueOf(value.toString());
}

View File

@ -4,6 +4,7 @@ import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.taosdata.jdbc.AbstractStatement;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;
import com.taosdata.jdbc.utils.HttpClientPoolUtil;
@ -34,14 +35,11 @@ public class RestfulStatement extends AbstractStatement {
if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql);
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
return executeOneQuery(url, sql);
return executeOneQuery(sql);
}
// if (this.database != null && !this.database.trim().replaceAll("\\s","").isEmpty())
// HttpClientPoolUtil.execute(url, "use " + this.database);
return executeOneQuery(url, sql);
return executeOneQuery(sql);
}
@Override
@ -56,8 +54,6 @@ public class RestfulStatement extends AbstractStatement {
return executeOneUpdate(url, sql);
}
// if (this.database != null && !this.database.trim().replaceAll("\\s", "").isEmpty())
// HttpClientPoolUtil.execute(url, "use " + this.database);
return executeOneUpdate(url, sql);
}
@ -78,14 +74,21 @@ public class RestfulStatement extends AbstractStatement {
// if a "use" statement was executed, set the catalog of the current Statement to the new database
boolean result = true;
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("TIMESTAMP")) {
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
}
if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("UTC")) {
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
}
if (SqlSyntaxValidator.isUseSql(sql)) {
HttpClientPoolUtil.execute(url, sql);
this.database = sql.trim().replace("use", "").trim();
this.conn.setCatalog(this.database);
result = false;
} else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
executeOneQuery(url, sql);
executeOneQuery(sql);
} else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
executeOneUpdate(url, sql);
result = false;
@ -101,11 +104,18 @@ public class RestfulStatement extends AbstractStatement {
return result;
}
private ResultSet executeOneQuery(String url, String sql) throws SQLException {
private ResultSet executeOneQuery(String sql) throws SQLException {
if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql);
// row data
String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
String timestampFormat = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT);
if ("TIMESTAMP".equalsIgnoreCase(timestampFormat))
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
if ("UTC".equalsIgnoreCase(timestampFormat))
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
String result = HttpClientPoolUtil.execute(url, sql);
JSONObject resultJson = JSON.parseObject(result);
if (resultJson.getString("status").equals("error")) {
@ -126,21 +136,21 @@ public class RestfulStatement extends AbstractStatement {
throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc"));
}
this.resultSet = null;
this.affectedRows = checkJsonResultSet(jsonObject);
this.affectedRows = getAffectedRows(jsonObject);
return this.affectedRows;
}
private int checkJsonResultSet(JSONObject jsonObject) {
private int getAffectedRows(JSONObject jsonObject) throws SQLException {
// create ... SQLs should return 0, and the RESTful result looks like this:
// {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1}
JSONArray head = jsonObject.getJSONArray("head");
if (head.size() != 1 || !"affected_rows".equals(head.getString(0)))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
JSONArray data = jsonObject.getJSONArray("data");
int rows = Integer.parseInt(jsonObject.getString("rows"));
if (head.size() == 1 && "affected_rows".equals(head.getString(0))
&& data.size() == 1 && data.getJSONArray(0).getInteger(0) == 0 && rows == 1) {
return 0;
}
return rows;
if (data != null)
return data.getJSONArray(0).getInteger(0);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
@Override
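
getAffectedRows above relies on the RESTful payload shape shown in its comment. A minimal parsing sketch using the same fastjson calls (the JSON literal is the example payload from that comment; requires com.alibaba.fastjson.JSON, JSONObject and JSONArray):

// Sketch: extracting affected_rows from the example payload
JSONObject obj = JSON.parseObject(
        "{\"status\":\"succ\",\"head\":[\"affected_rows\"],\"data\":[[0]],\"rows\":1}");
JSONArray data = obj.getJSONArray("data");
int affectedRows = data.getJSONArray(0).getInteger(0); // -> 0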

View File

@ -0,0 +1,12 @@
package com.taosdata.jdbc.utils;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
public class UtcTimestampUtil {
public static final DateTimeFormatter formatter = new DateTimeFormatterBuilder()
.appendPattern("yyyy-MM-ddTHH:mm:ss.SSS+")
// .appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true)
.toFormatter();
}

View File

@ -0,0 +1,135 @@
package com.taosdata.jdbc.utils;
import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;
import java.nio.charset.Charset;
import java.sql.Timestamp;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
public class Utils {
private static Pattern ptn = Pattern.compile(".*?'");
public static String escapeSingleQuota(String origin) {
Matcher m = ptn.matcher(origin);
StringBuffer sb = new StringBuffer();
int end = 0;
while (m.find()) {
end = m.end();
String seg = origin.substring(m.start(), end);
int len = seg.length();
if (len == 1) {
if ('\'' == seg.charAt(0)) {
sb.append("\\'");
} else {
sb.append(seg);
}
} else { // len > 1
sb.append(seg.substring(0, seg.length() - 2));
char lastcSec = seg.charAt(seg.length() - 2);
if (lastcSec == '\\') {
sb.append("\\'");
} else {
sb.append(lastcSec);
sb.append("\\'");
}
}
}
if (end < origin.length()) {
sb.append(origin.substring(end));
}
return sb.toString();
}
public static String getNativeSql(String rawSql, Object[] parameters) {
// toLowerCase
String preparedSql = rawSql.trim().toLowerCase();
String[] clause = new String[0];
if (SqlSyntaxValidator.isInsertSql(preparedSql)) {
// insert or import
clause = new String[]{"values\\s*\\(.*?\\)", "tags\\s*\\(.*?\\)"};
}
if (SqlSyntaxValidator.isSelectSql(preparedSql)) {
// select
clause = new String[]{"where\\s*.*"};
}
Map<Integer, Integer> placeholderPositions = new HashMap<>();
RangeSet<Integer> clauseRangeSet = TreeRangeSet.create();
findPlaceholderPosition(preparedSql, placeholderPositions);
findClauseRangeSet(preparedSql, clause, clauseRangeSet);
return transformSql(rawSql, parameters, placeholderPositions, clauseRangeSet);
}
private static void findClauseRangeSet(String preparedSql, String[] regexArr, RangeSet<Integer> clauseRangeSet) {
clauseRangeSet.clear();
for (String regex : regexArr) {
Matcher matcher = Pattern.compile(regex).matcher(preparedSql);
while (matcher.find()) {
int start = matcher.start();
int end = matcher.end();
clauseRangeSet.add(Range.closed(start, end));
}
}
}
private static void findPlaceholderPosition(String preparedSql, Map<Integer, Integer> placeholderPosition) {
placeholderPosition.clear();
Matcher matcher = Pattern.compile("\\?").matcher(preparedSql);
int index = 0;
while (matcher.find()) {
int pos = matcher.start();
placeholderPosition.put(index, pos);
index++;
}
}
/***
* Substitute the bound parameter values for the ? placeholders in rawSql.
* @param rawSql the original SQL containing ? placeholders
* @param paramArr the bound parameter values
* @param placeholderPosition map from placeholder index to its character position in the SQL
* @param clauseRangeSet character ranges of the clauses whose string/timestamp parameters must be quoted
* @return the SQL with every placeholder replaced
*/
private static String transformSql(String rawSql, Object[] paramArr, Map<Integer, Integer> placeholderPosition, RangeSet<Integer> clauseRangeSet) {
String[] sqlArr = rawSql.split("\\?");
return IntStream.range(0, sqlArr.length).mapToObj(index -> {
if (index == paramArr.length)
return sqlArr[index];
Object para = paramArr[index];
String paraStr;
if (para != null) {
if (para instanceof byte[]) {
paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
} else {
paraStr = para.toString();
}
// if para is a Timestamp, String, or byte[], the ' character needs to be escaped
if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) {
paraStr = Utils.escapeSingleQuota(paraStr);
Integer pos = placeholderPosition.get(index);
boolean contains = clauseRangeSet.contains(pos);
if (contains) {
paraStr = "'" + paraStr + "'";
}
}
} else {
paraStr = "NULL";
}
return sqlArr[index] + paraStr;
}).collect(Collectors.joining());
}
}
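
A usage sketch for the new Utils.getNativeSql helper (table name and values are illustrative, not part of this commit; the exact timestamp string depends on the JVM time zone; requires java.sql.Timestamp):

// Sketch: binding parameters into a raw SQL string with Utils.getNativeSql
Object[] params = {new Timestamp(1609459200000L), "a'b", 42};
String sql = Utils.getNativeSql("insert into db.tb values(?, ?, ?)", params);
// produces roughly: insert into db.tb values('2021-01-01 ...', 'a\'b', 42)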

View File

@ -12,6 +12,7 @@ import java.util.Properties;
import java.util.concurrent.TimeUnit;
public class SubscribeTest {
Connection connection;
Statement statement;
String dbName = "test";
@ -19,62 +20,53 @@ public class SubscribeTest {
String host = "127.0.0.1";
String topic = "test";
@Before
public void createDatabase() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.execute("drop database if exists " + dbName);
statement.execute("create database if not exists " + dbName);
statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
long ts = System.currentTimeMillis();
for (int i = 0; i < 2; i++) {
ts += i;
String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")";
statement.executeUpdate(sql);
}
} catch (ClassNotFoundException | SQLException e) {
return;
}
}
@Test
public void subscribe() {
try {
String rawSql = "select * from " + dbName + "." + tName + ";";
System.out.println(rawSql);
// TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);
TSDBConnection conn = connection.unwrap(TSDBConnection.class);
TSDBSubscribe subscribe = conn.subscribe(topic, rawSql, false);
// int a = 0;
// while (true) {
// TimeUnit.MILLISECONDS.sleep(1000);
// TSDBResultSet resSet = subscribe.consume();
// while (resSet.next()) {
// for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
// System.out.printf(i + ": " + resSet.getString(i) + "\t");
// }
// System.out.println("\n======" + a + "==========");
// }
// a++;
// if (a >= 2) {
// break;
// }
// resSet.close();
// }
//
// subscribe.close(true);
int a = 0;
while (true) {
TimeUnit.MILLISECONDS.sleep(1000);
TSDBResultSet resSet = subscribe.consume();
while (resSet.next()) {
for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
System.out.printf(i + ": " + resSet.getString(i) + "\t");
}
System.out.println("\n======" + a + "==========");
}
a++;
if (a >= 2) {
break;
}
resSet.close();
}
subscribe.close(true);
} catch (Exception e) {
e.printStackTrace();
}
}
@Before
public void createDatabase() throws SQLException {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.execute("drop database if exists " + dbName);
statement.execute("create database if not exists " + dbName);
statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
long ts = System.currentTimeMillis();
statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 100, 1)");
statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + (ts + 1) + ", 101, 2)");
}
@After
public void close() {
try {
@ -86,6 +78,5 @@ public class SubscribeTest {
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -50,6 +50,51 @@ public class TSDBPreparedStatementTest {
pstmt_insert.setNull(2, Types.INTEGER);
int result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(3, Types.BIGINT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(4, Types.FLOAT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(5, Types.DOUBLE);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(6, Types.SMALLINT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(7, Types.TINYINT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(8, Types.BOOLEAN);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(9, Types.BINARY);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(10, Types.NCHAR);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(10, Types.OTHER);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
}
@Test
@ -129,7 +174,7 @@ public class TSDBPreparedStatementTest {
Assert.assertFalse(pstmt_insert.execute());
}
class Person implements Serializable {
class Person {
String name;
int age;
boolean sex;

View File

@ -160,6 +160,7 @@ public class TSDBResultSetTest {
@Test
public void getTime() throws SQLException {
Time f1 = rs.getTime("f1");
Assert.assertNotNull(f1);
Assert.assertEquals("00:00:00", f1.toString());
}

View File

@ -20,6 +20,7 @@ public class DriverAutoloadTest {
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(url, properties);
Assert.assertNotNull(conn);
conn.close();
}
@Test
@ -27,6 +28,7 @@ public class DriverAutoloadTest {
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(url, properties);
Assert.assertNotNull(conn);
conn.close();
}

View File

@ -0,0 +1,400 @@
package com.taosdata.jdbc.cases;
import org.junit.*;
import java.sql.*;
public class InsertSpecialCharacterJniTest {
private static final String host = "127.0.0.1";
private static Connection conn;
private static String dbName = "spec_char_test";
private static String tbname1 = "test";
private static String tbname2 = "weather";
private static String special_character_str_1 = "$asd$$fsfsf$";
private static String special_character_str_2 = "\\asdfsfsf\\\\";
private static String special_character_str_3 = "\\\\asdfsfsf\\";
private static String special_character_str_4 = "?asd??fsf?sf?";
private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
@Test
public void testCase01() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setBytes(2, special_character_str_1.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from ?";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setString(1, tbname1);
ResultSet rs = pstmt.executeQuery();
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_1, f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
}
@Test
public void testCase02() throws SQLException {
//TODO: known issue - the trailing backslash is lost on insert
// Expected :\asdfsfsf\\
// Actual   :\asdfsfsf\
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setBytes(2, special_character_str_2.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
ResultSet rs = pstmt.executeQuery();
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
//TODO: bug to be fixed
// Assert.assertEquals(special_character_str_2, f1);
Assert.assertEquals(special_character_str_2.substring(0, special_character_str_2.length() - 1), f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
}
@Test(expected = SQLException.class)
public void testCase03() throws SQLException {
//TODO:
// TDengine ERROR (216): Syntax error in SQL
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setBytes(2, special_character_str_3.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
ResultSet rs = pstmt.executeQuery();
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_3, f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
}
@Test
public void testCase04() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setBytes(2, special_character_str_4.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_4, f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
}
@Test
public void testCase05() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setBytes(2, special_character_str_5.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_5, f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
}
@Test
public void testCase06() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setInt(1, 1);
pstmt.setString(2, special_character_str_4);
pstmt.setTimestamp(3, new Timestamp(now));
pstmt.setBytes(4, special_character_str_4.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query t1
final String query = "select * from t1";
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_4, f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
}
@Test
public void testCase07() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, ?, ?) ; ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setBytes(2, special_character_str_4.getBytes());
pstmt.setString(3, special_character_str_4);
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_4, f1);
String f2 = rs.getString(3);
Assert.assertEquals(special_character_str_4, f2);
}
}
@Test(expected = SQLException.class)
public void testCase08() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?) ? ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setInt(1, 1);
pstmt.setString(2, special_character_str_5);
pstmt.setTimestamp(3, new Timestamp(now));
pstmt.setBytes(4, special_character_str_5.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
}
@Test
public void testCase09() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into ?.t? using " + tbname2 + " tags(?) values(?, ?, ?) t? using weather tags(?) values(?,?,?) ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
// t1
pstmt.setString(1, dbName);
pstmt.setInt(2, 1);
pstmt.setString(3, special_character_str_5);
pstmt.setTimestamp(4, new Timestamp(now));
pstmt.setBytes(5, special_character_str_5.getBytes());
// t2
pstmt.setInt(7, 2);
pstmt.setString(8, special_character_str_5);
pstmt.setTimestamp(9, new Timestamp(now));
pstmt.setString(11, special_character_str_5);
int ret = pstmt.executeUpdate();
Assert.assertEquals(2, ret);
}
// query t1
String query = "select * from t?";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setInt(1, 1);
ResultSet rs = pstmt.executeQuery();
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_5, f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
// query t2
query = "select * from t2";
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
byte[] f1 = rs.getBytes(2);
Assert.assertNull(f1);
String f2 = new String(rs.getBytes(3));
Assert.assertEquals(special_character_str_5, f2);
}
}
@Test
public void testCase10() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
// t1
pstmt.setInt(1, 1);
pstmt.setString(2, tbname2);
pstmt.setString(3, special_character_str_5);
pstmt.setTimestamp(4, new Timestamp(now));
pstmt.setBytes(5, special_character_str_5.getBytes());
// t2
pstmt.setInt(7, 2);
pstmt.setString(8, special_character_str_5);
pstmt.setTimestamp(9, new Timestamp(now));
pstmt.setString(11, special_character_str_5);
int ret = pstmt.executeUpdate();
Assert.assertEquals(2, ret);
}
//query t1
String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setString(1, dbName);
pstmt.setInt(2, 1);
pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(4, new Timestamp(0));
pstmt.setString(5, "f1");
ResultSet rs = pstmt.executeQuery();
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_5, f1);
byte[] f2 = rs.getBytes(3);
Assert.assertNull(f2);
}
// query t2
query = "select * from t? where ts < ? and ts >= ? and ? is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setInt(1, 2);
pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(3, new Timestamp(0));
pstmt.setString(4, "f2");
ResultSet rs = pstmt.executeQuery();
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
byte[] f1 = rs.getBytes(2);
Assert.assertNull(f1);
String f2 = new String(rs.getBytes(3));
Assert.assertEquals(special_character_str_5, f2);
}
}
@Test(expected = SQLException.class)
public void testCase11() throws SQLException {
final String specialCharacterStr = "?#sd@$f(((s[P)){]}f?s[]{}%vs^a&d*jhg)(j))(f@~!?$";
final long now = System.currentTimeMillis();
final String sql = "insert into t? using " + tbname2 + " values(?, ?, 'abc?abc') ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setInt(1, 1);
pstmt.setTimestamp(2, new Timestamp(now));
pstmt.setBytes(3, specialCharacterStr.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
}
@Test
public void testCase12() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setString(2, special_character_str_4);
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals("HelloTDengine", f1);
String f2 = rs.getString(3);
Assert.assertEquals(special_character_str_4, f2);
}
}
@Before
public void before() throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop table if exists " + tbname1 + "");
stmt.execute("create table " + tbname1 + "(ts timestamp,f1 binary(64),f2 nchar(64))");
stmt.execute("drop table if exists " + tbname2);
stmt.execute("create table " + tbname2 + "(ts timestamp, f1 binary(64), f2 nchar(64)) tags(loc nchar(64))");
}
}
@BeforeClass
public static void beforeClass() throws SQLException {
String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
conn = DriverManager.getConnection(url);
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists " + dbName);
stmt.execute("create database if not exists " + dbName);
stmt.execute("use " + dbName);
}
}
@AfterClass
public static void afterClass() throws SQLException {
if (conn != null)
conn.close();
}
}

View File

@ -0,0 +1,401 @@
package com.taosdata.jdbc.cases;
import org.junit.*;
import java.sql.*;
public class InsertSpecialCharacterRestfulTest {
private static final String host = "127.0.0.1";
// private static final String host = "master";
private static Connection conn;
private static String dbName = "spec_char_test";
private static String tbname1 = "test";
private static String tbname2 = "weather";
private static String special_character_str_1 = "$asd$$fsfsf$";
private static String special_character_str_2 = "\\asdfsfsf\\\\";
private static String special_character_str_3 = "\\\\asdfsfsf\\";
private static String special_character_str_4 = "?asd??fsf?sf?";
private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
@Test
public void testCase01() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setBytes(2, special_character_str_1.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from ?";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setString(1, tbname1);
ResultSet rs = pstmt.executeQuery();
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_1, f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
}
@Test
public void testCase02() throws SQLException {
//TODO: known issue - the trailing backslash is lost on insert
// Expected :\asdfsfsf\\
// Actual   :\asdfsfsf\
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setBytes(2, special_character_str_2.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
ResultSet rs = pstmt.executeQuery();
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
//TODO: bug to be fixed
// Assert.assertEquals(special_character_str_2, f1);
Assert.assertEquals(special_character_str_2.substring(0, special_character_str_2.length() - 1), f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
}
@Test(expected = SQLException.class)
public void testCase03() throws SQLException {
//TODO:
// TDengine ERROR (216): Syntax error in SQL
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setBytes(2, special_character_str_3.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
ResultSet rs = pstmt.executeQuery();
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_3, f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
}
@Test
public void testCase04() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setBytes(2, special_character_str_4.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_4, f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
}
@Test
public void testCase05() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1) values(?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setBytes(2, special_character_str_5.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_5, f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
}
@Test
public void testCase06() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setInt(1, 1);
pstmt.setString(2, special_character_str_4);
pstmt.setTimestamp(3, new Timestamp(now));
pstmt.setBytes(4, special_character_str_4.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query t1
final String query = "select * from t1";
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_4, f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
}
@Test
public void testCase07() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, ?, ?) ; ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setBytes(2, special_character_str_4.getBytes());
pstmt.setString(3, special_character_str_4);
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_4, f1);
String f2 = rs.getString(3);
Assert.assertEquals(special_character_str_4, f2);
}
}
@Test(expected = SQLException.class)
public void testCase08() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into t? using " + tbname2 + " tags(?) values(?, ?, ?) ? ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setInt(1, 1);
pstmt.setString(2, special_character_str_5);
pstmt.setTimestamp(3, new Timestamp(now));
pstmt.setBytes(4, special_character_str_5.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
}
@Test
public void testCase09() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into ?.t? using " + tbname2 + " tags(?) values(?, ?, ?) t? using weather tags(?) values(?,?,?) ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
// t1
pstmt.setString(1, dbName);
pstmt.setInt(2, 1);
pstmt.setString(3, special_character_str_5);
pstmt.setTimestamp(4, new Timestamp(now));
pstmt.setBytes(5, special_character_str_5.getBytes());
// t2
pstmt.setInt(7, 2);
pstmt.setString(8, special_character_str_5);
pstmt.setTimestamp(9, new Timestamp(now));
pstmt.setString(11, special_character_str_5);
int ret = pstmt.executeUpdate();
Assert.assertEquals(2, ret);
}
// query t1
String query = "select * from t?";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setInt(1, 1);
ResultSet rs = pstmt.executeQuery();
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_5, f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
// query t2
query = "select * from t2";
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
byte[] f1 = rs.getBytes(2);
Assert.assertNull(f1);
String f2 = new String(rs.getBytes(3));
Assert.assertEquals(special_character_str_5, f2);
}
}
@Test
public void testCase10() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
// t1
pstmt.setInt(1, 1);
pstmt.setString(2, tbname2);
pstmt.setString(3, special_character_str_5);
pstmt.setTimestamp(4, new Timestamp(now));
pstmt.setBytes(5, special_character_str_5.getBytes());
// t2
pstmt.setInt(7, 2);
pstmt.setString(8, special_character_str_5);
pstmt.setTimestamp(9, new Timestamp(now));
pstmt.setString(11, special_character_str_5);
int ret = pstmt.executeUpdate();
Assert.assertEquals(2, ret);
}
//query t1
String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setString(1, dbName);
pstmt.setInt(2, 1);
pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(4, new Timestamp(0));
pstmt.setString(5, "f1");
ResultSet rs = pstmt.executeQuery();
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals(special_character_str_5, f1);
byte[] f2 = rs.getBytes(3);
Assert.assertNull(f2);
}
// query t2
query = "select * from t? where ts < ? and ts >= ? and ? is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setInt(1, 2);
pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(3, new Timestamp(0));
pstmt.setString(4, "f2");
ResultSet rs = pstmt.executeQuery();
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
byte[] f1 = rs.getBytes(2);
Assert.assertNull(f1);
String f2 = new String(rs.getBytes(3));
Assert.assertEquals(special_character_str_5, f2);
}
}
@Test(expected = SQLException.class)
public void testCase11() throws SQLException {
final String specialCharacterStr = "?#sd@$f(((s[P)){]}f?s[]{}%vs^a&d*jhg)(j))(f@~!?$";
final long now = System.currentTimeMillis();
final String sql = "insert into t? using " + tbname2 + " values(?, ?, 'abc?abc') ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setInt(1, 1);
pstmt.setTimestamp(2, new Timestamp(now));
pstmt.setBytes(3, specialCharacterStr.getBytes());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
}
@Test
public void testCase12() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setString(2, special_character_str_4);
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals("HelloTDengine", f1);
String f2 = rs.getString(3);
Assert.assertEquals(special_character_str_4, f2);
}
}
@Before
public void before() throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop table if exists " + tbname1 + "");
stmt.execute("create table " + tbname1 + "(ts timestamp,f1 binary(64),f2 nchar(64))");
stmt.execute("drop table if exists " + tbname2);
stmt.execute("create table " + tbname2 + "(ts timestamp, f1 binary(64), f2 nchar(64)) tags(loc nchar(64))");
}
}
@BeforeClass
public static void beforeClass() throws SQLException {
String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
conn = DriverManager.getConnection(url);
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists " + dbName);
stmt.execute("create database if not exists " + dbName);
stmt.execute("use " + dbName);
}
}
@AfterClass
public static void afterClass() throws SQLException {
if (conn != null)
conn.close();
}
}

View File

@ -0,0 +1,64 @@
package com.taosdata.jdbc.cases;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
public class NullValueInResultSetForJdbcJniTest {
private static final String host = "127.0.0.1";
Connection conn;
@Test
public void test() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select * from weather");
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
Object value = rs.getObject(i);
System.out.print(meta.getColumnLabel(i) + ": " + value + "\t");
}
System.out.println();
}
} catch (SQLException e) {
e.printStackTrace();
}
}
@Before
public void before() throws SQLException {
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
conn = DriverManager.getConnection(url);
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_null");
stmt.execute("create database if not exists test_null");
stmt.execute("use test_null");
stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64))");
stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, 1)");
stmt.executeUpdate("insert into weather(ts, f2) values(now+2s, 2)");
stmt.executeUpdate("insert into weather(ts, f3) values(now+3s, 3.0)");
stmt.executeUpdate("insert into weather(ts, f4) values(now+4s, 4.0)");
stmt.executeUpdate("insert into weather(ts, f5) values(now+5s, 5)");
stmt.executeUpdate("insert into weather(ts, f6) values(now+6s, 6)");
stmt.executeUpdate("insert into weather(ts, f7) values(now+7s, true)");
stmt.executeUpdate("insert into weather(ts, f8) values(now+8s, 'hello')");
stmt.executeUpdate("insert into weather(ts, f9) values(now+9s, '涛思数据')");
} catch (SQLException e) {
e.printStackTrace();
}
}
@After
public void after() {
try {
if (conn != null)
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -0,0 +1,91 @@
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.utils.TimestampUtil;
import org.junit.*;
import java.sql.*;
import java.util.Properties;
public class TD3841Test {
private static final String host = "127.0.0.1";
private static Properties properties;
private static Connection conn_restful;
private static Connection conn_jni;
@Test
public void testRestful() throws SQLException {
String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
conn_restful = DriverManager.getConnection(url, properties);
try (Statement stmt = conn_restful.createStatement()) {
stmt.execute("drop database if exists test_null");
stmt.execute("create database if not exists test_null");
stmt.execute("use test_null");
stmt.execute("create table weather(ts timestamp, f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 smallint, f7 tinyint, f8 bool, f9 binary(64), f10 nchar(64))");
stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, " + TimestampUtil.datetimeToLong("2021-04-21 12:00:00.000") + ")");
ResultSet rs = stmt.executeQuery("select * from weather");
rs.next();
Assert.assertEquals("2021-04-21 12:00:00.000", TimestampUtil.longToDatetime(rs.getTimestamp(2).getTime()));
Assert.assertTrue(rs.getInt(3) == 0 && rs.wasNull());
Assert.assertTrue(rs.getLong(4) == 0 && rs.wasNull());
Assert.assertTrue(rs.getFloat(5) == 0.0f && rs.wasNull());
Assert.assertTrue(rs.getDouble(6) == 0.0f && rs.wasNull());
Assert.assertTrue(rs.getByte(7) == 0 && rs.wasNull());
Assert.assertTrue(rs.getShort(8) == 0 && rs.wasNull());
Assert.assertNull(rs.getBytes(9));
Assert.assertNull(rs.getString(10));
rs.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void testJNI() throws SQLException {
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
conn_jni = DriverManager.getConnection(url, properties);
try (Statement stmt = conn_jni.createStatement()) {
stmt.execute("drop database if exists test_null");
stmt.execute("create database if not exists test_null");
stmt.execute("use test_null");
stmt.execute("create table weather(ts timestamp, f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 smallint, f7 tinyint, f8 bool, f9 binary(64), f10 nchar(64))");
stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, " + TimestampUtil.datetimeToLong("2021-04-21 12:00:00.000") + ")");
ResultSet rs = stmt.executeQuery("select * from weather");
rs.next();
Assert.assertEquals("2021-04-21 12:00:00.000", TimestampUtil.longToDatetime(rs.getTimestamp(2).getTime()));
Assert.assertTrue(rs.getInt(3) == 0 && rs.wasNull());
Assert.assertTrue(rs.getLong(4) == 0 && rs.wasNull());
Assert.assertTrue(rs.getFloat(5) == 0.0f && rs.wasNull());
Assert.assertTrue(rs.getDouble(6) == 0.0f && rs.wasNull());
Assert.assertTrue(rs.getByte(7) == 0 && rs.wasNull());
Assert.assertTrue(rs.getShort(8) == 0 && rs.wasNull());
Assert.assertNull(rs.getBytes(9));
Assert.assertNull(rs.getString(10));
rs.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
@BeforeClass
public static void beforeClass() {
properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
}
@AfterClass
public static void afterClass() throws SQLException {
if (conn_restful != null)
conn_restful.close();
if (conn_jni != null)
conn_jni.close();
}
}
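
For reference, a minimal standalone sketch of the wasNull() pattern those assertions rely on: JDBC primitive getters return 0 for a SQL NULL column, so wasNull() must be checked right after the getter to tell NULL apart from a stored zero. The connection URL and table follow the tests above; everything else is illustrative.

import java.sql.*;

public class WasNullDemo {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select f2 from test_null.weather")) {
            while (rs.next()) {
                int v = rs.getInt(1);          // 0 for both NULL and a stored 0
                boolean isNull = rs.wasNull(); // true only if the column was NULL
                System.out.println(isNull ? "NULL" : Integer.toString(v));
            }
        }
    }
}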

View File

@ -0,0 +1,89 @@
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBDriver;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import java.sql.*;
import java.util.Properties;
public class TwoTypeTimestampPercisionInJniTest {
private static final String host = "127.0.0.1";
private static final String ms_timestamp_db = "ms_precision_test";
private static final String us_timestamp_db = "us_precision_test";
private static final long timestamp1 = System.currentTimeMillis();
private static final long timestamp2 = timestamp1 * 1000 + 123;
private static Connection conn;
@Test
public void testCase1() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
rs.next();
long ts = rs.getTimestamp(1).getTime();
Assert.assertEquals(timestamp1, ts);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void testCase2() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather");
rs.next();
Timestamp timestamp = rs.getTimestamp(1);
System.out.println(timestamp);
long ts = timestamp.getTime();
Assert.assertEquals(timestamp1, ts);
int nanos = timestamp.getNanos();
Assert.assertEquals(timestamp2 % 1_000_000L * 1000, nanos);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}
@BeforeClass
public static void beforeClass() throws SQLException {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
conn = DriverManager.getConnection(url, properties);
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists " + ms_timestamp_db);
stmt.execute("create database if not exists " + ms_timestamp_db + " precision 'ms'");
stmt.execute("create table " + ms_timestamp_db + ".weather(ts timestamp, f1 int)");
stmt.executeUpdate("insert into " + ms_timestamp_db + ".weather(ts,f1) values(" + timestamp1 + ", 127)");
stmt.execute("drop database if exists " + us_timestamp_db);
stmt.execute("create database if not exists " + us_timestamp_db + " precision 'us'");
stmt.execute("create table " + us_timestamp_db + ".weather(ts timestamp, f1 int)");
stmt.executeUpdate("insert into " + us_timestamp_db + ".weather(ts,f1) values(" + timestamp2 + ", 127)");
stmt.close();
}
@AfterClass
public static void afterClass() {
try {
if (conn != null)
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
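
For reference, the arithmetic behind the getTime()/getNanos() assertions above, as a standalone sketch with an illustrative epoch value: a microsecond-precision epoch is split into the millisecond part returned by Timestamp.getTime() and the fraction-of-second, in nanoseconds, reported by Timestamp.getNanos().

public class PrecisionArithmetic {
    public static void main(String[] args) {
        long timestamp1 = 1_620_000_000_000L;      // hypothetical millisecond epoch
        long timestamp2 = timestamp1 * 1000 + 123; // same instant in microseconds, plus 123 us

        long millis = timestamp2 / 1000;             // what getTime() is expected to return
        long nanos = timestamp2 % 1_000_000 * 1000;  // what getNanos() is expected to return

        System.out.println(millis == timestamp1);    // true: the extra 123 us stay below 1 ms
        System.out.println(nanos);                   // 123000 here, since timestamp1 % 1000 == 0
    }
}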

View File

@ -0,0 +1,168 @@
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBDriver;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import java.sql.*;
import java.util.Properties;
public class TwoTypeTimestampPercisionInRestfulTest {
private static final String host = "127.0.0.1";
private static final String ms_timestamp_db = "ms_precision_test";
private static final String us_timestamp_db = "us_precision_test";
private static final long timestamp1 = System.currentTimeMillis();
private static final long timestamp2 = timestamp1 * 1000 + 123;
private static Connection conn1;
private static Connection conn2;
private static Connection conn3;
@Test
public void testCase1() {
try (Statement stmt = conn1.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
rs.next();
long ts = rs.getTimestamp(1).getTime();
Assert.assertEquals(timestamp1, ts);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void testCase2() {
try (Statement stmt = conn1.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather");
rs.next();
Timestamp timestamp = rs.getTimestamp(1);
long ts = timestamp.getTime();
Assert.assertEquals(timestamp1, ts);
int nanos = timestamp.getNanos();
Assert.assertEquals(timestamp2 % 1_000_000L * 1000, nanos);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void testCase3() {
try (Statement stmt = conn2.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
rs.next();
Timestamp rsTimestamp = rs.getTimestamp(1);
long ts = rsTimestamp.getTime();
Assert.assertEquals(timestamp1, ts);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void testCase4() {
try (Statement stmt = conn2.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather");
rs.next();
Timestamp timestamp = rs.getTimestamp(1);
long ts = timestamp.getTime();
Assert.assertEquals(timestamp1, ts);
int nanos = timestamp.getNanos();
Assert.assertEquals(timestamp2 % 1_000_000L * 1000, nanos);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void testCase5() {
try (Statement stmt = conn3.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
rs.next();
long ts = rs.getTimestamp(1).getTime();
Assert.assertEquals(timestamp1, ts);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void testCase6() {
try (Statement stmt = conn3.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather");
rs.next();
Timestamp timestamp = rs.getTimestamp(1);
long ts = timestamp.getTime();
Assert.assertEquals(timestamp1, ts);
int nanos = timestamp.getNanos();
Assert.assertEquals(timestamp2 % 1_000_000L * 1000, nanos);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}
@BeforeClass
public static void beforeClass() throws SQLException {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
// properties.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "TIMESTAMP");
String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
conn1 = DriverManager.getConnection(url, properties);
url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&timestampFormat=timestamp";
conn2 = DriverManager.getConnection(url, properties);
url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&timestampFormat=utc";
conn3 = DriverManager.getConnection(url, properties);
Statement stmt = conn1.createStatement();
stmt.execute("drop database if exists " + ms_timestamp_db);
stmt.execute("create database if not exists " + ms_timestamp_db + " precision 'ms'");
stmt.execute("create table " + ms_timestamp_db + ".weather(ts timestamp, f1 int)");
stmt.executeUpdate("insert into " + ms_timestamp_db + ".weather(ts,f1) values(" + timestamp1 + ", 127)");
stmt.execute("drop database if exists " + us_timestamp_db);
stmt.execute("create database if not exists " + us_timestamp_db + " precision 'us'");
stmt.execute("create table " + us_timestamp_db + ".weather(ts timestamp, f1 int)");
stmt.executeUpdate("insert into " + us_timestamp_db + ".weather(ts,f1) values(" + timestamp2 + ", 127)");
stmt.close();
}
@AfterClass
public static void afterClass() {
try {
if (conn1 != null)
conn1.close();
if (conn2 != null)
conn2.close();
if (conn3 != null)
conn3.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -6,11 +6,11 @@ import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.io.Serializable;
import java.sql.*;
public class RestfulPreparedStatementTest {
private static final String host = "127.0.0.1";
// private static final String host = "master";
private static Connection conn;
private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
private static PreparedStatement pstmt_insert;
@ -50,6 +50,51 @@ public class RestfulPreparedStatementTest {
pstmt_insert.setNull(2, Types.INTEGER);
int result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(3, Types.BIGINT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(4, Types.FLOAT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(5, Types.DOUBLE);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(6, Types.SMALLINT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(7, Types.TINYINT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(8, Types.BOOLEAN);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(9, Types.BINARY);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(10, Types.NCHAR);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(10, Types.OTHER);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
}
@Test
@ -129,7 +174,7 @@ public class RestfulPreparedStatementTest {
Assert.assertFalse(pstmt_insert.execute());
}
class Person implements Serializable {
private class Person {
String name;
int age;
boolean sex;

View File

@ -160,6 +160,7 @@ public class RestfulResultSetTest {
@Test
public void getTime() throws SQLException {
Time f1 = rs.getTime("f1");
Assert.assertNotNull(f1);
Assert.assertEquals("00:00:00", f1.toString());
}

View File

@ -0,0 +1,24 @@
package com.taosdata.jdbc.utils;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.*;
public class UtilsTest {
@Test
public void escapeSingleQuota() {
String s = "'''''a\\'";
String news = Utils.escapeSingleQuota(s);
Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
s = "\'''''a\\'";
news = Utils.escapeSingleQuota(s);
Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
s = "\'\'\'\''a\\'";
news = Utils.escapeSingleQuota(s);
Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news);
}
}
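
For reference, a minimal escaping rule consistent with the expectations above; the driver's own Utils.escapeSingleQuota may be implemented differently. Single quotes not already preceded by a backslash are escaped, already-escaped quotes are left as-is.

public class EscapeSketch {
    // Escape every ' that is not already preceded by a backslash.
    static String escapeSingleQuote(String s) {
        return s.replaceAll("(?<!\\\\)'", "\\\\'");
    }

    public static void main(String[] args) {
        System.out.println(escapeSingleQuote("'''''a\\'")); // prints \'\'\'\'\'a\'
    }
}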

View File

@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
version="2.0.8",
version="2.0.9",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",

View File

@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
_timestamp_converter = _convert_microsecond_to_datetime
if num_of_rows > 0:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
return [None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
else:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
return [None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):

View File

@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
version="2.0.7",
version="2.0.9",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",

View File

@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
_timestamp_converter = _convert_microsecond_to_datetime
if num_of_rows > 0:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
return [None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
else:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
return [None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):

View File

@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
version="2.0.7",
version="2.0.9",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",

View File

@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
_timestamp_converter = _convert_microsecond_to_datetime
if num_of_rows > 0:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
return [None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
else:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
return [None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):

View File

@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
version="2.0.7",
version="2.0.9",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",

View File

@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
_timestamp_converter = _convert_microsecond_to_datetime
if num_of_rows > 0:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
return [None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
else:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
return [None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):

View File

@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
version="2.0.7",
version="2.0.9",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",

View File

@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
_timestamp_converter = _convert_microsecond_to_datetime
if num_of_rows > 0:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
return [None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
else:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
return [None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):

View File

@ -218,6 +218,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended")
#define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied")
#define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing")
#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state")
// tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID")

File diff suppressed because it is too large

View File

@ -72,7 +72,8 @@ enum _show_db_index {
TSDB_SHOW_DB_WALLEVEL_INDEX,
TSDB_SHOW_DB_FSYNC_INDEX,
TSDB_SHOW_DB_COMP_INDEX,
TSDB_SHOW_DB_PRECISION_INDEX,
TSDB_SHOW_DB_CACHELAST_INDEX,
TSDB_SHOW_DB_PRECISION_INDEX,
TSDB_SHOW_DB_UPDATE_INDEX,
TSDB_SHOW_DB_STATUS_INDEX,
TSDB_MAX_SHOW_DB
@ -83,10 +84,10 @@ enum _show_tables_index {
TSDB_SHOW_TABLES_NAME_INDEX,
TSDB_SHOW_TABLES_CREATED_TIME_INDEX,
TSDB_SHOW_TABLES_COLUMNS_INDEX,
TSDB_SHOW_TABLES_METRIC_INDEX,
TSDB_SHOW_TABLES_UID_INDEX,
TSDB_SHOW_TABLES_METRIC_INDEX,
TSDB_SHOW_TABLES_UID_INDEX,
TSDB_SHOW_TABLES_TID_INDEX,
TSDB_SHOW_TABLES_VGID_INDEX,
TSDB_SHOW_TABLES_VGID_INDEX,
TSDB_MAX_SHOW_TABLES
};
@ -99,22 +100,24 @@ enum _describe_table_index {
TSDB_MAX_DESCRIBE_METRIC
};
#define COL_NOTE_LEN 128
typedef struct {
char field[TSDB_COL_NAME_LEN + 1];
char type[16];
int length;
char note[128];
char note[COL_NOTE_LEN];
} SColDes;
typedef struct {
char name[TSDB_COL_NAME_LEN + 1];
char name[TSDB_TABLE_NAME_LEN];
SColDes cols[];
} STableDef;
extern char version[];
typedef struct {
char name[TSDB_DB_NAME_LEN + 1];
char name[TSDB_DB_NAME_LEN];
char create_time[32];
int32_t ntables;
int32_t vgroups;
@ -132,14 +135,15 @@ typedef struct {
int8_t wallevel;
int32_t fsync;
int8_t comp;
int8_t cachelast;
char precision[8]; // time resolution
int8_t update;
char status[16];
} SDbInfo;
typedef struct {
char name[TSDB_TABLE_NAME_LEN + 1];
char metric[TSDB_TABLE_NAME_LEN + 1];
char name[TSDB_TABLE_NAME_LEN];
char metric[TSDB_TABLE_NAME_LEN];
} STableRecord;
typedef struct {
@ -151,7 +155,7 @@ typedef struct {
pthread_t threadID;
int32_t threadIndex;
int32_t totalThreads;
char dbName[TSDB_TABLE_NAME_LEN + 1];
char dbName[TSDB_DB_NAME_LEN];
void *taosCon;
int64_t rowsOfDumpOut;
int64_t tablesOfDumpOut;
@ -210,13 +214,13 @@ static struct argp_option options[] = {
{"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
// dump unit options
{"all-databases", 'A', 0, 0, "Dump all databases.", 2},
{"databases", 'B', 0, 0, "Dump assigned databases", 2},
{"databases", 'D', 0, 0, "Dump assigned databases", 2},
// dump format options
{"schemaonly", 's', 0, 0, "Only dump schema.", 3},
{"with-property", 'M', 0, 0, "Dump schema with properties.", 3},
{"without-property", 'N', 0, 0, "Dump schema without properties.", 3},
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
{"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
{"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
@ -337,15 +341,15 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'A':
arguments->all_databases = true;
break;
case 'B':
case 'D':
arguments->databases = true;
break;
// dump format option
case 's':
arguments->schemaonly = true;
break;
case 'M':
arguments->with_property = true;
case 'N':
arguments->with_property = false;
break;
case 'S':
// parse time here.
@ -354,23 +358,23 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'E':
arguments->end_time = atol(arg);
break;
case 'N':
case 'B':
arguments->data_batch = atoi(arg);
if (arguments->data_batch >= INT16_MAX) {
arguments->data_batch = INT16_MAX - 1;
}
}
break;
case 'L':
case 'L':
{
int32_t len = atoi(arg);
if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
len = TSDB_MAX_ALLOWED_SQL_LEN;
} else if (len < TSDB_MAX_SQL_LEN) {
len = TSDB_MAX_SQL_LEN;
}
}
arguments->max_sql_len = len;
break;
}
}
case 't':
arguments->table_batch = atoi(arg);
break;
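A minimal standalone sketch, not taosdump's code, of the clamp pattern the renamed -B and -L handlers apply above: parse the numeric argument, then force it into an assumed valid range. The bound names and values below are placeholders, not the real TSDB_* macros.
/* sketch: clamp a parsed CLI value into an assumed range */
#include <stdio.h>
#include <stdlib.h>
#define MIN_SQL_LEN 1024   /* assumed lower bound */
#define MAX_SQL_LEN 65480  /* assumed upper bound */
static int clampInt(int v, int lo, int hi) {
    if (v < lo) return lo;
    if (v > hi) return hi;
    return v;
}
int main(int argc, char *argv[]) {
    int requested = (argc > 1) ? atoi(argv[1]) : 0;
    int effective = clampInt(requested, MIN_SQL_LEN, MAX_SQL_LEN);
    printf("requested max_sql_len: %d, effective: %d\n", requested, effective);
    return 0;
}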
@ -398,27 +402,27 @@ static resultStatistics g_resultStatistics = {0};
static FILE *g_fpOfResult = NULL;
static int g_numOfCores = 1;
int taosDumpOut(struct arguments *arguments);
int taosDumpIn(struct arguments *arguments);
void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp);
int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon);
int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName);
void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName);
void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName);
int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName);
int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName);
int taosCheckParam(struct arguments *arguments);
void taosFreeDbInfos();
static int taosDumpOut(struct arguments *arguments);
static int taosDumpIn(struct arguments *arguments);
static void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp);
static int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon);
static int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName);
static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName);
static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName);
static int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName);
static int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName);
static int taosCheckParam(struct arguments *arguments);
static void taosFreeDbInfos();
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName);
struct arguments g_args = {
// connection option
NULL,
"root",
NULL,
"root",
#ifdef _TD_POWER_
"powerdb",
"powerdb",
#else
"taosdata",
"taosdata",
#endif
0,
"",
@ -432,8 +436,8 @@ struct arguments g_args = {
false,
false,
// dump format option
false,
false,
false, // schemeonly
true, // with_property
0,
INT64_MAX,
1,
@ -523,7 +527,7 @@ int main(int argc, char *argv[]) {
/* Parse our arguments; every option seen by parse_opt will be
reflected in arguments. */
if (argc > 1)
if (argc > 2)
parse_args(argc, argv, &g_args);
argp_parse(&argp, argc, argv, 0, 0, &g_args);
@ -675,10 +679,10 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS
}
sprintf(tempCommand, "show tables like %s", table);
result = taos_query(taosCon, tempCommand);
result = taos_query(taosCon, tempCommand);
int32_t code = taos_errno(result);
if (code != 0) {
fprintf(stderr, "failed to run command %s\n", tempCommand);
free(tempCommand);
@ -705,12 +709,12 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS
free(tempCommand);
return 0;
}
sprintf(tempCommand, "show stables like %s", table);
result = taos_query(taosCon, tempCommand);
result = taos_query(taosCon, tempCommand);
code = taos_errno(result);
if (code != 0) {
fprintf(stderr, "failed to run command %s\n", tempCommand);
free(tempCommand);
@ -748,7 +752,7 @@ int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric
return -1;
}
}
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN);
tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
@ -770,7 +774,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
}
sprintf(tmpCommand, "select tbname from %s", metric);
TAOS_RES *res = taos_query(taosCon, tmpCommand);
int32_t code = taos_errno(res);
if (code != 0) {
@ -792,20 +796,20 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
}
TAOS_FIELD *fields = taos_fetch_fields(res);
int32_t numOfTable = 0;
while ((row = taos_fetch_row(res)) != NULL) {
while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes);
tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
taosWrite(fd, &tableRecord, sizeof(STableRecord));
taosWrite(fd, &tableRecord, sizeof(STableRecord));
numOfTable++;
}
taos_free_result(res);
lseek(fd, 0, SEEK_SET);
int maxThreads = arguments->thread_num;
int tableOfPerFile ;
if (numOfTable <= arguments->thread_num) {
@ -815,16 +819,16 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
tableOfPerFile = numOfTable / arguments->thread_num;
if (0 != numOfTable % arguments->thread_num) {
tableOfPerFile += 1;
}
}
}
char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
if (NULL == tblBuf){
fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
close(fd);
return -1;
}
int32_t numOfThread = *totalNumOfThread;
int subFd = -1;
for (; numOfThread < maxThreads; numOfThread++) {
@ -838,7 +842,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
(void)remove(tmpBuf);
}
sprintf(tmpBuf, ".select-tbname.tmp");
(void)remove(tmpBuf);
(void)remove(tmpBuf);
free(tblBuf);
close(fd);
return -1;
@ -856,11 +860,11 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
sprintf(tmpBuf, ".select-tbname.tmp");
(void)remove(tmpBuf);
if (fd >= 0) {
close(fd);
fd = -1;
}
}
*totalNumOfThread = numOfThread;
@ -884,7 +888,7 @@ int taosDumpOut(struct arguments *arguments) {
} else {
sprintf(tmpBuf, "dbs.sql");
}
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@ -916,9 +920,9 @@ int taosDumpOut(struct arguments *arguments) {
taosDumpCharset(fp);
sprintf(command, "show databases");
result = taos_query(taos, command);
result = taos_query(taos, command);
int32_t code = taos_errno(result);
if (code != 0) {
fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result));
goto _exit_failure;
@ -955,15 +959,17 @@ int taosDumpOut(struct arguments *arguments) {
goto _exit_failure;
}
strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
if (arguments->with_property) {
dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
//dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
//dbInfos[count]->daysToKeep1;
//dbInfos[count]->daysToKeep2;
@ -974,8 +980,10 @@ int taosDumpOut(struct arguments *arguments) {
dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
//dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
@ -1007,8 +1015,8 @@ int taosDumpOut(struct arguments *arguments) {
g_resultStatistics.totalDatabasesOfDumpOut++;
sprintf(command, "use %s", dbInfos[0]->name);
result = taos_query(taos, command);
result = taos_query(taos, command);
int32_t code = taos_errno(result);
if (code != 0) {
fprintf(stderr, "invalid database %s\n", dbInfos[0]->name);
@ -1038,7 +1046,7 @@ int taosDumpOut(struct arguments *arguments) {
int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name);
if (0 == ret) {
superTblCnt++;
}
}
}
retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd);
}
@ -1050,7 +1058,7 @@ int taosDumpOut(struct arguments *arguments) {
goto _clean_tmp_file;
}
}
// TODO: save dump super table <superTblCnt> into result_output.txt
fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt);
g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
@ -1076,7 +1084,7 @@ int taosDumpOut(struct arguments *arguments) {
taos_close(taos);
taos_free_result(result);
tfree(command);
taosFreeDbInfos();
taosFreeDbInfos();
fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows);
return 0;
@ -1090,15 +1098,17 @@ _exit_failure:
return -1;
}
int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) {
int taosGetTableDes(
char* dbName, char *table,
STableDef *tableDes, TAOS* taosCon, bool isSuperTable) {
TAOS_ROW row = NULL;
TAOS_RES* res = NULL;
int count = 0;
char sqlstr[COMMAND_SIZE];
sprintf(sqlstr, "describe %s.%s;", dbName, table);
res = taos_query(taosCon, sqlstr);
res = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
@ -1108,7 +1118,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
TAOS_FIELD *fields = taos_fetch_fields(res);
tstrncpy(tableDes->name, table, TSDB_COL_NAME_LEN);
tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
while ((row = taos_fetch_row(res)) != NULL) {
strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
@ -1128,23 +1138,23 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
if (isSuperTable) {
return count;
}
// if the child table has tags, use 'select tagName from table' to get the tag value
for (int i = 0 ; i < count; i++) {
if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
sprintf(sqlstr, "select %s from %s.%s", tableDes->cols[i].field, dbName, table);
res = taos_query(taosCon, sqlstr);
res = taos_query(taosCon, sqlstr);
code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
taos_free_result(res);
return -1;
}
fields = taos_fetch_fields(res);
fields = taos_fetch_fields(res);
row = taos_fetch_row(res);
if (NULL == row) {
@ -1159,7 +1169,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
res = NULL;
continue;
}
int32_t* length = taos_fetch_lengths(res);
//int32_t* length = taos_fetch_lengths(tmpResult);
@ -1188,16 +1198,16 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
case TSDB_DATA_TYPE_BINARY: {
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
tableDes->cols[i].note[0] = '\'';
char tbuf[COMMAND_SIZE];
converStringToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE);
char tbuf[COL_NOTE_LEN];
converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf);
*(pstr++) = '\'';
break;
}
case TSDB_DATA_TYPE_NCHAR: {
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
char tbuf[COMMAND_SIZE];
convertNCharToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE);
char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' '
convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
sprintf(tableDes->cols[i].note, "\'%s\'", tbuf);
break;
}
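A small sketch of why the temporary buffer above is sized two bytes smaller than the note it feeds: the converted value must still fit once the surrounding single quotes are added. COL_NOTE_LEN mirrors the new define; the tag value and everything else is illustrative.
/* sketch: quote a readable tag value into a bounded note buffer */
#include <stdio.h>
#define COL_NOTE_LEN 128
int main(void) {
    char note[COL_NOTE_LEN];
    char tbuf[COL_NOTE_LEN - 2];                /* reserve 2 bytes for the quotes */
    snprintf(tbuf, sizeof(tbuf), "%s", "Beijing.Chaoyang");
    snprintf(note, sizeof(note), "'%s'", tbuf); /* wrap the value in single quotes */
    printf("tag note: %s\n", note);
    return 0;
}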
@ -1219,15 +1229,17 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
default:
break;
}
taos_free_result(res);
res = NULL;
res = NULL;
}
return count;
}
int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName) {
int32_t taosDumpTable(
char *table, char *metric, struct arguments *arguments,
FILE *fp, TAOS* taosCon, char* dbName) {
int count = 0;
STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
@ -1280,9 +1292,10 @@ void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
if (isDumpProperty) {
pstr += sprintf(pstr,
"TABLES %d VGROUPS %d REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION '%s' UPDATE %d",
dbInfo->ntables, dbInfo->vgroups, dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache,
dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->wallevel, dbInfo->fsync, dbInfo->comp, dbInfo->precision, dbInfo->update);
"REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache,
dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->fsync, dbInfo->cachelast,
dbInfo->comp, dbInfo->precision, dbInfo->update);
}
pstr += sprintf(pstr, ";");
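For illustration only, a compilable snippet that prints roughly what the rewritten clause above now emits (FSYNC spelled correctly, CACHELAST included, and TABLES/VGROUPS/WALLEVEL dropped); the database name and all property values are made up.
/* sketch: sample output of the new CREATE DATABASE clause */
#include <stdio.h>
int main(void) {
    char buf[512];
    snprintf(buf, sizeof(buf),
        "CREATE DATABASE IF NOT EXISTS demo "
        "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d "
        "MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d "
        "PRECISION '%s' UPDATE %d;",
        1, 1, 10, "3650", 16, 6, 100, 4096, 3000, 0, 2, "ms", 0);
    puts(buf);
    return 0;
}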
@ -1293,8 +1306,8 @@ void* taosDumpOutWorkThreadFp(void *arg)
{
SThreadParaObj *pThread = (SThreadParaObj*)arg;
STableRecord tableRecord;
int fd;
int fd;
char tmpBuf[TSDB_FILENAME_LEN*4] = {0};
sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex);
fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
@ -1305,13 +1318,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
FILE *fp = NULL;
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
}
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@ -1321,13 +1334,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
memset(tmpBuf, 0, TSDB_FILENAME_LEN);
sprintf(tmpBuf, "use %s", pThread->dbName);
TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
int32_t code = taos_errno(tmpResult);
if (code != 0) {
fprintf(stderr, "invalid database %s\n", pThread->dbName);
taos_free_result(tmpResult);
fclose(fp);
fclose(fp);
close(fd);
return NULL;
}
@ -1340,14 +1353,17 @@ void* taosDumpOutWorkThreadFp(void *arg)
ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
if (readLen <= 0) break;
int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &g_args, fp, pThread->taosCon, pThread->dbName);
int ret = taosDumpTable(
tableRecord.name, tableRecord.metric, &g_args,
fp, pThread->taosCon, pThread->dbName);
if (ret >= 0) {
// TODO: sum table count and table rows by self
pThread->tablesOfDumpOut++;
pThread->rowsOfDumpOut += ret;
if (pThread->rowsOfDumpOut >= lastRowsPrint) {
printf(" %"PRId64 " rows already be dumpout from database %s\n", pThread->rowsOfDumpOut, pThread->dbName);
printf(" %"PRId64 " rows already be dumpout from database %s\n",
pThread->rowsOfDumpOut, pThread->dbName);
lastRowsPrint += 5000000;
}
@ -1355,15 +1371,18 @@ void* taosDumpOutWorkThreadFp(void *arg)
if (tablesInOneFile >= g_args.table_batch) {
fclose(fp);
tablesInOneFile = 0;
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql",
g_args.outpath, pThread->dbName,
pThread->threadIndex, fileNameIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex);
sprintf(tmpBuf, "%s.tables.%d-%d.sql",
pThread->dbName, pThread->threadIndex, fileNameIndex);
}
fileNameIndex++;
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@ -1377,7 +1396,7 @@ void* taosDumpOutWorkThreadFp(void *arg)
taos_free_result(tmpResult);
close(fd);
fclose(fp);
fclose(fp);
return NULL;
}
@ -1385,15 +1404,16 @@ void* taosDumpOutWorkThreadFp(void *arg)
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName)
{
pthread_attr_t thattr;
SThreadParaObj *threadObj = (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
SThreadParaObj *threadObj =
(SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
for (int t = 0; t < numOfThread; ++t) {
SThreadParaObj *pThread = threadObj + t;
pThread->rowsOfDumpOut = 0;
pThread->tablesOfDumpOut = 0;
pThread->threadIndex = t;
pThread->totalThreads = numOfThread;
tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN);
pThread->taosCon = taosCon;
tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN);
pThread->taosCon = taosCon;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
@ -1408,7 +1428,7 @@ static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, i
pthread_join(threadObj[t].threadID, NULL);
}
// TODO: sum all thread dump table count and rows of per table, then save into result_output.txt
// TODO: sum all thread dump table count and rows of per table, then save into result_output.txt
int64_t totalRowsOfDumpOut = 0;
int64_t totalChildTblsOfDumpOut = 0;
for (int32_t t = 0; t < numOfThread; ++t) {
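A minimal sketch of the fan-out/join pattern the dump-out worker threads follow: create N joinable threads, wait for all of them, then sum per-thread counters. The worker payload and row counts below are invented for the example.
/* sketch: create N workers, join them, aggregate their results */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
typedef struct { int index; long rows; } Worker;
static void *workerFp(void *arg) {
    Worker *w = (Worker *)arg;
    w->rows = (w->index + 1) * 1000L;   /* pretend each thread dumped some rows */
    return NULL;
}
int main(void) {
    int n = 4;
    Worker   *workers = calloc(n, sizeof(Worker));
    pthread_t *tids   = calloc(n, sizeof(pthread_t));
    for (int i = 0; i < n; i++) {
        workers[i].index = i;
        pthread_create(&tids[i], NULL, workerFp, &workers[i]);
    }
    long total = 0;
    for (int i = 0; i < n; i++) {
        pthread_join(tids[i], NULL);    /* wait for every worker */
        total += workers[i].rows;
    }
    printf("total rows dumped: %ld\n", total);
    free(workers);
    free(tids);
    return 0;
}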
@ -1449,7 +1469,7 @@ int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName) {
}
int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
{
TAOS_ROW row;
int fd = -1;
@ -1457,8 +1477,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
char sqlstr[TSDB_MAX_SQL_LEN] = {0};
sprintf(sqlstr, "show %s.stables", dbName);
TAOS_RES* res = taos_query(taosCon, sqlstr);
TAOS_RES* res = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason: %s\n", sqlstr, taos_errstr(res));
@ -1478,13 +1498,14 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
(void)remove(".stables.tmp");
exit(-1);
}
while ((row = taos_fetch_row(res)) != NULL) {
while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
taosWrite(fd, &tableRecord, sizeof(STableRecord));
}
}
taos_free_result(res);
(void)lseek(fd, 0, SEEK_SET);
@ -1492,7 +1513,7 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
while (1) {
ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
if (readLen <= 0) break;
int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName);
if (0 == ret) {
superTblCnt++;
@ -1505,8 +1526,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
close(fd);
(void)remove(".stables.tmp");
return 0;
return 0;
}
@ -1516,19 +1537,19 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
STableRecord tableRecord;
taosDumpCreateDbClause(dbInfo, arguments->with_property, fp);
fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfo->name);
g_resultStatistics.totalDatabasesOfDumpOut++;
char sqlstr[TSDB_MAX_SQL_LEN] = {0};
fprintf(fp, "USE %s;\n\n", dbInfo->name);
(void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp);
sprintf(sqlstr, "show %s.tables", dbInfo->name);
TAOS_RES* res = taos_query(taosCon, sqlstr);
TAOS_RES* res = taos_query(taosCon, sqlstr);
int code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
@ -1547,15 +1568,17 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
}
TAOS_FIELD *fields = taos_fetch_fields(res);
int32_t numOfTable = 0;
while ((row = taos_fetch_row(res)) != NULL) {
while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
taosWrite(fd, &tableRecord, sizeof(STableRecord));
numOfTable++;
}
taos_free_result(res);
@ -1570,7 +1593,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
tableOfPerFile = numOfTable / g_args.thread_num;
if (0 != numOfTable % g_args.thread_num) {
tableOfPerFile += 1;
}
}
}
char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
@ -1579,7 +1602,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
close(fd);
return -1;
}
int32_t numOfThread = 0;
int subFd = -1;
for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) {
@ -1616,7 +1639,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
close(fd);
fd = -1;
}
taos_free_result(res);
// start multi threads to dumpout
@ -1624,7 +1647,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
(void)remove(tmpBuf);
}
}
free(tblBuf);
return 0;
@ -1637,15 +1660,18 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha
char* pstr = sqlstr;
pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", dbName, tableDes->name);
pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
dbName, tableDes->name);
for (; counter < numOfCols; counter++) {
if (tableDes->cols[counter].note[0] != '\0') break;
if (counter == 0) {
pstr += sprintf(pstr, " (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
pstr += sprintf(pstr, " (%s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
pstr += sprintf(pstr, ", %s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
}
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
@ -1658,9 +1684,11 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha
for (; counter < numOfCols; counter++) {
if (counter == count_temp) {
pstr += sprintf(pstr, ") TAGS (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
pstr += sprintf(pstr, ") TAGS (%s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type);
pstr += sprintf(pstr, ", %s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
}
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
@ -1687,7 +1715,8 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols
char *pstr = NULL;
pstr = tmpBuf;
pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", dbName, tableDes->name, dbName, metric);
pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (",
dbName, tableDes->name, dbName, metric);
for (; counter < numOfCols; counter++) {
if (tableDes->cols[counter].note[0] != '\0') break;
@ -1735,7 +1764,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
char *pstr = NULL;
TAOS_ROW row = NULL;
int numFields = 0;
if (arguments->schemaonly) {
return 0;
}
@ -1750,11 +1779,11 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
pstr = tmpBuffer;
char sqlstr[1024] = {0};
sprintf(sqlstr,
"select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
sprintf(sqlstr,
"select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
dbName, tbname, arguments->start_time, arguments->end_time);
TAOS_RES* tmpResult = taos_query(taosCon, sqlstr);
TAOS_RES* tmpResult = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(tmpResult);
if (code != 0) {
fprintf(stderr, "failed to run command %s, reason: %s\n", sqlstr, taos_errstr(tmpResult));
@ -1774,7 +1803,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
while ((row = taos_fetch_row(tmpResult)) != NULL) {
pstr = tmpBuffer;
curr_sqlstr_len = 0;
int32_t* length = taos_fetch_lengths(tmpResult); // act len
if (count == 0) {
@ -1829,7 +1858,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
//pstr = stpcpy(pstr, tbuf);
//*(pstr++) = '\'';
pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
break;
}
case TSDB_DATA_TYPE_NCHAR: {
@ -1857,10 +1886,10 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") ");
totalRows++;
totalRows++;
count++;
fprintf(fp, "%s", tmpBuffer);
if (totalRows >= lastRowsPrint) {
printf(" %"PRId64 " rows already be dumpout from %s.%s\n", totalRows, dbName, tbname);
lastRowsPrint += 5000000;
@ -2206,7 +2235,7 @@ static FILE* taosOpenDumpInFile(char *fptr) {
}
char *fname = full_path.we_wordv[0];
FILE *f = fopen(fname, "r");
if (f == NULL) {
fprintf(stderr, "ERROR: failed to open file %s\n", fname);
@ -2240,7 +2269,7 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c
line[--read_len] = '\0';
//if (read_len == 0 || isCommentLine(line)) { // line starts with #
if (read_len == 0 ) {
if (read_len == 0 ) {
continue;
}
@ -2259,8 +2288,8 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c
}
memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
cmd_len = 0;
cmd_len = 0;
if (lineNo >= lastRowsPrint) {
printf(" %d lines already be executed from file %s\n", lineNo, fileName);
lastRowsPrint += 5000000;
@ -2300,7 +2329,7 @@ static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
if (totalThreads > tsSqlFileNum) {
totalThreads = tsSqlFileNum;
}
SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj));
for (int32_t t = 0; t < totalThreads; ++t) {
pThread = threadObj + t;
@ -2330,7 +2359,7 @@ static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
int taosDumpIn(struct arguments *arguments) {
assert(arguments->isDumpIn);
TAOS *taos = NULL;
FILE *fp = NULL;
@ -2345,22 +2374,22 @@ int taosDumpIn(struct arguments *arguments) {
int32_t tsSqlFileNumOfTbls = tsSqlFileNum;
if (tsDbSqlFile[0] != 0) {
tsSqlFileNumOfTbls--;
fp = taosOpenDumpInFile(tsDbSqlFile);
if (NULL == fp) {
fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile);
return -1;
}
fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile);
taosLoadFileCharset(fp, tsfCharset);
taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile);
}
if (0 != tsSqlFileNumOfTbls) {
taosStartDumpInWorkThreads(taos, arguments);
}
}
taos_close(taos);
taosFreeSQLFiles();

View File

@ -249,7 +249,7 @@ typedef struct SAcctObj {
} SAcctObj;
typedef struct {
char db[TSDB_DB_NAME_LEN];
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
int8_t type;
int16_t numOfColumns;
int32_t index;

View File

@ -129,7 +129,7 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) {
SShowObj *pShow = calloc(1, showObjSize);
pShow->type = pShowMsg->type;
pShow->payloadLen = htons(pShowMsg->payloadLen);
tstrncpy(pShow->db, pShowMsg->db, TSDB_DB_NAME_LEN);
tstrncpy(pShow->db, pShowMsg->db, TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN);
memcpy(pShow->payload, pShowMsg->payload, pShow->payloadLen);
pShow = mnodePutShowObj(pShow);
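A short sketch of the apparent reason for the larger buffer: the show message seems to carry an account-qualified database name, so the field must hold both parts plus the separator. The length macros below are placeholders, not the real TSDB_* values.
/* sketch: an account-qualified name needs room for both components */
#include <stdio.h>
#define ACCT_ID_LEN 11   /* assumed */
#define DB_NAME_LEN 33   /* assumed */
int main(void) {
    char db[ACCT_ID_LEN + DB_NAME_LEN];
    snprintf(db, sizeof(db), "%s.%s", "root", "power_meters");
    printf("qualified db name: %s (buffer: %zu bytes)\n", db, sizeof(db));
    return 0;
}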

View File

@ -171,7 +171,7 @@ typedef struct HttpThread {
EpollFd pollFd;
int32_t numOfContexts;
int32_t threadId;
char label[HTTP_LABEL_SIZE];
char label[HTTP_LABEL_SIZE << 1];
bool (*processData)(HttpContext *pContext);
} HttpThread;

View File

@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) {
return NULL;
}
} else {
pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 30);
pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 20);
if ( pRpc->pCache == NULL ) {
tError("%s failed to init connection cache", pRpc->label);
rpcClose(pRpc);

View File

@ -230,6 +230,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_BALANCING, "Database is balancing
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, "Database suspended")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, "Database write operation denied")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_SYNCING, "Database is syncing")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_TSDB_STATE, "Invalid tsdb state")
// tsdb
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, "Invalid table ID")
@ -253,6 +254,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECONFIGURE, "Need to reconfigure t
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO, "Invalid information to create table")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_AVAIL_DISK, "No available disk")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_MESSED_MSG, "TSDB messed message")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVLD_TAG_VAL, "TSDB invalid tag value")
// query
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, "Invalid handle")

View File

@ -539,7 +539,7 @@ static void taosNetTestServer(char *host, int32_t startPort, int32_t pkgLen) {
}
void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) {
// tscEmbedded = 1;
tscEmbedded = 1;
if (host == NULL) host = tsLocalFqdn;
if (port == 0) port = tsServerPort;
if (pkgLen <= 10) pkgLen = 1000;
@ -550,6 +550,7 @@ void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) {
} else if (0 == strcmp("server", role)) {
taosNetTestServer(host, port, pkgLen);
} else if (0 == strcmp("rpc", role)) {
tscEmbedded = 0;
taosNetTestRpc(host, port, pkgLen);
} else if (0 == strcmp("sync", role)) {
taosNetCheckSync(host, port);
@ -559,5 +560,5 @@ void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) {
taosNetTestStartup(host, port);
}
// tscEmbedded = 0;
tscEmbedded = 0;
}

View File

@ -310,11 +310,11 @@ int32_t vnodeOpen(int32_t vgId) {
vnodeCleanUp(pVnode);
return terrno;
} else if (tsdbGetState(pVnode->tsdb) != TSDB_STATE_OK) {
vError("vgId:%d, failed to open tsdb, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica,
tstrerror(terrno));
vError("vgId:%d, failed to open tsdb(state: %d), replica:%d reason:%s", pVnode->vgId,
tsdbGetState(pVnode->tsdb), pVnode->syncCfg.replica, tstrerror(terrno));
if (pVnode->syncCfg.replica <= 1) {
vnodeCleanUp(pVnode);
return terrno;
return TSDB_CODE_VND_INVALID_TSDB_STATE;
} else {
pVnode->fversion = 0;
pVnode->version = 0;

View File

@ -119,7 +119,6 @@ void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code) {
void *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) {
vError("vgId:%d, vnode not found while confirm forward", vgId);
return;
}
dnodeSendRpcVWriteRsp(pVnode, wparam, code);
@ -162,4 +161,4 @@ int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) {
void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code, bool force) {
SVnodeObj *pVnode = vparam;
syncConfirmForward(pVnode->sync, version, code, force);
}
}

View File

@ -317,12 +317,13 @@ int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rpar
void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) {
SVnodeObj *pVnode = vparam;
if (pVnode) {
int32_t queued = atomic_sub_fetch_32(&pVnode->queuedWMsg, 1);
int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len);
int32_t queued = atomic_sub_fetch_32(&pVnode->queuedWMsg, 1);
int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len);
vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d size:%" PRId64, pVnode->vgId, pWrite,
pWrite->rpcMsg.ahandle, queued, queuedSize);
vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d size:%" PRId64, pVnode->vgId, pWrite,
pWrite->rpcMsg.ahandle, queued, queuedSize);
}
taosFreeQitem(pWrite);
vnodeRelease(pVnode);
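A standalone sketch of the queue accounting shown above, using the GCC/Clang __atomic builtins to decrement a message counter and a byte counter together; the field names and initial values are illustrative, not the vnode's actual members.
/* sketch: atomically decrement queued-message and queued-size counters */
#include <stdint.h>
#include <stdio.h>
static int32_t queuedMsgs  = 10;
static int64_t queuedBytes = 4096;
int main(void) {
    int32_t msgs  = __atomic_sub_fetch(&queuedMsgs, 1, __ATOMIC_SEQ_CST);
    int64_t bytes = __atomic_sub_fetch(&queuedBytes, 512, __ATOMIC_SEQ_CST);
    printf("queued:%d size:%lld\n", msgs, (long long)bytes);
    return 0;
}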
@ -371,8 +372,8 @@ static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) {
taosMsleep(ms);
return 0;
} else {
void *unUsed = NULL;
taosTmrReset(vnodeFlowCtrlMsgToWQueue, 100, pWrite, tsDnodeTmr, &unUsed);
void *unUsedTimerId = NULL;
taosTmrReset(vnodeFlowCtrlMsgToWQueue, 100, pWrite, tsDnodeTmr, &unUsedTimerId);
vTrace("vgId:%d, msg:%p, app:%p, perform flowctrl, retry:%d", pVnode->vgId, pWrite, pWrite->rpcMsg.ahandle,
pWrite->processedCount);

tests/Jenkinsfile
View File

@ -21,6 +21,7 @@ def pre_test(){
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python/linux/python3/
'''
return 1
}

View File

@ -28,20 +28,22 @@ class TDTestCase:
sql = "select server_version()"
ret = tdSql.query(sql)
version = tdSql.getData(0, 0)[0:3]
expectedVersion = "2.0"
if(version == expectedVersion):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, 0, 0, version, expectedVersion))
expectedVersion_dev = "2.0"
expectedVersion_master = "2.1"
if(version == expectedVersion_dev or version == expectedVersion_master):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version))
else:
tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s" % (sql, 0, 0, version, expectedVersion))
tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s or %s " % (sql, 0, 0, version, expectedVersion_dev, expectedVersion_master))
sql = "select client_version()"
ret = tdSql.query(sql)
version = tdSql.getData(0, 0)[0:3]
expectedVersion = "2.0"
if(version == expectedVersion):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, 0, 0, version, expectedVersion))
expectedVersion_dev = "2.0"
expectedVersion_master = "2.1"
if(version == expectedVersion_dev or version == expectedVersion_master):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version))
else:
tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s" % (sql, 0, 0, version, expectedVersion))
tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s or %s " % (sql, 0, 0, version, expectedVersion_dev, expectedVersion_master))
def stop(self):

View File

@ -1,119 +0,0 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import taos
import random
import argparse
class BuildDockerCluser:
def __init__(self, hostName, user, password, configDir, numOfNodes, clusterVersion, dockerDir, removeFlag):
self.hostName = hostName
self.user = user
self.password = password
self.configDir = configDir
self.numOfNodes = numOfNodes
self.clusterVersion = clusterVersion
self.dockerDir = dockerDir
self.removeFlag = removeFlag
def getConnection(self):
self.conn = taos.connect(
host = self.hostName,
user = self.user,
password = self.password,
config = self.configDir)
def createDondes(self):
self.cursor = self.conn.cursor()
for i in range(2, self.numOfNodes + 1):
self.cursor.execute("create dnode tdnode%d" % i)
def startArbitrator(self):
print("start arbitrator")
os.system("docker exec -d $(docker ps|grep tdnode1|awk '{print $1}') tarbitrator")
def run(self):
if self.numOfNodes < 2 or self.numOfNodes > 10:
print("the number of nodes must be between 2 and 10")
exit(0)
print("remove Flag value %s" % self.removeFlag)
if self.removeFlag == False:
os.system("./cleanClusterEnv.sh -d %s" % self.dockerDir)
os.system("./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.clusterVersion, self.dockerDir))
self.getConnection()
self.createDondes()
self.startArbitrator()
parser = argparse.ArgumentParser()
parser.add_argument(
'-H',
'--host',
action='store',
default='tdnode1',
type=str,
help='host name to be connected (default: tdnode1)')
parser.add_argument(
'-u',
'--user',
action='store',
default='root',
type=str,
help='user (default: root)')
parser.add_argument(
'-p',
'--password',
action='store',
default='taosdata',
type=str,
help='password (default: taosdata)')
parser.add_argument(
'-c',
'--config-dir',
action='store',
default='/etc/taos',
type=str,
help='configuration directory (default: /etc/taos)')
parser.add_argument(
'-n',
'--num-of-nodes',
action='store',
default=2,
type=int,
help='number of nodes in the cluster (default: 2, min: 2, max: 5)')
parser.add_argument(
'-v',
'--version',
action='store',
default='2.0.18.1',
type=str,
help='the version of the cluster to be build, Default is 2.0.17.1')
parser.add_argument(
'-d',
'--docker-dir',
action='store',
default='/data',
type=str,
help='the data dir for docker, default is /data')
parser.add_argument(
'--flag',
action='store_true',
help='remove docker containers flag, default: True')
args = parser.parse_args()
cluster = BuildDockerCluser(args.host, args.user, args.password, args.config_dir, args.num_of_nodes, args.version, args.docker_dir, args.flag)
cluster.run()
# usage 1: python3 basic.py -n 2 --flag (flag is True)
# usage 2: python3 basic.py -n 2 (flag should be False when it is not specified)

View File

@ -1,39 +0,0 @@
#!/bin/bash
echo "Executing cleanClusterEnv.sh"
CURR_DIR=`pwd`
if [ $# != 2 ]; then
echo "argument list need input : "
echo " -d docker dir"
exit 1
fi
DOCKER_DIR=
while getopts "d:" arg
do
case $arg in
d)
DOCKER_DIR=$OPTARG
;;
?)
echo "unkonwn argument"
;;
esac
done
function removeDockerContainers {
cd $DOCKER_DIR
docker-compose down --remove-orphans
}
function cleanEnv {
echo "Clean up docker environment"
for i in {1..10}
do
rm -rf $DOCKER_DIR/node$i/data/*
rm -rf $DOCKER_DIR/node$i/log/*
done
}
removeDockerContainers
cleanEnv

View File

@ -1,62 +0,0 @@
version: '3.7'
services:
td2.0-node4:
build:
context: .
args:
- PACKAGE=${PACKAGE}
- TARBITRATORPKG=${TARBITRATORPKG}
- EXTRACTDIR=${DIR}
- EXTRACTDIR2=${DIR2}
- DATADIR=${DATADIR}
image: 'tdengine:${VERSION}'
container_name: 'tdnode4'
cap_add:
- ALL
stdin_open: true
tty: true
environment:
TZ: "Asia/Shanghai"
command: >
sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime &&
echo $TZ > /etc/timezone &&
mkdir /coredump &&
echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf &&
sysctl -p &&
exec my-main-application"
extra_hosts:
- "tdnode2:172.27.0.8"
- "tdnode3:172.27.0.9"
- "tdnode4:172.27.0.10"
- "tdnode5:172.27.0.11"
- "tdnode6:172.27.0.12"
- "tdnode7:172.27.0.13"
- "tdnode8:172.27.0.14"
- "tdnode9:172.27.0.15"
- "tdnode10:172.27.0.16"
volumes:
# bind data directory
- type: bind
source: ${DATADIR}/node4/data
target: /var/lib/taos
# bind log directory
- type: bind
source: ${DATADIR}/node4/log
target: /var/log/taos
# bind configuration
- type: bind
source: ${DATADIR}/node4/cfg
target: /etc/taos
# bind core dump path
- type: bind
source: ${DATADIR}/node4/core
target: /coredump
- type: bind
source: ${DATADIR}
target: /root
hostname: tdnode4
networks:
taos_update_net:
ipv4_address: 172.27.0.10
command: taosd

View File

@ -1,62 +0,0 @@
version: '3.7'
services:
td2.0-node5:
build:
context: .
args:
- PACKAGE=${PACKAGE}
- TARBITRATORPKG=${TARBITRATORPKG}
- EXTRACTDIR=${DIR}
- EXTRACTDIR2=${DIR2}
- DATADIR=${DATADIR}
image: 'tdengine:${VERSION}'
container_name: 'tdnode5'
cap_add:
- ALL
stdin_open: true
tty: true
environment:
TZ: "Asia/Shanghai"
command: >
sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime &&
echo $TZ > /etc/timezone &&
mkdir /coredump &&
echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf &&
sysctl -p &&
exec my-main-application"
extra_hosts:
- "tdnode2:172.27.0.8"
- "tdnode3:172.27.0.9"
- "tdnode4:172.27.0.10"
- "tdnode5:172.27.0.11"
- "tdnode6:172.27.0.12"
- "tdnode7:172.27.0.13"
- "tdnode8:172.27.0.14"
- "tdnode9:172.27.0.15"
- "tdnode10:172.27.0.16"
volumes:
# bind data directory
- type: bind
source: ${DATADIR}/node5/data
target: /var/lib/taos
# bind log directory
- type: bind
source: ${DATADIR}/node5/log
target: /var/log/taos
# bind configuration
- type: bind
source: ${DATADIR}/node5/cfg
target: /etc/taos
# bind core dump path
- type: bind
source: ${DATADIR}/node5/core
target: /coredump
- type: bind
source: ${DATADIR}
target: /root
hostname: tdnode5
networks:
taos_update_net:
ipv4_address: 172.27.0.11
command: taosd

View File

@ -17332,3 +17332,168 @@
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:lib_build_and_cache_attr
fun:lib_getattr
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:PyEval_EvalCode
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:lib_build_and_cache_attr
fun:lib_getattr
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:PyEval_EvalCode
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:_my_Py_InitModule
fun:lib_getattr
fun:b_init_cffi_1_0_external_module
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:_cffi_init
fun:PyInit__bcrypt
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:_PyObject_GC_New
fun:lib_getattr
fun:ffi_internal_new
fun:b_init_cffi_1_0_external_module
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:_cffi_init
fun:PyInit__bcrypt
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:lib_build_cpython_func.isra.87
fun:lib_build_and_cache_attr
fun:lib_getattr
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:lib_build_and_cache_attr
fun:lib_getattr
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:_my_Py_InitModule
fun:b_init_cffi_1_0_external_module
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:_cffi_init
fun:PyInit__bcrypt
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:_my_Py_InitModule
fun:b_init_cffi_1_0_external_module
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:PyInit__openssl
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:_PyObject_GC_New
fun:ffi_internal_new
fun:b_init_cffi_1_0_external_module
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:_cffi_init
fun:PyInit__bcrypt
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
}

View File

@ -0,0 +1,39 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from basic import *
from util.sql import tdSql
class TDTestCase:
def init(self):
# tdLog.debug("start to execute %s" % __file__)
self.numOfNodes = 5
self.dockerDir = "/data"
cluster.init(self.numOfNodes, self.dockerDir)
cluster.prepardBuild()
for i in range(self.numOfNodes):
if i == 0:
cluster.cfg("role", "1", i + 1)
else:
cluster.cfg("role", "2", i + 1)
cluster.run()
td = TDTestCase()
td.init()

View File

View File

@ -0,0 +1,152 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import taos
class BuildDockerCluser:
def init(self, numOfNodes, dockerDir):
self.numOfNodes = numOfNodes
self.dockerDir = dockerDir
self.hostName = "tdnode1"
self.user = "root"
self.password = "taosdata"
self.configDir = "/etc/taos"
self.dirs = ["data", "cfg", "log", "core"]
self.cfgDict = {
"numOfLogLines":"100000000",
"mnodeEqualVnodeNum":"0",
"walLevel":"1",
"numOfThreadsPerCore":"2.0",
"monitor":"0",
"vnodeBak":"1",
"dDebugFlag":"135",
"mDebugFlag":"135",
"sdbDebugFlag":"135",
"rpcDebugFlag":"135",
"tmrDebugFlag":"131",
"cDebugFlag":"135",
"httpDebugFlag":"135",
"monitorDebugFlag":"135",
"udebugFlag":"135",
"jnidebugFlag":"135",
"qdebugFlag":"135",
"maxSQLLength":"1048576"
}
# execute command, and return the output
# ref: https://blog.csdn.net/wowocpp/article/details/80775650
def execCmdAndGetOutput(self, cmd):
r = os.popen(cmd)
text = r.read()
r.close()
return text
def execCmd(self, cmd):
if os.system(cmd) != 0:
quit()
def getTaosdVersion(self):
cmd = "taosd -V |grep version|awk '{print $3}'"
taosdVersion = self.execCmdAndGetOutput(cmd)
cmd = "find %s -name '*server*.tar.gz' | awk -F- '{print $(NF-2)}'|sort|awk 'END {print}'" % self.dockerDir
packageVersion = self.execCmdAndGetOutput(cmd)
if (taosdVersion is None or taosdVersion.isspace()) and (packageVersion is None or packageVersion.isspace()):
print("Please install taosd or have a install package ready")
quit()
else:
self.version = taosdVersion if taosdVersion >= packageVersion else packageVersion
return self.version.strip()
def getConnection(self):
self.conn = taos.connect(
host = self.hostName,
user = self.user,
password = self.password,
config = self.configDir)
def removeFile(self, rootDir, index, dir):
cmd = "rm -rf %s/node%d/%s/*" % (rootDir, index, dir)
self.execCmd(cmd)
def clearEnv(self):
cmd = "cd %s && docker-compose down --remove-orphans" % self.dockerDir
self.execCmd(cmd)
for i in range(1, self.numOfNodes + 1):
self.removeFile(self.dockerDir, i, self.dirs[0])
self.removeFile(self.dockerDir, i, self.dirs[1])
self.removeFile(self.dockerDir, i, self.dirs[2])
def createDir(self, rootDir, index, dir):
cmd = "mkdir -p %s/node%d/%s" % (rootDir, index, dir)
self.execCmd(cmd)
def createDirs(self):
for i in range(1, self.numOfNodes + 1):
for j in range(len(self.dirs)):
self.createDir(self.dockerDir, i, self.dirs[j])
def addExtraCfg(self, option, value):
self.cfgDict.update({option: value})
def cfg(self, option, value, nodeIndex):
cfgPath = "%s/node%d/cfg/taos.cfg" % (self.dockerDir, nodeIndex)
cmd = "echo '%s %s' >> %s" % (option, value, cfgPath)
self.execCmd(cmd)
def updateLocalhosts(self):
cmd = "grep '172.27.0.7 *tdnode1' /etc/hosts"
result = self.execCmdAndGetOutput(cmd)
if result and not result.isspace():
cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts"
self.execCmd(cmd)
def deploy(self):
self.clearEnv()
self.createDirs()
for i in range(1, self.numOfNodes + 1):
self.cfg("firstEp", "tdnode1:6030", i)
for key, value in self.cfgDict.items():
self.cfg(key, value, i)
def createDondes(self):
self.cursor = self.conn.cursor()
for i in range(2, self.numOfNodes + 1):
self.cursor.execute("create dnode tdnode%d" % i)
def startArbitrator(self):
for i in range(1, self.numOfNodes + 1):
self.cfg("arbitrator", "tdnode1:6042", i)
cmd = "docker exec -d $(docker ps|grep tdnode1|awk '{print $1}') tarbitrator"
self.execCmd(cmd)
def prepardBuild(self):
if self.numOfNodes < 2 or self.numOfNodes > 10:
print("the number of nodes must be between 2 and 10")
exit(0)
self.clearEnv()
self.createDirs()
self.updateLocalhosts()
self.deploy()
def run(self):
cmd = "./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.getTaosdVersion(), self.dockerDir)
self.execCmd(cmd)
self.getConnection()
self.createDondes()
cluster = BuildDockerCluser()

View File

@ -32,43 +32,14 @@ do
esac
done
function addTaoscfg {
for((i=1;i<=$NUM_OF_NODES;i++))
do
touch $DOCKER_DIR/node$i/cfg/taos.cfg
echo 'firstEp tdnode1:6030' > $DOCKER_DIR/node$i/cfg/taos.cfg
echo 'fqdn tdnode'$i >> $DOCKER_DIR/node$i/cfg/taos.cfg
echo 'arbitrator tdnode1:6042' >> $DOCKER_DIR/node$i/cfg/taos.cfg
done
}
function createDIR {
for((i=1;i<=$NUM_OF_NODES;i++))
do
mkdir -p $DOCKER_DIR/node$i/data
mkdir -p $DOCKER_DIR/node$i/log
mkdir -p $DOCKER_DIR/node$i/cfg
mkdir -p $DOCKER_DIR/node$i/core
done
}
function cleanEnv {
echo "Clean up docker environment"
for((i=1;i<=$NUM_OF_NODES;i++))
do
rm -rf $DOCKER_DIR/node$i/data/*
rm -rf $DOCKER_DIR/node$i/log/*
done
}
function prepareBuild {
if [ -d $CURR_DIR/../../../../release ]; then
if [ -d $CURR_DIR/../../../release ]; then
echo release exists
rm -rf $CURR_DIR/../../../../release/*
rm -rf $CURR_DIR/../../../release/*
fi
cd $CURR_DIR/../../../../packaging
cd $CURR_DIR/../../../packaging
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
if [ ! -e $DOCKER_DIR/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
@ -76,17 +47,17 @@ function prepareBuild {
echo "generating TDeninge enterprise packages"
./release.sh -v cluster -n $VERSION >> /dev/null 2>&1
if [ ! -e $CURR_DIR/../../../../release/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ]; then
if [ ! -e $CURR_DIR/../../../release/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ]; then
echo "no TDengine install package found"
exit 1
fi
if [ ! -e $CURR_DIR/../../../../release/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
if [ ! -e $CURR_DIR/../../../release/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
echo "no arbitrator install package found"
exit 1
fi
cd $CURR_DIR/../../../../release
cd $CURR_DIR/../../../release
mv TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
mv TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
fi
@ -96,17 +67,17 @@ function prepareBuild {
echo "generating TDeninge community packages"
./release.sh -v edge -n $VERSION >> /dev/null 2>&1
if [ ! -e $CURR_DIR/../../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then
if [ ! -e $CURR_DIR/../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then
echo "no TDengine install package found"
exit 1
fi
if [ ! -e $CURR_DIR/../../../../release/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
if [ ! -e $CURR_DIR/../../../release/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
echo "no arbitrator install package found"
exit 1
fi
cd $CURR_DIR/../../../../release
cd $CURR_DIR/../../../release
mv TDengine-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
mv TDengine-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
fi
@ -147,13 +118,10 @@ function clusterUp {
done
docker_run=$docker_run" up -d"
fi
echo $docker_run |sh
echo $docker_run |sh
echo "docker compose finish"
}
createDIR
cleanEnv
addTaoscfg
prepareBuild
clusterUp

View File

@ -53,7 +53,7 @@ services:
source: ${DATADIR}/node1/core
target: /coredump
- type: bind
source: /data
source: ${DATADIR}
target: /root
hostname: tdnode1
networks:
@ -90,6 +90,11 @@ services:
- "tdnode3:172.27.0.9"
- "tdnode4:172.27.0.10"
- "tdnode5:172.27.0.11"
- "tdnode6:172.27.0.12"
- "tdnode7:172.27.0.13"
- "tdnode8:172.27.0.14"
- "tdnode9:172.27.0.15"
- "tdnode10:172.27.0.16"
volumes:
# bind data directory
- type: bind

View File

@ -26,6 +26,7 @@ services:
sysctl -p &&
exec my-main-application"
extra_hosts:
- "tdnode1:172.27.0.7"
- "tdnode2:172.27.0.8"
- "tdnode3:172.27.0.9"
- "tdnode4:172.27.0.10"

View File

@ -23,6 +23,7 @@ python3 ./test.py -f insert/insertIntoTwoTables.py
python3 ./test.py -f insert/before_1970.py
python3 bug2265.py
python3 ./test.py -f insert/bug3654.py
python3 ./test.py -f insert/insertDynamicColBeforeVal.py
#table
python3 ./test.py -f table/alter_wal0.py
@ -151,8 +152,7 @@ python3 test.py -f tools/taosdemoTestTblAlt.py
python3 test.py -f tools/taosdemoTestSampleData.py
python3 test.py -f tools/taosdemoTestInterlace.py
python3 test.py -f tools/taosdemoTestQuery.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
# update
@ -226,6 +226,7 @@ python3 ./test.py -f query/querySecondtscolumnTowherenow.py
python3 ./test.py -f query/queryFilterTswithDateUnit.py
python3 ./test.py -f query/queryTscomputWithNow.py
python3 ./test.py -f query/computeErrorinWhere.py
python3 ./test.py -f query/queryTsisNull.py
@ -328,4 +329,7 @@ python3 ./test.py -f alter/alter_debugFlag.py
python3 ./test.py -f query/queryBetweenAnd.py
python3 ./test.py -f tag_lite/alter_tag.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
#======================p4-end===============

View File

@ -0,0 +1,136 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()
        tdSql.execute("drop database if exists db")
        tdSql.execute("create database if not exists db keep 3650")
        tdSql.execute("use db")

        tdLog.printNoPrefix("==========step1:create table")
        tdSql.execute(
            "create table stb1 (ts timestamp, c11 int, c12 float ) TAGS(t11 int, t12 int )"
        )

        tdLog.printNoPrefix("==========step2:insert data with new syntax")
        tdSql.execute(
            "insert into t1 using stb1(t11, t12) tags(11, 12) (ts, c11, c12) values (now, 10, 20)"
        )

        # case for tag-value
        tdSql.execute(
            "insert into t2 using stb1(t11) tags(21) (ts, c11, c12) values (now-1m, 11, 21)"
        )
        tdSql.execute(
            "insert into t3 using stb1 tags(31, 32) (ts, c11, c12) values (now-2m, 12, 22)"
        )
        tdSql.error(
            "insert into t4 using stb1(t11, t12) (ts, c11, c12) values (now-3m, 13, 23)"
        )
        tdSql.error(
            "insert into t5 using stb1(t11, t12) tags() (ts, c11, c12) values (now-4m, 14, 24)"
        )
        tdSql.error(
            "insert into t6 using stb1(t11, t12) tags(41) (ts, c11, c12) values (now-5m, 15, 25)"
        )
        tdSql.error(
            "insert into t7 using stb1(t12) tags(51, 52) (ts, c11, c12) values (now-6m, 16, 26)"
        )
        tdSql.execute(
            "insert into t8 using stb1(t11, t12) tags('61', 62) (ts, c11, c12) values (now-7m, 17, 27)"
        )

        # case for col-value
        tdSql.execute(
            "insert into t9 using stb1(t11, t12) tags(71, 72) values (now-8m, 18, 28)"
        )
        tdSql.error(
            "insert into t10 using stb1(t11, t12) tags(81, 82) (ts, c11, c12) values ()"
        )
        tdSql.error(
            "insert into t11 using stb1(t11, t12) tags(91, 92) (ts, c11, c12) "
        )
        tdSql.error(
            "insert into t12 using stb1(t11, t12) tags(101, 102) values (now-9m, 19)"
        )
        tdSql.error(
            "insert into t13 using stb1(t11, t12) tags(111, 112) (ts, c11) values (now-10m, 110, 210)"
        )
        tdSql.error(
            "insert into t14 using stb1(t11, t12) tags(121, 122) (ts, c11, c12) values (now-11m, 111)"
        )
        tdSql.execute(
            "insert into t15 using stb1(t11, t12) tags(131, 132) (ts, c11, c12) values (now-12m, NULL , 212)"
        )
        tdSql.execute(
            "insert into t16 using stb1(t11, t12) tags(141, 142) (ts, c11, c12) values (now-13m, 'NULL', 213)"
        )
        tdSql.error(
            "insert into t17 using stb1(t11, t12) tags(151, 152) (ts, c11, c12) values (now-14m, Nan, 214)"
        )
        tdSql.error(
            "insert into t18 using stb1(t11, t12) tags(161, 162) (ts, c11, c12) values (now-15m, 'NaN', 215)"
        )
        tdSql.execute(
            "insert into t19 using stb1(t11, t12) tags(171, 172) (ts, c11) values (now-16m, 216)"
        )
        tdSql.error(
            "insert into t20 using stb1(t11, t12) tags(181, 182) (c11, c12) values (117, 217)"
        )

        # multi-col_value
        tdSql.execute(
            "insert into t21 using stb1(t11, t12) tags(191, 192) (ts, c11, c12) values (now-17m, 118, 218)(now-18m, 119, 219)"
        )
        tdSql.execute(
            "insert into t22 using stb1(t11, t12) tags(201, 202) values (now-19m, 120, 220)(now-19m, 121, 221)"
        )
        tdSql.error(
            "insert into t23 using stb1(t11, t12) tags(211, 212) values (now-20m, 122, 222) (ts, c11, c12) values (now-21m, 123, 223)"
        )
        tdSql.error(
            "insert into t24 using stb1(t11, t12) tags(221, 222) (ts, c11, c12) values (now-22m, 124, 224) (ts, c11, c12) values (now-23m, 125, 225)"
        )
        tdSql.execute(
            "insert into t25 (ts, c11, c12) using stb1(t11, t12) tags(231, 232) values (now-24m, 126, 226)(now-25m, 127, 227)"
        )
        tdSql.error(
            "insert into t26 (ts, c11, c12) values (now-24m, 128, 228)(now-25m, 129, 229) using stb1(t11, t12) tags(241, 242) "
        )
        tdSql.error(
            "insert into t27 (ts, c11, c12) values (now-24m, 130, 230) using stb1(t11, t12) tags(251, 252) "
        )

        tdSql.query("show tables")
        tdSql.checkRows(21)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
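The file above exercises the insert syntax in which a super table's tag columns and data columns are both bound by name before the values list. As a rough illustration only, the same kind of statement could be issued through the TDengine Python connector installed in the CI steps; the connection parameters and database name below are assumptions for the sketch, not values taken from this diff.

import taos

# Minimal sketch: issue one of the insert statements covered by the test above.
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata")  # assumed local defaults
cursor = conn.cursor()
cursor.execute("create database if not exists demo")
cursor.execute("use demo")
cursor.execute("create table if not exists stb1 (ts timestamp, c11 int, c12 float) tags(t11 int, t12 int)")
# Tag columns and data columns are both listed explicitly before the values.
cursor.execute("insert into t1 using stb1(t11, t12) tags(11, 12) (ts, c11, c12) values (now, 10, 20)")
cursor.close()
conn.close()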

View File

@ -0,0 +1,53 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()
        tdSql.execute("drop database if exists db")
        tdSql.execute("create database if not exists db keep 3650")
        tdSql.execute("use db")

        tdLog.printNoPrefix("==========step1:create table and insert data")
        tdSql.execute(
            "create table stb1 (ts timestamp, c1 timestamp , c2 int) TAGS(t1 int )"
        )

        tdLog.printNoPrefix("==========step2:query data where timestamp data is null")
        tdSql.execute(
            "insert into t1 using stb1(t1) tags(1) (ts, c1, c2) values (now-1m, null, 1)"
        )
        tdSql.execute(
            "insert into t1 using stb1(t1) tags(1) (ts, c2) values (now-2m, 2)"
        )
        tdSql.query("select * from t1 where c1 is NULL")
        tdSql.checkRows(2)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -56,6 +56,34 @@ class TDTestCase:
        tdSql.query(sql)
        tdSql.checkRows(6)

        tdSql.execute("create table stb(ts timestamp, options binary(7), city binary(10)) tags(type int)")
        tdSql.execute("insert into tb1 using stb tags(1) values(%d, 'option1', 'beijing')" % self.ts)
        tdSql.execute("insert into tb2 using stb tags(2) values(%d, 'option2', 'shanghai')" % self.ts)

        tdSql.query("select options from stb where type = 1 limit 1 union all select options from stb where type = 2 limit 1")
        tdSql.checkData(0, 0, "option1")
        tdSql.checkData(1, 0, "option2")

        tdSql.query("select 'dc' as options from stb where type = 1 limit 1 union all select 'ad' as options from stb where type = 2 limit 1")
        tdSql.checkData(0, 0, "dc")
        tdSql.checkData(1, 0, "ad")

        tdSql.query("select 'dc' as options from stb where type = 1 limit 1 union all select 'adc' as options from stb where type = 2 limit 1")
        tdSql.checkData(0, 0, "dc")
        tdSql.checkData(1, 0, "adc")

        tdSql.error("select 'dc' as options from stb where type = 1 limit 1 union all select 'ad' as city from stb where type = 2 limit 1")

        # for defect https://jira.taosdata.com:18080/browse/TD-4017
        tdSql.execute("alter table stb add column col int")
        tdSql.execute("insert into tb1 values(%d, 'option1', 'beijing', 10)" % (self.ts + 1000))
        tdSql.query("select 'dc' as options from stb where col > 10 limit 1")
        tdSql.checkRows(0)
        tdSql.query("select 'dcs' as options from stb where col > 200 limit 1 union all select 'aaa' as options from stb limit 10")
        tdSql.checkData(0, 0, 'aaa')

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
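The assertions added above cover union all queries that project constant string columns with limit, plus the TD-4017 defect case. A hedged sketch of running one such query and reading the result set through the Python connector; the connection details and database name are assumptions, not values taken from this diff.

import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", database="db")  # assumed
cursor = conn.cursor()
cursor.execute(
    "select 'dc' as options from stb where type = 1 limit 1 "
    "union all "
    "select 'ad' as options from stb where type = 2 limit 1"
)
# Expected to yield 'dc' and then 'ad', matching the checkData calls above.
for row in cursor.fetchall():
    print(row[0])
cursor.close()
conn.close()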

View File

@ -2,36 +2,36 @@
ulimit -c unlimited
# insert
python3 ./test.py $1 -f insert/basic.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/bigint.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/nchar.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/multi.py
python3 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f insert/basic.py
python3.8 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f insert/bigint.py
python3.8 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f insert/nchar.py
python3.8 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f insert/multi.py
python3.8 ./test.py $1 -s && sleep 1
# table
python3 ./test.py $1 -f table/column_name.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f table/column_num.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f table/db_table.py
python3 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f table/column_name.py
python3.8 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f table/column_num.py
python3.8 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f table/db_table.py
python3.8 ./test.py $1 -s && sleep 1
# import
python3 ./test.py $1 -f import_merge/importDataLastSub.py
python3 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f import_merge/importDataLastSub.py
python3.8 ./test.py $1 -s && sleep 1
#tag
python3 ./test.py $1 -f tag_lite/filter.py
python3 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f tag_lite/filter.py
python3.8 ./test.py $1 -s && sleep 1
#query
python3 ./test.py $1 -f query/filter.py
python3 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f query/filter.py
python3.8 ./test.py $1 -s && sleep 1
# client
python3 ./test.py $1 -f client/client.py
python3 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f client/client.py
python3.8 ./test.py $1 -s && sleep 1

View File

@ -67,14 +67,15 @@ class TDTestCase:
self.queryRows = len(self.queryResult)
self.queryCols = len(tdSql.cursor.description)
# tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
if self.queryRows >= timeout:
if self.queryRows >= 1:
tdSql.query(sql)
tdSql.checkData(0, 5, None)
return (self.queryRows, i)
time.sleep(1)
except Exception as e:
tdLog.info(f"sql: {sql} except raise {exception}, actually raise {repr(e)} ")
else:
tdLog.exit(f"sql: {sql} except raise {exception}, actually not")
tdLog.exit(f"sql: {sql} except raise {exception}, actually raise {repr(e)} ")
# else:
# tdLog.exit(f"sql: {sql} except raise {exception}, actually not")
def run(self):
tdSql.execute("drop database if exists dbcq")

View File

@ -55,8 +55,8 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
},
{
"name": "stb1",
@ -81,8 +81,8 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":4}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
}]
}]
}

View File

@ -55,8 +55,8 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
},
{
"name": "stb1",
@ -81,8 +81,8 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
}]
}]
}

View File

@ -6,7 +6,7 @@
"user": "root",
"password": "taosdata",
"thread_count": 4,
"": 4,
"thread_count_create_tbl": 4,
"result_file":"./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,

View File

@ -11,8 +11,8 @@
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 100,
"max_sql_len": 10240000000,
"num_of_records_per_req": "-asdf",
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
@ -41,12 +41,12 @@
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"insert_rows": 10000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"insert_interval":-4,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
@ -55,8 +55,8 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":0}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
@ -64,10 +64,10 @@
"childtable_count": 20,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"batch_create_tbl_num": "asdf",
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 2000,
"insert_rows": 20000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",

View File

@ -55,7 +55,7 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
@ -81,8 +81,8 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":9}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
}]
}]
}

View File

@ -55,7 +55,7 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{

View File

@ -0,0 +1,140 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 10240000000,
"max_sql_len": 10240000000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "BINARY", "len": 16374, "count":1}],
"tags": [{"type": "TINYINT", "count":12}, {"type": "BINARY", "len": 16, "count":2}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 1000000,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "BINARY", "len": 16370, "count":1},{"type": "INT"}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb2",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 1000000,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "BINARY", "len": 16375, "count":1},{"type": "INT"}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb3",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 1000000,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "BINARY", "len": 16371, "count":1},{"type": "INT"}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}
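The JSON above is a taosdemo metadata file that stresses BINARY column lengths near the maximum row size. In the surrounding test suite such files are not executed directly; they are passed to taosdemo from a small Python wrapper. A minimal sketch of that pattern follows, with a hypothetical file name, and assuming taosdemo is on PATH and accepts -f for a JSON config and -y to skip the confirmation prompt.

import subprocess

# Hypothetical path: substitute the actual JSON file name under tests/pytest/tools/taosdemoAllTest/.
config = "tools/taosdemoAllTest/insert-binary-length-limit.json"
subprocess.run(["taosdemo", "-f", config, "-y"], check=True)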

View File

@ -11,7 +11,7 @@
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 100,
"num_of_records_per_req": 10,
"max_sql_len": 10240000000,
"databases": [{
"dbinfo": {
@ -35,13 +35,13 @@
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_count": 0,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"insert_rows": 1,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
@ -55,19 +55,19 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
"columns": [{"type": "BINARY", "len": 1, "count":1}],
"tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 20,
"childtable_count": 10,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 2000,
"insert_rows": 2,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
@ -81,7 +81,7 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"columns": [{"type": "BINARY", "len": 1, "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]

View File

@ -11,7 +11,7 @@
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 100,
"num_of_records_per_req": 10,
"max_sql_len": 10240000000,
"databases": [{
"dbinfo": {
@ -35,13 +35,13 @@
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_count": -1,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"insert_rows": 1,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
@ -55,19 +55,19 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1024}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
"columns": [{"type": "BINARY", "len": 1, "count":1}],
"tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 20,
"childtable_count": 10,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 2000,
"insert_rows": 2,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
@ -81,8 +81,8 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
"columns": [{"type": "BINARY", "len": 1, "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 0,
"num_of_records_per_req": 1000,
"max_sql_len": 10240000000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
}]
}]
}

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10000,
"num_of_records_per_req": 10000,
"max_sql_len": 10240000000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
}]
}]
}

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 100,
"max_sql_len": 10240000000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":0}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
}]
}]
}

View File

@ -11,11 +11,11 @@
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 100,
"num_of_records_per_req": 0,
"max_sql_len": 10240000000,
"databases": [{
"dbinfo": {
"name": "db1",
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
@ -35,13 +35,13 @@
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_count": 1,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"insert_rows": 1,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
@ -55,19 +55,19 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BIGINT", "count":1}, {"type": "float", "count":1}, {"type": "double", "count":1}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":127}, {"type": "BINARY", "len": 16, "count":2}]
"columns": [{"type": "BINARY", "len": 1, "count":1}],
"tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 20,
"childtable_count": 2,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 2000,
"insert_rows": 2,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
@ -81,7 +81,7 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"columns": [{"type": "BINARY", "len": 1, "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": -1,
"max_sql_len": 10240000000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "BINARY", "len": 1, "count":1}],
"tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 2,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 2,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "BINARY", "len": 1, "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

Some files were not shown because too many files have changed in this diff.