diff --git a/.drone.yml b/.drone.yml
index 3f6c622598..085a07acf9 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -15,7 +15,7 @@ steps:
   - mkdir debug
   - cd debug
   - cmake ..
-  - make
+  - make -j4
   trigger:
     event:
     - pull_request
@@ -26,7 +26,32 @@ steps:
       - 2.0
 ---
 kind: pipeline
-name: test_arm64
+name: test_arm64_bionic
+
+platform:
+  os: linux
+  arch: arm64
+steps:
+- name: build
+  image: arm64v8/ubuntu:bionic
+  commands:
+  - apt-get update
+  - apt-get install -y cmake build-essential
+  - mkdir debug
+  - cd debug
+  - cmake .. -DCPUTYPE=aarch64 > /dev/null
+  - make -j4
+  trigger:
+    event:
+    - pull_request
+    when:
+      branch:
+      - develop
+      - master
+      - 2.0
+---
+kind: pipeline
+name: test_arm64_focal
 
 platform:
   os: linux
@@ -34,14 +59,15 @@ platform:
 
 steps:
 - name: build
-  image: gcc
+  image: arm64v8/ubuntu:focal
   commands:
-  - apt-get update
-  - apt-get install -y cmake build-essential
+  - echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
+  - apt-get update
+  - apt-get install -y -qq cmake build-essential
   - mkdir debug
   - cd debug
   - cmake .. -DCPUTYPE=aarch64 > /dev/null
-  - make
+  - make -j4
   trigger:
     event:
     - pull_request
@@ -52,7 +78,57 @@ steps:
       - 2.0
 ---
 kind: pipeline
-name: test_arm
+name: test_arm64_centos7
+
+platform:
+  os: linux
+  arch: arm64
+
+steps:
+- name: build
+  image: arm64v8/centos:7
+  commands:
+  - yum install -y gcc gcc-c++ make cmake git
+  - mkdir debug
+  - cd debug
+  - cmake .. -DCPUTYPE=aarch64 > /dev/null
+  - make -j4
+  trigger:
+    event:
+    - pull_request
+    when:
+      branch:
+      - develop
+      - master
+      - 2.0
+---
+kind: pipeline
+name: test_arm64_centos8
+
+platform:
+  os: linux
+  arch: arm64
+
+steps:
+- name: build
+  image: arm64v8/centos:8
+  commands:
+  - dnf install -y gcc gcc-c++ make cmake epel-release git libarchive
+  - mkdir debug
+  - cd debug
+  - cmake .. -DCPUTYPE=aarch64 > /dev/null
+  - make -j4
+  trigger:
+    event:
+    - pull_request
+    when:
+      branch:
+      - develop
+      - master
+      - 2.0
+---
+kind: pipeline
+name: test_arm_bionic
 
 platform:
   os: linux
@@ -67,7 +143,7 @@ steps:
   - mkdir debug
   - cd debug
   - cmake .. -DCPUTYPE=aarch32 > /dev/null
-  - make
+  - make -j4
   trigger:
     event:
     - pull_request
@@ -76,7 +152,6 @@ steps:
       - develop
       - master
       - 2.0
-
 ---
 kind: pipeline
 name: build_trusty
@@ -95,7 +170,7 @@ steps:
   - mkdir debug
   - cd debug
   - cmake ..
-  - make
+  - make -j4
   trigger:
     event:
     - pull_request
@@ -121,7 +196,7 @@ steps:
   - mkdir debug
   - cd debug
   - cmake ..
-  - make
+  - make -j4
   trigger:
     event:
     - pull_request
@@ -130,7 +205,6 @@ steps:
       - develop
       - master
       - 2.0
-
 ---
 kind: pipeline
 name: build_bionic
@@ -147,7 +221,7 @@ steps:
   - mkdir debug
   - cd debug
   - cmake ..
-  - make
+  - make -j4
   trigger:
     event:
     - pull_request
@@ -171,7 +245,7 @@ steps:
   - mkdir debug
   - cd debug
   - cmake ..
-  - make
+  - make -j4
   trigger:
     event:
     - pull_request
@@ -179,28 +253,4 @@ steps:
       branch:
       - develop
       - master
-      - 2.0
-
----
-kind: pipeline
-name: goodbye
-
-platform:
-  os: linux
-  arch: amd64
-
-steps:
-- name: 64-bit
-  image: alpine
-  commands:
-  - echo 64-bit is good.
- when: - branch: - - develop - - master - - 2.0 - - -depends_on: -- test_arm64 -- test_amd64 \ No newline at end of file + - 2.0 \ No newline at end of file diff --git a/Jenkinsfile b/Jenkinsfile index c1d44c1f54..882d224407 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -5,7 +5,7 @@ node { git url: 'https://github.com/taosdata/TDengine.git' } -def skipstage=0 +def skipbuild=0 def abortPreviousBuilds() { def currentJobName = env.JOB_NAME @@ -33,8 +33,7 @@ def abort_previous(){ milestone(buildNumber) } def pre_test(){ - - + sh'hostname' sh ''' sudo rmtaos || echo "taosd has not installed" ''' @@ -107,19 +106,17 @@ def pre_test(){ make > /dev/null make install > /dev/null cd ${WKC}/tests - pip3 install ${WKC}/src/connector/python + pip3 install ${WKC}/src/connector/python/ ''' return 1 } pipeline { agent none - environment{ WK = '/var/lib/jenkins/workspace/TDinternal' WKC= '/var/lib/jenkins/workspace/TDinternal/community' } - stages { stage('pre_build'){ agent{label 'master'} @@ -135,19 +132,22 @@ pipeline { rm -rf ${WORKSPACE}.tes cp -r ${WORKSPACE} ${WORKSPACE}.tes cd ${WORKSPACE}.tes - + git fetch ''' script { if (env.CHANGE_TARGET == 'master') { sh ''' git checkout master - git pull origin master ''' } - else { + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + git checkout 2.0 + ''' + } + else{ sh ''' git checkout develop - git pull origin develop ''' } } @@ -155,32 +155,34 @@ pipeline { git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD ''' - - script{ - env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) + + script{ + skipbuild='2' + skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true) + println skipbuild } - println env.skipstage sh''' rm -rf ${WORKSPACE}.tes ''' } } - stage('Parallel test stage') { //only build pr when { + allOf{ changeRequest() - expression { - env.skipstage != 0 + expression{ + return skipbuild.trim() == '2' } + } } parallel { stage('python_1_s1') { - agent{label 'p1'} + agent{label " slave1 || slave11 "} steps { pre_test() - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -191,11 +193,11 @@ pipeline { } } stage('python_2_s5') { - agent{label 'p2'} + agent{label " slave5 || slave15 "} steps { pre_test() - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -205,9 +207,9 @@ pipeline { } } stage('python_3_s6') { - agent{label 'p3'} + agent{label " slave6 || slave16 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -218,10 +220,30 @@ pipeline { } } stage('test_b1_s2') { - agent{label 'b1'} + agent{label " slave2 || slave12 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() + sh ''' + rm -rf /var/lib/taos/* + rm -rf /var/log/taos/* + nohup taosd >/dev/null & + sleep 10 + ''' + sh ''' + cd ${WKC}/tests/examples/nodejs + npm install td2.0-connector > /dev/null 2>&1 + node nodejsChecker.js host=localhost + ''' + sh ''' + cd ${WKC}/tests/examples/C#/taosdemo + mcs -out:taosdemo *.cs > /dev/null 2>&1 + echo '' |./taosdemo + ''' + sh ''' + cd ${WKC}/tests/gotest + bash batchtest.sh + ''' sh ''' cd ${WKC}/tests ./test-all.sh b1fq @@ -229,9 +251,8 @@ pipeline { } } } - stage('test_crash_gen_s3') { - agent{label "b2"} + 
agent{label " slave3 || slave13 "} steps { pre_test() @@ -257,20 +278,18 @@ pipeline { ./handle_taosd_val_log.sh ''' } - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests ./test-all.sh b2fq date ''' - } - + } } } - stage('test_valgrind_s4') { - agent{label "b3"} + agent{label " slave4 || slave14 "} steps { pre_test() @@ -281,7 +300,7 @@ pipeline { ./handle_val_log.sh ''' } - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -296,9 +315,9 @@ pipeline { } } stage('test_b4_s7') { - agent{label 'b4'} + agent{label " slave7 || slave17 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -315,9 +334,9 @@ pipeline { } } stage('test_b5_s8') { - agent{label 'b5'} + agent{label " slave8 || slave18 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -328,9 +347,9 @@ pipeline { } } stage('test_b6_s9') { - agent{label 'b6'} + agent{label " slave9 || slave19 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -341,9 +360,9 @@ pipeline { } } stage('test_b7_s10') { - agent{label 'b7'} + agent{label " slave10 || slave20 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -433,6 +452,5 @@ pipeline { from: "support@taosdata.com" ) } - } - -} + } +} \ No newline at end of file diff --git a/cmake/version.inc b/cmake/version.inc index ffceecf492..148c33106a 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.1.6.0") + SET(TD_VER_NUMBER "2.1.7.1") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/deps/MsvcLibX/src/iconv.c b/deps/MsvcLibX/src/iconv.c index 40b6e6462d..1ec0dc7354 100644 --- a/deps/MsvcLibX/src/iconv.c +++ b/deps/MsvcLibX/src/iconv.c @@ -98,6 +98,7 @@ int ConvertString(char *buf, size_t nBytes, UINT cpFrom, UINT cpTo, LPCSTR lpDef char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefaultChar) { int nBytes; char *pBuf; + char *pBuf1; nBytes = 4 * ((int)lstrlen(string) + 1); /* Worst case for the size needed */ pBuf = (char *)malloc(nBytes); if (!pBuf) { @@ -110,8 +111,9 @@ char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefault free(pBuf); return NULL; } - pBuf = realloc(pBuf, nBytes+1); - return pBuf; + pBuf1 = realloc(pBuf, nBytes+1); + if(pBuf1 == NULL && pBuf != NULL) free(pBuf); + return pBuf1; } int CountCharacters(const char *string, UINT cp) { diff --git a/deps/MsvcLibX/src/main.c b/deps/MsvcLibX/src/main.c index f366b081ad..85f4c83f24 100644 --- a/deps/MsvcLibX/src/main.c +++ b/deps/MsvcLibX/src/main.c @@ -68,6 +68,7 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) { int iString = FALSE; /* TRUE = string mode; FALSE = non-string mode */ int nBackslash = 0; char **ppszArg; + char **ppszArg1; int iArg = FALSE; /* TRUE = inside an argument; FALSE = between arguments */ ppszArg = (char **)malloc((argc+1)*sizeof(char *)); @@ -89,7 +90,10 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) { if ((!iArg) && (c != ' ') && (c != '\t')) { /* Beginning of a new argument */ iArg = TRUE; ppszArg[argc++] = pszCopy+j; - ppszArg = (char **)realloc(ppszArg, (argc+1)*sizeof(char *)); + ppszArg1 = (char **)realloc(ppszArg, (argc+1)*sizeof(char *)); + if(ppszArg1 == NULL && ppszArg != 
NULL) + free(ppszArg); + ppszArg = ppszArg1; if (!ppszArg) return -1; pszCopy[j] = c0 = '\0'; } @@ -212,7 +216,7 @@ int _initU(void) { fprintf(stderr, "Warning: Can't convert the argument line to UTF-8\n"); _acmdln[0] = '\0'; } - realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */ + //realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */ /* Should not fail since we make it smaller */ /* Record the console code page, to allow converting the output accordingly */ diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index 5fbcf773a2..e2ba755f2d 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -196,6 +196,7 @@ not_compact_enough: /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpath(const char *path, char *outbuf) { char *pOutbuf = outbuf; + char *pOutbuf1 = NULL; int iErr; const char *pc; @@ -242,8 +243,11 @@ realpath_failed: return NULL; } - if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1); - return pOutbuf; + if (!outbuf) { + pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1); + if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf); + } + return pOutbuf1; } #endif @@ -517,6 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) { /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpathU(const char *path, char *outbuf) { char *pOutbuf = outbuf; + char *pOutbuf1 = NULL; char *pPath1 = NULL; char *pPath2 = NULL; int iErr; @@ -590,10 +595,13 @@ realpathU_failed: } DEBUG_LEAVE(("return 0x%p; // \"%s\"\n", pOutbuf, pOutbuf)); - if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1); + if (!outbuf) { + pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1); + if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf); + } free(pPath1); free(pPath2); - return pOutbuf; + return pOutbuf1; } #endif /* defined(_WIN32) */ diff --git a/deps/TSZ b/deps/TSZ index 0ca5b15a8e..ceda5bf9fc 160000 --- a/deps/TSZ +++ b/deps/TSZ @@ -1 +1 @@ -Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c +Subproject commit ceda5bf9fcd7836509ac97dcc0056b3f1dd48cc5 diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index bbe6eae419..3ae4e9941e 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -144,6 +144,9 @@ keepColumnName 1 # max length of an SQL # maxSQLLength 65480 +# max length of WildCards +# maxWildCardsLength 100 + # the maximum number of records allowed for super table time sorting # maxNumOfOrderedRes 100000 diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 1064f0b0e5..e9266ec80d 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -35,7 +35,7 @@ fi if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taosd strip ${build_dir}/bin/taos - bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" + bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh" else bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\ ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb" diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index c04fa3298b..859d40cf69 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.1.6.0' +version: '2.1.7.1' icon: snap/gui/t-dengine.svg summary: an open-source big data platform 
designed and optimized for IoT. description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.1.6.0 + - usr/lib/libtaos.so.2.1.7.1 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so diff --git a/src/balance/src/bnScore.c b/src/balance/src/bnScore.c index 7d94df1c23..04a14357c9 100644 --- a/src/balance/src/bnScore.c +++ b/src/balance/src/bnScore.c @@ -116,8 +116,17 @@ void bnCleanupDnodes() { static void bnCheckDnodesSize(int32_t dnodesNum) { if (tsBnDnodes.maxSize <= dnodesNum) { - tsBnDnodes.maxSize = dnodesNum * 2; - tsBnDnodes.list = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *)); + int32_t maxSize = dnodesNum * 2; + SDnodeObj** list1 = NULL; + int32_t retry = 0; + + while(list1 == NULL && retry++ < 3) { + list1 = realloc(tsBnDnodes.list, maxSize * sizeof(SDnodeObj *)); + } + if(list1) { + tsBnDnodes.list = list1; + tsBnDnodes.maxSize = maxSize; + } } } diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index f0349c2b3d..a012ca5a7f 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -50,6 +50,12 @@ void tscUnlockByThread(int64_t *lockedBy); int tsInsertInitialCheck(SSqlObj *pSql); +void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); + +void tscFreeRetrieveSup(SSqlObj *pSql); + + + #ifdef __cplusplus } #endif diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index b3674a7bf5..e11748efbe 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -29,15 +29,16 @@ extern "C" { #include "tsched.h" #include "tsclient.h" -#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \ +#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_SUPER_TABLE)) + #define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE)) - -#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\ - (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo))) -#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \ +#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \ + (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo) || UTIL_TABLE_IS_TMP_TABLE(metaInfo))) + +#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE)) #pragma pack(push,1) @@ -142,6 +143,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo); bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo); bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo); bool tscGroupbyColumn(SQueryInfo* pQueryInfo); +int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo); bool tscIsTopBotQuery(SQueryInfo* pQueryInfo); bool hasTagValOutput(SQueryInfo* pQueryInfo); bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 904f5d4503..4ead7d4180 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -84,9 +84,14 @@ typedef struct SParamInfo { } SParamInfo; typedef struct SBoundColumn { - bool hasVal; // denote if current column has bound or not - int32_t offset; // all column offset value + int32_t offset; // all column offset value + int32_t toffset; // first part offset for SDataRow TODO: get offset from STSchema on future + uint8_t valStat; // denote if current column bound or not(0 means has val, 1 means no val) } SBoundColumn; +typedef enum { + VAL_STAT_HAS = 0x0, // 0 means has val + VAL_STAT_NONE 
= 0x01, // 1 means no val +} EValStat; typedef struct { uint16_t schemaColIdx; @@ -99,32 +104,106 @@ typedef enum _COL_ORDER_STATUS { ORDER_STATUS_ORDERED = 1, ORDER_STATUS_DISORDERED = 2, } EOrderStatus; - typedef struct SParsedDataColInfo { int16_t numOfCols; int16_t numOfBound; - int32_t * boundedColumns; // bounded column idx according to schema + uint16_t flen; // TODO: get from STSchema + uint16_t allNullLen; // TODO: get from STSchema + uint16_t extendedVarLen; + int32_t * boundedColumns; // bound column idx according to schema SBoundColumn * cols; SBoundIdxInfo *colIdxInfo; - int8_t orderStatus; // bounded columns: + int8_t orderStatus; // bound columns } SParsedDataColInfo; -#define IS_DATA_COL_ORDERED(s) ((s) == (int8_t)ORDER_STATUS_ORDERED) +#define IS_DATA_COL_ORDERED(spd) ((spd->orderStatus) == (int8_t)ORDER_STATUS_ORDERED) typedef struct { - SSchema * pSchema; - int16_t sversion; - int32_t flen; - uint16_t nCols; - void * buf; - void * pDataBlock; - SSubmitBlk *pSubmitBlk; + int32_t dataLen; // len of SDataRow + int32_t kvLen; // len of SKVRow +} SMemRowInfo; +typedef struct { + uint8_t memRowType; + uint8_t compareStat; // 0 unknown, 1 need compare, 2 no need + TDRowTLenT dataRowInitLen; + TDRowTLenT kvRowInitLen; + SMemRowInfo *rowInfo; } SMemRowBuilder; -typedef struct { - TDRowLenT allNullLen; -} SMemRowHelper; +typedef enum { + ROW_COMPARE_UNKNOWN = 0, + ROW_COMPARE_NEED = 1, + ROW_COMPARE_NO_NEED = 2, +} ERowCompareStat; +int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec); + +int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols, + int32_t allNullLen); +void destroyMemRowBuilder(SMemRowBuilder *pBuilder); + +/** + * @brief + * + * @param memRowType + * @param spd + * @param idx the absolute bound index of columns + * @return FORCE_INLINE + */ +static FORCE_INLINE void tscGetMemRowAppendInfo(SSchema *pSchema, uint8_t memRowType, SParsedDataColInfo *spd, + int32_t idx, int32_t *toffset, int16_t *colId) { + int32_t schemaIdx = 0; + if (IS_DATA_COL_ORDERED(spd)) { + schemaIdx = spd->boundedColumns[idx]; + if (isDataRowT(memRowType)) { + *toffset = (spd->cols + schemaIdx)->toffset; // the offset of firstPart + } else { + *toffset = idx * sizeof(SColIdx); // the offset of SColIdx + } + } else { + ASSERT(idx == (spd->colIdxInfo + idx)->boundIdx); + schemaIdx = (spd->colIdxInfo + idx)->schemaColIdx; + if (isDataRowT(memRowType)) { + *toffset = (spd->cols + schemaIdx)->toffset; + } else { + *toffset = ((spd->colIdxInfo + idx)->finalIdx) * sizeof(SColIdx); + } + } + *colId = pSchema[schemaIdx].colId; +} + +/** + * @brief Applicable to consume by multi-columns + * + * @param row + * @param value + * @param isCopyVarData In some scenario, the varVal is copied to row directly before calling tdAppend***ColVal() + * @param colId + * @param colType + * @param idx index in SSchema + * @param pBuilder + * @param spd + * @return FORCE_INLINE + */ +static FORCE_INLINE void tscAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId, + int8_t colType, int32_t toffset, SMemRowBuilder *pBuilder, + int32_t rowNum) { + tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset); + if (pBuilder->compareStat == ROW_COMPARE_NEED) { + SMemRowInfo *pRowInfo = pBuilder->rowInfo + rowNum; + tdGetColAppendDeltaLen(value, colType, &pRowInfo->dataLen, &pRowInfo->kvLen); + } +} + +// Applicable to consume by one row +static FORCE_INLINE void tscAppendMemRowColValEx(SMemRow row, 
const void *value, bool isCopyVarData, int16_t colId, + int8_t colType, int32_t toffset, int32_t *dataLen, int32_t *kvLen, + uint8_t compareStat) { + tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset); + if (compareStat == ROW_COMPARE_NEED) { + tdGetColAppendDeltaLen(value, colType, dataLen, kvLen); + } +} typedef struct STableDataBlocks { SName tableName; int8_t tsSource; // where does the UNIX timestamp come from, server or client @@ -146,7 +225,7 @@ typedef struct STableDataBlocks { uint32_t numOfAllocedParams; uint32_t numOfParams; SParamInfo * params; - SMemRowHelper rowHelper; + SMemRowBuilder rowBuilder; } STableDataBlocks; typedef struct { @@ -435,8 +514,398 @@ int16_t getNewResColId(SSqlCmd* pCmd); int32_t schemaIdxCompar(const void *lhs, const void *rhs); int32_t boundIdxCompar(const void *lhs, const void *rhs); -int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen); -int32_t getExtendedRowSize(STableComInfo *tinfo); +static FORCE_INLINE int32_t getExtendedRowSize(STableDataBlocks *pBlock) { + ASSERT(pBlock->rowSize == pBlock->pTableMeta->tableInfo.rowSize); + return pBlock->rowSize + TD_MEM_ROW_DATA_HEAD_SIZE + pBlock->boundColumnInfo.extendedVarLen; +} + +static FORCE_INLINE void checkAndConvertMemRow(SMemRow row, int32_t dataLen, int32_t kvLen) { + if (isDataRow(row)) { + if (kvLen < (dataLen * KVRatioConvert)) { + memRowSetConvert(row); + } + } else if (kvLen > dataLen) { + memRowSetConvert(row); + } +} + +static FORCE_INLINE void initSMemRow(SMemRow row, uint8_t memRowType, STableDataBlocks *pBlock, int16_t nBoundCols) { + memRowSetType(row, memRowType); + if (isDataRowT(memRowType)) { + dataRowSetVersion(memRowDataBody(row), pBlock->pTableMeta->sversion); + dataRowSetLen(memRowDataBody(row), (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBlock->boundColumnInfo.flen)); + } else { + ASSERT(nBoundCols > 0); + memRowSetKvVersion(row, pBlock->pTableMeta->sversion); + kvRowSetNCols(memRowKvBody(row), nBoundCols); + kvRowSetLen(memRowKvBody(row), (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols)); + } +} +/** + * TODO: Move to tdataformat.h and refactor when STSchema available. + * - fetch flen and toffset from STSChema and remove param spd + */ +static FORCE_INLINE void convertToSDataRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols, + SParsedDataColInfo *spd) { + ASSERT(isKvRow(src)); + SKVRow kvRow = memRowKvBody(src); + SDataRow dataRow = memRowDataBody(dest); + + memRowSetType(dest, SMEM_ROW_DATA); + dataRowSetVersion(dataRow, memRowKvVersion(src)); + dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + spd->flen)); + + int32_t kvIdx = 0; + for (int i = 0; i < nCols; ++i) { + SSchema *schema = pSchema + i; + void * val = tdGetKVRowValOfColEx(kvRow, schema->colId, &kvIdx); + tdAppendDataColVal(dataRow, val != NULL ? val : getNullValue(schema->type), true, schema->type, + (spd->cols + i)->toffset); + } +} + +// TODO: Move to tdataformat.h and refactor when STSchema available. 
+static FORCE_INLINE void convertToSKVRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols, int nBoundCols, + SParsedDataColInfo *spd) { + ASSERT(isDataRow(src)); + + SDataRow dataRow = memRowDataBody(src); + SKVRow kvRow = memRowKvBody(dest); + + memRowSetType(dest, SMEM_ROW_KV); + memRowSetKvVersion(kvRow, dataRowVersion(dataRow)); + kvRowSetNCols(kvRow, nBoundCols); + kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols)); + + int32_t toffset = 0, kvOffset = 0; + for (int i = 0; i < nCols; ++i) { + if ((spd->cols + i)->valStat == VAL_STAT_HAS) { + SSchema *schema = pSchema + i; + toffset = (spd->cols + i)->toffset; + void *val = tdGetRowDataOfCol(dataRow, schema->type, toffset + TD_DATA_ROW_HEAD_SIZE); + tdAppendKvColVal(kvRow, val, true, schema->colId, schema->type, kvOffset); + kvOffset += sizeof(SColIdx); + } + } +} + +// TODO: Move to tdataformat.h and refactor when STSchema available. +static FORCE_INLINE void convertSMemRow(SMemRow dest, SMemRow src, STableDataBlocks *pBlock) { + STableMeta * pTableMeta = pBlock->pTableMeta; + STableComInfo tinfo = tscGetTableInfo(pTableMeta); + SSchema * pSchema = tscGetTableSchema(pTableMeta); + SParsedDataColInfo *spd = &pBlock->boundColumnInfo; + + ASSERT(dest != src); + + if (isDataRow(src)) { + // TODO: Can we use pBlock -> numOfParam directly? + ASSERT(spd->numOfBound > 0); + convertToSKVRow(dest, src, pSchema, tinfo.numOfColumns, spd->numOfBound, spd); + } else { + convertToSDataRow(dest, src, pSchema, tinfo.numOfColumns, spd); + } +} + +static bool isNullStr(SStrToken *pToken) { + return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) && + (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)); +} + +static FORCE_INLINE int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) { + errno = 0; + *value = strtold(pToken->z, endPtr); + + // not a valid integer number, return error + if ((*endPtr - pToken->z) != pToken->n) { + return TK_ILLEGAL; + } + + return pToken->type; +} + +static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE; +static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE; + +static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, SMemRow row, char *msg, char **str, + bool primaryKey, int16_t timePrec, int32_t toffset, int16_t colId, + int32_t *dataLen, int32_t *kvLen, uint8_t compareStat) { + int64_t iv; + int32_t ret; + char * endptr = NULL; + + if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { + return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z); + } + + switch (pSchema->type) { + case TSDB_DATA_TYPE_BOOL: { // bool + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { + if (strncmp(pToken->z, "true", pToken->n) == 0) { + tscAppendMemRowColValEx(row, &TRUE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } else if (strncmp(pToken->z, "false", pToken->n) == 0) { + tscAppendMemRowColValEx(row, &FALSE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); + } + } else if (pToken->type == TK_INTEGER) { + iv = strtoll(pToken->z, NULL, 10); + tscAppendMemRowColValEx(row, ((iv == 0) ? 
&FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset, + dataLen, kvLen, compareStat); + } else if (pToken->type == TK_FLOAT) { + double dv = strtod(pToken->z, NULL); + tscAppendMemRowColValEx(row, ((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset, + dataLen, kvLen, compareStat); + } else { + return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); + } + } + break; + } + + case TSDB_DATA_TYPE_TINYINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z); + } else if (!IS_VALID_TINYINT(iv)) { + return tscInvalidOperationMsg(msg, "data overflow", pToken->z); + } + + uint8_t tmpVal = (uint8_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_UTINYINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z); + } else if (!IS_VALID_UTINYINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z); + } + + uint8_t tmpVal = (uint8_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_SMALLINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z); + } else if (!IS_VALID_SMALLINT(iv)) { + return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z); + } + + int16_t tmpVal = (int16_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_USMALLINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z); + } else if (!IS_VALID_USMALLINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z); + } + + uint16_t tmpVal = (uint16_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_INT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid int data", pToken->z); + } else if (!IS_VALID_INT(iv)) { + return tscInvalidOperationMsg(msg, "int data overflow", pToken->z); + } + + int32_t 
tmpVal = (int32_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_UINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z); + } else if (!IS_VALID_UINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z); + } + + uint32_t tmpVal = (uint32_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_BIGINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z); + } else if (!IS_VALID_BIGINT(iv)) { + return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z); + } + + tscAppendMemRowColValEx(row, &iv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_UBIGINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z); + } else if (!IS_VALID_UBIGINT((uint64_t)iv)) { + return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z); + } + + uint64_t tmpVal = (uint64_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_FLOAT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + double dv; + if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { + return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); + } + + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || + isnan(dv)) { + return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); + } + + float tmpVal = (float)dv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_DOUBLE: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + double dv; + if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { + return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); + } + + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { + return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); + } + + tscAppendMemRowColValEx(row, &dv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_BINARY: + // binary data cannot be null-terminated char string, otherwise the last char of the string is lost + if 
(pToken->type == TK_NULL) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { // too long values will return invalid sql, not be truncated automatically + if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor + return tscInvalidOperationMsg(msg, "string data overflow", pToken->z); + } + // STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); + char *rowEnd = memRowEnd(row); + STR_WITH_SIZE_TO_VARSTR(rowEnd, pToken->z, pToken->n); + tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_NCHAR: + if (pToken->type == TK_NULL) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' + int32_t output = 0; + char * rowEnd = memRowEnd(row); + if (!taosMbsToUcs4(pToken->z, pToken->n, (char *)varDataVal(rowEnd), pSchema->bytes - VARSTR_HEADER_SIZE, + &output)) { + char buf[512] = {0}; + snprintf(buf, tListLen(buf), "%s", strerror(errno)); + return tscInvalidOperationMsg(msg, buf, pToken->z); + } + varDataSetLen(rowEnd, output); + tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_TIMESTAMP: { + if (pToken->type == TK_NULL) { + if (primaryKey) { + // When building SKVRow primaryKey, we should not skip even with NULL value. + int64_t tmpVal = 0; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } else { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } + } else { + int64_t tmpVal; + if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); + } + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + } + } + + return TSDB_CODE_SUCCESS; +} #ifdef __cplusplus } diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index ecc29a8d89..89e3832007 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -38,43 +38,60 @@ enum { TSDB_USE_CLI_TS = 1, }; -static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE; -static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE; - static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows); static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema, char *str, char **end); - -int32_t getExtendedRowSize(STableComInfo *tinfo) { - return tinfo->rowSize + PAYLOAD_HEADER_LEN + PAYLOAD_COL_HEAD_LEN * tinfo->numOfColumns; -} -int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen) { - pHelper->allNullLen = allNullColsLen; // TODO: get allNullColsLen when creating or altering table meta - if (pHelper->allNullLen == 0) { - for (uint16_t i = 0; i < nCols; ++i) { - uint8_t type = pSSchema[i].type; - int32_t typeLen = TYPE_BYTES[type]; - pHelper->allNullLen += typeLen; - if (TSDB_DATA_TYPE_BINARY == type) { - pHelper->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES); - } else if (TSDB_DATA_TYPE_NCHAR == type) { - int len = 
VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE; - pHelper->allNullLen += len; - } +int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols, + int32_t allNullLen) { + ASSERT(nRows >= 0 && nCols > 0 && (nBoundCols <= nCols)); + if (nRows > 0) { + // already init(bind multiple rows by single column) + if (pBuilder->compareStat == ROW_COMPARE_NEED && (pBuilder->rowInfo != NULL)) { + return TSDB_CODE_SUCCESS; } } - return 0; -} -static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) { - errno = 0; - *value = strtold(pToken->z, endPtr); - - // not a valid integer number, return error - if ((*endPtr - pToken->z) != pToken->n) { - return TK_ILLEGAL; + + if (nBoundCols == 0) { // file input + pBuilder->memRowType = SMEM_ROW_DATA; + pBuilder->compareStat = ROW_COMPARE_NO_NEED; + return TSDB_CODE_SUCCESS; + } else { + float boundRatio = ((float)nBoundCols / (float)nCols); + + if (boundRatio < KVRatioKV) { + pBuilder->memRowType = SMEM_ROW_KV; + pBuilder->compareStat = ROW_COMPARE_NO_NEED; + return TSDB_CODE_SUCCESS; + } else if (boundRatio > KVRatioData) { + pBuilder->memRowType = SMEM_ROW_DATA; + pBuilder->compareStat = ROW_COMPARE_NO_NEED; + return TSDB_CODE_SUCCESS; + } + pBuilder->compareStat = ROW_COMPARE_NEED; + + if (boundRatio < KVRatioPredict) { + pBuilder->memRowType = SMEM_ROW_KV; + } else { + pBuilder->memRowType = SMEM_ROW_DATA; + } } - return pToken->type; + pBuilder->dataRowInitLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen; + pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx); + + if (nRows > 0) { + pBuilder->rowInfo = tcalloc(nRows, sizeof(SMemRowInfo)); + if (pBuilder->rowInfo == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + for (int i = 0; i < nRows; ++i) { + (pBuilder->rowInfo + i)->dataLen = pBuilder->dataRowInitLen; + (pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen; + } + } + + return TSDB_CODE_SUCCESS; } int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) { @@ -146,10 +163,6 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1 return TSDB_CODE_SUCCESS; } -static bool isNullStr(SStrToken* pToken) { - return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) && - (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)); -} int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey, int16_t timePrec) { int64_t iv; @@ -400,342 +413,6 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha return TSDB_CODE_SUCCESS; } -static FORCE_INLINE TDRowLenT tsSetPayloadColValue(char *payloadStart, char *payload, int16_t columnId, - uint8_t columnType, const void *value, uint16_t valueLen, TDRowTLenT tOffset) { - payloadColSetId(payload, columnId); - payloadColSetType(payload, columnType); - memcpy(POINTER_SHIFT(payloadStart,tOffset), value, valueLen); - return valueLen; -} - -static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *payloadStart, char *primaryKeyStart, - char *payload, char *msg, char **str, bool primaryKey, int16_t timePrec, - TDRowTLenT tOffset, TDRowLenT *sizeAppend, TDRowLenT *dataRowColDeltaLen, - TDRowLenT *kvRowColLen) { - int64_t iv; - int32_t ret; - char * endptr = NULL; - - if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { - return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z); - } - - switch (pSchema->type) { - case TSDB_DATA_TYPE_BOOL: { 
// bool - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_BOOL), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - } else { - if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { - if (strncmp(pToken->z, "true", pToken->n) == 0) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &TRUE_VALUE, - TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); - } else if (strncmp(pToken->z, "false", pToken->n) == 0) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &FALSE_VALUE, - TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); - } else { - return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); - } - } else if (pToken->type == TK_INTEGER) { - iv = strtoll(pToken->z, NULL, 10); - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); - } else if (pToken->type == TK_FLOAT) { - double dv = strtod(pToken->z, NULL); - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - ((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); - } else { - return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); - } - } - break; - } - - case TSDB_DATA_TYPE_TINYINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_TINYINT), TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z); - } else if (!IS_VALID_TINYINT(iv)) { - return tscInvalidOperationMsg(msg, "data overflow", pToken->z); - } - - uint8_t tmpVal = (uint8_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TINYINT]); - } - - break; - - case TSDB_DATA_TYPE_UTINYINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_UTINYINT), TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z); - } else if (!IS_VALID_UTINYINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z); - } - - uint8_t tmpVal = (uint8_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT]); - } - - break; - - case TSDB_DATA_TYPE_SMALLINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, 
- getNullValue(TSDB_DATA_TYPE_SMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z); - } else if (!IS_VALID_SMALLINT(iv)) { - return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z); - } - - int16_t tmpVal = (int16_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT]); - } - - break; - - case TSDB_DATA_TYPE_USMALLINT: - if (isNullStr(pToken)) { - *sizeAppend = - tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_USMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z); - } else if (!IS_VALID_USMALLINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z); - } - - uint16_t tmpVal = (uint16_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT]); - } - - break; - - case TSDB_DATA_TYPE_INT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_INT), TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid int data", pToken->z); - } else if (!IS_VALID_INT(iv)) { - return tscInvalidOperationMsg(msg, "int data overflow", pToken->z); - } - - int32_t tmpVal = (int32_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_INT]); - } - - break; - - case TSDB_DATA_TYPE_UINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_UINT), TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z); - } else if (!IS_VALID_UINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z); - } - - uint32_t tmpVal = (uint32_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UINT]); - } - - break; - - case TSDB_DATA_TYPE_BIGINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_BIGINT), TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid bigint 
data", pToken->z); - } else if (!IS_VALID_BIGINT(iv)) { - return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z); - } - - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &iv, - TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BIGINT]); - } - break; - - case TSDB_DATA_TYPE_UBIGINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_UBIGINT), TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z); - } else if (!IS_VALID_UBIGINT((uint64_t)iv)) { - return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z); - } - - uint64_t tmpVal = (uint64_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT]); - } - break; - - case TSDB_DATA_TYPE_FLOAT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_FLOAT), TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset); - } else { - double dv; - if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { - return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); - } - - if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || - isnan(dv)) { - return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); - } - - float tmpVal = (float)dv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_FLOAT]); - } - break; - - case TSDB_DATA_TYPE_DOUBLE: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_DOUBLE), TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset); - } else { - double dv; - if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { - return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); - } - - if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { - return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); - } - - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &dv, - TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE]); - } - break; - - case TSDB_DATA_TYPE_BINARY: - // binary data cannot be null-terminated char string, otherwise the last char of the string is lost - if (pToken->type == TK_NULL) { - payloadColSetId(payload, pSchema->colId); - payloadColSetType(payload, pSchema->type); - memcpy(POINTER_SHIFT(payloadStart, tOffset), getNullValue(TSDB_DATA_TYPE_BINARY), VARSTR_HEADER_SIZE + CHAR_BYTES); - *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + CHAR_BYTES); - } else { // too long values will return invalid sql, not be truncated automatically - if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor - return tscInvalidOperationMsg(msg, "string data overflow", 
pToken->z); - } - // STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); - - payloadColSetId(payload, pSchema->colId); - payloadColSetType(payload, pSchema->type); - varDataSetLen(POINTER_SHIFT(payloadStart,tOffset), pToken->n); - memcpy(varDataVal(POINTER_SHIFT(payloadStart,tOffset)), pToken->z, pToken->n); - *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + pToken->n); - *dataRowColDeltaLen += (TDRowLenT)(pToken->n - CHAR_BYTES); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + pToken->n); - } - - break; - - case TSDB_DATA_TYPE_NCHAR: - if (pToken->type == TK_NULL) { - payloadColSetId(payload, pSchema->colId); - payloadColSetType(payload, pSchema->type); - memcpy(POINTER_SHIFT(payloadStart,tOffset), getNullValue(TSDB_DATA_TYPE_NCHAR), VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); - *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); - } else { - // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' - int32_t output = 0; - payloadColSetId(payload, pSchema->colId); - payloadColSetType(payload, pSchema->type); - if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(POINTER_SHIFT(payloadStart,tOffset)), - pSchema->bytes - VARSTR_HEADER_SIZE, &output)) { - char buf[512] = {0}; - snprintf(buf, tListLen(buf), "%s", strerror(errno)); - return tscInvalidOperationMsg(msg, buf, pToken->z); - } - - varDataSetLen(POINTER_SHIFT(payloadStart,tOffset), output); - - *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + output); - *dataRowColDeltaLen += (TDRowLenT)(output - sizeof(uint32_t)); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + output); - } - break; - - case TSDB_DATA_TYPE_TIMESTAMP: { - if (pToken->type == TK_NULL) { - if (primaryKey) { - // When building SKVRow primaryKey, we should not skip even with NULL value. - int64_t tmpVal = 0; - *sizeAppend = tsSetPayloadColValue(payloadStart, primaryKeyStart, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]); - } else { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_TIMESTAMP), - TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset); - } - } else { - int64_t tmpVal; - if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); - } - - *sizeAppend = tsSetPayloadColValue(payloadStart, primaryKey ? primaryKeyStart : payload, pSchema->colId, - pSchema->type, &tmpVal, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]); - } - - break; - } - } - - return TSDB_CODE_SUCCESS; -} - /* * The server time/client time should not be mixed up in one sql string * Do not employ sort operation is not involved if server time is used. 
@@ -777,31 +454,24 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i int32_t index = 0; SStrToken sToken = {0}; - SMemRowHelper *pHelper = &pDataBlocks->rowHelper; - char * payload = pDataBlocks->pData + pDataBlocks->size; + char *row = pDataBlocks->pData + pDataBlocks->size; // skip the SSubmitBlk header SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo; - SSchema * schema = tscGetTableSchema(pDataBlocks->pTableMeta); + STableMeta * pTableMeta = pDataBlocks->pTableMeta; + SSchema * schema = tscGetTableSchema(pTableMeta); + SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder; + int32_t dataLen = pBuilder->dataRowInitLen; + int32_t kvLen = pBuilder->kvRowInitLen; + bool isParseBindParam = false; - TDRowTLenT dataRowLen = pHelper->allNullLen; - TDRowTLenT kvRowLen = TD_MEM_ROW_KV_VER_SIZE; - TDRowTLenT payloadValOffset = 0; - TDRowLenT colValOffset = 0; - ASSERT(dataRowLen > 0); - - payloadSetNCols(payload, spd->numOfBound); - payloadValOffset = payloadValuesOffset(payload); // rely on payloadNCols - // payloadSetTLen(payload, payloadValOffset); - - char *kvPrimaryKeyStart = payload + PAYLOAD_HEADER_LEN; // primaryKey in 1st column tuple - char *kvStart = kvPrimaryKeyStart + PAYLOAD_COL_HEAD_LEN; // the column tuple behind the primaryKey + initSMemRow(row, pBuilder->memRowType, pDataBlocks, spd->numOfBound); // 1. set the parsed value from sql string for (int i = 0; i < spd->numOfBound; ++i) { // the start position in data block buffer of current value in sql int32_t colIndex = spd->boundedColumns[i]; - char *start = payload + spd->cols[colIndex].offset; + char *start = row + spd->cols[colIndex].offset; SSchema *pSchema = &schema[colIndex]; // get colId here @@ -810,6 +480,9 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i *str += index; if (sToken.type == TK_QUESTION) { + if (!isParseBindParam) { + isParseBindParam = true; + } if (pInsertParam->insertType != TSDB_QUERY_TYPE_STMT_INSERT) { return tscSQLSyntaxErrMsg(pInsertParam->msg, "? only allowed in binding insertion", *str); } @@ -860,54 +533,45 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i sToken.n -= 2 + cnt; } - bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); - TDRowLenT dataRowDeltaColLen = 0; // When combine the data as SDataRow, the delta len between all NULL columns. 
- TDRowLenT kvRowColLen = 0; - TDRowLenT colValAppended = 0; + bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); + int32_t toffset = -1; + int16_t colId = -1; + tscGetMemRowAppendInfo(schema, pBuilder->memRowType, spd, i, &toffset, &colId); - if (!IS_DATA_COL_ORDERED(spd->orderStatus)) { - ASSERT(spd->colIdxInfo != NULL); - if(!isPrimaryKey) { - kvStart = POINTER_SHIFT(kvPrimaryKeyStart, spd->colIdxInfo[i].finalIdx * PAYLOAD_COL_HEAD_LEN); - } else { - ASSERT(spd->colIdxInfo[i].finalIdx == 0); - } - } - // the primary key locates in 1st column - int32_t ret = tsParseOneColumnKV(pSchema, &sToken, payload, kvPrimaryKeyStart, kvStart, pInsertParam->msg, str, - isPrimaryKey, timePrec, payloadValOffset + colValOffset, &colValAppended, - &dataRowDeltaColLen, &kvRowColLen); + int32_t ret = tsParseOneColumnKV(pSchema, &sToken, row, pInsertParam->msg, str, isPrimaryKey, timePrec, toffset, + colId, &dataLen, &kvLen, pBuilder->compareStat); if (ret != TSDB_CODE_SUCCESS) { return ret; } if (isPrimaryKey) { - if (tsCheckTimestamp(pDataBlocks, payloadValues(payload)) != TSDB_CODE_SUCCESS) { + TSKEY tsKey = memRowKey(row); + if (tsCheckTimestamp(pDataBlocks, (const char *)&tsKey) != TSDB_CODE_SUCCESS) { tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z); return TSDB_CODE_TSC_INVALID_TIME_STAMP; } - payloadColSetOffset(kvPrimaryKeyStart, colValOffset); - } else { - payloadColSetOffset(kvStart, colValOffset); - if (IS_DATA_COL_ORDERED(spd->orderStatus)) { - kvStart += PAYLOAD_COL_HEAD_LEN; // move to next column + } + } + + if (!isParseBindParam) { + // 2. check and set convert flag + if (pBuilder->compareStat == ROW_COMPARE_NEED) { + checkAndConvertMemRow(row, dataLen, kvLen); + } + + // 3. set the null value for the columns that do not assign values + if ((spd->numOfBound < spd->numOfCols) && isDataRow(row) && !isNeedConvertRow(row)) { + SDataRow dataRow = memRowDataBody(row); + for (int32_t i = 0; i < spd->numOfCols; ++i) { + if (spd->cols[i].valStat == VAL_STAT_NONE) { + tdAppendDataColVal(dataRow, getNullValue(schema[i].type), true, schema[i].type, spd->cols[i].toffset); + } } } - - colValOffset += colValAppended; - kvRowLen += kvRowColLen; - dataRowLen += dataRowDeltaColLen; } - if (kvRowLen < dataRowLen) { - payloadSetType(payload, SMEM_ROW_KV); - } else { - payloadSetType(payload, SMEM_ROW_DATA); - } + *len = getExtendedRowSize(pDataBlocks); - *len = (int32_t)(payloadValOffset + colValOffset); - payloadSetTLen(payload, *len); - return TSDB_CODE_SUCCESS; } @@ -957,11 +621,13 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn int32_t precision = tinfo.precision; - int32_t extendedRowSize = getExtendedRowSize(&tinfo); - - initSMemRowHelper(&pDataBlock->rowHelper, tscGetTableSchema(pDataBlock->pTableMeta), - tscGetNumOfColumns(pDataBlock->pTableMeta), 0); + int32_t extendedRowSize = getExtendedRowSize(pDataBlock); + if (TSDB_CODE_SUCCESS != + (code = initMemRowBuilder(&pDataBlock->rowBuilder, 0, tinfo.numOfColumns, pDataBlock->boundColumnInfo.numOfBound, + pDataBlock->boundColumnInfo.allNullLen))) { + return code; + } while (1) { index = 0; sToken = tStrGetToken(*str, &index, false); @@ -1010,19 +676,37 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) { pColInfo->numOfCols = numOfCols; pColInfo->numOfBound = numOfCols; - pColInfo->orderStatus = ORDER_STATUS_ORDERED; + 
pColInfo->orderStatus = ORDER_STATUS_ORDERED; // default is ORDERED for non-bound mode pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t)); pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn)); pColInfo->colIdxInfo = NULL; + pColInfo->flen = 0; + pColInfo->allNullLen = 0; + int32_t nVar = 0; for (int32_t i = 0; i < pColInfo->numOfCols; ++i) { + uint8_t type = pSchema[i].type; if (i > 0) { pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset; + pColInfo->cols[i].toffset = pColInfo->flen; + } + pColInfo->flen += TYPE_BYTES[type]; + switch (type) { + case TSDB_DATA_TYPE_BINARY: + pColInfo->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES); + ++nVar; + break; + case TSDB_DATA_TYPE_NCHAR: + pColInfo->allNullLen += (VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); + ++nVar; + break; + default: + break; } - - pColInfo->cols[i].hasVal = true; pColInfo->boundedColumns[i] = i; } + pColInfo->allNullLen += pColInfo->flen; + pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT)); } int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) { @@ -1122,35 +806,29 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk if (dataBuf->tsSource == TSDB_USE_SERVER_TS) { assert(dataBuf->ordered); } - // allocate memory + // allocate memory size_t nAlloc = nRows * sizeof(SBlockKeyTuple); if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) { size_t nRealAlloc = nAlloc + 10 * sizeof(SBlockKeyTuple); char * tmp = trealloc(pBlkKeyInfo->pKeyTuple, nRealAlloc); if (tmp == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; + return TSDB_CODE_TSC_OUT_OF_MEMORY; } pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp; pBlkKeyInfo->maxBytesAlloc = (int32_t)nRealAlloc; } memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc); + int32_t extendedRowSize = getExtendedRowSize(dataBuf); SBlockKeyTuple *pBlkKeyTuple = pBlkKeyInfo->pKeyTuple; char * pBlockData = pBlocks->data; - TDRowTLenT totolPayloadTLen = 0; - TDRowTLenT payloadTLen = 0; int n = 0; while (n < nRows) { - pBlkKeyTuple->skey = payloadTSKey(pBlockData); + pBlkKeyTuple->skey = memRowKey(pBlockData); pBlkKeyTuple->payloadAddr = pBlockData; - payloadTLen = payloadTLen(pBlockData); -#if 0 - ASSERT(payloadNCols(pBlockData) <= 4096); - ASSERT(payloadTLen(pBlockData) < 65536); -#endif - totolPayloadTLen += payloadTLen; + // next loop - pBlockData += payloadTLen; + pBlockData += extendedRowSize; ++pBlkKeyTuple; ++n; } @@ -1167,7 +845,6 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk TSKEY tj = (pBlkKeyTuple + j)->skey; if (ti == tj) { - totolPayloadTLen -= payloadTLen(pBlkKeyTuple + j); ++j; continue; } @@ -1183,17 +860,15 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk pBlocks->numOfRows = i + 1; } - dataBuf->size = sizeof(SSubmitBlk) + totolPayloadTLen; + dataBuf->size = sizeof(SSubmitBlk) + pBlocks->numOfRows * extendedRowSize; dataBuf->prevTS = INT64_MIN; return 0; } -static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) { - STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta); - +static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) { int32_t maxNumOfRows; - int32_t code = tscAllocateMemIfNeed(dataBuf, getExtendedRowSize(&tinfo), &maxNumOfRows); + int32_t code = tscAllocateMemIfNeed(dataBuf, 
getExtendedRowSize(dataBuf), &maxNumOfRows); if (TSDB_CODE_SUCCESS != code) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1531,7 +1206,7 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat pColInfo->numOfBound = 0; memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * nCols); for (int32_t i = 0; i < nCols; ++i) { - pColInfo->cols[i].hasVal = false; + pColInfo->cols[i].valStat = VAL_STAT_NONE; } int32_t code = TSDB_CODE_SUCCESS; @@ -1570,12 +1245,12 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat int32_t nScanned = 0, t = lastColIdx + 1; while (t < nCols) { if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) { - if (pColInfo->cols[t].hasVal == true) { + if (pColInfo->cols[t].valStat == VAL_STAT_HAS) { code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z); goto _clean; } - pColInfo->cols[t].hasVal = true; + pColInfo->cols[t].valStat = VAL_STAT_HAS; pColInfo->boundedColumns[pColInfo->numOfBound] = t; ++pColInfo->numOfBound; findColumnIndex = true; @@ -1593,12 +1268,12 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat int32_t nRemain = nCols - nScanned; while (t < nRemain) { if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) { - if (pColInfo->cols[t].hasVal == true) { + if (pColInfo->cols[t].valStat == VAL_STAT_HAS) { code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z); goto _clean; } - pColInfo->cols[t].hasVal = true; + pColInfo->cols[t].valStat = VAL_STAT_HAS; pColInfo->boundedColumns[pColInfo->numOfBound] = t; ++pColInfo->numOfBound; findColumnIndex = true; @@ -1833,7 +1508,7 @@ int tsParseInsertSql(SSqlObj *pSql) { goto _clean; } - if (dataBuf->boundColumnInfo.cols[0].hasVal == false) { + if (dataBuf->boundColumnInfo.cols[0].valStat == VAL_STAT_NONE) { code = tscInvalidOperationMsg(pInsertParam->msg, "primary timestamp column can not be null", NULL); goto _clean; } @@ -2044,15 +1719,18 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow goto _error; } - tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(&tinfo), &maxRows); + tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(pTableDataBlock), &maxRows); tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW); if (tokenBuf == NULL) { code = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } - initSMemRowHelper(&pTableDataBlock->rowHelper, tscGetTableSchema(pTableDataBlock->pTableMeta), - tscGetNumOfColumns(pTableDataBlock->pTableMeta), 0); + if (TSDB_CODE_SUCCESS != + (ret = initMemRowBuilder(&pTableDataBlock->rowBuilder, 0, tinfo.numOfColumns, pTableDataBlock->numOfParams, + pTableDataBlock->boundColumnInfo.allNullLen))) { + goto _error; + } while ((readLen = tgetline(&line, &n, fp)) != -1) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 2c2a299549..06a9505086 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -299,7 +299,7 @@ static int fillColumnsNull(STableDataBlocks* pBlock, int32_t rowNum) { SSchema *schema = (SSchema*)pBlock->pTableMeta->schema; for (int32_t i = 0; i < spd->numOfCols; ++i) { - if (!spd->cols[i].hasVal) { // current column do not have any value to insert, set it to null + if (spd->cols[i].valStat == VAL_STAT_NONE) { // current column do not have any value to insert, set it to null for (int32_t n = 0; n < rowNum; ++n) { char 
*ptr = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * n + offset; @@ -1527,8 +1527,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT; pCmd->insertParam.objectId = pSql->self; - pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); - + char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr); + pSql->sqlstr = sqlstr; if (pSql->sqlstr == NULL) { tscError("%p failed to malloc sql string buffer", pSql); STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 7adc0812ae..235ba15c8b 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1940,20 +1940,6 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) { pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; } -bool isValidDistinctSql(SQueryInfo* pQueryInfo) { - if (pQueryInfo == NULL) { - return false; - } - if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY - && (pQueryInfo->type & TSDB_QUERY_TYPE_TABLE_QUERY) != TSDB_QUERY_TYPE_TABLE_QUERY) { - return false; - } - if (tscNumOfExprs(pQueryInfo) == 1){ - return true; - } - return false; -} - static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) { size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < numOfExprs; ++i) { @@ -2043,8 +2029,11 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS const char* msg1 = "too many items in selection clause"; const char* msg2 = "functions or others can not be mixed up"; const char* msg3 = "not support query expression"; - const char* msg4 = "only support distinct one column or tag"; + const char* msg4 = "not support distinct mixed with proj/agg func"; const char* msg5 = "invalid function name"; + const char* msg6 = "not support distinct mixed with join"; + const char* msg7 = "not support distinct mixed with groupby"; + const char* msg8 = "not support distinct in nest query"; // too many result columns not support order by in query if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) { @@ -2055,19 +2044,31 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); } + bool hasDistinct = false; + bool hasAgg = false; size_t numOfExpr = taosArrayGetSize(pSelNodeList); + int32_t distIdx = -1; for (int32_t i = 0; i < numOfExpr; ++i) { int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo); tSqlExprItem* pItem = taosArrayGet(pSelNodeList, i); if (hasDistinct == false) { hasDistinct = (pItem->distinct == true); + distIdx = hasDistinct ? 
i : -1; } int32_t type = pItem->pNode->type; if (type == SQL_NODE_SQLFUNCTION) { + hasAgg = true; + if (hasDistinct) break; + pItem->pNode->functionId = isValidFunction(pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); + + if (pItem->pNode->functionId == TSDB_FUNC_BLKINFO && taosArrayGetSize(pQueryInfo->pUpstream) > 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + } + SUdfInfo* pUdfInfo = NULL; if (pItem->pNode->functionId < 0) { pUdfInfo = isValidUdf(pQueryInfo->pUdfInfo, pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); @@ -2102,10 +2103,22 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS } } + //TODO(dengyihao), refactor as function + //handle distinct func mixed with other func if (hasDistinct == true) { - if (!isValidDistinctSql(pQueryInfo) ) { + if (distIdx != 0 || hasAgg) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + if (joinQuery) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } + if (pQueryInfo->groupbyExpr.numOfGroupCols != 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); + } + if (pQueryInfo->pDownstream != NULL) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); + } + pQueryInfo->distinct = true; } @@ -2630,7 +2643,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO); } else if (info.precision == TSDB_TIME_PRECISION_MICRO) { tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI); - } + } if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10); @@ -2664,8 +2677,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col assert(ids.num == 1); tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema); } - tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid); + return TSDB_CODE_SUCCESS; } @@ -3047,7 +3060,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, &s); } } - tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); return TSDB_CODE_SUCCESS; } @@ -3218,7 +3230,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { pCmd->command = TSDB_SQL_SHOW; const char* msg1 = "invalid name"; - const char* msg2 = "pattern filter string too long"; + const char* msg2 = "wildcard string should be less than %d characters"; const char* msg3 = "database name too long"; const char* msg4 = "invalid ip address"; const char* msg5 = "database name is empty"; @@ -3262,8 +3274,10 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - if (!tscValidateTableNameLength(pCmd->payloadLen)) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + if (pPattern->n > tsMaxWildCardsLen){ + char tmp[64] = {0}; + sprintf(tmp, msg2, tsMaxWildCardsLen); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), tmp); } } } else if (showType == TSDB_MGMT_TABLE_VNODES) { @@ -4253,11 +4267,15 @@ static bool isValidExpr(tSqlExpr* pLeft, tSqlExpr* pRight, int32_t optr) { if (pRight == NULL) { return true; } - + if (pLeft->tokenId >= TK_BOOL && pLeft->tokenId <= TK_BINARY && pRight->tokenId >= TK_BOOL && pRight->tokenId <= TK_BINARY) { return false; } + if (pLeft->tokenId >= TK_BOOL && pLeft->tokenId 
<= TK_BINARY && (optr == TK_NOTNULL || optr == TK_ISNULL)) { + return false; + } + return true; } @@ -4390,15 +4408,17 @@ static int32_t validateNullExpr(tSqlExpr* pExpr, char* msgBuf) { // check for like expression static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) { - const char* msg1 = "wildcard string should be less than 20 characters"; + const char* msg1 = "wildcard string should be less than %d characters"; const char* msg2 = "illegal column name"; tSqlExpr* pLeft = pExpr->pLeft; tSqlExpr* pRight = pExpr->pRight; if (pExpr->tokenId == TK_LIKE) { - if (pRight->value.nLen > TSDB_PATTERN_STRING_MAX_LEN) { - return invalidOperationMsg(msgBuf, msg1); + if (pRight->value.nLen > tsMaxWildCardsLen) { + char tmp[64] = {0}; + sprintf(tmp, msg1, tsMaxWildCardsLen); + return invalidOperationMsg(msgBuf, tmp); } SSchema* pSchema = tscGetTableSchema(pTableMeta); @@ -4504,7 +4524,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql */ tSqlExprDestroy(*pExpr); } else { - ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pQueryInfo->msg); + ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pCmd->payload); } *pExpr = NULL; // remove this expression @@ -4542,7 +4562,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql } pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY; - ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg); + ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload); *pExpr = NULL; } else { // do nothing @@ -4560,7 +4580,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } - ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pQueryInfo->msg); + ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pCmd->payload); *pExpr = NULL; // remove it from expr tree } @@ -5362,6 +5382,7 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo const char* msg3 = "top/bottom not support fill"; const char* msg4 = "illegal value or data overflow"; const char* msg5 = "fill only available for interval query"; + const char* msg6 = "not supported function now"; if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); @@ -5400,6 +5421,9 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo } } else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) { pQueryInfo->fillType = TSDB_FILL_PREV; + if (tscIsPointInterpQuery(pQueryInfo) && pQueryInfo->order.order == TSDB_ORDER_DESC) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + } } else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) { pQueryInfo->fillType = TSDB_FILL_NEXT; } else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) { @@ -5478,14 +5502,19 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { pQueryInfo->order.order = TSDB_ORDER_ASC; if (isTopBottomQuery(pQueryInfo)) { pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; - } else { // in case of select tbname from super_table, the defualt order column can not be the primary ts column - pQueryInfo->order.orderColId = INT32_MIN; + } else { // in case of select tbname from super_table, the default order 
column can not be the primary ts column + pQueryInfo->order.orderColId = INT32_MIN; // todo define a macro } /* for super table query, set default ascending order for group output */ if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC; } + + if (pQueryInfo->distinct) { + pQueryInfo->order.order = TSDB_ORDER_ASC; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + } } int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) { @@ -5493,21 +5522,17 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq const char* msg1 = "invalid column name"; const char* msg2 = "order by primary timestamp, first tag or groupby column in groupby clause allowed"; const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed"; + const char* msg4 = "orderby column must projected in subquery"; setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - - if (pQueryInfo->distinct == true) { - pQueryInfo->order.order = TSDB_ORDER_ASC; - pQueryInfo->order.orderColId = 0; - return TSDB_CODE_SUCCESS; - } - if (pSqlNode->pSortOrder == NULL) { + if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) { return TSDB_CODE_SUCCESS; } - SArray* pSortorder = pSqlNode->pSortOrder; + char* pMsgBuf = tscGetErrorMsgPayload(pCmd); + SArray* pSortOrder = pSqlNode->pSortOrder; /* * for table query, there is only one or none order option is allowed, which is the @@ -5515,19 +5540,19 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq * * for super table query, the order option must be less than 3. */ - size_t size = taosArrayGetSize(pSortorder); - if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { + size_t size = taosArrayGetSize(pSortOrder); + if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) { if (size > 1) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0); + return invalidOperationMsg(pMsgBuf, msg0); } } else { if (size > 2) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + return invalidOperationMsg(pMsgBuf, msg3); } } // handle the first part of order by - tVariant* pVar = taosArrayGet(pSortorder, 0); + tVariant* pVar = taosArrayGet(pSortOrder, 0); // e.g., order by 1 asc, return directly with out further check. 
if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) { @@ -5539,7 +5564,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + return invalidOperationMsg(pMsgBuf, msg1); } bool orderByTags = false; @@ -5551,7 +5576,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq // it is a tag column if (pQueryInfo->groupbyExpr.columnInfo == NULL) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); if (relTagIndex == pColIndex->colIndex) { @@ -5572,13 +5597,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq orderByGroupbyCol = true; } } + if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomQuery(pQueryInfo)) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + return invalidOperationMsg(pMsgBuf, msg3); } else { // order by top/bottom result value column is not supported in case of interval query. assert(!(orderByTags && orderByTS && orderByGroupbyCol)); } - size_t s = taosArrayGetSize(pSortorder); + size_t s = taosArrayGetSize(pSortOrder); if (s == 1) { if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); @@ -5591,13 +5617,15 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pQueryInfo->groupbyExpr.orderType = p1->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; } else if (isTopBottomQuery(pQueryInfo)) { + int32_t topBotIndex = tscGetTopBotQueryExprIndex(pQueryInfo); + assert(topBotIndex >= 1); /* order of top/bottom query in interval is not valid */ - SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); + SExprInfo* pExpr = tscExprGet(pQueryInfo, topBotIndex-1); assert(pExpr->base.functionId == TSDB_FUNC_TS); - pExpr = tscExprGet(pQueryInfo, 1); + pExpr = tscExprGet(pQueryInfo, topBotIndex); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); @@ -5612,12 +5640,21 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq // orderby ts query on super table if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + bool found = false; + for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + found = true; + break; + } + } + if (!found && pQueryInfo->pDownstream) { + return invalidOperationMsg(pMsgBuf, msg4); + } addPrimaryTsColIntoResult(pQueryInfo, pCmd); } } - } - - if (s == 2) { + } else { tVariantListItem *pItem = taosArrayGet(pSqlNode->pSortOrder, 0); if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); @@ -5634,22 +5671,23 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq tVariant* pVar2 = &pItem->pVar; SStrToken cname 
= {pVar2->nLen, pVar2->nType, pVar2->pz}; if (getColumnIndexByName(&cname, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + return invalidOperationMsg(pMsgBuf, msg1); } if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } else { - tVariantListItem* p1 = taosArrayGet(pSortorder, 1); + tVariantListItem* p1 = taosArrayGet(pSortOrder, 1); pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } - } else { // meter query - if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + } else if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { // check order by clause for normal table & temp table + if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return invalidOperationMsg(pMsgBuf, msg1); } + if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) { bool validOrder = false; SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; @@ -5657,13 +5695,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq SColIndex* pColIndex = taosArrayGet(columnInfo, 0); validOrder = (pColIndex->colIndex == index.columnIndex); } + if (!validOrder) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } + tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId; pQueryInfo->groupbyExpr.orderType = p1->sortOrder; - } if (isTopBottomQuery(pQueryInfo)) { @@ -5673,19 +5712,22 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq SColIndex* pColIndex = taosArrayGet(columnInfo, 0); validOrder = (pColIndex->colIndex == index.columnIndex); } else { + int32_t topBotIndex = tscGetTopBotQueryExprIndex(pQueryInfo); + assert(topBotIndex >= 1); /* order of top/bottom query in interval is not valid */ - SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); + SExprInfo* pExpr = tscExprGet(pQueryInfo, topBotIndex-1); assert(pExpr->base.functionId == TSDB_FUNC_TS); - pExpr = tscExprGet(pQueryInfo, 1); + pExpr = tscExprGet(pQueryInfo, topBotIndex); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } + validOrder = true; } if (!validOrder) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); @@ -5695,6 +5737,18 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq return TSDB_CODE_SUCCESS; } + tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + pQueryInfo->order.order = pItem->sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; + } else { + // handle the temp table order by clause. You can order by any single column in case of the temp table, created by + // inner subquery. 
+ assert(UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo) && taosArrayGetSize(pSqlNode->pSortOrder) == 1); + + if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return invalidOperationMsg(pMsgBuf, msg1); + } + tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; @@ -5884,6 +5938,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariantListItem* pItem = taosArrayGet(pVarList, 1); SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex); + + if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes * TSDB_NCHAR_SIZE)) { + return invalidOperationMsg(pMsg, msg14); + } pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) { @@ -8367,7 +8425,7 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) { n += 1; } - + info->numOfColumns = n; return meta; } @@ -8390,13 +8448,12 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS pSub->pUdfInfo = pUdfInfo; pSub->udfCopy = true; + pSub->pDownstream = pQueryInfo; int32_t code = validateSqlNode(pSql, p, pSub); if (code != TSDB_CODE_SUCCESS) { return code; } - pSub->pDownstream = pQueryInfo; - // create dummy table meta info STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo)); if (pTableMetaInfo1 == NULL) { @@ -8454,8 +8511,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf const char* msg8 = "condition missing for join query"; const char* msg9 = "not support 3 level select"; - int32_t code = TSDB_CODE_SUCCESS; - + int32_t code = TSDB_CODE_SUCCESS; SSqlCmd* pCmd = &pSql->cmd; STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -8750,8 +8806,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo); pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo); pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo); - //pQueryInfo->globalMerge = tscIsTwoStageSTableQuery(pQueryInfo, 0); - pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo); pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 3e8dfac1da..c0723e210a 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -409,7 +409,7 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) { if ((TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) || - (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY))) { + (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->distinct)) { // do nothing in case of super table subquery } else { pSql->retry += 1; @@ -880,16 +880,16 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; SQueryAttr 
query = {{0}}; tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql); + query.vgId = pTableMeta->vgId; SArray* tableScanOperator = createTableScanPlan(&query); SArray* queryOperator = createExecOperatorPlan(&query); - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; - SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload; tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version)); diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 6f3d5c3a63..026c65a595 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -887,7 +887,9 @@ int taos_validate_sql(TAOS *taos, const char *sql) { return TSDB_CODE_TSC_EXCEED_SQL_LIMIT; } - pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr); + pSql->sqlstr = sqlstr; if (pSql->sqlstr == NULL) { tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self); tfree(pSql); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 8c0d642ca6..816347cecd 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2034,17 +2034,14 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) { tscAsyncResultOnError(pSql); } -static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { +void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { assert(numOfSubs <= pSql->subState.numOfSub && numOfSubs >= 0); for(int32_t i = 0; i < numOfSubs; ++i) { SSqlObj* pSub = pSql->pSubs[i]; assert(pSub != NULL); - - SRetrieveSupport* pSupport = pSub->param; - - tfree(pSupport->localBuffer); - tfree(pSupport); + + tscFreeRetrieveSup(pSub); taos_free_result(pSub); } @@ -2395,8 +2392,12 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1); tscColumnCopy(x, pCol); } else { - SColumn *p = tscColumnClone(pCol); - taosArrayPush(pNewQueryInfo->colList, &p); + SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex}; + tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss); + int32_t ti = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid); + assert(ti >= 0); + SColumn* x = taosArrayGetP(pNewQueryInfo->colList, ti); + tscColumnCopy(x, pCol); } } } @@ -2558,7 +2559,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { return TSDB_CODE_SUCCESS; } -static void tscFreeRetrieveSup(SSqlObj *pSql) { +void tscFreeRetrieveSup(SSqlObj *pSql) { SRetrieveSupport *trsupport = pSql->param; void* p = atomic_val_compare_exchange_ptr(&pSql->param, trsupport, 0); @@ -2716,33 +2717,43 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { int32_t code = pParentSql->res.code; - if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) { - // remove the cached tableMeta and vgroup id list, and then parse the sql again - SSqlCmd* pParentCmd = &pParentSql->cmd; - STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0); - tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self); + SSqlObj *userSql = NULL; + if (pParentSql->param) { + userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + } - pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); - 
pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + if (userSql == NULL) { + userSql = pParentSql; + } - pParentSql->res.code = TSDB_CODE_SUCCESS; - pParentSql->retry++; + if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) { + if (userSql != pParentSql) { + tscFreeRetrieveSup(pParentSql); + } - tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, - tstrerror(code), pParentSql->retry); + tscFreeSubobj(userSql); + tfree(userSql->pSubs); - code = tsParseSql(pParentSql, true); + userSql->res.code = TSDB_CODE_SUCCESS; + userSql->retry++; + + tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self, + tstrerror(code), userSql->retry); + + tscResetSqlCmd(&userSql->cmd, true); + code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; } if (code != TSDB_CODE_SUCCESS) { - pParentSql->res.code = code; - tscAsyncResultOnError(pParentSql); + userSql->res.code = code; + tscAsyncResultOnError(userSql); return; } - executeQuery(pParentSql, pQueryInfo); + pQueryInfo = tscGetQueryInfo(&userSql->cmd); + executeQuery(userSql, pQueryInfo); } else { (*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code); } @@ -2812,7 +2823,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows); SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd); - tscClearInterpInfo(pPQueryInfo); code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self); pParentSql->res.code = code; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 5a724af7bf..1af8eaac2e 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -369,6 +369,27 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) { return false; } +int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) { + size_t numOfExprs = tscNumOfExprs(pQueryInfo); + + for (int32_t i = 0; i < numOfExprs; ++i) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + if (pExpr == NULL) { + continue; + } + + if (pExpr->base.functionId == TSDB_FUNC_TS) { + continue; + } + + if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) { + return i; + } + } + + return -1; +} + bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) { size_t numOfExprs = tscNumOfExprs(pQueryInfo); @@ -625,8 +646,10 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo } else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) { // convert unicode to native code in a temporary buffer extra one byte for terminated symbol - pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); - + char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); + if(buffer == NULL) + return ; + pRes->buffer[i] = buffer; // string terminated char for binary data memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows); @@ -1206,7 +1229,6 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo); SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols); - pOutput->precision = 
pSqlObjList[0]->res.precision; SSchema* schema = NULL; @@ -1502,7 +1524,6 @@ void tscFreeSqlObj(SSqlObj* pSql) { tscFreeSqlResult(pSql); tscResetSqlCmd(pCmd, false); - memset(pCmd->payload, 0, (size_t)pCmd->allocSize); tfree(pCmd->payload); pCmd->allocSize = 0; @@ -1776,101 +1797,6 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i return TSDB_CODE_SUCCESS; } -static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) { - SSchema* pSchema = pBuilder->pSchema; - char* p = (char*)pBuilder->buf; - int toffset = 0; - uint16_t nCols = pBuilder->nCols; - - uint8_t memRowType = payloadType(p); - uint16_t nColsBound = payloadNCols(p); - if (pBuilder->nCols <= 0 || nColsBound <= 0) { - return NULL; - } - char* pVals = POINTER_SHIFT(p, payloadValuesOffset(p)); - SMemRow* memRow = (SMemRow)pBuilder->pDataBlock; - memRowSetType(memRow, memRowType); - - // ----------------- Raw payload structure for row: - /* |<------------ Head ------------->|<----------- body of column data tuple ------------------->| - * | |<----------------- flen ------------->|<--- value part --->| - * |SMemRowType| dataTLen | nCols | colId | colType | offset | ... | value |...|...|... | - * +-----------+----------+----------+--------------------------------------|--------------------| - * | uint8_t | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ... |.......|...|...|... | - * +-----------+----------+----------+--------------------------------------+--------------------| - * 1. offset in column data tuple starts from the value part in case of uint16_t overflow. - * 2. dataTLen: total length including the header and body. - */ - - if (memRowType == SMEM_ROW_DATA) { - SDataRow trow = (SDataRow)memRowDataBody(memRow); - dataRowSetLen(trow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBuilder->flen)); - dataRowSetVersion(trow, pBuilder->sversion); - - p = (char*)payloadBody(pBuilder->buf); - uint16_t i = 0, j = 0; - while (j < nCols) { - if (i >= nColsBound) { - break; - } - int16_t colId = payloadColId(p); - if (colId == pSchema[j].colId) { - // ASSERT(payloadColType(p) == pSchema[j].type); - tdAppendColVal(trow, POINTER_SHIFT(pVals, payloadColOffset(p)), pSchema[j].type, toffset); - toffset += TYPE_BYTES[pSchema[j].type]; - p = payloadNextCol(p); - ++i; - ++j; - } else if (colId < pSchema[j].colId) { - p = payloadNextCol(p); - ++i; - } else { - tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset); - toffset += TYPE_BYTES[pSchema[j].type]; - ++j; - } - } - - while (j < nCols) { - tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset); - toffset += TYPE_BYTES[pSchema[j].type]; - ++j; - } - - #if 0 // no need anymore - while (i < nColsBound) { - p = payloadNextCol(p); - ++i; - } - #endif - - } else if (memRowType == SMEM_ROW_KV) { - SKVRow kvRow = (SKVRow)memRowKvBody(memRow); - kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nColsBound)); - kvRowSetNCols(kvRow, nColsBound); - memRowSetKvVersion(memRow, pBuilder->sversion); - - p = (char*)payloadBody(pBuilder->buf); - int i = 0; - while (i < nColsBound) { - int16_t colId = payloadColId(p); - uint8_t colType = payloadColType(p); - tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, &toffset); - //toffset += sizeof(SColIdx); - p = payloadNextCol(p); - ++i; - } - - } else { - ASSERT(0); - } - int32_t rowTLen = memRowTLen(memRow); - pBuilder->pDataBlock = (char*)pBuilder->pDataBlock + rowTLen; // next row - pBuilder->pSubmitBlk->dataLen += rowTLen; - - 
return memRow; -} - // Erase the empty space reserved for binary data static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SInsertStatementParam* insertParam, SBlockKeyTuple* blkKeyTuple) { @@ -1902,10 +1828,11 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI int32_t schemaSize = sizeof(STColumn) * numOfCols; pBlock->schemaLen = schemaSize; } else { - for (int32_t j = 0; j < tinfo.numOfColumns; ++j) { - flen += TYPE_BYTES[pSchema[j].type]; + if (IS_RAW_PAYLOAD(insertParam->payloadType)) { + for (int32_t j = 0; j < tinfo.numOfColumns; ++j) { + flen += TYPE_BYTES[pSchema[j].type]; + } } - pBlock->schemaLen = 0; } @@ -1932,18 +1859,19 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI pBlock->dataLen += memRowTLen(memRow); } } else { - SMemRowBuilder rowBuilder; - rowBuilder.pSchema = pSchema; - rowBuilder.sversion = pTableMeta->sversion; - rowBuilder.flen = flen; - rowBuilder.nCols = tinfo.numOfColumns; - rowBuilder.pDataBlock = pDataBlock; - rowBuilder.pSubmitBlk = pBlock; - rowBuilder.buf = p; - for (int32_t i = 0; i < numOfRows; ++i) { - rowBuilder.buf = (blkKeyTuple + i)->payloadAddr; - tdGenMemRowFromBuilder(&rowBuilder); + char* payload = (blkKeyTuple + i)->payloadAddr; + if (isNeedConvertRow(payload)) { + convertSMemRow(pDataBlock, payload, pTableDataBlock); + TDRowTLenT rowTLen = memRowTLen(pDataBlock); + pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen); + pBlock->dataLen += rowTLen; + } else { + TDRowTLenT rowTLen = memRowTLen(payload); + memcpy(pDataBlock, payload, rowTLen); + pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen); + pBlock->dataLen += rowTLen; + } } } @@ -1956,9 +1884,9 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI static int32_t getRowExpandSize(STableMeta* pTableMeta) { int32_t result = TD_MEM_ROW_DATA_HEAD_SIZE; - int32_t columns = tscGetNumOfColumns(pTableMeta); + int32_t columns = tscGetNumOfColumns(pTableMeta); SSchema* pSchema = tscGetTableSchema(pTableMeta); - for(int32_t i = 0; i < columns; i++) { + for (int32_t i = 0; i < columns; i++) { if (IS_VAR_DATA_TYPE((pSchema + i)->type)) { result += TYPE_BYTES[TSDB_DATA_TYPE_BINARY]; } @@ -2004,7 +1932,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData; if (pBlocks->numOfRows > 0) { // the maximum expanded size in byte when a row-wise data is converted to SDataRow format - int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta); + int32_t expandSize = isRawPayload ? 
getRowExpandSize(pOneTableBlock->pTableMeta) : 0; STableDataBlocks* dataBuf = NULL; int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, @@ -2017,7 +1945,8 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl return ret; } - int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); + int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); if (dataBuf->nAllocSize < destSize) { dataBuf->nAllocSize = (uint32_t)(destSize * 1.5); @@ -2061,7 +1990,9 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey); } - int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); + int32_t len = pBlocks->numOfRows * + (isRawPayload ? (pOneTableBlock->rowSize + expandSize) : getExtendedRowSize(pOneTableBlock)) + + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); pBlocks->tid = htonl(pBlocks->tid); pBlocks->uid = htobe64(pBlocks->uid); @@ -3622,8 +3553,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pNewQueryInfo->numOfTables = 0; pNewQueryInfo->pTableMetaInfo = NULL; pNewQueryInfo->bufLen = pQueryInfo->bufLen; - pNewQueryInfo->buf = malloc(pQueryInfo->bufLen); + + + pNewQueryInfo->distinct = pQueryInfo->distinct; if (pNewQueryInfo->buf == NULL) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; @@ -3834,8 +3767,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { int32_t index = ps->subqueryIndex; bool ret = subAndCheckDone(pSql, pParentSql, index); - tfree(ps); - pSql->param = NULL; + tscFreeRetrieveSup(pSql); if (!ret) { tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index); @@ -3845,12 +3777,13 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { // todo refactor tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self); - SSqlCmd* pParentCmd = &pParentSql->cmd; - STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0); - tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self); + if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry)) { + tscAsyncResultOnError(pParentSql); + return; + } - pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); - pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + tscFreeSubobj(pParentSql); + tfree(pParentSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; @@ -3858,6 +3791,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); + + tscResetSqlCmd(&pParentSql->cmd, true); + code = tsParseSql(pParentSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; @@ -3869,7 +3805,8 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { return; } - SQueryInfo *pQueryInfo = 
tscGetQueryInfo(pParentCmd); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pParentSql->cmd); + executeQuery(pParentSql, pQueryInfo); return; } @@ -3892,9 +3829,11 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { } if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly + assert(pSql->subState.numOfSub == 0); pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream); - + assert(pSql->pSubs == NULL); pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); + assert(pSql->subState.states == NULL); pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); code = pthread_mutex_init(&pSql->subState.mutex, NULL); @@ -3920,6 +3859,9 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { pNew->sqlstr = strdup(pSql->sqlstr); pNew->fp = tscSubqueryCompleteCallback; pNew->maxRetry = pSql->maxRetry; + + pNew->cmd.resColumnId = TSDB_RES_COL_ID; + tsem_init(&pNew->rspSem, 0, 0); SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id @@ -4454,6 +4396,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, STableMeta* p = NULL; size_t sz = 0; STableMeta* pChild = *ppChild; + STableMeta* pChild1; taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz); @@ -4464,7 +4407,10 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema); int32_t tableMetaSize = sizeof(STableMeta) + totalBytes; if (*tableMetaCapacity < tableMetaSize) { - pChild = realloc(pChild, tableMetaSize); + pChild1 = realloc(pChild, tableMetaSize); + if(pChild1 == NULL) + return -1; + pChild = pChild1; *tableMetaCapacity = (size_t)tableMetaSize; } diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index fb6bab0cf2..a01c377539 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -186,6 +186,7 @@ typedef void *SDataRow; #define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t)) #define dataRowLen(r) (*(TDRowLenT *)(r)) // 0~65535 +#define dataRowEnd(r) POINTER_SHIFT(r, dataRowLen(r)) #define dataRowVersion(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))) #define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE) #define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r))) @@ -201,14 +202,18 @@ void tdFreeDataRow(SDataRow row); void tdInitDataRow(SDataRow row, STSchema *pSchema); SDataRow tdDataRowDup(SDataRow row); + // offset here not include dataRow header length -static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) { +static FORCE_INLINE int tdAppendDataColVal(SDataRow row, const void *value, bool isCopyVarData, int8_t type, + int32_t offset) { ASSERT(value != NULL); int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE; if (IS_VAR_DATA_TYPE(type)) { *(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row); - memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value)); + if (isCopyVarData) { + memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value)); + } dataRowLen(row) += varDataTLen(value); } else { if (offset == 0) { @@ -223,6 +228,12 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t t return 0; } + +// offset here not include dataRow header length +static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t 
offset) { + return tdAppendDataColVal(row, value, true, type, offset); +} + // NOTE: offset here including the header size static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t offset) { if (IS_VAR_DATA_TYPE(type)) { @@ -328,11 +339,10 @@ static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; } int tdAllocMemForCol(SDataCol *pCol, int maxPoints); void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints); -void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints); +int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints); void dataColSetOffset(SDataCol *pCol, int nEle); bool isNEleNull(SDataCol *pCol, int nEle); -void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints); // Get the data pointer from a column-wised data static FORCE_INLINE const void *tdGetColDataOfRow(SDataCol *pCol, int row) { @@ -357,13 +367,11 @@ static FORCE_INLINE int32_t dataColGetNEleLen(SDataCol *pDataCol, int rows) { } typedef struct { - int maxRowSize; - int maxCols; // max number of columns - int maxPoints; // max number of points - - int numOfRows; - int numOfCols; // Total number of cols - int sversion; // TODO: set sversion + int maxCols; // max number of columns + int maxPoints; // max number of points + int numOfRows; + int numOfCols; // Total number of cols + int sversion; // TODO: set sversion SDataCol *cols; } SDataCols; @@ -407,7 +415,7 @@ static FORCE_INLINE TSKEY dataColsKeyLast(SDataCols *pCols) { } } -SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows); +SDataCols *tdNewDataCols(int maxCols, int maxRows); void tdResetDataCols(SDataCols *pCols); int tdInitDataCols(SDataCols *pCols, STSchema *pSchema); SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData); @@ -475,9 +483,10 @@ static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) { } // offset here not include kvRow header length -static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t *offset) { +static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, bool isCopyValData, int16_t colId, int8_t type, + int32_t offset) { ASSERT(value != NULL); - int32_t toffset = *offset + TD_KV_ROW_HEAD_SIZE; + int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE; SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset); char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row)); @@ -485,10 +494,12 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t pColIdx->offset = kvRowLen(row); // offset of pColIdx including the TD_KV_ROW_HEAD_SIZE if (IS_VAR_DATA_TYPE(type)) { - memcpy(ptr, value, varDataTLen(value)); + if (isCopyValData) { + memcpy(ptr, value, varDataTLen(value)); + } kvRowLen(row) += varDataTLen(value); } else { - if (*offset == 0) { + if (offset == 0) { ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP); TKEY tvalue = tdGetTKEY(*(TSKEY *)value); memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]); @@ -497,7 +508,6 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t } kvRowLen(row) += TYPE_BYTES[type]; } - *offset += sizeof(SColIdx); return 0; } @@ -537,8 +547,9 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder); static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) { if (pBuilder->nCols >= pBuilder->tCols) { pBuilder->tCols *= 2; - pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); - if 
(pBuilder->pColIdx == NULL) return -1; + SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); + if (pColIdx == NULL) return -1; + pBuilder->pColIdx = pColIdx; } pBuilder->pColIdx[pBuilder->nCols].colId = colId; @@ -551,8 +562,9 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, while (tlen > pBuilder->alloc - pBuilder->size) { pBuilder->alloc *= 2; } - pBuilder->buf = realloc(pBuilder->buf, pBuilder->alloc); - if (pBuilder->buf == NULL) return -1; + void* buf = realloc(pBuilder->buf, pBuilder->alloc); + if (buf == NULL) return -1; + pBuilder->buf = buf; } memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen); @@ -592,12 +604,24 @@ typedef void *SMemRow; #define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE) #define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE) -#define SMEM_ROW_DATA 0U // SDataRow -#define SMEM_ROW_KV 1U // SKVRow +#define SMEM_ROW_DATA 0x0U // SDataRow +#define SMEM_ROW_KV 0x01U // SKVRow +#define SMEM_ROW_CONVERT 0x80U // SMemRow convert flag -#define memRowType(r) (*(uint8_t *)(r)) +#define KVRatioKV (0.2f) // all bool +#define KVRatioPredict (0.4f) +#define KVRatioData (0.75f) // all bigint +#define KVRatioConvert (0.9f) + +#define memRowType(r) ((*(uint8_t *)(r)) & 0x01) + +#define memRowSetType(r, t) ((*(uint8_t *)(r)) = (t)) // set the total byte in case of dirty memory +#define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT)) // highest bit +#define isDataRowT(t) (SMEM_ROW_DATA == (((uint8_t)(t)) & 0x01)) #define isDataRow(r) (SMEM_ROW_DATA == memRowType(r)) +#define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01)) #define isKvRow(r) (SMEM_ROW_KV == memRowType(r)) +#define isNeedConvertRow(r) (((*(uint8_t *)(r)) & 0x80) == SMEM_ROW_CONVERT) #define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag #define memRowKvBody(r) \ @@ -614,6 +638,14 @@ typedef void *SMemRow; #define memRowLen(r) (isDataRow(r) ? memRowDataLen(r) : memRowKvLen(r)) #define memRowTLen(r) (isDataRow(r) ? memRowDataTLen(r) : memRowKvTLen(r)) // using uint32_t/int32_t to store the TLen +static FORCE_INLINE char *memRowEnd(SMemRow row) { + if (isDataRow(row)) { + return (char *)dataRowEnd(memRowDataBody(row)); + } else { + return (char *)kvRowEnd(memRowKvBody(row)); + } +} + #define memRowDataVersion(r) dataRowVersion(memRowDataBody(r)) #define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)) #define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version @@ -631,7 +663,6 @@ typedef void *SMemRow; } \ } while (0) -#define memRowSetType(r, t) (memRowType(r) = (t)) #define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l)) #define memRowSetVersion(r, v) (isDataRow(r) ? 
dataRowSetVersion(memRowDataBody(r), v) : memRowSetKvVersion(r, v)) #define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r)) @@ -664,12 +695,12 @@ static FORCE_INLINE void *tdGetMemRowDataOfColEx(void *row, int16_t colId, int8_ } } -static FORCE_INLINE int tdAppendMemColVal(SMemRow row, const void *value, int16_t colId, int8_t type, int32_t offset, - int32_t *kvOffset) { +static FORCE_INLINE int tdAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId, + int8_t type, int32_t offset) { if (isDataRow(row)) { - tdAppendColVal(memRowDataBody(row), value, type, offset); + tdAppendDataColVal(memRowDataBody(row), value, isCopyVarData, type, offset); } else { - tdAppendKvColVal(memRowKvBody(row), value, colId, type, kvOffset); + tdAppendKvColVal(memRowKvBody(row), value, isCopyVarData, colId, type, offset); } return 0; } @@ -691,6 +722,30 @@ static FORCE_INLINE int32_t tdGetColAppendLen(uint8_t rowType, const void *value return len; } +/** + * 1. calculate the delta of AllNullLen for SDataRow. + * 2. calculate the real len for SKVRow. + */ +static FORCE_INLINE void tdGetColAppendDeltaLen(const void *value, int8_t colType, int32_t *dataLen, int32_t *kvLen) { + switch (colType) { + case TSDB_DATA_TYPE_BINARY: { + int32_t varLen = varDataLen(value); + *dataLen += (varLen - CHAR_BYTES); + *kvLen += (varLen + sizeof(SColIdx)); + break; + } + case TSDB_DATA_TYPE_NCHAR: { + int32_t varLen = varDataLen(value); + *dataLen += (varLen - TSDB_NCHAR_SIZE); + *kvLen += (varLen + sizeof(SColIdx)); + break; + } + default: { + *kvLen += (TYPE_BYTES[colType] + sizeof(SColIdx)); + break; + } + } +} typedef struct { int16_t colId; @@ -706,7 +761,7 @@ static FORCE_INLINE void setSColInfo(SColInfo* colInfo, int16_t colId, uint8_t c SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2); - +#if 0 // ----------------- Raw payload structure for row: /* |<------------ Head ------------->|<----------- body of column data tuple ------------------->| * | |<----------------- flen ------------->|<--- value part --->| @@ -752,6 +807,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SHIFT(pCol, PAYLOAD_COL_HEAD_LEN); } +#endif + #ifdef __cplusplus } #endif diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 7290db6ec9..ed35168457 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -59,6 +59,7 @@ extern char tsLocale[]; extern char tsCharset[]; // default encode string extern int8_t tsEnableCoreFile; extern int32_t tsCompressMsgSize; +extern int32_t tsMaxNumOfDistinctResults; extern char tsTempDir[]; //query buffer management @@ -70,6 +71,7 @@ extern int8_t tsKeepOriginalColumnName; // client extern int32_t tsMaxSQLStringLen; +extern int32_t tsMaxWildCardsLen; extern int8_t tsTscEnableRecordSql; extern int32_t tsMaxNumOfOrderedResults; extern int32_t tsMinSlidingTime; diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 3f0ab7f93e..181afa225d 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -19,10 +19,10 @@ #include "wchar.h" #include "tarray.h" +static void dataColSetNEleNull(SDataCol *pCol, int nEle); static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, int limit2, int tRows, bool forceSetNull); -//TODO: change caller to use return val int tdAllocMemForCol(SDataCol *pCol, 
int maxPoints) { int spaceNeeded = pCol->bytes * maxPoints; if(IS_VAR_DATA_TYPE(pCol->type)) { @@ -31,7 +31,7 @@ int tdAllocMemForCol(SDataCol *pCol, int maxPoints) { if(pCol->spaceSize < spaceNeeded) { void* ptr = realloc(pCol->pData, spaceNeeded); if(ptr == NULL) { - uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)pCol->spaceSize, + uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)spaceNeeded, strerror(errno)); return -1; } else { @@ -138,8 +138,9 @@ int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int1 if (pBuilder->nCols >= pBuilder->tCols) { pBuilder->tCols *= 2; - pBuilder->columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols); - if (pBuilder->columns == NULL) return -1; + STColumn* columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols); + if (columns == NULL) return -1; + pBuilder->columns = columns; } STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]); @@ -239,20 +240,19 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) { pDataCol->len = 0; } // value from timestamp should be TKEY here instead of TSKEY -void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) { +int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) { ASSERT(pCol != NULL && value != NULL); if (isAllRowsNull(pCol)) { if (isNull(value, pCol->type)) { // all null value yet, just return - return; + return 0; } + if(tdAllocMemForCol(pCol, maxPoints) < 0) return -1; if (numOfRows > 0) { // Find the first not null value, fill all previouse values as NULL - dataColSetNEleNull(pCol, numOfRows, maxPoints); - } else { - tdAllocMemForCol(pCol, maxPoints); + dataColSetNEleNull(pCol, numOfRows); } } @@ -268,12 +268,21 @@ void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxP memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes); pCol->len += pCol->bytes; } + return 0; +} + +static FORCE_INLINE const void *tdGetColDataOfRowUnsafe(SDataCol *pCol, int row) { + if (IS_VAR_DATA_TYPE(pCol->type)) { + return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]); + } else { + return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row); + } } bool isNEleNull(SDataCol *pCol, int nEle) { if(isAllRowsNull(pCol)) return true; for (int i = 0; i < nEle; i++) { - if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false; + if (!isNull(tdGetColDataOfRowUnsafe(pCol, i), pCol->type)) return false; } return true; } @@ -290,9 +299,7 @@ static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) { } } -void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) { - tdAllocMemForCol(pCol, maxPoints); - +static void dataColSetNEleNull(SDataCol *pCol, int nEle) { if (IS_VAR_DATA_TYPE(pCol->type)) { pCol->len = 0; for (int i = 0; i < nEle; i++) { @@ -318,7 +325,7 @@ void dataColSetOffset(SDataCol *pCol, int nEle) { } } -SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { +SDataCols *tdNewDataCols(int maxCols, int maxRows) { SDataCols *pCols = (SDataCols *)calloc(1, sizeof(SDataCols)); if (pCols == NULL) { uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCols), strerror(errno)); @@ -326,6 +333,9 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { } pCols->maxPoints = maxRows; + pCols->maxCols = maxCols; + pCols->numOfRows = 0; + pCols->numOfCols = 0; if (maxCols > 0) { pCols->cols = (SDataCol *)calloc(maxCols, 
sizeof(SDataCol)); @@ -342,13 +352,8 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { pCols->cols[i].pData = NULL; pCols->cols[i].dataOff = NULL; } - - pCols->maxCols = maxCols; } - pCols->maxRowSize = maxRowSize; - - return pCols; } @@ -357,8 +362,9 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) { int oldMaxCols = pCols->maxCols; if (schemaNCols(pSchema) > oldMaxCols) { pCols->maxCols = schemaNCols(pSchema); - pCols->cols = (SDataCol *)realloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols); - if (pCols->cols == NULL) return -1; + void* ptr = (SDataCol *)realloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols); + if (ptr == NULL) return -1; + pCols->cols = ptr; for(i = oldMaxCols; i < pCols->maxCols; i++) { pCols->cols[i].pData = NULL; pCols->cols[i].dataOff = NULL; @@ -366,10 +372,6 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) { } } - if (schemaTLen(pSchema) > pCols->maxRowSize) { - pCols->maxRowSize = schemaTLen(pSchema); - } - tdResetDataCols(pCols); pCols->numOfCols = schemaNCols(pSchema); @@ -398,7 +400,7 @@ SDataCols *tdFreeDataCols(SDataCols *pCols) { } SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) { - SDataCols *pRet = tdNewDataCols(pDataCols->maxRowSize, pDataCols->maxCols, pDataCols->maxPoints); + SDataCols *pRet = tdNewDataCols(pDataCols->maxCols, pDataCols->maxPoints); if (pRet == NULL) return NULL; pRet->numOfCols = pDataCols->numOfCols; @@ -413,7 +415,10 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) { if (keepData) { if (pDataCols->cols[i].len > 0) { - tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints); + if(tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints) < 0) { + tdFreeDataCols(pRet); + return NULL; + } pRet->cols[i].len = pDataCols->cols[i].len; memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len); if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) { @@ -584,9 +589,12 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) { for (int i = 0; i < src2->numOfCols; i++) { ASSERT(target->cols[i].type == src2->cols[i].type); - if (src2->cols[i].len > 0 && (forceSetNull || (!forceSetNull && !isNull(src2->cols[i].pData, src2->cols[i].type)))) { + if (src2->cols[i].len > 0 && !isNull(src2->cols[i].pData, src2->cols[i].type)) { dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows, target->maxPoints); + } else if(!forceSetNull && key1 == key2 && src1->cols[i].len > 0) { + dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows, + target->maxPoints); } } target->numOfRows++; @@ -844,7 +852,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch int16_t k; for (k = 0; k < nKvNCols; ++k) { SColInfo *pColInfo = taosArrayGet(stashRow, k); - tdAppendKvColVal(kvRow, pColInfo->colVal, pColInfo->colId, pColInfo->colType, &toffset); + tdAppendKvColVal(kvRow, pColInfo->colVal, true, pColInfo->colId, pColInfo->colType, toffset); + toffset += sizeof(SColIdx); } ASSERT(kvLen == memRowTLen(tRow)); } diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index a58303e9fc..e4ff353787 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -25,6 +25,10 @@ #include "tutil.h" #include "tlocale.h" #include "ttimezone.h" +#include "tcompare.h" + +// TSDB +bool tsdbForceKeepFile = false; // cluster char tsFirst[TSDB_EP_LEN] = {0}; @@ -75,6 +79,7 @@ int32_t tsCompressMsgSize = 
-1; // client int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; +int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN; int8_t tsTscEnableRecordSql = 0; // the maximum number of results for projection query on super table that are returned from @@ -84,6 +89,9 @@ int32_t tsMaxNumOfOrderedResults = 100000; // 10 ms for sliding time, the value will changed in case of time precision changed int32_t tsMinSlidingTime = 10; +// the maxinum number of distict query result +int32_t tsMaxNumOfDistinctResults = 1000 * 10000; + // 1 us for interval time range, changed accordingly int32_t tsMinIntervalTime = 1; @@ -541,6 +549,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "maxNumOfDistinctRes"; + cfg.ptr = &tsMaxNumOfDistinctResults; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 10*10000; + cfg.maxValue = 10000*10000; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + cfg.option = "numOfMnodes"; cfg.ptr = &tsNumOfMnodes; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ -984,6 +1002,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_BYTE; taosInitConfigOption(cfg); + cfg.option = "maxWildCardsLength"; + cfg.ptr = &tsMaxWildCardsLen; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = 0; + cfg.maxValue = TSDB_MAX_FIELD_LEN; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_BYTE; + taosInitConfigOption(cfg); + cfg.option = "maxNumOfOrderedRes"; cfg.ptr = &tsMaxNumOfOrderedResults; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ -1531,6 +1559,7 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM); #ifdef TD_TSZ // lossy compress cfg.option = "lossyColumns"; diff --git a/src/connector/go b/src/connector/go index b8f76da4a7..050667e5b4 160000 --- a/src/connector/go +++ b/src/connector/go @@ -1 +1 @@ -Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4 +Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin index 4a4d79099b..32e2c97a4c 160000 --- a/src/connector/grafanaplugin +++ b/src/connector/grafanaplugin @@ -1 +1 @@ -Subproject commit 4a4d79099b076b8ff12d5b4fdbcba54049a6866d +Subproject commit 32e2c97a4cf7bedaa99f5d6dd8cb036e7f4470df diff --git a/src/connector/hivemq-tdengine-extension b/src/connector/hivemq-tdengine-extension index ce52010141..b62a26ecc1 160000 --- a/src/connector/hivemq-tdengine-extension +++ b/src/connector/hivemq-tdengine-extension @@ -1 +1 @@ -Subproject commit ce5201014136503d34fecbd56494b67b4961056c +Subproject commit b62a26ecc164a310104df57691691b237e091c89 diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 907562fe26..fbeeeb56d3 100644 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -113,7 +113,6 @@ **/AppMemoryLeakTest.java - **/AuthenticationTest.java **/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java **/DatetimeBefore1970Test.java **/FailOverTest.java diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index f5f16758c1..521a88b128 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -14,6 +14,8 @@ *****************************************************************************/ package com.taosdata.jdbc; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.sql.*; import java.util.*; import java.util.logging.Logger; @@ -127,6 +129,11 @@ public class TSDBDriver extends AbstractDriver { return null; } + if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER)) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED); + if (!props.containsKey(TSDBDriver.PROPERTY_KEY_PASSWORD)) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED); + try { TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), (String) props.get(PROPERTY_KEY_CHARSET), (String) props.get(PROPERTY_KEY_TIME_ZONE)); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java index d626698663..977ae66515 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java @@ -33,6 +33,8 @@ public class TSDBError { TSDBErrorMap.put(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE, "numeric value out of range"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type in tdengine"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, "user is required"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, "password is required"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error"); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java index 3c44d69be5..2207db6f93 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java @@ -29,6 +29,9 @@ public class TSDBErrorNumbers { public static final int ERROR_UNKNOWN_TIMESTAMP_PRECISION = 0x2316; // unknown timestamp precision public static final int ERROR_RESTFul_Client_Protocol_Exception = 0x2317; public static final int ERROR_RESTFul_Client_IOException = 0x2318; + public static final int ERROR_USER_IS_REQUIRED = 0x2319; // user is required + public static final int ERROR_PASSWORD_IS_REQUIRED = 0x231a; // password is required + public static final int ERROR_UNKNOWN = 0x2350; //unknown error @@ -67,6 +70,8 @@ public class TSDBErrorNumbers { errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE); errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PRECISION); errorNumbers.add(ERROR_RESTFul_Client_IOException); + errorNumbers.add(ERROR_USER_IS_REQUIRED); + errorNumbers.add(ERROR_PASSWORD_IS_REQUIRED); errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 4fdbb308c5..c634fe2e95 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -36,7 +36,6 @@ public class TSDBJNIConnector { static { System.loadLibrary("taos"); - System.out.println("java.library.path:" + 
System.getProperty("java.library.path")); } public boolean isClosed() { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index 9ab67c5502..0a8809e84f 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -7,6 +7,7 @@ import com.taosdata.jdbc.utils.HttpClientPoolUtil; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.sql.*; import java.util.Properties; import java.util.logging.Logger; @@ -40,8 +41,13 @@ public class RestfulDriver extends AbstractDriver { String loginUrl = "http://" + host + ":" + port + "/rest/login/" + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/" + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + ""; try { - String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), "UTF-8"); - String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), "UTF-8"); + if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER)) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED); + if (!props.containsKey(TSDBDriver.PROPERTY_KEY_PASSWORD)) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED); + + String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), StandardCharsets.UTF_8.displayName()); + String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), StandardCharsets.UTF_8.displayName()); loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + ""; } catch (UnsupportedEncodingException e) { e.printStackTrace(); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java index f8acd8f061..a88dc411f3 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java @@ -7,6 +7,7 @@ import com.taosdata.jdbc.AbstractStatement; import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.TSDBError; import com.taosdata.jdbc.TSDBErrorNumbers; +import com.taosdata.jdbc.enums.TimestampFormat; import com.taosdata.jdbc.utils.HttpClientPoolUtil; import com.taosdata.jdbc.utils.SqlSyntaxValidator; @@ -45,9 +46,7 @@ public class RestfulStatement extends AbstractStatement { if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql)) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql); - final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; - - return executeOneUpdate(url, sql); + return executeOneUpdate(sql); } @Override @@ -62,34 +61,25 @@ public class RestfulStatement extends AbstractStatement { public boolean execute(String sql) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - if (!SqlSyntaxValidator.isValidForExecute(sql)) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE, "not a valid sql for execute: " + sql); //如果执行了use操作应该将当前Statement的catalog设置为新的database boolean result = true; - String url = "http://" + conn.getHost() + 
":" + conn.getPort() + "/rest/sql"; - if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("TIMESTAMP")) { - url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt"; - } - if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("UTC")) { - url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc"; - } if (SqlSyntaxValidator.isUseSql(sql)) { - HttpClientPoolUtil.execute(url, sql, this.conn.getToken()); + HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); this.database = sql.trim().replace("use", "").trim(); this.conn.setCatalog(this.database); result = false; } else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) { executeOneQuery(sql); } else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) { - executeOneUpdate(url, sql); + executeOneUpdate(sql); result = false; } else { if (SqlSyntaxValidator.isValidForExecuteQuery(sql)) { - executeQuery(sql); + executeOneQuery(sql); } else { - executeUpdate(sql); + executeOneUpdate(sql); result = false; } } @@ -97,19 +87,25 @@ public class RestfulStatement extends AbstractStatement { return result; } + private String getUrl() throws SQLException { + TimestampFormat timestampFormat = TimestampFormat.valueOf(conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).trim().toUpperCase()); + String url; + switch (timestampFormat) { + case TIMESTAMP: + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt"; + break; + case UTC: + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc"; + break; + default: + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; + } + return url; + } + private ResultSet executeOneQuery(String sql) throws SQLException { - if (!SqlSyntaxValidator.isValidForExecuteQuery(sql)) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql); - // row data - String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; - String timestampFormat = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT); - if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) - url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt"; - if ("UTC".equalsIgnoreCase(timestampFormat)) - url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc"; - - String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken()); + String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); JSONObject resultJson = JSON.parseObject(result); if (resultJson.getString("status").equals("error")) { throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc")); @@ -119,11 +115,8 @@ public class RestfulStatement extends AbstractStatement { return resultSet; } - private int executeOneUpdate(String url, String sql) throws SQLException { - if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql)) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql); - - String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken()); + private int executeOneUpdate(String sql) throws SQLException { + String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); JSONObject jsonObject = JSON.parseObject(result); if (jsonObject.getString("status").equals("error")) { throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc")); @@ -134,7 +127,7 @@ 
public class RestfulStatement extends AbstractStatement { } private int getAffectedRows(JSONObject jsonObject) throws SQLException { - // create ... SQLs should return 0 , and Restful result is this: + // create ... SQLs should return 0 , and Restful result like this: // {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1} JSONArray head = jsonObject.getJSONArray("head"); if (head.size() != 1 || !"affected_rows".equals(head.getString(0))) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java index 0f99ff4f66..cbd806b35a 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java @@ -16,8 +16,7 @@ package com.taosdata.jdbc.utils; public class SqlSyntaxValidator { - private static final String[] SQL = {"select", "insert", "import", "create", "use", "alter", "drop", "set", "show", "describe", "reset"}; - private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set"}; + private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set", "reset"}; private static final String[] querySQL = {"select", "show", "describe"}; private static final String[] databaseUnspecifiedShow = {"databases", "dnodes", "mnodes", "variables"}; @@ -38,14 +37,6 @@ public class SqlSyntaxValidator { return false; } - public static boolean isValidForExecute(String sql) { - for (String prefix : SQL) { - if (sql.trim().toLowerCase().startsWith(prefix)) - return true; - } - return false; - } - public static boolean isDatabaseUnspecifiedQuery(String sql) { for (String databaseObj : databaseUnspecifiedShow) { if (sql.trim().toLowerCase().matches("show\\s+" + databaseObj + ".*")) @@ -63,9 +54,5 @@ public class SqlSyntaxValidator { return sql.trim().toLowerCase().startsWith("use"); } - public static boolean isSelectSql(String sql) { - return sql.trim().toLowerCase().startsWith("select"); - } - } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java index 24c73fdd5c..95307071e1 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java @@ -69,6 +69,8 @@ public class SubscribeTest { @Before public void createDatabase() throws SQLException { Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java index 6702de9bdb..0ea46dade2 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java @@ -1,6 +1,9 @@ package com.taosdata.jdbc.cases; +import com.taosdata.jdbc.TSDBErrorNumbers; +import org.junit.Assert; import org.junit.Before; +import org.junit.Ignore; import org.junit.Test; import 
java.sql.*; @@ -12,6 +15,47 @@ public class AuthenticationTest { private static final String password = "taos?data"; private Connection conn; + @Test + public void connectWithoutUserByJni() { + try { + DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?"); + } catch (SQLException e) { + Assert.assertEquals(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, e.getErrorCode()); + Assert.assertEquals("ERROR (2319): user is required", e.getMessage()); + } + } + + @Test + public void connectWithoutUserByRestful() { + try { + DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?"); + } catch (SQLException e) { + Assert.assertEquals(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, e.getErrorCode()); + Assert.assertEquals("ERROR (2319): user is required", e.getMessage()); + } + } + + @Test + public void connectWithoutPasswordByJni() { + try { + DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root"); + } catch (SQLException e) { + Assert.assertEquals(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, e.getErrorCode()); + Assert.assertEquals("ERROR (231a): password is required", e.getMessage()); + } + } + + @Test + public void connectWithoutPasswordByRestful() { + try { + DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root"); + } catch (SQLException e) { + Assert.assertEquals(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, e.getErrorCode()); + Assert.assertEquals("ERROR (231a): password is required", e.getMessage()); + } + } + + @Ignore @Test public void test() { // change password diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java index e175d6d114..f2b102cfe7 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java @@ -29,6 +29,8 @@ public class BatchInsertTest { public void before() { try { Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java index bc11c7f34e..1297a6b4c4 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java @@ -21,6 +21,8 @@ public class ImportTest { public static void before() { try { Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java index 9f8243542f..ac254bebf3 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java +++ 
b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java @@ -270,6 +270,41 @@ public class InsertSpecialCharacterJniTest { } } + @Ignore + @Test + public void testSingleQuotaEscape() throws SQLException { + final long now = System.currentTimeMillis(); + final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) "; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + // t1 + pstmt.setInt(1, 1); + pstmt.setString(2, tbname2); + pstmt.setString(3, special_character_str_5); + pstmt.setTimestamp(4, new Timestamp(now)); + pstmt.setBytes(5, special_character_str_5.getBytes()); + // t2 + pstmt.setInt(7, 2); + pstmt.setString(8, special_character_str_5); + pstmt.setTimestamp(9, new Timestamp(now)); + pstmt.setString(11, special_character_str_5); + + int ret = pstmt.executeUpdate(); + Assert.assertEquals(2, ret); + } + + String query = "select * from ?.t? where ? < ? and ts >= ? and f1 is not null"; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + pstmt.setString(1, dbName); + pstmt.setInt(2, 1); + pstmt.setString(3, "ts"); + pstmt.setTimestamp(4, new Timestamp(System.currentTimeMillis())); + pstmt.setTimestamp(5, new Timestamp(0)); + + ResultSet rs = pstmt.executeQuery(); + Assert.assertNotNull(rs); + } + } + @Test public void testCase10() throws SQLException { final long now = System.currentTimeMillis(); @@ -293,13 +328,12 @@ public class InsertSpecialCharacterJniTest { Assert.assertEquals(2, ret); } //query t1 - String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null"; + String query = "select * from ?.t? where ts < ? and ts >= ? and f1 is not null"; try (PreparedStatement pstmt = conn.prepareStatement(query)) { pstmt.setString(1, dbName); pstmt.setInt(2, 1); pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis())); pstmt.setTimestamp(4, new Timestamp(0)); - pstmt.setString(5, "f1"); ResultSet rs = pstmt.executeQuery(); rs.next(); @@ -311,12 +345,11 @@ public class InsertSpecialCharacterJniTest { Assert.assertNull(f2); } // query t2 - query = "select * from t? where ts < ? and ts >= ? and ? is not null"; + query = "select * from t? where ts < ? and ts >= ? and f2 is not null"; try (PreparedStatement pstmt = conn.prepareStatement(query)) { pstmt.setInt(1, 2); pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis())); pstmt.setTimestamp(3, new Timestamp(0)); - pstmt.setString(4, "f2"); ResultSet rs = pstmt.executeQuery(); rs.next(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java index 2e981e7f41..eedccec6f1 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java @@ -293,13 +293,12 @@ public class InsertSpecialCharacterRestfulTest { Assert.assertEquals(2, ret); } //query t1 - String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null"; + String query = "select * from ?.t? where ts < ? and ts >= ? 
and f1 is not null"; try (PreparedStatement pstmt = conn.prepareStatement(query)) { pstmt.setString(1, dbName); pstmt.setInt(2, 1); pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis())); pstmt.setTimestamp(4, new Timestamp(0)); - pstmt.setString(5, "f1"); ResultSet rs = pstmt.executeQuery(); rs.next(); @@ -311,12 +310,11 @@ public class InsertSpecialCharacterRestfulTest { Assert.assertNull(f2); } // query t2 - query = "select * from t? where ts < ? and ts >= ? and ? is not null"; + query = "select * from t? where ts < ? and ts >= ? and f2 is not null"; try (PreparedStatement pstmt = conn.prepareStatement(query)) { pstmt.setInt(1, 2); pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis())); pstmt.setTimestamp(3, new Timestamp(0)); - pstmt.setString(4, "f2"); ResultSet rs = pstmt.executeQuery(); rs.next(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java index 3fea221446..b4449491a9 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java @@ -22,6 +22,8 @@ public class QueryDataTest { public void createDatabase() { try { Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java index 4eebbd08e8..48753d62f0 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java @@ -1,51 +1,49 @@ package com.taosdata.jdbc.cases; -import com.taosdata.jdbc.TSDBDriver; -import org.junit.After; -import org.junit.Before; import org.junit.Test; -import java.sql.*; -import java.util.Properties; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; public class ResetQueryCacheTest { - static Connection connection; - static Statement statement; - static String host = "127.0.0.1"; + @Test + public void jni() throws SQLException { + // given + Connection connection = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:0/?user=root&password=taosdata&timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8"); + Statement statement = connection.createStatement(); - @Before - public void init() { - try { - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); - statement = connection.createStatement(); - } catch (SQLException e) { - return; - } + // when + boolean execute = statement.execute("reset query cache"); + + // then + assertFalse(execute); + assertEquals(0, statement.getUpdateCount()); + + statement.close(); + 
connection.close(); } @Test - public void testResetQueryCache() throws SQLException { - String resetSql = "reset query cache"; - statement.execute(resetSql); - } + public void restful() throws SQLException { + // given + Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata&timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8"); + Statement statement = connection.createStatement(); - @After - public void close() { - try { - if (statement != null) - statement.close(); - if (connection != null) - connection.close(); - } catch (SQLException e) { - e.printStackTrace(); - } + // when + boolean execute = statement.execute("reset query cache"); + + // then + assertFalse(execute); + assertEquals(0, statement.getUpdateCount()); + + statement.close(); + connection.close(); } } \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java index 0022ceaf21..b51c0309be 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java @@ -20,6 +20,8 @@ public class SelectTest { public void createDatabaseAndTable() { try { Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java index 1600fec13d..8e10743e5e 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java @@ -24,6 +24,8 @@ public class StableTest { public static void createDatabase() { try { Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java deleted file mode 100644 index ccb0941da0..0000000000 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java +++ /dev/null @@ -1,21 +0,0 @@ -package com.taosdata.jdbc.utils; - -import org.junit.Assert; -import org.junit.Test; - -public class SqlSyntaxValidatorTest { - - @Test - public void isSelectSQL() { - Assert.assertTrue(SqlSyntaxValidator.isSelectSql("select * from test.weather")); - Assert.assertTrue(SqlSyntaxValidator.isSelectSql(" select * from test.weather")); - Assert.assertTrue(SqlSyntaxValidator.isSelectSql(" select * from test.weather ")); - Assert.assertFalse(SqlSyntaxValidator.isSelectSql("insert into test.weather values(now, 1.1, 2)")); - } - - @Test - public void isUseSQL() { - Assert.assertTrue(SqlSyntaxValidator.isUseSql("use database test")); - } - -} \ No newline at end of file diff --git 
a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js index 03d27e5593..5ba2739c35 100644 --- a/src/connector/nodejs/nodetaos/cinterface.js +++ b/src/connector/nodejs/nodetaos/cinterface.js @@ -109,6 +109,24 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) return res; } +function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + + let currOffset = 0; + while (currOffset < data.length) { + let len = data.readIntLE(currOffset, 2); + let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column; + if (dataEntry[0] == 255) { + res.push(null) + } else { + res.push(dataEntry.toString("utf-8")); + } + currOffset += nbytes; + } + return res; +} + function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; @@ -117,7 +135,11 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) while (currOffset < data.length) { let len = data.readIntLE(currOffset, 2); let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column; - res.push(dataEntry.toString("utf-8")); + if (dataEntry[0] == 255 && dataEntry[1] == 255) { + res.push(null) + } else { + res.push(dataEntry.toString("utf-8")); + } currOffset += nbytes; } return res; @@ -132,7 +154,7 @@ let convertFunctions = { [FieldTypes.C_BIGINT]: convertBigint, [FieldTypes.C_FLOAT]: convertFloat, [FieldTypes.C_DOUBLE]: convertDouble, - [FieldTypes.C_BINARY]: convertNchar, + [FieldTypes.C_BINARY]: convertBinary, [FieldTypes.C_TIMESTAMP]: convertTimestamp, [FieldTypes.C_NCHAR]: convertNchar } diff --git a/src/connector/nodejs/nodetaos/taosobjects.js b/src/connector/nodejs/nodetaos/taosobjects.js index 0fc8dc8ef1..3bc0fe0aca 100644 --- a/src/connector/nodejs/nodetaos/taosobjects.js +++ b/src/connector/nodejs/nodetaos/taosobjects.js @@ -47,7 +47,8 @@ class TaosTimestamp extends Date { super(Math.floor(date / 1000)); this.precisionExtras = date % 1000; } else if (precision === 2) { - super(parseInt(date / 1000000)); + // use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000 which not expected + super(parseInt(BigInt(date) / 1000000n)); // use BigInt to fix: 1625801548423914405 % 1000000 = 914496 which not expected (914405) this.precisionExtras = parseInt(BigInt(date) % 1000000n); } else { diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json index db37318a16..6a2c66100b 100644 --- a/src/connector/nodejs/package.json +++ b/src/connector/nodejs/package.json @@ -1,6 +1,6 @@ { "name": "td2.0-connector", - "version": "2.0.9", + "version": "2.0.10", "description": "A Node.js connector for TDengine.", "main": "tdengine.js", "directories": { diff --git a/src/connector/nodejs/test/testnchar.js b/src/connector/nodejs/test/testnchar.js new file mode 100644 index 0000000000..68fad89c22 --- /dev/null +++ b/src/connector/nodejs/test/testnchar.js @@ -0,0 +1,33 @@ +const taos = require('../tdengine'); +var conn = taos.connect({ host: "localhost" }); +var c1 = conn.cursor(); + + +function checkData(data, row, col, expect) { + let checkdata = data[row][col]; + if (checkdata == expect) { + // console.log('check pass') + } + else { + console.log('check failed, expect ' + expect + ', but is ' + checkdata) + } +} + +c1.execute('drop database if exists 
testnodejsnchar') +c1.execute('create database testnodejsnchar') +c1.execute('use testnodejsnchar'); +c1.execute('create table tb (ts timestamp, value float, text binary(200))') +c1.execute("insert into tb values('2021-06-10 00:00:00', 24.7, '中文10000000000000000000000');") - +c1.execute('insert into tb values(1623254400150, 24.7, NULL);') +c1.execute('import into tb values(1623254400300, 24.7, "中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000");') +sql = 'select * from tb;' + +console.log('*******************************************') + +c1.execute(sql); +data = c1.fetchall(); +console.log(data) +//check data about insert data +checkData(data, 0, 2, '中文10000000000000000000000') +checkData(data, 1, 2, null) +checkData(data, 2, 2, '中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000') \ No newline at end of file diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index eac04fe7bb..5291fb73a0 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -166,7 +166,6 @@ int32_t dnodeInitSystem() { taosInitGlobalCfg(); taosReadGlobalLogCfg(); taosSetCoreDump(); - taosInitNotes(); dnodeInitTmr(); if (dnodeCreateDir(tsLogDir) < 0) { @@ -188,6 +187,8 @@ int32_t dnodeInitSystem() { dInfo("start to initialize TDengine"); + taosInitNotes(); + if (dnodeInitComponents() != 0) { return -1; } diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index ee37ffdcbb..2f77788025 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -42,6 +42,8 @@ int32_t main(int32_t argc, char *argv[]) { } } else if (strcmp(argv[i], "-C") == 0) { dump_config = 1; + } else if (strcmp(argv[i], "--force-keep-file") == 0) { + tsdbForceKeepFile = true; } else if (strcmp(argv[i], "--compact-mnode-wal") == 0) { tsCompactMnodeWal = 1; } else if (strcmp(argv[i], "-V") == 0) { diff --git a/src/inc/twal.h b/src/inc/twal.h index bce398d6f9..868a1fbd78 100644 --- a/src/inc/twal.h +++ b/src/inc/twal.h @@ -32,7 +32,7 @@ typedef enum { typedef struct { int8_t msgType; - int8_t sver; + int8_t sver; // sver 2 for WAL SDataRow/SMemRow compatibility int8_t reserved[2]; int32_t len; uint64_t version; diff --git a/src/kit/shell/src/shellCheck.c b/src/kit/shell/src/shellCheck.c index d78f1a6b99..7fc8b1409a 100644 --- a/src/kit/shell/src/shellCheck.c +++ b/src/kit/shell/src/shellCheck.c @@ -72,12 +72,13 @@ static int32_t shellShowTables(TAOS *con, char *db) { int32_t tbIndex = tbNum++; if (tbMallocNum < tbNum) { tbMallocNum = (tbMallocNum * 2 + 1); - tbNames = realloc(tbNames, tbMallocNum * sizeof(char *)); - if (tbNames == NULL) { + char** tbNames1 = realloc(tbNames, tbMallocNum * sizeof(char *)); + if (tbNames1 == NULL) { fprintf(stdout, "failed to malloc tablenames, num:%d\n", tbMallocNum); code = TSDB_CODE_TSC_OUT_OF_MEMORY; break; } + tbNames = tbNames1; } tbNames[tbIndex] = malloc(TSDB_TABLE_NAME_LEN); diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c index 4dcd8b3d50..5ca4537aeb 100644 --- a/src/kit/shell/src/shellDarwin.c +++ b/src/kit/shell/src/shellDarwin.c @@ -64,6 +64,10 @@ void printHelp() { exit(EXIT_SUCCESS); } +char DARWINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved.\n\n"; +char g_password[MAX_PASSWORD_SIZE]; + void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { wordexp_t full_path; for (int i = 1; i < argc; i++) { @@ -77,10 +81,21 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } // for password - else if (strcmp(argv[i], "-p") == 0) { - arguments->is_use_passwd = true; + else if (strncmp(argv[i], "-p", 2) == 0) { + strcpy(tsOsName, "Darwin"); + printf(DARWINCLIENT_VERSION, tsOsName, taos_get_client_info()); + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + if (scanf("%s", g_password) > 1) { + fprintf(stderr, "password read error\n"); + } + getchar(); + } else { + tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + arguments->password = g_password; } - // for management port + // for management port else if (strcmp(argv[i], "-P") == 0) { if (i < argc - 1) { arguments->port = atoi(argv[++i]); @@ -98,7 +113,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-c") == 0) { - if (i < argc - 1) { + if (i < argc - 1) { if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) { fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1); exit(EXIT_FAILURE); diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 58f4b7ff02..51a25d59c4 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -65,7 +65,15 @@ extern TAOS *taos_connect_auth(const char *ip, const char *user, const char *aut */ TAOS *shellInit(SShellArguments *_args) { printf("\n"); - printf(CLIENT_VERSION, tsOsName, taos_get_client_info()); + if (!_args->is_use_passwd) { +#ifdef TD_WINDOWS + strcpy(tsOsName, "Windows"); +#elif defined(TD_DARWIN) + strcpy(tsOsName, "Darwin"); +#endif + printf(CLIENT_VERSION, tsOsName, taos_get_client_info()); + } + fflush(stdout); // set options before initializing @@ -73,9 +81,7 @@ TAOS *shellInit(SShellArguments *_args) { taos_options(TSDB_OPTION_TIMEZONE, _args->timezone); } - if (_args->is_use_passwd) { - if (_args->password == NULL) _args->password = getpass("Enter password: "); - } else { + if (!_args->is_use_passwd) { _args->password = TSDB_DEFAULT_PASS; } @@ -170,7 +176,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) { system("clear"); return 0; } - + if (regex_match(command, "^[\t ]*set[ \t]+max_binary_display_width[ \t]+(default|[1-9][0-9]*)[ \t;]*$", REG_EXTENDED | REG_ICASE)) { strtok(command, " \t"); strtok(NULL, " \t"); @@ -182,7 +188,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) { } return 0; } - + if (regex_match(command, "^[ \t]*source[\t ]+[^ ]+[ \t;]*$", REG_EXTENDED | REG_ICASE)) { /* If source file. 
*/ char *c_ptr = strtok(command, " ;"); @@ -247,7 +253,7 @@ int32_t shellRunCommand(TAOS* con, char* command) { esc = false; continue; } - + if (c == '\\') { esc = true; continue; @@ -336,8 +342,8 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { } if (!tscIsUpdateQuery(pSql)) { // select and show kinds of commands - int error_no = 0; - + int error_no = 0; + int numOfRows = shellDumpResult(pSql, fname, &error_no, printMode); if (numOfRows < 0) { atomic_store_64(&result, 0); @@ -530,7 +536,7 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) { fprintf(fp, "%s", fields[col].name); } fputc('\n', fp); - + int numOfRows = 0; do { int32_t* length = taos_fetch_lengths(tres); @@ -716,7 +722,7 @@ static int verticalPrintResult(TAOS_RES* tres) { int numOfRows = 0; int showMore = 1; - do { + do { if (numOfRows < resShowMaxNum) { printf("*************************** %d.row ***************************\n", numOfRows + 1); @@ -851,7 +857,7 @@ static int horizontalPrintResult(TAOS_RES* tres) { int numOfRows = 0; int showMore = 1; - + do { int32_t* length = taos_fetch_lengths(tres); if (numOfRows < resShowMaxNum) { @@ -867,7 +873,7 @@ static int horizontalPrintResult(TAOS_RES* tres) { printf("[You can add limit statement to show more or redirect results to specific file to get all.]\n"); showMore = 0; } - + numOfRows++; row = taos_fetch_row(tres); } while(row != NULL); @@ -909,7 +915,7 @@ void read_history() { if (errno != ENOENT) { fprintf(stderr, "Failed to open file %s, reason:%s\n", f_history, strerror(errno)); } -#endif +#endif return; } @@ -934,9 +940,9 @@ void write_history() { FILE *f = fopen(f_history, "w"); if (f == NULL) { -#ifndef WINDOWS +#ifndef WINDOWS fprintf(stderr, "Failed to open file %s for write, reason:%s\n", f_history, strerror(errno)); -#endif +#endif return; } @@ -982,13 +988,13 @@ void source_file(TAOS *con, char *fptr) { /* if (access(fname, F_OK) != 0) { fprintf(stderr, "ERROR: file %s is not exist\n", fptr); - + wordfree(&full_path); free(cmd); return; } */ - + FILE *f = fopen(fname, "r"); if (f == NULL) { fprintf(stderr, "ERROR: failed to open file %s\n", fname); diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index dc74f6fcaa..d051d3535e 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -34,7 +34,7 @@ static char doc[] = ""; static char args_doc[] = ""; static struct argp_option options[] = { {"host", 'h', "HOST", 0, "TDengine server FQDN to connect. The default host is localhost."}, - {"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."}, + {"password", 'p', 0, 0, "The password to use when connecting to the server."}, {"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."}, {"user", 'u', "USER", 0, "The user name to use when connecting to the server."}, {"auth", 'A', "Auth", 0, "The auth string to use when connecting to the server."}, @@ -63,8 +63,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { arguments->host = arg; break; case 'p': - arguments->is_use_passwd = true; - if (arg) arguments->password = arg; break; case 'P': if (arg) { @@ -160,12 +158,41 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Our argp parser. */ static struct argp argp = {options, parse_opt, args_doc, doc}; +char LINUXCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved.\n\n"; +char g_password[MAX_PASSWORD_SIZE]; + +static void parse_password( + int argc, char *argv[], SShellArguments *arguments) { + for (int i = 1; i < argc; i++) { + if (strncmp(argv[i], "-p", 2) == 0) { + strcpy(tsOsName, "Linux"); + printf(LINUXCLIENT_VERSION, tsOsName, taos_get_client_info()); + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + if (scanf("%20s", g_password) > 1) { + fprintf(stderr, "password reading error\n"); + } + getchar(); + } else { + tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + arguments->password = g_password; + arguments->is_use_passwd = true; + } + } +} + void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { static char verType[32] = {0}; sprintf(verType, "version: %s\n", version); argp_program_version = verType; - + + if (argc > 1) { + parse_password(argc, argv, arguments); + } + argp_parse(&argp, argc, argv, 0, 0, arguments); if (arguments->abort) { #ifndef _ALPINE diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index 0c70386061..4e00b0d8ff 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -71,7 +71,9 @@ int checkVersion() { // Global configurations SShellArguments args = { .host = NULL, +#ifndef TD_WINDOWS .password = NULL, +#endif .user = NULL, .database = NULL, .timezone = NULL, diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 87d11a3516..bf9afe4b80 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -19,6 +19,9 @@ extern char configDir[]; +char WINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n"; + void printVersion() { printf("version: %s\n", version); } @@ -61,6 +64,8 @@ void printHelp() { exit(EXIT_SUCCESS); } +char g_password[MAX_PASSWORD_SIZE]; + void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { for (int i = 1; i < argc; i++) { // for host @@ -73,11 +78,20 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } // for password - else if (strcmp(argv[i], "-p") == 0) { - arguments->is_use_passwd = true; - if (i < argc - 1 && argv[i + 1][0] != '-') { - arguments->password = argv[++i]; - } + else if (strncmp(argv[i], "-p", 2) == 0) { + arguments->is_use_passwd = true; + strcpy(tsOsName, "Windows"); + printf(WINCLIENT_VERSION, tsOsName, taos_get_client_info()); + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + if (scanf("%s", g_password) > 1) { + fprintf(stderr, "password read error!\n"); + } + getchar(); + } else { + tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + arguments->password = g_password; } // for management port else if (strcmp(argv[i], "-P") == 0) { @@ -104,7 +118,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-c") == 0) { - if (i < argc - 1) { + if (i < argc - 1) { char *tmp = argv[++i]; if (strlen(tmp) >= TSDB_FILENAME_LEN) { fprintf(stderr, "config file path: %s overflow max len %d\n", tmp, TSDB_FILENAME_LEN - 1); @@ -265,7 +279,7 @@ void *shellLoopQuery(void *arg) { if (command == NULL) return NULL; int32_t err = 0; - + do { memset(command, 0, MAX_COMMAND_SIZE); shellPrintPrompt(); @@ -274,7 +288,7 @@ void *shellLoopQuery(void *arg) { err = shellReadCommand(con, command); if (err) { break; - } + } } while (shellRunCommand(con, command) == 0); return NULL; diff 
--git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index f327a8a585..9a7d3cd25c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -77,7 +77,7 @@ extern char configDir[]; #define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS) #define MAX_USERNAME_SIZE 64 -#define MAX_PASSWORD_SIZE 64 +#define MAX_PASSWORD_SIZE 16 #define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html #define MAX_TB_NAME_SIZE 64 #define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space @@ -216,7 +216,7 @@ typedef struct SArguments_S { uint16_t port; uint16_t iface; char * user; - char * password; + char password[MAX_PASSWORD_SIZE]; char * database; int replica; char * tb_prefix; @@ -246,7 +246,6 @@ typedef struct SArguments_S { uint32_t disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms, us or ns. accordig to database precision uint32_t method_of_delete; - char ** arg_list; uint64_t totalInsertRows; uint64_t totalAffectedRows; bool demo_mode; // use default column name and semi-random data @@ -296,6 +295,9 @@ typedef struct SSuperTable_S { uint64_t lenOfTagOfOneRow; char* sampleDataBuf; +#if STMT_IFACE_ENABLED == 1 + char* sampleBindArray; +#endif //int sampleRowCount; //int sampleUsePos; @@ -442,6 +444,7 @@ typedef struct SQueryMetaInfo_S { typedef struct SThreadInfo_S { TAOS * taos; TAOS_STMT *stmt; + int64_t *bind_ts; int threadID; char db_name[TSDB_DB_NAME_LEN]; uint32_t time_precision; @@ -455,7 +458,7 @@ typedef struct SThreadInfo_S { int64_t start_time; char* cols; bool use_metric; - SSuperTable* superTblInfo; + SSuperTable* stbInfo; char *buffer; // sql cmd buffer // for async insert @@ -623,7 +626,7 @@ SArguments g_args = { "INT", // datatype "FLOAT", // datatype. DEFAULT_DATATYPE_NUM is 3 }, - 16, // len_of_binary + 64, // len_of_binary 4, // num_of_CPR 10, // num_of_connections/thread 0, // insert_interval @@ -638,7 +641,6 @@ SArguments g_args = { 0, // disorderRatio 1000, // disorderRange 1, // method_of_delete - NULL, // arg_list 0, // totalInsertRows; 0, // totalAffectedRows; true, // demo_mode; @@ -676,7 +678,7 @@ static FILE * g_fpOfInsertResult = NULL; /////////////////////////////////////////////////// -static void ERROR_EXIT(const char *msg) { perror(msg); exit(-1); } +static void ERROR_EXIT(const char *msg) { errorPrint("%s", msg); exit(-1); } #ifndef TAOSDEMO_COMMIT_SHA1 #define TAOSDEMO_COMMIT_SHA1 "unknown" @@ -711,24 +713,24 @@ static void printHelp() { printf("%s%s%s%s\n", indent, "-u", indent, "The TDengine user name to use when connecting to the server. Default is 'root'."); #ifdef _TD_POWER_ - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The password to use when connecting to the server. Default is 'powerdb'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/power/'."); #elif (_TD_TQ_ == true) - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The password to use when connecting to the server. Default is 'tqueue'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/tq/'."); #else - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The password to use when connecting to the server. Default is 'taosdata'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. 
Default is '/etc/taos/'."); #endif printf("%s%s%s%s\n", indent, "-h", indent, "The host to connect to TDengine. Default is localhost."); - printf("%s%s%s%s\n", indent, "-p", indent, + printf("%s%s%s%s\n", indent, "-P", indent, "The TCP/IP port number to use for the connection. Default is 0."); printf("%s%s%s%s\n", indent, "-I", indent, #if STMT_IFACE_ENABLED == 1 @@ -828,11 +830,11 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->host = argv[++i]; - } else if (strcmp(argv[i], "-p") == 0) { + } else if (strcmp(argv[i], "-P") == 0) { if ((argc == i+1) || (!isStringNumber(argv[i+1]))) { printHelp(); - errorPrint("%s", "\n\t-p need a number following!\n"); + errorPrint("%s", "\n\t-P need a number following!\n"); exit(EXIT_FAILURE); } arguments->port = atoi(argv[++i]); @@ -862,13 +864,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->user = argv[++i]; - } else if (strcmp(argv[i], "-P") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-P need a valid string following!\n"); - exit(EXIT_FAILURE); + } else if (strncmp(argv[i], "-p", 2) == 0) { + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + taosSetConsoleEcho(false); + if (scanf("%s", arguments->password) > 1) { + fprintf(stderr, "password read error!\n"); + } + taosSetConsoleEcho(true); + } else { + tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); } - arguments->password = argv[++i]; } else if (strcmp(argv[i], "-o") == 0) { if (argc == i+1) { printHelp(); @@ -1010,6 +1016,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->datatype[0] = argv[i]; + arguments->datatype[1] = NULL; } else { // more than one col int index = 0; @@ -1066,7 +1073,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->debug_print = true; } else if (strcmp(argv[i], "-gg") == 0) { arguments->verbose_print = true; - } else if (strcmp(argv[i], "-pp") == 0) { + } else if (strcmp(argv[i], "-PP") == 0) { arguments->performance_print = true; } else if (strcmp(argv[i], "-O") == 0) { if ((argc == i+1) || @@ -1137,8 +1144,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } if (0 == columnCount) { - perror("data type error!"); - exit(-1); + ERROR_EXIT("data type error!"); } g_args.num_of_CPR = columnCount; @@ -1338,8 +1344,8 @@ static char *rand_bool_str(){ static int32_t rand_bool(){ static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor] % 2; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND] % 2; } static char *rand_tinyint_str() @@ -1354,8 +1360,8 @@ static int32_t rand_tinyint() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor] % 128; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND] % 128; } static char *rand_smallint_str() @@ -1370,8 +1376,8 @@ static int32_t rand_smallint() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor] % 32767; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND] % 32767; } static char *rand_int_str() @@ -1386,8 +1392,8 @@ static int32_t rand_int() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor]; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; 
+ return g_randint[cursor % MAX_PREPARED_RAND]; } static char *rand_bigint_str() @@ -1402,8 +1408,8 @@ static int64_t rand_bigint() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randbigint[cursor]; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randbigint[cursor % MAX_PREPARED_RAND]; } static char *rand_float_str() @@ -1419,8 +1425,8 @@ static float rand_float() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randfloat[cursor]; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randfloat[cursor % MAX_PREPARED_RAND]; } static char *demo_current_float_str() @@ -1435,8 +1441,9 @@ static float UNUSED_FUNC demo_current_float() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return (float)(9.8 + 0.04 * (g_randint[cursor] % 10) + g_randfloat[cursor]/1000000000); + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return (float)(9.8 + 0.04 * (g_randint[cursor % MAX_PREPARED_RAND] % 10) + + g_randfloat[cursor % MAX_PREPARED_RAND]/1000000000); } static char *demo_voltage_int_str() @@ -1451,8 +1458,8 @@ static int32_t UNUSED_FUNC demo_voltage_int() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return 215 + g_randint[cursor] % 10; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return 215 + g_randint[cursor % MAX_PREPARED_RAND] % 10; } static char *demo_phase_float_str() { @@ -1465,8 +1472,9 @@ static char *demo_phase_float_str() { static float UNUSED_FUNC demo_phase_float(){ static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return (float)((115 + g_randint[cursor] % 10 + g_randfloat[cursor]/1000000000)/360); + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return (float)((115 + g_randint[cursor % MAX_PREPARED_RAND] % 10 + + g_randfloat[cursor % MAX_PREPARED_RAND]/1000000000)/360); } #if 0 @@ -2320,15 +2328,15 @@ static void printfDbInfoForQueryToFile( } static void printfQuerySystemInfo(TAOS * taos) { - char filename[BUFFER_SIZE+1] = {0}; - char buffer[BUFFER_SIZE+1] = {0}; + char filename[MAX_FILE_NAME_LEN] = {0}; + char buffer[1024] = {0}; TAOS_RES* res; time_t t; struct tm* lt; time(&t); lt = localtime(&t); - snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d", + snprintf(filename, MAX_FILE_NAME_LEN, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec); @@ -2360,12 +2368,12 @@ static void printfQuerySystemInfo(TAOS * taos) { printfDbInfoForQueryToFile(filename, dbInfos[i], i); // show db.vgroups - snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name); + snprintf(buffer, 1024, "show %s.vgroups;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); // show db.stables - snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name); + snprintf(buffer, 1024, "show %s.stables;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); free(dbInfos[i]); @@ -2426,14 +2434,14 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port #endif debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd); free(request_buf); - ERROR_EXIT("ERROR opening socket"); + ERROR_EXIT("opening socket"); } int retConn = connect(sockfd, (struct sockaddr *)pServAddr, sizeof(struct sockaddr)); debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn); if (retConn < 0) { free(request_buf); - ERROR_EXIT("ERROR connecting"); + 
ERROR_EXIT("connecting"); } memset(base64_buf, 0, INPUT_BUF_LEN); @@ -2466,7 +2474,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port auth, strlen(sqlstr), sqlstr); if (r >= req_buf_len) { free(request_buf); - ERROR_EXIT("ERROR too long request"); + ERROR_EXIT("too long request"); } verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf); @@ -2479,7 +2487,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port bytes = write(sockfd, request_buf + sent, req_str_len - sent); #endif if (bytes < 0) - ERROR_EXIT("ERROR writing message to socket"); + ERROR_EXIT("writing message to socket"); if (bytes == 0) break; sent+=bytes; @@ -2496,7 +2504,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port #endif if (bytes < 0) { free(request_buf); - ERROR_EXIT("ERROR reading response from socket"); + ERROR_EXIT("reading response from socket"); } if (bytes == 0) break; @@ -2505,7 +2513,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port if (received == resp_len) { free(request_buf); - ERROR_EXIT("ERROR storing complete response from socket"); + ERROR_EXIT("storing complete response from socket"); } response_buf[RESP_BUF_LEN - 1] = '\0'; @@ -2594,7 +2602,7 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) { if ((g_args.demo_mode) && (i == 0)) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64",", tableSeq % 10); + "%"PRId64",", (tableSeq % 10) + 1); } else { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, @@ -2668,8 +2676,8 @@ static int calcRowLen(SSuperTable* superTbls) { } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { lenOfOneRow += TIMESTAMP_BUFF_LEN; } else { - printf("get error data type : %s\n", dataType); - exit(-1); + errorPrint("get error data type : %s\n", dataType); + exit(EXIT_FAILURE); } } @@ -2699,8 +2707,8 @@ static int calcRowLen(SSuperTable* superTbls) { } else if (strcasecmp(dataType, "DOUBLE") == 0) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; } else { - printf("get error tag type : %s\n", dataType); - exit(-1); + errorPrint("get error tag type : %s\n", dataType); + exit(EXIT_FAILURE); } } @@ -2714,7 +2722,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) { - char command[BUFFER_SIZE] = "\0"; + char command[1024] = "\0"; char limitBuf[100] = "\0"; TAOS_RES * res; @@ -2728,7 +2736,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, } //get all child table name use cmd: select tbname from superTblName; - snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s", + snprintf(command, 1024, "select tbname from %s.%s %s", dbName, sTblName, limitBuf); res = taos_query(taos, command); @@ -2738,7 +2746,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, taos_close(taos); errorPrint("%s() LN%d, failed to run command %s\n", __func__, __LINE__, command); - exit(-1); + exit(EXIT_FAILURE); } int64_t childTblCount = (limit < 0)?10000:limit; @@ -2749,7 +2757,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, taos_free_result(res); taos_close(taos); errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } } @@ -2760,7 +2768,7 @@ static int 
getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, if (0 == strlen((char *)row[0])) { errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n", __func__, __LINE__, count); - exit(-1); + exit(EXIT_FAILURE); } tstrncpy(pTblName, (char *)row[0], len[0]+1); @@ -2776,12 +2784,12 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); } else { // exit, if allocate more memory failed - errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", - __func__, __LINE__, dbName, sTblName); tmfree(childTblName); taos_free_result(res); taos_close(taos); - exit(-1); + errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", + __func__, __LINE__, dbName, sTblName); + exit(EXIT_FAILURE); } } pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; @@ -2806,13 +2814,13 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* superTbls) { - char command[BUFFER_SIZE] = "\0"; + char command[1024] = "\0"; TAOS_RES * res; TAOS_ROW row = NULL; int count = 0; //get schema use cmd: describe superTblName; - snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName); + snprintf(command, 1024, "describe %s.%s", dbName, superTbls->sTblName); res = taos_query(taos, command); int32_t code = taos_errno(res); if (code != 0) { @@ -2892,7 +2900,8 @@ static int createSuperTable( TAOS * taos, char* dbName, SSuperTable* superTbl) { - char command[BUFFER_SIZE] = "\0"; + char *command = calloc(1, BUFFER_SIZE); + assert(command); char cols[COL_BUFFER_LEN] = "\0"; int colIndex; @@ -2903,6 +2912,7 @@ static int createSuperTable( if (superTbl->columnCount == 0) { errorPrint("%s() LN%d, super table column count is %d\n", __func__, __LINE__, superTbl->columnCount); + free(command); return -1; } @@ -2963,9 +2973,10 @@ static int createSuperTable( lenOfOneRow += TIMESTAMP_BUFF_LEN; } else { taos_close(taos); + free(command); errorPrint("%s() LN%d, config error data type : %s\n", __func__, __LINE__, dataType); - exit(-1); + exit(EXIT_FAILURE); } } @@ -2974,10 +2985,11 @@ static int createSuperTable( // save for creating child table superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1); if (NULL == superTbl->colsOfCreateChildTable) { + taos_close(taos); + free(command); errorPrint("%s() LN%d, Failed when calloc, size:%d", __func__, __LINE__, len+1); - taos_close(taos); - exit(-1); + exit(EXIT_FAILURE); } snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); @@ -2987,6 +2999,7 @@ static int createSuperTable( if (superTbl->tagCount == 0) { errorPrint("%s() LN%d, super table tag count is %d\n", __func__, __LINE__, superTbl->tagCount); + free(command); return -1; } @@ -3050,9 +3063,10 @@ static int createSuperTable( lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; } else { taos_close(taos); + free(command); errorPrint("%s() LN%d, config error tag type : %s\n", __func__, __LINE__, dataType); - exit(-1); + exit(EXIT_FAILURE); } } @@ -3067,13 +3081,16 @@ static int createSuperTable( if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { errorPrint( "create supertable %s failed!\n\n", superTbl->sTblName); + free(command); return -1; } + debugPrint("create supertable %s success!\n\n", superTbl->sTblName); + free(command); return 0; } -static int createDatabasesAndStables() { +int createDatabasesAndStables(char *command) { TAOS * taos = NULL; int ret = 0; taos = 
taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); @@ -3081,7 +3098,6 @@ static int createDatabasesAndStables() { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); return -1; } - char command[BUFFER_SIZE] = "\0"; for (int i = 0; i < g_Dbs.dbCount; i++) { if (g_Dbs.db[i].drop) { @@ -3093,35 +3109,43 @@ static int createDatabasesAndStables() { int dataLen = 0; dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName); + BUFFER_SIZE - dataLen, "create database if not exists %s", + g_Dbs.db[i].dbName); if (g_Dbs.db[i].dbCfg.blocks > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks); + BUFFER_SIZE - dataLen, " blocks %d", + g_Dbs.db[i].dbCfg.blocks); } if (g_Dbs.db[i].dbCfg.cache > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache); + BUFFER_SIZE - dataLen, " cache %d", + g_Dbs.db[i].dbCfg.cache); } if (g_Dbs.db[i].dbCfg.days > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days); + BUFFER_SIZE - dataLen, " days %d", + g_Dbs.db[i].dbCfg.days); } if (g_Dbs.db[i].dbCfg.keep > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep); + BUFFER_SIZE - dataLen, " keep %d", + g_Dbs.db[i].dbCfg.keep); } if (g_Dbs.db[i].dbCfg.quorum > 1) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum); + BUFFER_SIZE - dataLen, " quorum %d", + g_Dbs.db[i].dbCfg.quorum); } if (g_Dbs.db[i].dbCfg.replica > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica); + BUFFER_SIZE - dataLen, " replica %d", + g_Dbs.db[i].dbCfg.replica); } if (g_Dbs.db[i].dbCfg.update > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update); + BUFFER_SIZE - dataLen, " update %d", + g_Dbs.db[i].dbCfg.update); } //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { // dataLen += snprintf(command + dataLen, @@ -3129,42 +3153,48 @@ static int createDatabasesAndStables() { //} if (g_Dbs.db[i].dbCfg.minRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows); + BUFFER_SIZE - dataLen, " minrows %d", + g_Dbs.db[i].dbCfg.minRows); } if (g_Dbs.db[i].dbCfg.maxRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows); + BUFFER_SIZE - dataLen, " maxrows %d", + g_Dbs.db[i].dbCfg.maxRows); } if (g_Dbs.db[i].dbCfg.comp > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp); + BUFFER_SIZE - dataLen, " comp %d", + g_Dbs.db[i].dbCfg.comp); } if (g_Dbs.db[i].dbCfg.walLevel > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel); + BUFFER_SIZE - dataLen, " wal %d", + g_Dbs.db[i].dbCfg.walLevel); } if (g_Dbs.db[i].dbCfg.cacheLast > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast); + BUFFER_SIZE - dataLen, " cachelast %d", + g_Dbs.db[i].dbCfg.cacheLast); } if (g_Dbs.db[i].dbCfg.fsync > 0) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " fsync %d", g_Dbs.db[i].dbCfg.fsync); } - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", strlen("ms"))) + if ((0 == 
strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) #if NANO_SECOND_ENABLED == 1 || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "ns", strlen("ns"))) + "ns", 2)) #endif || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "us", strlen("us")))) { + "us", 2))) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); } if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { taos_close(taos); - errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); + errorPrint( "\ncreate database %s failed!\n\n", + g_Dbs.db[i].dbName); return -1; } printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); @@ -3211,7 +3241,7 @@ static int createDatabasesAndStables() { static void* createTable(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; setThreadName("createTable"); @@ -3222,7 +3252,7 @@ static void* createTable(void *sarg) pThreadInfo->buffer = calloc(buff_len, 1); if (pThreadInfo->buffer == NULL) { errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } int len = 0; @@ -3241,11 +3271,11 @@ static void* createTable(void *sarg) g_args.tb_prefix, i, pThreadInfo->cols); } else { - if (superTblInfo == NULL) { + if (stbInfo == NULL) { + free(pThreadInfo->buffer); errorPrint("%s() LN%d, use metric, but super table info is NULL\n", __func__, __LINE__); - free(pThreadInfo->buffer); - exit(-1); + exit(EXIT_FAILURE); } else { if (0 == len) { batchNum = 0; @@ -3253,29 +3283,35 @@ static void* createTable(void *sarg) len += snprintf(pThreadInfo->buffer + len, buff_len - len, "create table "); } + char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(superTblInfo, i); + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, i); } else { + if (0 == stbInfo->tagSampleCount) { + free(pThreadInfo->buffer); + ERROR_EXIT("use sample file for tag, but has no content!\n"); + } tagsValBuf = getTagValueFromTagSample( - superTblInfo, - i % superTblInfo->tagSampleCount); + stbInfo, + i % stbInfo->tagSampleCount); } + if (NULL == tagsValBuf) { free(pThreadInfo->buffer); - return NULL; + ERROR_EXIT("use metric, but tag buffer is NULL\n"); } len += snprintf(pThreadInfo->buffer + len, buff_len - len, "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", - pThreadInfo->db_name, superTblInfo->childTblPrefix, + pThreadInfo->db_name, stbInfo->childTblPrefix, i, pThreadInfo->db_name, - superTblInfo->sTblName, tagsValBuf); + stbInfo->sTblName, tagsValBuf); free(tagsValBuf); batchNum++; - if ((batchNum < superTblInfo->batchCreateTableNum) + if ((batchNum < stbInfo->batchCreateTableNum) && ((buff_len - len) - >= (superTblInfo->lenOfTagOfOneRow + 256))) { + >= (stbInfo->lenOfTagOfOneRow + 256))) { continue; } } @@ -3310,14 +3346,13 @@ static void* createTable(void *sarg) static int startMultiThreadCreateChildTable( char* cols, int threads, uint64_t tableFrom, int64_t ntables, - char* db_name, SSuperTable* superTblInfo) { + char* db_name, SSuperTable* stbInfo) { pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed\n"); - exit(-1); + ERROR_EXIT("createChildTable malloc failed\n"); } if (threads < 1) { @@ -3337,7 +3372,7 @@ static int startMultiThreadCreateChildTable( threadInfo *pThreadInfo = 
infos + i; pThreadInfo->threadID = i; tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); - pThreadInfo->superTblInfo = superTblInfo; + pThreadInfo->stbInfo = stbInfo; verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); pThreadInfo->taos = taos_connect( g_Dbs.host, @@ -3444,26 +3479,26 @@ static void createChildTables() { /* Read 10000 lines at most. If more than 10000 lines, continue to read after using */ -static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { +static int readTagFromCsvFileToMem(SSuperTable * stbInfo) { size_t n = 0; ssize_t readLen = 0; char * line = NULL; - FILE *fp = fopen(superTblInfo->tagsFile, "r"); + FILE *fp = fopen(stbInfo->tagsFile, "r"); if (fp == NULL) { printf("Failed to open tags file: %s, reason:%s\n", - superTblInfo->tagsFile, strerror(errno)); + stbInfo->tagsFile, strerror(errno)); return -1; } - if (superTblInfo->tagDataBuf) { - free(superTblInfo->tagDataBuf); - superTblInfo->tagDataBuf = NULL; + if (stbInfo->tagDataBuf) { + free(stbInfo->tagDataBuf); + stbInfo->tagDataBuf = NULL; } int tagCount = 10000; int count = 0; - char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount); + char* tagDataBuf = calloc(1, stbInfo->lenOfTagOfOneRow * tagCount); if (tagDataBuf == NULL) { printf("Failed to calloc, reason:%s\n", strerror(errno)); fclose(fp); @@ -3479,20 +3514,20 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { continue; } - memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen); + memcpy(tagDataBuf + count * stbInfo->lenOfTagOfOneRow, line, readLen); count++; if (count >= tagCount - 1) { char *tmp = realloc(tagDataBuf, - (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow); + (size_t)tagCount*1.5*stbInfo->lenOfTagOfOneRow); if (tmp != NULL) { tagDataBuf = tmp; tagCount = (int)(tagCount*1.5); - memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, - 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow)); + memset(tagDataBuf + count*stbInfo->lenOfTagOfOneRow, + 0, (size_t)((tagCount-count)*stbInfo->lenOfTagOfOneRow)); } else { // exit, if allocate more memory failed - printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile); + printf("realloc fail for save tag val from %s\n", stbInfo->tagsFile); tmfree(tagDataBuf); free(line); fclose(fp); @@ -3501,8 +3536,8 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { } } - superTblInfo->tagDataBuf = tagDataBuf; - superTblInfo->tagSampleCount = count; + stbInfo->tagDataBuf = tagDataBuf; + stbInfo->tagSampleCount = count; free(line); fclose(fp); @@ -3513,28 +3548,28 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { Read 10000 lines at most. 
If more than 10000 lines, continue to read after using */ static int readSampleFromCsvFileToMem( - SSuperTable* superTblInfo) { + SSuperTable* stbInfo) { size_t n = 0; ssize_t readLen = 0; char * line = NULL; int getRows = 0; - FILE* fp = fopen(superTblInfo->sampleFile, "r"); + FILE* fp = fopen(stbInfo->sampleFile, "r"); if (fp == NULL) { errorPrint( "Failed to open sample file: %s, reason:%s\n", - superTblInfo->sampleFile, strerror(errno)); + stbInfo->sampleFile, strerror(errno)); return -1; } - assert(superTblInfo->sampleDataBuf); - memset(superTblInfo->sampleDataBuf, 0, - MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow); + assert(stbInfo->sampleDataBuf); + memset(stbInfo->sampleDataBuf, 0, + MAX_SAMPLES_ONCE_FROM_FILE * stbInfo->lenOfOneRow); while(1) { readLen = tgetline(&line, &n, fp); if (-1 == readLen) { if(0 != fseek(fp, 0, SEEK_SET)) { errorPrint( "Failed to fseek file: %s, reason:%s\n", - superTblInfo->sampleFile, strerror(errno)); + stbInfo->sampleFile, strerror(errno)); fclose(fp); return -1; } @@ -3549,13 +3584,13 @@ static int readSampleFromCsvFileToMem( continue; } - if (readLen > superTblInfo->lenOfOneRow) { + if (readLen > stbInfo->lenOfOneRow) { printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n", - (int32_t)readLen, superTblInfo->lenOfOneRow); + (int32_t)readLen, stbInfo->lenOfOneRow); continue; } - memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow, + memcpy(stbInfo->sampleDataBuf + getRows * stbInfo->lenOfOneRow, line, readLen); getRows++; @@ -5040,6 +5075,23 @@ static void postFreeResource() { free(g_Dbs.db[i].superTbls[j].sampleDataBuf); g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; } +#if STMT_IFACE_ENABLED == 1 + if (g_Dbs.db[i].superTbls[j].sampleBindArray) { + for (int k = 0; k < MAX_SAMPLES_ONCE_FROM_FILE; k++) { + uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)( + g_Dbs.db[i].superTbls[j].sampleBindArray + + sizeof(uintptr_t *) * k)); + for (int c = 1; c < g_Dbs.db[i].superTbls[j].columnCount + 1; c++) { + TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c)); + if (bind) + tmfree(bind->buffer); + } + tmfree((char *)tmp); + } + } + tmfree((char *)g_Dbs.db[i].superTbls[j].sampleBindArray); +#endif + if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { free(g_Dbs.db[i].superTbls[j].tagDataBuf); g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; @@ -5060,21 +5112,14 @@ static void postFreeResource() { tmfree(g_randfloat_buff); tmfree(g_rand_current_buff); tmfree(g_rand_phase_buff); - tmfree(g_randdouble_buff); + } static int getRowDataFromSample( char* dataBuf, int64_t maxLen, int64_t timestamp, - SSuperTable* superTblInfo, int64_t* sampleUsePos) + SSuperTable* stbInfo, int64_t* sampleUsePos) { if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { - /* int ret = readSampleFromCsvFileToMem(superTblInfo); - if (0 != ret) { - tmfree(superTblInfo->sampleDataBuf); - superTblInfo->sampleDataBuf = NULL; - return -1; - } - */ *sampleUsePos = 0; } @@ -5084,8 +5129,8 @@ static int getRowDataFromSample( "(%" PRId64 ", ", timestamp); dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%s", - superTblInfo->sampleDataBuf - + superTblInfo->lenOfOneRow * (*sampleUsePos)); + stbInfo->sampleDataBuf + + stbInfo->lenOfOneRow * (*sampleUsePos)); dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); (*sampleUsePos)++; @@ -5102,21 +5147,27 @@ static int64_t generateStbRowData( int64_t dataLen = 0; char *pstr = recBuf; int64_t maxLen = MAX_DATA_SIZE; + int tmpLen; dataLen += snprintf(pstr + dataLen, 
maxLen - dataLen, "(%" PRId64 ",", timestamp); for (int i = 0; i < stbInfo->columnCount; i++) { if ((0 == strncasecmp(stbInfo->columns[i].dataType, - "BINARY", strlen("BINARY"))) + "BINARY", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, - "NCHAR", strlen("NCHAR")))) { + "NCHAR", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } + if ((stbInfo->columns[i].dataLen + 1) > + /* need count 3 extra chars \', \', and , */ + (remainderBufLen - dataLen - 3)) { + return 0; + } char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { errorPrint( "calloc failed! size:%d\n", stbInfo->columns[i].dataLen); @@ -5130,19 +5181,20 @@ static int64_t generateStbRowData( char *tmp; if (0 == strncasecmp(stbInfo->columns[i].dataType, - "INT", strlen("INT"))) { + "INT", 3)) { if ((g_args.demo_mode) && (i == 1)) { tmp = demo_voltage_int_str(); } else { tmp = rand_int_str(); } - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BIGINT", strlen("BIGINT"))) { + "BIGINT", 6)) { tmp = rand_bigint_str(); tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "FLOAT", strlen("FLOAT"))) { + "FLOAT", 5)) { if (g_args.demo_mode) { if (i == 0) { tmp = demo_current_float_str(); @@ -5152,27 +5204,33 @@ static int64_t generateStbRowData( } else { tmp = rand_float_str(); } - tstrncpy(pstr + dataLen, tmp, FLOAT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "DOUBLE", strlen("DOUBLE"))) { + "DOUBLE", 6)) { tmp = rand_double_str(); - tstrncpy(pstr + dataLen, tmp, DOUBLE_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "SMALLINT", strlen("SMALLINT"))) { + "SMALLINT", 8)) { tmp = rand_smallint_str(); - tstrncpy(pstr + dataLen, tmp, SMALLINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TINYINT", strlen("TINYINT"))) { + "TINYINT", 7)) { tmp = rand_tinyint_str(); - tstrncpy(pstr + dataLen, tmp, TINYINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BOOL", strlen("BOOL"))) { + "BOOL", 4)) { tmp = rand_bool_str(); - tstrncpy(pstr + dataLen, tmp, BOOL_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { + "TIMESTAMP", 9)) { tmp = rand_int_str(); - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, INT_BUFF_LEN)); } else { errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType); return -1; @@ -5183,7 +5241,7 @@ static int64_t generateStbRowData( dataLen += 1; } - if (dataLen > (remainderBufLen - (DOUBLE_BUFF_LEN + 1))) + if (dataLen > (remainderBufLen - (128))) return 0; } @@ -5228,7 +5286,7 @@ static int64_t generateData(char *recBuf, char **data_type, if (s == NULL) { errorPrint("%s() LN%d, memory allocation %d bytes 
failed\n", __func__, __LINE__, lenOfBinary + 1); - exit(-1); + exit(EXIT_FAILURE); } rand_string(s, lenOfBinary); pstr += sprintf(pstr, ",\"%s\"", s); @@ -5238,7 +5296,7 @@ static int64_t generateData(char *recBuf, char **data_type, if (s == NULL) { errorPrint("%s() LN%d, memory allocation %d bytes failed\n", __func__, __LINE__, lenOfBinary + 1); - exit(-1); + exit(EXIT_FAILURE); } rand_string(s, lenOfBinary); pstr += sprintf(pstr, ",\"%s\"", s); @@ -5246,8 +5304,7 @@ static int64_t generateData(char *recBuf, char **data_type, } if (strlen(recBuf) > MAX_DATA_SIZE) { - perror("column length too long, abort"); - exit(-1); + ERROR_EXIT("column length too long, abort"); } } @@ -5258,27 +5315,27 @@ static int64_t generateData(char *recBuf, char **data_type, return (int32_t)strlen(recBuf); } -static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { +static int prepareSampleDataForSTable(SSuperTable *stbInfo) { char* sampleDataBuf = NULL; sampleDataBuf = calloc( - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); + stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", __func__, __LINE__, - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); return -1; } - superTblInfo->sampleDataBuf = sampleDataBuf; - int ret = readSampleFromCsvFileToMem(superTblInfo); + stbInfo->sampleDataBuf = sampleDataBuf; + int ret = readSampleFromCsvFileToMem(stbInfo); if (0 != ret) { errorPrint("%s() LN%d, read sample from csv file failed.\n", __func__, __LINE__); tmfree(sampleDataBuf); - superTblInfo->sampleDataBuf = NULL; + stbInfo->sampleDataBuf = NULL; return -1; } @@ -5288,14 +5345,14 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) { int32_t affectedRows; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer); uint16_t iface; - if (superTblInfo) - iface = superTblInfo->iface; + if (stbInfo) + iface = stbInfo->iface; else { if (g_args.iface == INTERFACE_BUT) iface = TAOSC_IFACE; @@ -5335,7 +5392,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt)); fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. 
===\033[0m\n\n"); - exit(-1); + exit(EXIT_FAILURE); } affectedRows = k; break; @@ -5343,7 +5400,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) default: errorPrint("%s() LN%d: unknown insert mode: %d\n", - __func__, __LINE__, superTblInfo->iface); + __func__, __LINE__, stbInfo->iface); affectedRows = 0; } @@ -5353,24 +5410,24 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq) { - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - if (superTblInfo) { - if (AUTO_CREATE_SUBTBL != superTblInfo->autoCreateTable) { - if (superTblInfo->childTblLimit > 0) { + SSuperTable* stbInfo = pThreadInfo->stbInfo; + if (stbInfo) { + if (AUTO_CREATE_SUBTBL != stbInfo->autoCreateTable) { + if (stbInfo->childTblLimit > 0) { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + - (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); + stbInfo->childTblName + + (tableSeq - stbInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); } else { verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRId64" seq=%"PRIu64"\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->ntables, tableSeq); snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); + stbInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); } } else { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", - superTblInfo->childTblPrefix, tableSeq); + stbInfo->childTblPrefix, tableSeq); } } else { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", @@ -5451,7 +5508,7 @@ static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, } static int32_t generateStbDataTail( - SSuperTable* superTblInfo, + SSuperTable* stbInfo, uint32_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows, uint64_t recordFrom, int64_t startTime, @@ -5461,7 +5518,7 @@ static int32_t generateStbDataTail( char *pstr = buffer; bool tsRand; - if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { + if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { tsRand = true; } else { tsRand = false; @@ -5476,26 +5533,26 @@ static int32_t generateStbDataTail( int64_t lenOfRow = 0; if (tsRand) { - if (superTblInfo->disorderRatio > 0) { - lenOfRow = generateStbRowData(superTblInfo, data, + if (stbInfo->disorderRatio > 0) { + lenOfRow = generateStbRowData(stbInfo, data, remainderBufLen, startTime + getTSRandTail( - superTblInfo->timeStampStep, k, - superTblInfo->disorderRatio, - superTblInfo->disorderRange) + stbInfo->timeStampStep, k, + stbInfo->disorderRatio, + stbInfo->disorderRange) ); } else { - lenOfRow = generateStbRowData(superTblInfo, data, + lenOfRow = generateStbRowData(stbInfo, data, remainderBufLen, - startTime + superTblInfo->timeStampStep * k + startTime + stbInfo->timeStampStep * k ); } } else { lenOfRow = getRowDataFromSample( data, (remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE, - startTime + superTblInfo->timeStampStep * k, - superTblInfo, + startTime + stbInfo->timeStampStep * k, + stbInfo, pSamplePos); } @@ -5551,7 +5608,7 @@ static int generateSQLHeadWithoutStb(char *tableName, } static int generateStbSQLHead( - SSuperTable* superTblInfo, + SSuperTable* stbInfo, char *tableName, int64_t tableSeq, char *dbName, char *buffer, int remainderBufLen) @@ -5560,14 +5617,14 @@ static int generateStbSQLHead( char headBuf[HEAD_BUFF_LEN]; - if (AUTO_CREATE_SUBTBL == 
superTblInfo->autoCreateTable) { + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(superTblInfo, tableSeq); + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); } else { tagsValBuf = getTagValueFromTagSample( - superTblInfo, - tableSeq % superTblInfo->tagSampleCount); + stbInfo, + tableSeq % stbInfo->tagSampleCount); } if (NULL == tagsValBuf) { errorPrint("%s() LN%d, tag buf failed to allocate memory\n", @@ -5582,10 +5639,10 @@ static int generateStbSQLHead( dbName, tableName, dbName, - superTblInfo->sTblName, + stbInfo->sTblName, tagsValBuf); tmfree(tagsValBuf); - } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { + } else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) { len = snprintf( headBuf, HEAD_BUFF_LEN, @@ -5610,12 +5667,12 @@ static int generateStbSQLHead( } static int32_t generateStbInterlaceData( - SSuperTable *superTblInfo, + threadInfo *pThreadInfo, char *tableName, uint32_t batchPerTbl, uint64_t i, uint32_t batchPerTblTimes, uint64_t tableSeq, - threadInfo *pThreadInfo, char *buffer, + char *buffer, int64_t insertRows, int64_t startTime, uint64_t *pRemainderBufLen) @@ -5623,8 +5680,9 @@ static int32_t generateStbInterlaceData( assert(buffer); char *pstr = buffer; + SSuperTable *stbInfo = pThreadInfo->stbInfo; int headLen = generateStbSQLHead( - superTblInfo, + stbInfo, tableName, tableSeq, pThreadInfo->db_name, pstr, *pRemainderBufLen); @@ -5644,12 +5702,12 @@ static int32_t generateStbInterlaceData( pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, batchPerTbl); - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { startTime = taosGetTimestamp(pThreadInfo->time_precision); } int32_t k = generateStbDataTail( - superTblInfo, + stbInfo, batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, startTime, &(pThreadInfo->samplePos), &dataLen); @@ -5712,8 +5770,215 @@ static int64_t generateInterlaceDataWithoutStb( } #if STMT_IFACE_ENABLED == 1 -static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, - char *dataType, int32_t dataLen, char **ptr, char *value) +static int32_t prepareStmtBindArrayByType( + TAOS_BIND *bind, + char *dataType, int32_t dataLen, + int32_t timePrec, + char *value) +{ + if (0 == strncasecmp(dataType, + "BINARY", strlen("BINARY"))) { + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint( "binary length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_binary; + + bind->buffer_type = TSDB_DATA_TYPE_BINARY; + if (value) { + bind_binary = calloc(1, strlen(value) + 1); + strncpy(bind_binary, value, strlen(value)); + bind->buffer_length = strlen(bind_binary); + } else { + bind_binary = calloc(1, dataLen + 1); + rand_string(bind_binary, dataLen); + bind->buffer_length = dataLen; + } + + bind->length = &bind->buffer_length; + bind->buffer = bind_binary; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "NCHAR", strlen("NCHAR"))) { + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint( "nchar length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_nchar; + + bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + if (value) { + bind_nchar = calloc(1, strlen(value) + 1); + strncpy(bind_nchar, value, strlen(value)); + } else { + bind_nchar = calloc(1, dataLen + 1); + rand_string(bind_nchar, dataLen); + } + + bind->buffer_length = 
strlen(bind_nchar); + bind->buffer = bind_nchar; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "INT", strlen("INT"))) { + int32_t *bind_int = malloc(sizeof(int32_t)); + assert(bind_int); + + if (value) { + *bind_int = atoi(value); + } else { + *bind_int = rand_int(); + } + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = bind_int; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "BIGINT", strlen("BIGINT"))) { + int64_t *bind_bigint = malloc(sizeof(int64_t)); + assert(bind_bigint); + + if (value) { + *bind_bigint = atoll(value); + } else { + *bind_bigint = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_bigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "FLOAT", strlen("FLOAT"))) { + float *bind_float = malloc(sizeof(float)); + assert(bind_float); + + if (value) { + *bind_float = (float)atof(value); + } else { + *bind_float = rand_float(); + } + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + bind->buffer = bind_float; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "DOUBLE", strlen("DOUBLE"))) { + double *bind_double = malloc(sizeof(double)); + assert(bind_double); + + if (value) { + *bind_double = atof(value); + } else { + *bind_double = rand_double(); + } + bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; + bind->buffer_length = sizeof(double); + bind->buffer = bind_double; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "SMALLINT", strlen("SMALLINT"))) { + int16_t *bind_smallint = malloc(sizeof(int16_t)); + assert(bind_smallint); + + if (value) { + *bind_smallint = (int16_t)atoi(value); + } else { + *bind_smallint = rand_smallint(); + } + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = bind_smallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "TINYINT", strlen("TINYINT"))) { + int8_t *bind_tinyint = malloc(sizeof(int8_t)); + assert(bind_tinyint); + + if (value) { + *bind_tinyint = (int8_t)atoi(value); + } else { + *bind_tinyint = rand_tinyint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_tinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "BOOL", strlen("BOOL"))) { + int8_t *bind_bool = malloc(sizeof(int8_t)); + assert(bind_bool); + + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; + } + } else { + *bind_bool = rand_bool(); + } + bind->buffer_type = TSDB_DATA_TYPE_BOOL; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_bool; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + } else if (0 == strncasecmp(dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + int64_t *bind_ts2 = malloc(sizeof(int64_t)); + assert(bind_ts2); + + if (value) { + if (strchr(value, ':') && strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; + } + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + 
free(bind_ts2); + errorPrint("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); + } + } else { + *bind_ts2 = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts2; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else { + errorPrint( "No support data type: %s\n", dataType); + return -1; + } + + return 0; +} + +static int32_t prepareStmtBindArrayByTypeForRand( + TAOS_BIND *bind, + char *dataType, int32_t dataLen, + int32_t timePrec, + char **ptr, + char *value) { if (0 == strncasecmp(dataType, "BINARY", strlen("BINARY"))) { @@ -5794,7 +6059,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "FLOAT", strlen("FLOAT"))) { - float *bind_float = (float *) *ptr; + float *bind_float = (float *)*ptr; if (value) { *bind_float = (float)atof(value); @@ -5810,7 +6075,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "DOUBLE", strlen("DOUBLE"))) { - double *bind_double = (double *)*ptr; + double *bind_double = (double *)*ptr; if (value) { *bind_double = atof(value); @@ -5842,7 +6107,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "TINYINT", strlen("TINYINT"))) { - int8_t *bind_tinyint = (int8_t *)*ptr; + int8_t *bind_tinyint = (int8_t *)*ptr; if (value) { *bind_tinyint = (int8_t)atoi(value); @@ -5854,12 +6119,21 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, bind->buffer = bind_tinyint; bind->length = &bind->buffer_length; bind->is_null = NULL; + *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "BOOL", strlen("BOOL"))) { - int8_t *bind_bool = (int8_t *)*ptr; + int8_t *bind_bool = (int8_t *)*ptr; - *bind_bool = rand_bool(); + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; + } + } else { + *bind_bool = rand_bool(); + } bind->buffer_type = TSDB_DATA_TYPE_BOOL; bind->buffer_length = sizeof(int8_t); bind->buffer = bind_bool; @@ -5869,10 +6143,28 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "TIMESTAMP", strlen("TIMESTAMP"))) { - int64_t *bind_ts2 = (int64_t *) *ptr; + int64_t *bind_ts2 = (int64_t *)*ptr; if (value) { - *bind_ts2 = atoll(value); + if (strchr(value, ':') && strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; + } + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + errorPrint("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); + } } else { *bind_ts2 = rand_bigint(); } @@ -5892,13 +6184,14 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, } static int32_t prepareStmtWithoutStb( - TAOS_STMT *stmt, + threadInfo *pThreadInfo, char *tableName, uint32_t batch, int64_t insertRows, int64_t recordFrom, int64_t startTime) { + TAOS_STMT *stmt = pThreadInfo->stmt; int ret = taos_stmt_set_tbname(stmt, tableName); if (ret != 0) { errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. 
reason: %s\n", @@ -5918,15 +6211,11 @@ static int32_t prepareStmtWithoutStb( int32_t k = 0; for (k = 0; k < batch;) { /* columnCount + 1 (ts) */ - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); - char *ptr = data; TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0); - int64_t *bind_ts; + int64_t *bind_ts = pThreadInfo->bind_ts; - bind_ts = (int64_t *)ptr; bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; if (g_args.disorderRatio) { @@ -5942,8 +6231,6 @@ static int32_t prepareStmtWithoutStb( bind->length = &bind->buffer_length; bind->is_null = NULL; - ptr += bind->buffer_length; - for (int i = 0; i < g_args.num_of_CPR; i ++) { bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1))); @@ -5951,7 +6238,8 @@ static int32_t prepareStmtWithoutStb( bind, data_type[i], g_args.len_of_binary, - &ptr, NULL)) { + pThreadInfo->time_precision, + NULL)) { return -1; } } @@ -5978,15 +6266,47 @@ static int32_t prepareStmtWithoutStb( return k; } -static int32_t prepareStbStmtBind( - char *bindArray, SSuperTable *stbInfo, bool sourceRand, - int64_t startTime, int32_t recSeq, - bool isColumn) +static int32_t prepareStbStmtBindTag( + char *bindArray, SSuperTable *stbInfo, + char *tagsVal, + int32_t timePrec) { - char *bindBuffer = calloc(1, g_args.len_of_binary); + char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, g_args.len_of_binary); + __func__, __LINE__, DOUBLE_BUFF_LEN); + return -1; + } + + TAOS_BIND *tag; + + for (int t = 0; t < stbInfo->tagCount; t ++) { + tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); + if ( -1 == prepareStmtBindArrayByType( + tag, + stbInfo->tags[t].dataType, + stbInfo->tags[t].dataLen, + timePrec, + NULL)) { + free(bindBuffer); + return -1; + } + } + + free(bindBuffer); + return 0; +} + +static int32_t prepareStbStmtBindRand( + int64_t *ts, + char *bindArray, SSuperTable *stbInfo, + int64_t startTime, int32_t recSeq, + int32_t timePrec) +{ + char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); + if (bindBuffer == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } @@ -5996,121 +6316,92 @@ static int32_t prepareStbStmtBind( TAOS_BIND *bind; - if (isColumn) { - int cursor = 0; + for (int i = 0; i < stbInfo->columnCount + 1; i ++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); - for (int i = 0; i < stbInfo->columnCount + 1; i ++) { - bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); + if (i == 0) { + int64_t *bind_ts = ts; - if (i == 0) { - int64_t *bind_ts; - - bind_ts = (int64_t *)ptr; - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - if (stbInfo->disorderRatio) { - *bind_ts = startTime + getTSRandTail( - stbInfo->timeStampStep, recSeq, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *bind_ts = startTime + stbInfo->timeStampStep * recSeq; - } - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - ptr += bind->buffer_length; + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); } else { - - if (sourceRand) { - if ( -1 == prepareStmtBindArrayByType( - bind, - stbInfo->columns[i-1].dataType, - stbInfo->columns[i-1].dataLen, - &ptr, - NULL)) { - 
free(bindBuffer); - return -1; - } - } else { - char *restStr = stbInfo->sampleDataBuf + cursor; - int lengthOfRest = strlen(restStr); - - int index = 0; - for (index = 0; index < lengthOfRest; index ++) { - if (restStr[index] == ',') { - break; - } - } - - memset(bindBuffer, 0, g_args.len_of_binary); - strncpy(bindBuffer, restStr, index); - cursor += index + 1; // skip ',' too - - if ( -1 == prepareStmtBindArrayByType( - bind, - stbInfo->columns[i-1].dataType, - stbInfo->columns[i-1].dataLen, - &ptr, - bindBuffer)) { - free(bindBuffer); - return -1; - } - } + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; } - } - } else { - TAOS_BIND *tag; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - for (int t = 0; t < stbInfo->tagCount; t ++) { - tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); - if ( -1 == prepareStmtBindArrayByType( - tag, - stbInfo->tags[t].dataType, - stbInfo->tags[t].dataLen, - &ptr, - NULL)) { - free(bindBuffer); - return -1; - } + ptr += bind->buffer_length; + } else if ( -1 == prepareStmtBindArrayByTypeForRand( + bind, + stbInfo->columns[i-1].dataType, + stbInfo->columns[i-1].dataLen, + timePrec, + &ptr, + NULL)) { + tmfree(bindBuffer); + return -1; } - } - free(bindBuffer); + tmfree(bindBuffer); return 0; } -static int32_t prepareStbStmt( - SSuperTable *stbInfo, - TAOS_STMT *stmt, +static int32_t prepareStbStmtBindWithSample( + int64_t *ts, + char *bindArray, SSuperTable *stbInfo, + int64_t startTime, int32_t recSeq, + int32_t timePrec, + int64_t samplePos) +{ + TAOS_BIND *bind; + + bind = (TAOS_BIND *)bindArray; + + int64_t *bind_ts = ts; + + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; + } + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + return 0; +} + +static int32_t prepareStbStmtRand( + threadInfo *pThreadInfo, char *tableName, int64_t tableSeq, uint32_t batch, uint64_t insertRows, uint64_t recordFrom, - int64_t startTime, - int64_t *pSamplePos) + int64_t startTime) { int ret; - - bool sourceRand; - if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { - sourceRand = true; - } else { - sourceRand = false; // from sample data file - } + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { char* tagsValBuf = NULL; - bool tagRand; if (0 == stbInfo->tagSource) { - tagRand = true; tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); } else { - tagRand = false; tagsValBuf = getTagValueFromTagSample( stbInfo, tableSeq % stbInfo->tagSampleCount); @@ -6130,8 +6421,9 @@ static int32_t prepareStbStmt( return -1; } - if (-1 == prepareStbStmtBind( - tagsArray, stbInfo, tagRand, -1, -1, false /* is tag */)) { + if (-1 == prepareStbStmtBindTag( + tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision + /* is tag */)) { tmfree(tagsValBuf); tmfree(tagsArray); return -1; @@ -6166,8 +6458,12 @@ static int32_t prepareStbStmt( uint32_t k; for (k = 0; k < batch;) { /* columnCount + 1 (ts) */ - if (-1 == prepareStbStmtBind(bindArray, stbInfo, sourceRand, - startTime, k, true /* is column */)) { + if (-1 == prepareStbStmtBindRand( + pThreadInfo->bind_ts, + bindArray, 
stbInfo, + startTime, k, + pThreadInfo->time_precision + /* is column */)) { free(bindArray); return -1; } @@ -6190,10 +6486,6 @@ static int32_t prepareStbStmt( k++; recordFrom ++; - if (!sourceRand) { - (*pSamplePos) ++; - } - if (recordFrom >= insertRows) { break; } @@ -6203,9 +6495,8 @@ static int32_t prepareStbStmt( return k; } -static int32_t prepareStbStmtInterlace( - SSuperTable *stbInfo, - TAOS_STMT *stmt, +static int32_t prepareStbStmtWithSample( + threadInfo *pThreadInfo, char *tableName, int64_t tableSeq, uint32_t batch, @@ -6214,41 +6505,109 @@ static int32_t prepareStbStmtInterlace( int64_t startTime, int64_t *pSamplePos) { - return prepareStbStmt( - stbInfo, - stmt, - tableName, - tableSeq, - batch, - insertRows, 0, startTime, - pSamplePos); -} + int ret; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; -static int32_t prepareStbStmtProgressive( - SSuperTable *stbInfo, - TAOS_STMT *stmt, - char *tableName, - int64_t tableSeq, - uint32_t batch, - uint64_t insertRows, - uint64_t recordFrom, - int64_t startTime, - int64_t *pSamplePos) -{ - return prepareStbStmt( - stbInfo, - stmt, - tableName, - tableSeq, - g_args.num_of_RPR, - insertRows, recordFrom, startTime, - pSamplePos); -} + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { + char* tagsValBuf = NULL; + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); + } else { + tagsValBuf = getTagValueFromTagSample( + stbInfo, + tableSeq % stbInfo->tagSampleCount); + } + + if (NULL == tagsValBuf) { + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); + if (NULL == tagsArray) { + tmfree(tagsValBuf); + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + if (-1 == prepareStbStmtBindTag( + tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision + /* is tag */)) { + tmfree(tagsValBuf); + tmfree(tagsArray); + return -1; + } + + ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); + + tmfree(tagsValBuf); + tmfree(tagsArray); + + if (0 != ret) { + errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } else { + ret = taos_stmt_set_tbname(stmt, tableName); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } + + uint32_t k; + for (k = 0; k < batch;) { + char *bindArray = (char *)(*((uintptr_t *) + (stbInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos)))); + /* columnCount + 1 (ts) */ + if (-1 == prepareStbStmtBindWithSample( + pThreadInfo->bind_ts, + bindArray, stbInfo, + startTime, k, + pThreadInfo->time_precision, + *pSamplePos + /* is column */)) { + return -1; + } + ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + // if msg > 3MB, break + ret = taos_stmt_add_batch(stmt); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_add_batch() failed! 
reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + + k++; + recordFrom ++; + + (*pSamplePos) ++; + if ((*pSamplePos) == MAX_SAMPLES_ONCE_FROM_FILE) { + *pSamplePos = 0; + } + + if (recordFrom >= insertRows) { + break; + } + } + + return k; +} #endif static int32_t generateStbProgressiveData( - SSuperTable *superTblInfo, + SSuperTable *stbInfo, char *tableName, int64_t tableSeq, char *dbName, char *buffer, @@ -6262,7 +6621,7 @@ static int32_t generateStbProgressiveData( memset(pstr, 0, *pRemainderBufLen); int64_t headLen = generateStbSQLHead( - superTblInfo, + stbInfo, tableName, tableSeq, dbName, buffer, *pRemainderBufLen); @@ -6274,7 +6633,7 @@ static int32_t generateStbProgressiveData( int64_t dataLen; - return generateStbDataTail(superTblInfo, + return generateStbDataTail(stbInfo, g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom, startTime, @@ -6334,26 +6693,34 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int64_t nTimeStampStep; uint64_t insert_interval; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + bool sourceRand; - if (superTblInfo) { - insertRows = superTblInfo->insertRows; + SSuperTable* stbInfo = pThreadInfo->stbInfo; - if ((superTblInfo->interlaceRows == 0) + if (stbInfo) { + insertRows = stbInfo->insertRows; + + if ((stbInfo->interlaceRows == 0) && (g_args.interlace_rows > 0)) { interlaceRows = g_args.interlace_rows; } else { - interlaceRows = superTblInfo->interlaceRows; + interlaceRows = stbInfo->interlaceRows; + } + maxSqlLen = stbInfo->maxSqlLen; + nTimeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + if (0 == strncasecmp(stbInfo->dataSource, "rand", 4)) { + sourceRand = true; + } else { + sourceRand = false; // from sample data file } - maxSqlLen = superTblInfo->maxSqlLen; - nTimeStampStep = superTblInfo->timeStampStep; - insert_interval = superTblInfo->insertInterval; } else { insertRows = g_args.num_of_DPT; interlaceRows = g_args.interlace_rows; maxSqlLen = g_args.max_sql_len; nTimeStampStep = g_args.timestamp_step; insert_interval = g_args.insert_interval; + sourceRand = true; } debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", @@ -6401,6 +6768,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { bool flagSleep = true; uint64_t sleepTimeTotal = 0; + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { if ((flagSleep) && (insert_interval)) { st = taosGetTimestampMs(); @@ -6433,28 +6803,38 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { uint64_t oldRemainderLen = remainderBufLen; int32_t generated; - if (superTblInfo) { - if (superTblInfo->iface == STMT_IFACE) { + if (stbInfo) { + if (stbInfo->iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 - generated = prepareStbStmtInterlace( - superTblInfo, - pThreadInfo->stmt, - tableName, - tableSeq, - batchPerTbl, - insertRows, i, - startTime, - &(pThreadInfo->samplePos)); + if (sourceRand) { + generated = prepareStbStmtRand( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime + ); + } else { + generated = prepareStbStmtWithSample( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime, + &(pThreadInfo->samplePos)); + } #else generated = -1; #endif } else { generated = generateStbInterlaceData( - superTblInfo, + pThreadInfo, tableName, batchPerTbl, i, batchPerTblTimes, tableSeq, - 
pThreadInfo, pstr, + pstr, insertRows, startTime, &remainderBufLen); @@ -6467,7 +6847,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { tableName, batchPerTbl, startTime); #if STMT_IFACE_ENABLED == 1 generated = prepareStmtWithoutStb( - pThreadInfo->stmt, tableName, + pThreadInfo, + tableName, batchPerTbl, insertRows, i, startTime); @@ -6577,6 +6958,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->totalAffectedRows += affectedRows; + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", @@ -6598,6 +6984,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } } } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); free_of_interlace: tmfree(pThreadInfo->buffer); @@ -6609,12 +6997,12 @@ free_of_interlace: static void* syncWriteProgressive(threadInfo *pThreadInfo) { debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + SSuperTable* stbInfo = pThreadInfo->stbInfo; + uint64_t maxSqlLen = stbInfo?stbInfo->maxSqlLen:g_args.max_sql_len; int64_t timeStampStep = - superTblInfo?superTblInfo->timeStampStep:g_args.timestamp_step; + stbInfo?stbInfo->timeStampStep:g_args.timestamp_step; int64_t insertRows = - (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; + (stbInfo)?stbInfo->insertRows:g_args.num_of_DPT; verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); @@ -6633,8 +7021,22 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->totalInsertRows = 0; pThreadInfo->totalAffectedRows = 0; + bool sourceRand; + if (stbInfo) { + if (0 == strncasecmp(stbInfo->dataSource, "rand", 4)) { + sourceRand = true; + } else { + sourceRand = false; // from sample data file + } + } else { + sourceRand = true; + } + pThreadInfo->samplePos = 0; + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + for (uint64_t tableSeq = pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to; tableSeq ++) { @@ -6663,24 +7065,35 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { remainderBufLen -= len; int32_t generated; - if (superTblInfo) { - if (superTblInfo->iface == STMT_IFACE) { + if (stbInfo) { + if (stbInfo->iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 - generated = prepareStbStmtProgressive( - superTblInfo, - pThreadInfo->stmt, - tableName, - tableSeq, - g_args.num_of_RPR, - insertRows, i, start_time, - &(pThreadInfo->samplePos)); + if (sourceRand) { + generated = prepareStbStmtRand( + pThreadInfo, + tableName, + tableSeq, + g_args.num_of_RPR, + insertRows, + i, start_time + ); + } else { + generated = prepareStbStmtWithSample( + pThreadInfo, + tableName, + tableSeq, + g_args.num_of_RPR, + insertRows, i, start_time, + &(pThreadInfo->samplePos)); + } #else generated = -1; #endif } else { generated = generateStbProgressiveData( - superTblInfo, - tableName, tableSeq, pThreadInfo->db_name, pstr, + stbInfo, + tableName, tableSeq, + pThreadInfo->db_name, pstr, insertRows, i, start_time, &(pThreadInfo->samplePos), 
&remainderBufLen); @@ -6689,7 +7102,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { if (g_args.iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 generated = prepareStmtWithoutStb( - pThreadInfo->stmt, + pThreadInfo, tableName, g_args.num_of_RPR, insertRows, i, @@ -6740,6 +7153,11 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->totalAffectedRows += affectedRows; + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", @@ -6754,14 +7172,16 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { } // num_of_DPT if ((g_args.verbose_print) && - (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo) + (tableSeq == pThreadInfo->ntables - 1) && (stbInfo) && (0 == strncasecmp( - superTblInfo->dataSource, + stbInfo->dataSource, "sample", strlen("sample")))) { verbosePrint("%s() LN%d samplePos=%"PRId64"\n", __func__, __LINE__, pThreadInfo->samplePos); } } // tableSeq + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); free_of_progressive: tmfree(pThreadInfo->buffer); @@ -6772,18 +7192,18 @@ free_of_progressive: static void* syncWrite(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; setThreadName("syncWrite"); uint32_t interlaceRows; - if (superTblInfo) { - if ((superTblInfo->interlaceRows == 0) + if (stbInfo) { + if ((stbInfo->interlaceRows == 0) && (g_args.interlace_rows > 0)) { interlaceRows = g_args.interlace_rows; } else { - interlaceRows = superTblInfo->interlaceRows; + interlaceRows = stbInfo->interlaceRows; } } else { interlaceRows = g_args.interlace_rows; @@ -6800,10 +7220,10 @@ static void* syncWrite(void *sarg) { static void callBack(void *param, TAOS_RES *res, int code) { threadInfo* pThreadInfo = (threadInfo*)param; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; int insert_interval = - superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + stbInfo?stbInfo->insertInterval:g_args.insert_interval; if (insert_interval) { pThreadInfo->et = taosGetTimestampMs(); if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) { @@ -6811,13 +7231,13 @@ static void callBack(void *param, TAOS_RES *res, int code) { } } - char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen); + char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen); char data[MAX_DATA_SIZE]; char *pstr = buffer; pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values", pThreadInfo->db_name, pThreadInfo->tb_prefix, pThreadInfo->start_table_from); - // if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { + // if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { if (pThreadInfo->counter >= g_args.num_of_RPR) { pThreadInfo->start_table_from++; pThreadInfo->counter = 0; @@ -6831,15 +7251,15 @@ static void callBack(void *param, TAOS_RES *res, int code) { for (int i = 0; i < g_args.num_of_RPR; i++) { int rand_num = taosRandom() % 100; - if (0 != pThreadInfo->superTblInfo->disorderRatio - && rand_num < pThreadInfo->superTblInfo->disorderRatio) { + if (0 != 
pThreadInfo->stbInfo->disorderRatio + && rand_num < pThreadInfo->stbInfo->disorderRatio) { int64_t d = pThreadInfo->lastTs - - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); - generateStbRowData(pThreadInfo->superTblInfo, data, + - (taosRandom() % pThreadInfo->stbInfo->disorderRange + 1); + generateStbRowData(pThreadInfo->stbInfo, data, MAX_DATA_SIZE, d); } else { - generateStbRowData(pThreadInfo->superTblInfo, + generateStbRowData(pThreadInfo->stbInfo, data, MAX_DATA_SIZE, pThreadInfo->lastTs += 1000); @@ -6847,7 +7267,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { pstr += sprintf(pstr, "%s", data); pThreadInfo->counter++; - if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { + if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { break; } } @@ -6863,7 +7283,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { static void *asyncWrite(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; setThreadName("asyncWrite"); @@ -6872,7 +7292,7 @@ static void *asyncWrite(void *sarg) { pThreadInfo->lastTs = pThreadInfo->start_time; int insert_interval = - superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + stbInfo?stbInfo->insertInterval:g_args.insert_interval; if (insert_interval) { pThreadInfo->st = taosGetTimestampMs(); } @@ -6909,8 +7329,81 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in * return 0; } +#if STMT_IFACE_ENABLED == 1 +static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) +{ + stbInfo->sampleBindArray = calloc(1, sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); + if (stbInfo->sampleBindArray == NULL) { + errorPrint("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", + __func__, __LINE__, (uint64_t)sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); + return -1; + } + + + for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; i++) { + char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); + if (bindArray == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind params\n", + __func__, __LINE__, (stbInfo->columnCount + 1)); + return -1; + } + + + TAOS_BIND *bind; + int cursor = 0; + + for (int c = 0; c < stbInfo->columnCount + 1; c++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c)); + + if (c == 0) { + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = NULL; //bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else { + char *restStr = stbInfo->sampleDataBuf + + stbInfo->lenOfOneRow * i + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index ++) { + if (restStr[index] == ',') { + break; + } + } + + char *bindBuffer = calloc(1, index + 1); + if (bindBuffer == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, DOUBLE_BUFF_LEN); + return -1; + } + + strncpy(bindBuffer, restStr, index); + cursor += index + 1; // skip ',' too + + if (-1 == prepareStmtBindArrayByType( + bind, + stbInfo->columns[c-1].dataType, + stbInfo->columns[c-1].dataLen, + timePrec, + bindBuffer)) { + free(bindBuffer); + return -1; + } + free(bindBuffer); + } + } + *((uintptr_t *)(stbInfo->sampleBindArray + (sizeof(char *)) * i)) = (uintptr_t)bindArray; + } + + return 0; +} +#endif + static void startMultiThreadInsertData(int threads, char* db_name, - char* 
precision, SSuperTable* superTblInfo) { + char* precision, SSuperTable* stbInfo) { int32_t timePrec = TSDB_TIME_PRECISION_MILLI; if (0 != precision[0]) { @@ -6924,19 +7417,19 @@ static void startMultiThreadInsertData(int threads, char* db_name, #endif } else { errorPrint("Not support precision: %s\n", precision); - exit(-1); + exit(EXIT_FAILURE); } } int64_t start_time; - if (superTblInfo) { - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + if (stbInfo) { + if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { start_time = taosGetTimestamp(timePrec); } else { if (TSDB_CODE_SUCCESS != taosParseTime( - superTblInfo->startTimestamp, + stbInfo->startTimestamp, &start_time, - strlen(superTblInfo->startTimestamp), + strlen(stbInfo->startTimestamp), timePrec, 0)) { ERROR_EXIT("failed to parse time!\n"); } @@ -6950,12 +7443,12 @@ static void startMultiThreadInsertData(int threads, char* db_name, int64_t start = taosGetTimestampMs(); // read sample data from file first - if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, + if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample")))) { - if (0 != prepareSampleDataForSTable(superTblInfo)) { + if (0 != prepareSampleDataForSTable(stbInfo)) { errorPrint("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } } @@ -6965,68 +7458,68 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (NULL == taos0) { errorPrint("%s() LN%d, connect to server fail , reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); - exit(-1); + exit(EXIT_FAILURE); } int64_t ntables = 0; uint64_t tableFrom; - if (superTblInfo) { + if (stbInfo) { int64_t limit; uint64_t offset; if ((NULL != g_args.sqlFile) - && (superTblInfo->childTblExists == TBL_NO_EXISTS) - && ((superTblInfo->childTblOffset != 0) - || (superTblInfo->childTblLimit >= 0))) { + && (stbInfo->childTblExists == TBL_NO_EXISTS) + && ((stbInfo->childTblOffset != 0) + || (stbInfo->childTblLimit >= 0))) { printf("WARNING: offset and limit will not be used since the child tables not exists!\n"); } - if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) { - if ((superTblInfo->childTblLimit < 0) - || ((superTblInfo->childTblOffset - + superTblInfo->childTblLimit) - > (superTblInfo->childTblCount))) { - superTblInfo->childTblLimit = - superTblInfo->childTblCount - superTblInfo->childTblOffset; + if (stbInfo->childTblExists == TBL_ALREADY_EXISTS) { + if ((stbInfo->childTblLimit < 0) + || ((stbInfo->childTblOffset + + stbInfo->childTblLimit) + > (stbInfo->childTblCount))) { + stbInfo->childTblLimit = + stbInfo->childTblCount - stbInfo->childTblOffset; } - offset = superTblInfo->childTblOffset; - limit = superTblInfo->childTblLimit; + offset = stbInfo->childTblOffset; + limit = stbInfo->childTblLimit; } else { - limit = superTblInfo->childTblCount; + limit = stbInfo->childTblCount; offset = 0; } ntables = limit; tableFrom = offset; - if ((superTblInfo->childTblExists != TBL_NO_EXISTS) - && ((superTblInfo->childTblOffset + superTblInfo->childTblLimit ) - > superTblInfo->childTblCount)) { + if ((stbInfo->childTblExists != TBL_NO_EXISTS) + && ((stbInfo->childTblOffset + stbInfo->childTblLimit) + > stbInfo->childTblCount)) { printf("WARNING: specified offset + limit > child table count!\n"); prompt(); } - if ((superTblInfo->childTblExists != TBL_NO_EXISTS) - && (0 == superTblInfo->childTblLimit)) { + if ((stbInfo->childTblExists != TBL_NO_EXISTS) + && (0 == stbInfo->childTblLimit)) { 
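The child-table limit handling above clamps an out-of-range limit before the table list is fetched. A standalone sketch of that correction (the function name clampChildTblLimit is hypothetical; the real code mutates stbInfo->childTblLimit in place):

#include <stdint.h>

/* Ensure offset + limit never exceeds the number of existing child tables. */
static int64_t clampChildTblLimit(int64_t childTblCount, int64_t offset, int64_t limit)
{
    if (limit < 0 || offset + limit > childTblCount) {
        limit = childTblCount - offset;   /* same correction the patch applies */
    }
    return limit;
}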
printf("WARNING: specified limit = 0, which cannot find table name to insert or query! \n"); prompt(); } - superTblInfo->childTblName = (char*)calloc(1, + stbInfo->childTblName = (char*)calloc(1, limit * TSDB_TABLE_NAME_LEN); - if (superTblInfo->childTblName == NULL) { - errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + if (stbInfo->childTblName == NULL) { taos_close(taos0); - exit(-1); + errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + exit(EXIT_FAILURE); } int64_t childTblCount; getChildNameOfSuperTableWithLimitAndOffset( taos0, - db_name, superTblInfo->sTblName, - &superTblInfo->childTblName, &childTblCount, + db_name, stbInfo->sTblName, + &stbInfo->childTblName, &childTblCount, limit, offset); } else { @@ -7047,11 +7540,11 @@ static void startMultiThreadInsertData(int threads, char* db_name, b = ntables % threads; } - if ((superTblInfo) - && (superTblInfo->iface == REST_IFACE)) { + if ((stbInfo) + && (stbInfo->iface == REST_IFACE)) { if (convertHostToServAddr( g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) { - exit(-1); + ERROR_EXIT("convert host to server address"); } } @@ -7064,94 +7557,110 @@ static void startMultiThreadInsertData(int threads, char* db_name, memset(pids, 0, threads * sizeof(pthread_t)); memset(infos, 0, threads * sizeof(threadInfo)); +#if STMT_IFACE_ENABLED == 1 + char *stmtBuffer = calloc(1, BUFFER_SIZE); + assert(stmtBuffer); + if ((g_args.iface == STMT_IFACE) + || ((stbInfo) + && (stbInfo->iface == STMT_IFACE))) { + char *pstr = stmtBuffer; + + if ((stbInfo) + && (AUTO_CREATE_SUBTBL + == stbInfo->autoCreateTable)) { + pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", + stbInfo->sTblName); + for (int tag = 0; tag < (stbInfo->tagCount - 1); + tag ++ ) { + pstr += sprintf(pstr, ",?"); + } + pstr += sprintf(pstr, ") VALUES(?"); + } else { + pstr += sprintf(pstr, "INSERT INTO ? 
VALUES(?"); + } + + int columnCount; + if (stbInfo) { + columnCount = stbInfo->columnCount; + } else { + columnCount = g_args.num_of_CPR; + } + + for (int col = 0; col < columnCount; col ++) { + pstr += sprintf(pstr, ",?"); + } + pstr += sprintf(pstr, ")"); + + debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer); + + if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource, + "sample", strlen("sample")))) { + parseSampleFileToStmt(stbInfo, timePrec); + } + } +#endif + for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); pThreadInfo->time_precision = timePrec; - pThreadInfo->superTblInfo = superTblInfo; + pThreadInfo->stbInfo = stbInfo; pThreadInfo->start_time = start_time; pThreadInfo->minDelay = UINT64_MAX; - if ((NULL == superTblInfo) || - (superTblInfo->iface != REST_IFACE)) { + if ((NULL == stbInfo) || + (stbInfo->iface != REST_IFACE)) { //t_info->taos = taos; pThreadInfo->taos = taos_connect( g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); if (NULL == pThreadInfo->taos) { + free(infos); errorPrint( "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); - free(infos); - exit(-1); + exit(EXIT_FAILURE); } #if STMT_IFACE_ENABLED == 1 if ((g_args.iface == STMT_IFACE) - || ((superTblInfo) - && (superTblInfo->iface == STMT_IFACE))) { + || ((stbInfo) + && (stbInfo->iface == STMT_IFACE))) { - int columnCount; - if (superTblInfo) { - columnCount = superTblInfo->columnCount; - } else { - columnCount = g_args.num_of_CPR; - } pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos); if (NULL == pThreadInfo->stmt) { + free(pids); + free(infos); errorPrint( "%s() LN%d, failed init stmt, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); + exit(EXIT_FAILURE); + } + + int ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0); + if (ret != 0){ free(pids); free(infos); - exit(-1); - } - - char buffer[BUFFER_SIZE]; - char *pstr = buffer; - - if ((superTblInfo) - && (AUTO_CREATE_SUBTBL - == superTblInfo->autoCreateTable)) { - pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", - superTblInfo->sTblName); - for (int tag = 0; tag < (superTblInfo->tagCount - 1); - tag ++ ) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ") VALUES(?"); - } else { - pstr += sprintf(pstr, "INSERT INTO ? VALUES(?"); - } - - for (int col = 0; col < columnCount; col ++) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ")"); - - debugPrint("%s() LN%d, buffer: %s", __func__, __LINE__, buffer); - int ret = taos_stmt_prepare(pThreadInfo->stmt, buffer, 0); - if (ret != 0){ + free(stmtBuffer); errorPrint("failed to execute taos_stmt_prepare. return 0x%x. 
reason: %s\n", ret, taos_stmt_errstr(pThreadInfo->stmt)); - free(pids); - free(infos); - exit(-1); + exit(EXIT_FAILURE); } + pThreadInfo->bind_ts = malloc(sizeof(int64_t)); } #endif } else { pThreadInfo->taos = NULL; } - /* if ((NULL == superTblInfo) - || (0 == superTblInfo->multiThreadWriteOneTbl)) { + /* if ((NULL == stbInfo) + || (0 == stbInfo->multiThreadWriteOneTbl)) { */ pThreadInfo->start_table_from = tableFrom; pThreadInfo->ntables = iend_table_to + 1; /* } else { pThreadInfo->start_table_from = 0; - pThreadInfo->ntables = superTblInfo->childTblCount; + pThreadInfo->ntables = stbInfo->childTblCount; pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint(); } */ @@ -7171,6 +7680,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, } } +#if STMT_IFACE_ENABLED == 1 + free(stmtBuffer); +#endif + for (int i = 0; i < threads; i++) { pthread_join(pids[i], NULL); } @@ -7184,11 +7697,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; - tsem_destroy(&(pThreadInfo->lock_sem)); - #if STMT_IFACE_ENABLED == 1 if (pThreadInfo->stmt) { taos_stmt_close(pThreadInfo->stmt); + tmfree((char *)pThreadInfo->bind_ts); } #endif tsem_destroy(&(pThreadInfo->lock_sem)); @@ -7198,9 +7710,9 @@ static void startMultiThreadInsertData(int threads, char* db_name, __func__, __LINE__, pThreadInfo->threadID, pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows); - if (superTblInfo) { - superTblInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; - superTblInfo->totalInsertRows += pThreadInfo->totalInsertRows; + if (stbInfo) { + stbInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; + stbInfo->totalInsertRows += pThreadInfo->totalInsertRows; } else { g_args.totalAffectedRows += pThreadInfo->totalAffectedRows; g_args.totalInsertRows += pThreadInfo->totalInsertRows; @@ -7221,22 +7733,22 @@ static void startMultiThreadInsertData(int threads, char* db_name, double tInMs = t/1000.0; - if (superTblInfo) { + if (stbInfo) { fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", - tInMs, superTblInfo->totalInsertRows, - superTblInfo->totalAffectedRows, - threads, db_name, superTblInfo->sTblName, + tInMs, stbInfo->totalInsertRows, + stbInfo->totalAffectedRows, + threads, db_name, stbInfo->sTblName, (tInMs)? - (double)(superTblInfo->totalInsertRows/tInMs):FLT_MAX); + (double)(stbInfo->totalInsertRows/tInMs):FLT_MAX); if (g_fpOfInsertResult) { fprintf(g_fpOfInsertResult, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", - tInMs, superTblInfo->totalInsertRows, - superTblInfo->totalAffectedRows, - threads, db_name, superTblInfo->sTblName, + tInMs, stbInfo->totalInsertRows, + stbInfo->totalAffectedRows, + threads, db_name, stbInfo->sTblName, (tInMs)? 
- (double)(superTblInfo->totalInsertRows/tInMs):FLT_MAX); + (double)(stbInfo->totalInsertRows/tInMs):FLT_MAX); } } else { fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", @@ -7278,18 +7790,21 @@ static void *readTable(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; setThreadName("readTable"); - char command[BUFFER_SIZE] = "\0"; + char *command = calloc(1, BUFFER_SIZE); + assert(command); + uint64_t sTime = pThreadInfo->start_time; char *tb_prefix = pThreadInfo->tb_prefix; FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + free(command); return NULL; } int64_t num_of_DPT; - /* if (pThreadInfo->superTblInfo) { - num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table; + /* if (pThreadInfo->stbInfo) { + num_of_DPT = pThreadInfo->stbInfo->insertRows; // nrecords_per_table; } else { */ num_of_DPT = g_args.num_of_DPT; @@ -7322,6 +7837,7 @@ static void *readTable(void *sarg) { taos_free_result(pSql); taos_close(taos); fclose(fp); + free(command); return NULL; } @@ -7342,6 +7858,7 @@ static void *readTable(void *sarg) { } fprintf(fp, "\n"); fclose(fp); + free(command); #endif return NULL; } @@ -7351,14 +7868,17 @@ static void *readMetric(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; setThreadName("readMetric"); - char command[BUFFER_SIZE] = "\0"; + char *command = calloc(1, BUFFER_SIZE); + assert(command); + FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + free(command); return NULL; } - int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows; + int64_t num_of_DPT = pThreadInfo->stbInfo->insertRows; int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; int64_t totalData = num_of_DPT * num_of_tables; bool do_aggreFunc = g_Dbs.do_aggreFunc; @@ -7399,6 +7919,7 @@ static void *readMetric(void *sarg) { taos_free_result(pSql); taos_close(taos); fclose(fp); + free(command); return NULL; } int count = 0; @@ -7416,6 +7937,7 @@ static void *readMetric(void *sarg) { fprintf(fp, "\n"); } fclose(fp); + free(command); #endif return NULL; } @@ -7452,11 +7974,16 @@ static int insertTestProcess() { init_rand_data(); // create database and super tables - if(createDatabasesAndStables() != 0) { + char *cmdBuffer = calloc(1, BUFFER_SIZE); + assert(cmdBuffer); + + if(createDatabasesAndStables(cmdBuffer) != 0) { if (g_fpOfInsertResult) fclose(g_fpOfInsertResult); + free(cmdBuffer); return -1; } + free(cmdBuffer); // pretreatement if (prepareSampleData() != 0) { @@ -7490,14 +8017,14 @@ static int insertTestProcess() { if (g_Dbs.db[i].superTblCount > 0) { for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j]; + SSuperTable* stbInfo = &g_Dbs.db[i].superTbls[j]; - if (superTblInfo && (superTblInfo->insertRows > 0)) { + if (stbInfo && (stbInfo->insertRows > 0)) { startMultiThreadInsertData( g_Dbs.threadCount, g_Dbs.db[i].dbName, g_Dbs.db[i].dbCfg.precision, - superTblInfo); + stbInfo); } } } @@ -7625,7 +8152,9 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { } static void *superTableQuery(void *sarg) { - char sqlstr[BUFFER_SIZE]; + char *sqlstr = calloc(1, BUFFER_SIZE); + assert(sqlstr); + threadInfo 
*pThreadInfo = (threadInfo *)sarg; setThreadName("superTableQuery"); @@ -7640,6 +8169,7 @@ static void *superTableQuery(void *sarg) { if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); + free(sqlstr); return NULL; } else { pThreadInfo->taos = taos; @@ -7664,7 +8194,7 @@ static void *superTableQuery(void *sarg) { st = taosGetTimestampMs(); for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { - memset(sqlstr,0,sizeof(sqlstr)); + memset(sqlstr, 0, BUFFER_SIZE); replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); if (g_queryInfo.superQueryInfo.result[j][0] != '\0') { sprintf(pThreadInfo->filePath, "%s-%d", @@ -7695,6 +8225,7 @@ static void *superTableQuery(void *sarg) { (double)(et - st)/1000.0); } + free(sqlstr); return NULL; } @@ -7713,7 +8244,7 @@ static int queryTestProcess() { if (taos == NULL) { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); + exit(EXIT_FAILURE); } if (0 != g_queryInfo.superQueryInfo.sqlCount) { @@ -7733,7 +8264,7 @@ static int queryTestProcess() { if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { if (convertHostToServAddr( g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0) - exit(-1); + ERROR_EXIT("convert host to server address"); } pthread_t *pids = NULL; @@ -7928,16 +8459,19 @@ static TAOS_SUB* subscribeImpl( static void *superSubscribe(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - char subSqlstr[BUFFER_SIZE]; + char *subSqlStr = calloc(1, BUFFER_SIZE); + assert(subSqlStr); + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; uint64_t tsubSeq; setThreadName("superSub"); if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { + free(subSqlStr); errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); - exit(-1); + exit(EXIT_FAILURE); } if (pThreadInfo->taos == NULL) { @@ -7949,6 +8483,7 @@ static void *superSubscribe(void *sarg) { if (pThreadInfo->taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); + free(subSqlStr); return NULL; } } @@ -7959,6 +8494,7 @@ static void *superSubscribe(void *sarg) { taos_close(pThreadInfo->taos); errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); + free(subSqlStr); return NULL; } @@ -7973,25 +8509,26 @@ static void *superSubscribe(void *sarg) { pThreadInfo->end_table_to, i); sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"", i, pThreadInfo->querySeq); - memset(subSqlstr, 0, sizeof(subSqlstr)); + memset(subSqlStr, 0, BUFFER_SIZE); replaceChildTblName( g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], - subSqlstr, i); + subSqlStr, i); if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); } - verbosePrint("%s() LN%d, [%d] subSqlstr: %s\n", - __func__, __LINE__, pThreadInfo->threadID, subSqlstr); + verbosePrint("%s() LN%d, [%d] subSqlStr: %s\n", + __func__, __LINE__, pThreadInfo->threadID, subSqlStr); tsub[tsubSeq] = subscribeImpl( STABLE_CLASS, - pThreadInfo, subSqlstr, topic, + pThreadInfo, subSqlStr, topic, g_queryInfo.superQueryInfo.subscribeRestart, g_queryInfo.superQueryInfo.subscribeInterval); if (NULL == tsub[tsubSeq]) { taos_close(pThreadInfo->taos); + 
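The query buffers above were moved from the stack to the heap, which is why the patch also replaces memset(sqlstr, 0, sizeof(sqlstr)) with an explicit BUFFER_SIZE: sizeof on a pointer yields the pointer width, not the allocation size. A small illustration (SKETCH_BUFFER_SIZE stands in for BUFFER_SIZE and is not the real value):

#include <stdlib.h>
#include <string.h>

enum { SKETCH_BUFFER_SIZE = 65536 };          /* placeholder for BUFFER_SIZE */

static char *makeSqlBuffer(void)
{
    char *sqlstr = calloc(1, SKETCH_BUFFER_SIZE);
    if (sqlstr == NULL) {
        return NULL;
    }
    /* sizeof(sqlstr) is the pointer size (e.g. 8 bytes), so the capacity
     * must be passed explicitly when clearing the buffer between queries. */
    memset(sqlstr, 0, SKETCH_BUFFER_SIZE);
    return sqlstr;                            /* caller frees it on every exit path */
}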
free(subSqlStr); return NULL; } } @@ -8048,12 +8585,13 @@ static void *superSubscribe(void *sarg) { consumed[tsubSeq]= 0; tsub[tsubSeq] = subscribeImpl( STABLE_CLASS, - pThreadInfo, subSqlstr, topic, + pThreadInfo, subSqlStr, topic, g_queryInfo.superQueryInfo.subscribeRestart, g_queryInfo.superQueryInfo.subscribeInterval ); if (NULL == tsub[tsubSeq]) { taos_close(pThreadInfo->taos); + free(subSqlStr); return NULL; } } @@ -8073,6 +8611,7 @@ static void *superSubscribe(void *sarg) { } taos_close(pThreadInfo->taos); + free(subSqlStr); return NULL; } @@ -8198,7 +8737,7 @@ static int subscribeTestProcess() { if (taos == NULL) { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); + exit(EXIT_FAILURE); } if (0 != g_queryInfo.superQueryInfo.sqlCount) { @@ -8227,7 +8766,7 @@ static int subscribeTestProcess() { errorPrint("%s() LN%d, sepcified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); - exit(-1); + exit(EXIT_FAILURE); } pids = calloc( @@ -8242,7 +8781,7 @@ static int subscribeTestProcess() { sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { @@ -8279,7 +8818,7 @@ static int subscribeTestProcess() { errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); // taos_close(taos); - exit(-1); + exit(EXIT_FAILURE); } int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; @@ -8375,9 +8914,7 @@ static void setParaFromArg() { tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE); } - if (g_args.password) { - tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE); - } + tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE); if (g_args.port) { g_Dbs.port = g_args.port; @@ -8484,8 +9021,7 @@ static int regexMatch(const char *s, const char *reg, int cflags) { /* Compile regular expression */ if (regcomp(®ex, reg, cflags) != 0) { - printf("Fail to compile regex\n"); - exit(-1); + ERROR_EXIT("Fail to compile regex\n"); } /* Execute regular expression */ @@ -8498,9 +9034,9 @@ static int regexMatch(const char *s, const char *reg, int cflags) { return 0; } else { regerror(reti, ®ex, msgbuf, sizeof(msgbuf)); - printf("Regex match failed: %s\n", msgbuf); regfree(®ex); - exit(-1); + printf("Regex match failed: %s\n", msgbuf); + exit(EXIT_FAILURE); } return 0; @@ -8602,7 +9138,7 @@ static void queryResult() { if (g_args.use_metric) { pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; - pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; + pThreadInfo->stbInfo = &g_Dbs.db[0].superTbls[0]; tstrncpy(pThreadInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN); } else { @@ -8618,10 +9154,10 @@ static void queryResult() { g_Dbs.db[0].dbName, g_Dbs.port); if (pThreadInfo->taos == NULL) { + free(pThreadInfo); errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - free(pThreadInfo); - exit(-1); + exit(EXIT_FAILURE); } tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN); diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index bea6e65106..c54b8da1b7 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -206,9 +206,9 @@ static struct argp_option options[] = { {"host", 'h', "HOST", 0, "Server host dumping data from. 
Default is localhost.", 0}, {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, #ifdef _TD_POWER_ - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0}, + {"password", 'p', 0, 0, "User password to connect to server. Default is powerdb.", 0}, #else - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, + {"password", 'p', 0, 0, "User password to connect to server. Default is taosdata.", 0}, #endif {"port", 'P', "PORT", 0, "Port to connect", 0}, {"cversion", 'v', "CVERION", 0, "client version", 0}, @@ -248,12 +248,14 @@ static struct argp_option options[] = { {0} }; +#define MAX_PASSWORD_SIZE 20 + /* Used by main to communicate with parse_opt. */ typedef struct arguments { // connection option char *host; char *user; - char *password; + char password[MAX_PASSWORD_SIZE]; uint16_t port; char cversion[12]; uint16_t mysqlFlag; @@ -376,7 +378,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { g_args.user = arg; break; case 'p': - g_args.password = arg; break; case 'P': g_args.port = atoi(arg); @@ -554,6 +555,25 @@ static void parse_precision_first( } } +static void parse_password( + int argc, char *argv[], SArguments *arguments) { + for (int i = 1; i < argc; i++) { + if (strncmp(argv[i], "-p", 2) == 0) { + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + taosSetConsoleEcho(false); + if(scanf("%20s", arguments->password) > 1) { + errorPrint("%s() LN%d, password read error!\n", __func__, __LINE__); + } + taosSetConsoleEcho(true); + } else { + tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + argv[i] = ""; + } + } +} + static void parse_timestamp( int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { @@ -616,9 +636,10 @@ int main(int argc, char *argv[]) { int ret = 0; /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. 
*/ - if (argc > 2) { + if (argc > 1) { parse_precision_first(argc, argv, &g_args); parse_timestamp(argc, argv, &g_args); + parse_password(argc, argv, &g_args); } argp_parse(&argp, argc, argv, 0, 0, &g_args); diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c index 33d779dfcf..ddb9e660af 100644 --- a/src/kit/taospack/taospack.c +++ b/src/kit/taospack/taospack.c @@ -18,6 +18,7 @@ #include #include + #if defined(WINDOWS) int main(int argc, char *argv[]) { printf("welcome to use taospack tools v1.3 for windows.\n"); @@ -148,7 +149,10 @@ float* read_float(const char* inFile, int* pcount){ //printf(" buff=%s float=%.50f \n ", buf, floats[fi]); if ( ++fi == malloc_cnt ) { malloc_cnt += 100000; - floats = realloc(floats, malloc_cnt*sizeof(float)); + float* floats1 = realloc(floats, malloc_cnt*sizeof(float)); + if(floats1 == NULL) + break; + floats = floats1; } memset(buf, 0, sizeof(buf)); } @@ -601,7 +605,6 @@ void test_threadsafe_double(int thread_count){ } - void unitTestFloat() { float ft1 [] = {1.11, 2.22, 3.333}; @@ -662,7 +665,50 @@ void unitTestFloat() { free(ft2); free(buff); free(output); - +} + +void leakFloat() { + + int cnt = sizeof(g_ft1)/sizeof(float); + float* floats = g_ft1; + int algorithm = 2; + + // compress + const char* input = (const char*)floats; + int input_len = cnt * sizeof(float); + int output_len = input_len + 1024; + char* output = (char*) malloc(output_len); + char* buff = (char*) malloc(input_len); + int buff_len = input_len; + + int ret_len = 0; + ret_len = tsCompressFloatLossy(input, input_len, cnt, output, output_len, algorithm, buff, buff_len); + + if(ret_len == 0) { + printf(" compress float error.\n"); + free(buff); + free(output); + return ; + } + + float* ft2 = (float*)malloc(input_len); + ret_len = tsDecompressFloatLossy(output, ret_len, cnt, (char*)ft2, input_len, algorithm, buff, buff_len); + if(ret_len == 0) { + printf(" decompress float error.\n"); + } + + free(ft2); + free(buff); + free(output); +} + + +void leakTest(){ + for(int i=0; i< 90000000000000; i++){ + if(i%10000==0) + printf(" ---------- %d ---------------- \n", i); + leakFloat(); + } } #define DB_CNT 500 @@ -689,7 +735,7 @@ extern char Compressor []; // ----------------- main ---------------------- // int main(int argc, char *argv[]) { - printf("welcome to use taospack tools v1.3\n"); + printf("welcome to use taospack tools v1.6\n"); //printf(" sizeof(int)=%d\n", (int)sizeof(int)); //printf(" sizeof(long)=%d\n", (int)sizeof(long)); @@ -753,6 +799,9 @@ int main(int argc, char *argv[]) { if(strcmp(argv[1], "-mem") == 0) { memTest(); } + else if(strcmp(argv[1], "-leak") == 0) { + leakTest(); + } } else{ unitTestFloat(); diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 0bc114ffdf..a4ecf5d6f3 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -2921,10 +2921,11 @@ static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray (*totalMallocLen) *= 2; } - pMultiMeta = realloc(pMultiMeta, *totalMallocLen); - if (pMultiMeta == NULL) { + SMultiTableMeta* pMultiMeta1 = realloc(pMultiMeta, *totalMallocLen); + if (pMultiMeta1 == NULL) { return NULL; } + pMultiMeta = pMultiMeta1; } return pMultiMeta; diff --git a/src/os/inc/osSystem.h b/src/os/inc/osSystem.h index e7a3ec13ae..4b79250740 100644 --- a/src/os/inc/osSystem.h +++ b/src/os/inc/osSystem.h @@ -24,6 +24,8 @@ void* taosLoadDll(const char *filename); void* taosLoadSym(void* handle, char* name); void taosCloseDll(void *handle); +int taosSetConsoleEcho(bool on); + 
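Several hunks in this patch (taospack.c, mnodeTable.c, osMemory.c, wGetline.c, and the HTTP plugin below) apply the same realloc discipline: assign the result to a temporary pointer first, so a failed realloc does not overwrite, and thereby leak, the original block. A generic sketch of the pattern (growFloatBuffer is an illustrative name, not a function from the patch):

#include <stdlib.h>

/* Grow *bufp to hold newCount floats; on failure the old block is untouched. */
static int growFloatBuffer(float **bufp, size_t newCount)
{
    float *tmp = realloc(*bufp, newCount * sizeof(float));
    if (tmp == NULL) {
        return -1;        /* *bufp is still valid and still owned by the caller */
    }
    *bufp = tmp;
    return 0;
}

The HTTP parser hunks additionally free the old block when the caller cannot continue without the larger buffer; that is a policy choice layered on top of the same pattern.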
#ifdef __cplusplus } #endif diff --git a/src/os/src/darwin/darwinSystem.c b/src/os/src/darwin/darwinSystem.c index 17cafdd664..6f296c9fef 100644 --- a/src/os/src/darwin/darwinSystem.c +++ b/src/os/src/darwin/darwinSystem.c @@ -29,4 +29,30 @@ void* taosLoadSym(void* handle, char* name) { void taosCloseDll(void *handle) { } +int taosSetConsoleEcho(bool on) +{ +#if 0 +#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL) + int err; + struct termios term; + + if (tcgetattr(STDIN_FILENO, &term) == -1) { + perror("Cannot get the attribution of the terminal"); + return -1; + } + + if (on) + term.c_lflag|=ECHOFLAGS; + else + term.c_lflag &=~ECHOFLAGS; + + err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term); + if (err == -1 && err == EINTR) { + perror("Cannot set the attribution of the terminal"); + return -1; + } + +#endif + return 0; +} diff --git a/src/os/src/detail/osMemory.c b/src/os/src/detail/osMemory.c index d8194feab4..22954f1523 100644 --- a/src/os/src/detail/osMemory.c +++ b/src/os/src/detail/osMemory.c @@ -504,8 +504,9 @@ void * taosTRealloc(void *ptr, size_t size) { void * tptr = (void *)((char *)ptr - sizeof(size_t)); size_t tsize = size + sizeof(size_t); - tptr = realloc(tptr, tsize); - if (tptr == NULL) return NULL; + void* tptr1 = realloc(tptr, tsize); + if (tptr1 == NULL) return NULL; + tptr = tptr1; *(size_t *)tptr = size; diff --git a/src/os/src/linux/osSystem.c b/src/os/src/linux/osSystem.c index 052b7a22a8..0cdb20dbdb 100644 --- a/src/os/src/linux/osSystem.c +++ b/src/os/src/linux/osSystem.c @@ -51,4 +51,28 @@ void taosCloseDll(void *handle) { } } +int taosSetConsoleEcho(bool on) +{ +#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL) + int err; + struct termios term; + + if (tcgetattr(STDIN_FILENO, &term) == -1) { + perror("Cannot get the attribution of the terminal"); + return -1; + } + + if (on) + term.c_lflag|=ECHOFLAGS; + else + term.c_lflag &=~ECHOFLAGS; + + err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term); + if (err == -1 && err == EINTR) { + perror("Cannot set the attribution of the terminal"); + return -1; + } + + return 0; +} diff --git a/src/os/src/windows/wGetline.c b/src/os/src/windows/wGetline.c index 553aecaf0a..aa45854884 100644 --- a/src/os/src/windows/wGetline.c +++ b/src/os/src/windows/wGetline.c @@ -81,11 +81,13 @@ int32_t getstr(char **lineptr, size_t *n, FILE *stream, char terminator, int32_t *n += MIN_CHUNK; nchars_avail = (int32_t)(*n + *lineptr - read_pos); - *lineptr = realloc(*lineptr, *n); - if (!*lineptr) { + char* lineptr1 = realloc(*lineptr, *n); + if (!lineptr1) { errno = ENOMEM; return -1; } + *lineptr = lineptr1; + read_pos = *n - nchars_avail + *lineptr; assert((*lineptr + *n) == (read_pos + nchars_avail)); } diff --git a/src/os/src/windows/wSystem.c b/src/os/src/windows/wSystem.c index 17cafdd664..564005f79b 100644 --- a/src/os/src/windows/wSystem.c +++ b/src/os/src/windows/wSystem.c @@ -30,3 +30,17 @@ void taosCloseDll(void *handle) { } +int taosSetConsoleEcho(bool on) +{ + HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE); + DWORD mode = 0; + GetConsoleMode(hStdin, &mode ); + if (on) { + mode |= ENABLE_ECHO_INPUT; + } else { + mode &= ~ENABLE_ECHO_INPUT; + } + SetConsoleMode(hStdin, mode); + + return 0; +} diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c index ba88a2b9cd..02f21037b8 100644 --- a/src/plugins/http/src/httpParser.c +++ b/src/plugins/http/src/httpParser.c @@ -101,13 +101,17 @@ char *httpGetStatusDesc(int32_t statusCode) { } static void httpCleanupString(HttpString *str) { - free(str->str); - str->str = NULL; - 
str->pos = 0; - str->size = 0; + if (str->str) { + free(str->str); + str->str = NULL; + str->pos = 0; + str->size = 0; + } } static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) { + char *new_str = NULL; + if (str->size == 0) { str->pos = 0; str->size = len + 1; @@ -115,7 +119,16 @@ static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) { } else if (str->pos + len + 1 >= str->size) { str->size += len; str->size *= 4; - str->str = realloc(str->str, str->size); + + new_str = realloc(str->str, str->size); + if (new_str == NULL && str->str) { + // if str->str was not NULL originally, + // the old allocated memory was left unchanged, + // see man 3 realloc + free(str->str); + } + + str->str = new_str; } else { } @@ -317,7 +330,7 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) { HttpContext *pContext = parser->pContext; - HttpString * buf = &parser->body; + HttpString *buf = &parser->body; if (parser->parseCode != TSDB_CODE_SUCCESS) return -1; if (buf->size <= 0) { @@ -326,6 +339,7 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) { } int32_t newSize = buf->pos + len + 1; + char *newStr = NULL; if (newSize >= buf->size) { if (buf->size >= HTTP_BUFFER_SIZE) { httpError("context:%p, fd:%d, failed parse body, exceeding buffer size %d", pContext, pContext->fd, buf->size); @@ -336,7 +350,12 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) { newSize = MAX(newSize, HTTP_BUFFER_INIT); newSize *= 4; newSize = MIN(newSize, HTTP_BUFFER_SIZE); - buf->str = realloc(buf->str, newSize); + newStr = realloc(buf->str, newSize); + if (newStr == NULL && buf->str) { + free(buf->str); + } + + buf->str = newStr; buf->size = newSize; if (buf->str == NULL) { @@ -374,13 +393,20 @@ static HTTP_PARSER_STATE httpTopStack(HttpParser *parser) { static int32_t httpPushStack(HttpParser *parser, HTTP_PARSER_STATE state) { HttpStack *stack = &parser->stacks; + int8_t *newStacks = NULL; if (stack->size == 0) { stack->pos = 0; stack->size = 32; stack->stacks = malloc(stack->size * sizeof(int8_t)); } else if (stack->pos + 1 > stack->size) { stack->size *= 2; - stack->stacks = realloc(stack->stacks, stack->size * sizeof(int8_t)); + + newStacks = realloc(stack->stacks, stack->size * sizeof(int8_t)); + if (newStacks == NULL && stack->stacks) { + free(stack->stacks); + } + + stack->stacks = newStacks; } else { } diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index a8031d3fd8..ade50bdad6 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -188,13 +188,17 @@ bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferS bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) { HttpSqlCmds *multiCmds = pContext->multiCmds; - if (cmdSize > HTTP_MAX_CMD_SIZE) { + if (cmdSize <= 0 || cmdSize > HTTP_MAX_CMD_SIZE) { httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE); return false; } - multiCmds->cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd)); + HttpSqlCmd *new_cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd)); + if (new_cmds == NULL && multiCmds->cmds) { + free(multiCmds->cmds); + } + multiCmds->cmds = new_cmds; if (multiCmds->cmds == NULL) { httpError("context:%p, fd:%d, 
user:%s, malloc cmds:%d error", pContext, pContext->fd, pContext->user, cmdSize); return false; @@ -208,13 +212,17 @@ bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) { bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int32_t bufferSize) { HttpSqlCmds *multiCmds = pContext->multiCmds; - if (bufferSize > HTTP_MAX_BUFFER_SIZE) { + if (bufferSize <= 0 || bufferSize > HTTP_MAX_BUFFER_SIZE) { httpError("context:%p, fd:%d, user:%s, mulitcmd buffer size:%d large then %d", pContext, pContext->fd, pContext->user, bufferSize, HTTP_MAX_BUFFER_SIZE); return false; } - multiCmds->buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize); + char *new_buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize); + if (new_buffer == NULL && multiCmds->buffer) { + free(multiCmds->buffer); + } + multiCmds->buffer = new_buffer; if (multiCmds->buffer == NULL) { httpError("context:%p, fd:%d, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->user, bufferSize); return false; diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 5fe68e456f..e8b7855769 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -333,6 +333,7 @@ enum OPERATOR_TYPE_E { OP_StateWindow = 22, OP_AllTimeWindow = 23, OP_AllMultiTableTimeInterval = 24, + OP_Order = 25, }; typedef struct SOperatorInfo { @@ -417,7 +418,6 @@ typedef struct STableScanInfo { int32_t *rowCellInfoOffset; SExprInfo *pExpr; SSDataBlock block; - bool loadExternalRows; // load external rows (prev & next rows) int32_t numOfOutput; int64_t elapsedTime; @@ -510,13 +510,21 @@ typedef struct SStateWindowOperatorInfo { bool reptScan; } SStateWindowOperatorInfo ; +typedef struct SDistinctDataInfo { + int32_t index; + int32_t type; + int32_t bytes; +} SDistinctDataInfo; + typedef struct SDistinctOperatorInfo { SHashObj *pSet; SSDataBlock *pRes; bool recordNullVal; //has already record the null value, no need to try again int64_t threshold; int64_t outputCapacity; - int32_t colIndex; + int32_t totalBytes; + char* buf; + SArray* pDistinctDataInfo; } SDistinctOperatorInfo; struct SGlobalMerger; @@ -541,6 +549,13 @@ typedef struct SMultiwayMergeInfo { SArray *udfInfo; } SMultiwayMergeInfo; +// todo support the disk-based sort +typedef struct SOrderOperatorInfo { + int32_t colIndex; + int32_t order; + SSDataBlock *pDataBlock; +} SOrderOperatorInfo; + void appendUpstream(SOperatorInfo* p, SOperatorInfo* pUpstream); SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime, int32_t reverseTime); @@ -570,6 +585,7 @@ SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter); SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUpstream, SSchema* pSchema, int32_t numOfOutput); +SOperatorInfo* createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal); SSDataBlock* doGlobalAggregate(void* param, bool* newgroup); SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup); diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h index cf0e8ce31a..c06fae298c 100644 --- a/src/query/inc/qExtbuffer.h +++ b/src/query/inc/qExtbuffer.h @@ -220,6 +220,8 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder void tOrderDescDestroy(tOrderDescriptor *pDesc); +void taoscQSort(void** pCols, SSchema* pSchema, int32_t 
numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn); + void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows, int32_t numOfRowsToWrite, int32_t srcCapacity); diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index d2802d9fe0..ce607f0fe2 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -39,7 +39,6 @@ #define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId) #define curTimeWindowIndex(_winres) ((_winres)->curIndex) -#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? (_q)->pExpr1[1].base.param[0].i64:1) int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr); @@ -60,6 +59,7 @@ SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t index, int32_t void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr); void* freeColumnInfo(SColumnInfo* pColumnInfo, int32_t numOfCols); +int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, bool stable); static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int32_t slot) { assert(pResultRowInfo != NULL && slot >= 0 && slot < pResultRowInfo->size); @@ -70,7 +70,7 @@ static FORCE_INLINE char* getPosInResultPage(SQueryAttr* pQueryAttr, tFilePage* int32_t offset) { assert(rowOffset >= 0 && pQueryAttr != NULL); - int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery); + int32_t numOfRows = (int32_t)getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery); return ((char *)page->data) + rowOffset + offset * numOfRows; } diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 9f081cfb2f..1587746587 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -3670,6 +3670,8 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { return; } + bool ascQuery = (pCtx->order == TSDB_ORDER_ASC); + if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) { *(TSKEY *)pCtx->pOutput = pCtx->startTs; } else if (type == TSDB_FILL_NULL) { @@ -3677,7 +3679,7 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { } else if (type == TSDB_FILL_SET_VALUE) { tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true); } else { - if (pCtx->start.key != INT64_MIN && pCtx->start.key < pCtx->startTs && pCtx->end.key > pCtx->startTs) { + if (pCtx->start.key != INT64_MIN && ((ascQuery && pCtx->start.key <= pCtx->startTs && pCtx->end.key >= pCtx->startTs) || ((!ascQuery) && pCtx->start.key >= pCtx->startTs && pCtx->end.key <= pCtx->startTs))) { if (type == TSDB_FILL_PREV) { if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) { SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val); @@ -3716,13 +3718,14 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { TSKEY skey = GET_TS_DATA(pCtx, 0); if (type == TSDB_FILL_PREV) { - if (skey > pCtx->startTs) { + if ((ascQuery && skey > pCtx->startTs) || ((!ascQuery) && skey < pCtx->startTs)) { return; } if (pCtx->size > 1) { TSKEY ekey = GET_TS_DATA(pCtx, 1); - if (ekey > skey && ekey <= pCtx->startTs) { + if ((ascQuery && ekey > skey && ekey <= pCtx->startTs) || + ((!ascQuery) && ekey < skey && ekey >= pCtx->startTs)){ skey = ekey; } } @@ -3731,10 +3734,10 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { TSKEY ekey = skey; char* val = NULL; - if (ekey < pCtx->startTs) { + if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) { if (pCtx->size > 1) { ekey = GET_TS_DATA(pCtx, 
1); - if (ekey < pCtx->startTs) { + if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) { return; } @@ -3755,12 +3758,11 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { TSKEY ekey = GET_TS_DATA(pCtx, 1); // no data generated yet - if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) { + if ((ascQuery && !(skey <= pCtx->startTs && ekey >= pCtx->startTs)) + || ((!ascQuery) && !(skey >= pCtx->startTs && ekey <= pCtx->startTs))) { return; } - assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs); - char *start = GET_INPUT_DATA(pCtx, 0); char *end = GET_INPUT_DATA(pCtx, 1); @@ -3788,11 +3790,37 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { static void interp_function(SQLFunctionCtx *pCtx) { // at this point, the value is existed, return directly if (pCtx->size > 0) { - // impose the timestamp check - TSKEY key = GET_TS_DATA(pCtx, 0); + bool ascQuery = (pCtx->order == TSDB_ORDER_ASC); + TSKEY key; + char *pData; + int32_t typedData = 0; + + if (ascQuery) { + key = GET_TS_DATA(pCtx, 0); + pData = GET_INPUT_DATA(pCtx, 0); + } else { + key = pCtx->start.key; + if (key == INT64_MIN) { + key = GET_TS_DATA(pCtx, 0); + pData = GET_INPUT_DATA(pCtx, 0); + } else { + if (!(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL)) { + pData = pCtx->start.ptr; + } else { + typedData = 1; + pData = (char *)&pCtx->start.val; + } + } + } + + //if (key == pCtx->startTs && (ascQuery || !(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL))) { if (key == pCtx->startTs) { - char *pData = GET_INPUT_DATA(pCtx, 0); - assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); + if (typedData) { + SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, *(double *)pData); + } else { + assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); + } + SET_VAL(pCtx, 1, 1); } else { interp_function_impl(pCtx); @@ -4062,12 +4090,15 @@ static void mergeTableBlockDist(SResultRowCellInfo* pResInfo, const STableBlockD pDist->maxRows = pSrc->maxRows; pDist->minRows = pSrc->minRows; - int32_t numSteps = tsMaxRowsInFileBlock/TSDB_BLOCK_DIST_STEP_ROWS; - pDist->dataBlockInfos = taosArrayInit(numSteps, sizeof(SFileBlockInfo)); - taosArraySetSize(pDist->dataBlockInfos, numSteps); + int32_t maxSteps = TSDB_MAX_MAX_ROW_FBLOCK/TSDB_BLOCK_DIST_STEP_ROWS; + if (TSDB_MAX_MAX_ROW_FBLOCK % TSDB_BLOCK_DIST_STEP_ROWS != 0) { + ++maxSteps; + } + pDist->dataBlockInfos = taosArrayInit(maxSteps, sizeof(SFileBlockInfo)); + taosArraySetSize(pDist->dataBlockInfos, maxSteps); } - size_t steps = taosArrayGetSize(pDist->dataBlockInfos); + size_t steps = taosArrayGetSize(pSrc->dataBlockInfos); for (int32_t i = 0; i < steps; ++i) { int32_t srcNumBlocks = ((SFileBlockInfo*)taosArrayGet(pSrc->dataBlockInfos, i))->numBlocksOfStep; SFileBlockInfo* blockInfo = (SFileBlockInfo*)taosArrayGet(pDist->dataBlockInfos, i); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 52e5dbb7f3..844b8ec683 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -44,6 +44,10 @@ #define SDATA_BLOCK_INITIALIZER (SDataBlockInfo) {{0}, 0} +#define MULTI_KEY_DELIM "-" + +#define HASH_CAPACITY_LIMIT 10000000 + #define TIME_WINDOW_COPY(_dst, _src) do {\ (_dst).skey = (_src).skey;\ (_dst).ekey = (_src).ekey;\ @@ -224,6 +228,7 @@ static void destroySFillOperatorInfo(void* param, int32_t numOfOutput); static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput); static void 
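The interp_function_impl changes above make every boundary test direction-aware: for an ascending scan the two surrounding keys must enclose startTs from below and above, while for a descending scan the same test runs with the comparisons mirrored. Reduced to a sketch, the repeated condition is:

#include <stdbool.h>
#include <stdint.h>

/* Does the key pair (key1, key2) enclose ts, given the scan direction? */
static bool enclosesTs(int64_t key1, int64_t key2, int64_t ts, bool ascQuery) {
  return ascQuery ? (key1 <= ts && ts <= key2)
                  : (key1 >= ts && ts >= key2);
}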
destroyProjectOperatorInfo(void* param, int32_t numOfOutput); static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput); +static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput); static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput); static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput); static void destroyAggOperatorInfo(void* param, int32_t numOfOutput); @@ -1326,6 +1331,16 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, pCtx[k].end.key = curTs; pCtx[k].end.val = v2; + + if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { + if (prevRowIndex == -1) { + pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[index]; + } else { + pCtx[k].start.ptr = (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes; + } + + pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes; + } } } else if (functionId == TSDB_FUNC_TWA) { SPoint point1 = (SPoint){.key = prevTs, .val = &v1}; @@ -1595,6 +1610,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe SResultRow* pResult = NULL; int32_t forwardStep = 0; int32_t ret = 0; + STimeWindow preWin = win; while (1) { // null data, failed to allocate more memory buffer @@ -1609,12 +1625,13 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); - + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + preWin = win; + int32_t prevEndPos = (forwardStep - 1) * step + startPos; startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos); if (startPos < 0) { - if (win.skey <= pQueryAttr->window.ekey) { + if ((ascQuery && win.skey <= pQueryAttr->window.ekey) || ((!ascQuery) && win.ekey >= pQueryAttr->window.ekey)) { int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { @@ -1622,12 +1639,12 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe } startPos = pSDataBlock->info.rows - 1; - + // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? 
&win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); } - + break; } setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); @@ -2213,6 +2230,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf } break; } + case OP_StateWindow: { pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; @@ -2229,24 +2247,20 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_Filter: { // todo refactor int32_t numOfFilterCols = 0; -// if (pQueryAttr->numOfFilterCols > 0) { -// pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, -// pQueryAttr->numOfOutput, pQueryAttr->tableCols, pQueryAttr->numOfFilterCols); -// } else { - if (pQueryAttr->stableQuery) { - SColumnInfo* pColInfo = - extractColumnFilterInfo(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, &numOfFilterCols); - pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, - pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols); - freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3); - } else { - SColumnInfo* pColInfo = - extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols); - pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, - pQueryAttr->numOfOutput, pColInfo, numOfFilterCols); - freeColumnInfo(pColInfo, pQueryAttr->numOfOutput); - } -// } + if (pQueryAttr->stableQuery) { + SColumnInfo* pColInfo = + extractColumnFilterInfo(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, &numOfFilterCols); + pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, + pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols); + freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3); + } else { + SColumnInfo* pColInfo = + extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols); + pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, + pQueryAttr->numOfOutput, pColInfo, numOfFilterCols); + freeColumnInfo(pColInfo, pQueryAttr->numOfOutput); + } + break; } @@ -2258,11 +2272,12 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_MultiwayMergeSort: { bool groupMix = true; - if(pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) { + if (pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) { groupMix = false; } + pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, - 4096, merger, groupMix); // TODO hack it + 4096, merger, groupMix); // TODO hack it break; } @@ -2283,6 +2298,11 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf break; } + case OP_Order: { + pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &pQueryAttr->order); + break; + } + default: { assert(0); } @@ -2706,7 +2726,7 @@ static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, i SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; int32_t MIN_ROWS_PER_PAGE = 4; - *rowsize = (int32_t)(pQueryAttr->resultRowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); + *rowsize = (int32_t)(pQueryAttr->resultRowSize * 
getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); int32_t overhead = sizeof(tFilePage); // one page contains at least two rows @@ -3092,7 +3112,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa // check if this data block is required to load if ((*status) != BLK_DATA_ALL_NEEDED) { bool needFilter = true; - + // the pCtx[i] result is belonged to previous time window since the outputBuf has not been set yet, // the filter result may be incorrect. So in case of interval query, we need to set the correct time output buffer if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) { @@ -3575,6 +3595,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo; int64_t tid = 0; + pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, sizeof(tid) + sizeof(int64_t) + POINTER_BYTES); SResultRow* pRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&tid, sizeof(tid), true, uid); for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { @@ -3630,7 +3651,7 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf // re-estabilish output buffer pointer. int32_t functionId = pBInfo->pCtx[i].functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { - pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[0].pOutput; + pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput; } } } @@ -5024,6 +5045,9 @@ static SSDataBlock* doBlockInfoScan(void* param, bool* newgroup) { tableBlockDist.numOfTables = (int32_t)pOperator->pRuntimeEnv->tableqinfoGroupInfo.numOfTables; int32_t numRowSteps = tsMaxRowsInFileBlock / TSDB_BLOCK_DIST_STEP_ROWS; + if (tsMaxRowsInFileBlock % TSDB_BLOCK_DIST_STEP_ROWS != 0) { + ++numRowSteps; + } tableBlockDist.dataBlockInfos = taosArrayInit(numRowSteps, sizeof(SFileBlockInfo)); taosArraySetSize(tableBlockDist.dataBlockInfos, numRowSteps); tableBlockDist.maxRows = INT_MIN; @@ -5298,7 +5322,7 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo)); pInfo->resultRowFactor = - (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false)); + (int32_t)(getRowNumForMultioutput(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false)); pRuntimeEnv->scanFlag = MERGE_STAGE; // TODO init when creating pCtx @@ -5404,6 +5428,114 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx return pOperator; } +static int32_t doMergeSDatablock(SSDataBlock* pDest, SSDataBlock* pSrc) { + assert(pSrc != NULL && pDest != NULL && pDest->info.numOfCols == pSrc->info.numOfCols); + + int32_t numOfCols = pSrc->info.numOfCols; + for(int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i); + SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, i); + + int32_t newSize = (pDest->info.rows + pSrc->info.rows) * pCol2->info.bytes; + char* tmp = realloc(pCol2->pData, newSize); + if (tmp != NULL) { + pCol2->pData = tmp; + int32_t offset = pCol2->info.bytes * pDest->info.rows; + memcpy(pCol2->pData + offset, pCol1->pData, pSrc->info.rows * pCol2->info.bytes); + } else { + return TSDB_CODE_VND_OUT_OF_MEMORY; + } + } + + pDest->info.rows += pSrc->info.rows; + + return TSDB_CODE_SUCCESS; +} + +static SSDataBlock* doSort(void* param, bool* 
newgroup) { + SOperatorInfo* pOperator = (SOperatorInfo*) param; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + SOrderOperatorInfo* pInfo = pOperator->info; + + SSDataBlock* pBlock = NULL; + while(1) { + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC); + pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup); + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC); + + // start to flush data into disk and try do multiway merge sort + if (pBlock == NULL) { + setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + break; + } + + int32_t code = doMergeSDatablock(pInfo->pDataBlock, pBlock); + if (code != TSDB_CODE_SUCCESS) { + // todo handle error + } + } + + int32_t numOfCols = pInfo->pDataBlock->info.numOfCols; + void** pCols = calloc(numOfCols, POINTER_BYTES); + SSchema* pSchema = calloc(numOfCols, sizeof(SSchema)); + + for(int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* p1 = taosArrayGet(pInfo->pDataBlock->pDataBlock, i); + pCols[i] = p1->pData; + pSchema[i].colId = p1->info.colId; + pSchema[i].bytes = p1->info.bytes; + pSchema[i].type = (uint8_t) p1->info.type; + } + + __compar_fn_t comp = getKeyComparFunc(pSchema[pInfo->colIndex].type, pInfo->order); + taoscQSort(pCols, pSchema, numOfCols, pInfo->pDataBlock->info.rows, pInfo->colIndex, comp); + + tfree(pCols); + tfree(pSchema); + return (pInfo->pDataBlock->info.rows > 0)? pInfo->pDataBlock:NULL; +} + +SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal) { + SOrderOperatorInfo* pInfo = calloc(1, sizeof(SOrderOperatorInfo)); + + { + SSDataBlock* pDataBlock = calloc(1, sizeof(SSDataBlock)); + pDataBlock->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData)); + for(int32_t i = 0; i < numOfOutput; ++i) { + SColumnInfoData col = {{0}}; + col.info.colId = pExpr[i].base.colInfo.colId; + col.info.bytes = pExpr[i].base.colBytes; + col.info.type = pExpr[i].base.colType; + taosArrayPush(pDataBlock->pDataBlock, &col); + + if (col.info.colId == pOrderVal->orderColId) { + pInfo->colIndex = i; + } + } + + pDataBlock->info.numOfCols = numOfOutput; + pInfo->order = pOrderVal->order; + pInfo->pDataBlock = pDataBlock; + } + + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + pOperator->name = "InMemoryOrder"; + pOperator->operatorType = OP_Order; + pOperator->blockingOptr = true; + pOperator->status = OP_IN_EXECUTING; + pOperator->info = pInfo; + pOperator->exec = doSort; + pOperator->cleanup = destroyOrderOperatorInfo; + pOperator->pRuntimeEnv = pRuntimeEnv; + + appendUpstream(pOperator, upstream); + return pOperator; +} + static int32_t getTableScanOrder(STableScanInfo* pTableScanInfo) { return pTableScanInfo->order; } @@ -6327,7 +6459,7 @@ SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo)); SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; - int32_t numOfRows = (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); + int32_t numOfRows = (int32_t)(getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, numOfRows); pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset); @@ -6404,6 +6536,11 @@ static void 
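doSort above is a blocking operator: it keeps pulling blocks from its single upstream, appends them into one in-memory SSDataBlock via doMergeSDatablock, and only sorts once the upstream is exhausted (the "todo support the disk-based sort" note in qExecutor.h marks the memory limitation). The consume-everything-then-sort shape, in a self-contained miniature:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmpInt(const void *a, const void *b) {
  int x = *(const int *)a, y = *(const int *)b;
  return (x > y) - (x < y);
}

int main(void) {
  int batches[3][4] = {{7, 3, 9, 1}, {8, 2, 6, 4}, {5, 0, 11, 10}};
  int *acc = NULL;
  int rows = 0;

  for (int b = 0; b < 3; ++b) {                 /* "drain the upstream" loop */
    acc = realloc(acc, (rows + 4) * sizeof(int));
    memcpy(acc + rows, batches[b], 4 * sizeof(int));
    rows += 4;
  }
  qsort(acc, rows, sizeof(int), cmpInt);        /* sort once, after all input */

  for (int i = 0; i < rows; ++i) printf("%d ", acc[i]);
  printf("\n");
  free(acc);
  return 0;
}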
destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { pInfo->pRes = destroyOutputBuf(pInfo->pRes); } +static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) { + SOrderOperatorInfo* pInfo = (SOrderOperatorInfo*) param; + pInfo->pDataBlock = destroyOutputBuf(pInfo->pDataBlock); +} + static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) { SFilterOperatorInfo* pInfo = (SFilterOperatorInfo*) param; doDestroyFilterInfo(pInfo->pFilterInfo, pInfo->numOfFilterCols); @@ -6412,6 +6549,8 @@ static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) { static void destroyDistinctOperatorInfo(void* param, int32_t numOfOutput) { SDistinctOperatorInfo* pInfo = (SDistinctOperatorInfo*) param; taosHashCleanup(pInfo->pSet); + tfree(pInfo->buf); + taosArrayDestroy(pInfo->pDistinctDataInfo); pInfo->pRes = destroyOutputBuf(pInfo->pRes); } @@ -6701,7 +6840,7 @@ SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; pQueryAttr->resultRowSize = (pQueryAttr->resultRowSize * - (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery))); + (int32_t)(getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery))); pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity); initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT); @@ -6752,7 +6891,6 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn pOperator->numOfOutput = numOfOutput; pOperator->info = pInfo; pOperator->pRuntimeEnv = pRuntimeEnv; - pOperator->exec = doFill; pOperator->cleanup = destroySFillOperatorInfo; @@ -6954,6 +7092,52 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf return pOperator; } +static bool initMultiDistinctInfo(SDistinctOperatorInfo *pInfo, SOperatorInfo* pOperator, SSDataBlock *pBlock) { + if (taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput) { + // distinct info already inited + return true; + } + for (int i = 0; i < pOperator->numOfOutput; i++) { + pInfo->totalBytes += pOperator->pExpr[i].base.colBytes; + } + for (int i = 0; i < pOperator->numOfOutput; i++) { + int numOfBlock = (int)taosArrayGetSize(pBlock->pDataBlock); + assert(i < numOfBlock); + for (int j = 0; j < numOfBlock; j++) { + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, j); + if (pColDataInfo->info.colId == pOperator->pExpr[i].base.resColId) { + SDistinctDataInfo item = {.index = j, .type = pColDataInfo->info.type, .bytes = pColDataInfo->info.bytes}; + taosArrayInsert(pInfo->pDistinctDataInfo, i, &item); + } + } + } + pInfo->totalBytes += (int32_t)strlen(MULTI_KEY_DELIM) * (pOperator->numOfOutput); + pInfo->buf = calloc(1, pInfo->totalBytes); + return taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput ? 
true : false; +} +static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBlock, int32_t rowId) { + char *p = pInfo->buf; + memset(p, 0, pInfo->totalBytes); + + for (int i = 0; i < taosArrayGetSize(pInfo->pDistinctDataInfo); i++) { + SDistinctDataInfo* pDistDataInfo = (SDistinctDataInfo *)taosArrayGet(pInfo->pDistinctDataInfo, i); + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pDistDataInfo->index); + char *val = ((char *)pColDataInfo->pData) + pColDataInfo->info.bytes * rowId; + if (isNull(val, pDistDataInfo->type)) { + p += pDistDataInfo->bytes; + continue; + } + if (IS_VAR_DATA_TYPE(pDistDataInfo->type)) { + memcpy(p, varDataVal(val), varDataLen(val)); + p += varDataLen(val); + } else { + memcpy(p, val, pDistDataInfo->bytes); + p += pDistDataInfo->bytes; + } + memcpy(p, MULTI_KEY_DELIM, strlen(MULTI_KEY_DELIM)); + p += strlen(MULTI_KEY_DELIM); + } +} static SSDataBlock* hashDistinct(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; @@ -6961,11 +7145,9 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { return NULL; } - SDistinctOperatorInfo* pInfo = pOperator->info; SSDataBlock* pRes = pInfo->pRes; - pRes->info.rows = 0; SSDataBlock* pBlock = NULL; while(1) { @@ -6978,77 +7160,60 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { pOperator->status = OP_EXEC_DONE; break; } - if (pInfo->colIndex == -1) { - for (int i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) { - SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, i); - if (pColDataInfo->info.colId == pOperator->pExpr[0].base.resColId) { - pInfo->colIndex = i; - break; - } - } - } - if (pInfo->colIndex == -1) { + if (!initMultiDistinctInfo(pInfo, pOperator, pBlock)) { setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); pOperator->status = OP_EXEC_DONE; - return NULL; - } - SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex); - - int16_t bytes = pColInfoData->info.bytes; - int16_t type = pColInfoData->info.type; - - // ensure the output buffer size - SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, 0); - if (pRes->info.rows + pBlock->info.rows > pInfo->outputCapacity) { - int32_t newSize = pRes->info.rows + pBlock->info.rows; - char* tmp = realloc(pResultColInfoData->pData, newSize * bytes); - if (tmp == NULL) { - return NULL; - } else { - pResultColInfoData->pData = tmp; + break; + } + + // ensure result output buf + if (pRes->info.rows + pBlock->info.rows > pInfo->outputCapacity) { + int32_t newSize = pRes->info.rows + pBlock->info.rows; + for (int i = 0; i < taosArrayGetSize(pRes->pDataBlock); i++) { + SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, i); + SDistinctDataInfo* pDistDataInfo = taosArrayGet(pInfo->pDistinctDataInfo, i); + char* tmp = realloc(pResultColInfoData->pData, newSize * pDistDataInfo->bytes); + if (tmp == NULL) { + return NULL; + } else { + pResultColInfoData->pData = tmp; + } + } pInfo->outputCapacity = newSize; - } - } - - for(int32_t i = 0; i < pBlock->info.rows; ++i) { - char* val = ((char*)pColInfoData->pData) + bytes * i; - if (isNull(val, type)) { - continue; - } - char* p = val; - size_t keyLen = 0; - if (IS_VAR_DATA_TYPE(pOperator->pExpr->base.colType)) { - tstr* var = (tstr*)(val); - p = var->data; - keyLen = varDataLen(var); - } else { - keyLen = bytes; - } - - int dummy; - void* res = taosHashGet(pInfo->pSet, p, keyLen); - if (res == NULL) { - taosHashPut(pInfo->pSet, p, keyLen, &dummy, sizeof(dummy)); - 
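buildMultiDistinctKey above concatenates the value of every selected column for one row, separated by MULTI_KEY_DELIM, into a single buffer, and that buffer becomes the hash-set key; this is what lets DISTINCT now cover several columns instead of the previous single colIndex. The composite-key idea in a small self-contained form (simplified: fixed-width int columns, delimiter byte hard-coded):

#include <stdio.h>
#include <string.h>

/* Build a composite key for one row from several fixed-width columns,
 * separated by a delimiter, the way the multi-column DISTINCT key is built. */
static size_t buildKey(char *key, const int *cols[], int numOfCols, int row) {
  char *p = key;
  for (int i = 0; i < numOfCols; ++i) {
    memcpy(p, &cols[i][row], sizeof(int));
    p += sizeof(int);
    *p++ = '-';                       /* MULTI_KEY_DELIM in the patch is "-" */
  }
  return (size_t)(p - key);
}

int main(void) {
  int c0[] = {1, 1, 2}, c1[] = {10, 10, 10};
  const int *cols[] = {c0, c1};
  char k0[64], k1[64];
  size_t n0 = buildKey(k0, cols, 2, 0);
  size_t n1 = buildKey(k1, cols, 2, 1);
  /* rows 0 and 1 produce identical keys, so row 1 would be dropped by the set */
  printf("duplicate: %d\n", n0 == n1 && memcmp(k0, k1, n0) == 0);
  return 0;
}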
char* start = pResultColInfoData->pData + bytes * pInfo->pRes->info.rows; - memcpy(start, val, bytes); + } + for (int32_t i = 0; i < pBlock->info.rows; i++) { + buildMultiDistinctKey(pInfo, pBlock, i); + if (taosHashGet(pInfo->pSet, pInfo->buf, pInfo->totalBytes) == NULL) { + int32_t dummy; + taosHashPut(pInfo->pSet, pInfo->buf, pInfo->totalBytes, &dummy, sizeof(dummy)); + for (int j = 0; j < taosArrayGetSize(pRes->pDataBlock); j++) { + SDistinctDataInfo* pDistDataInfo = taosArrayGet(pInfo->pDistinctDataInfo, j); // distinct meta info + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pDistDataInfo->index); //src + SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, j); // dist + char* val = ((char*)pColInfoData->pData) + pDistDataInfo->bytes * i; + char *start = pResultColInfoData->pData + pDistDataInfo->bytes * pInfo->pRes->info.rows; + memcpy(start, val, pDistDataInfo->bytes); + } pRes->info.rows += 1; - } - } + } + } if (pRes->info.rows >= pInfo->threshold) { break; } } - return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL; } SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo)); - pInfo->colIndex = -1; - pInfo->threshold = 10000000; // distinct result threshold - pInfo->outputCapacity = 4096; - pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK); + + pInfo->totalBytes = 0; + pInfo->buf = NULL; + pInfo->threshold = tsMaxNumOfDistinctResults; // distinct result threshold + pInfo->outputCapacity = 4096; + pInfo->pDistinctDataInfo = taosArrayInit(numOfOutput, sizeof(SDistinctDataInfo)); + pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index cc47cc824b..5152d17264 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -1102,3 +1102,57 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) { destroyColumnModel(pDesc->pColumnModel); tfree(pDesc); } + +void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) { + assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols); + + int32_t bytes = pSchema[index].bytes; + int32_t size = bytes + sizeof(int32_t); + + char* buf = calloc(1, size * numOfRows); + + for(int32_t i = 0; i < numOfRows; ++i) { + char* dest = buf + size * i; + memcpy(dest, ((char*) pCols[index]) + bytes * i, bytes); + *(int32_t*)(dest+bytes) = i; + } + + qsort(buf, numOfRows, size, compareFn); + + int32_t prevLength = 0; + char* p = NULL; + + for(int32_t i = 0; i < numOfCols; ++i) { + int32_t bytes1 = pSchema[i].bytes; + + if (i == index) { + for(int32_t j = 0; j < numOfRows; ++j){ + char* src = buf + (j * size); + char* dest = ((char*)pCols[i]) + (j * bytes1); + memcpy(dest, src, bytes1); + } + } else { + // make sure memory buffer is enough + if (prevLength < bytes1) { + char *tmp = realloc(p, bytes1 * numOfRows); + assert(tmp); + + p = tmp; + prevLength = bytes1; + } + + memcpy(p, pCols[i], bytes1 * numOfRows); + + for(int32_t j = 0; j < numOfRows; ++j){ + char* dest = ((char*)pCols[i]) + bytes1 * j; + + int32_t newPos = *(int32_t*)(buf + (j * size) + bytes); + char* src = p 
+ (newPos * bytes1); + memcpy(dest, src, bytes1); + } + } + } + + tfree(buf); + tfree(p); +} \ No newline at end of file diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index 7dd73c9fe4..1a86bbae36 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -206,6 +206,12 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR } else { assert(pFillInfo->currentKey == ts); initBeforeAfterDataBuf(pFillInfo, prev); + if (pFillInfo->type == TSDB_FILL_NEXT && (pFillInfo->index + 1) < pFillInfo->numOfRows) { + initBeforeAfterDataBuf(pFillInfo, next); + ++pFillInfo->index; + copyCurrentRowIntoBuf(pFillInfo, srcData, *next); + --pFillInfo->index; + } // assign rows to dst buffer for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { @@ -227,6 +233,12 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR } else if (pFillInfo->type == TSDB_FILL_LINEAR) { assignVal(output, src, pCol->col.bytes, pCol->col.type); memcpy(*prev + pCol->col.offset, src, pCol->col.bytes); + } else if (pFillInfo->type == TSDB_FILL_NEXT) { + if (*next) { + assignVal(output, *next + pCol->col.offset, pCol->col.bytes, pCol->col.type); + } else { + setNull(output, pCol->col.type, pCol->col.bytes); + } } else { assignVal(output, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type); } diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c index e3326cc26b..e9022db503 100644 --- a/src/query/src/qPercentile.c +++ b/src/query/src/qPercentile.c @@ -237,7 +237,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, } pBucket->elemPerPage = (pBucket->bufPageSize - sizeof(tFilePage))/pBucket->bytes; - pBucket->comparFn = getKeyComparFunc(pBucket->type); + pBucket->comparFn = getKeyComparFunc(pBucket->type, TSDB_ORDER_ASC); pBucket->hashFunc = getHashFunc(pBucket->type); if (pBucket->hashFunc == NULL) { diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index b8a5ee7699..1988fc9df7 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -557,10 +557,9 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { int32_t op = 0; if (onlyQueryTags(pQueryAttr)) { // do nothing for tags query - if (onlyQueryTags(pQueryAttr)) { - op = OP_TagScan; - taosArrayPush(plan, &op); - } + op = OP_TagScan; + taosArrayPush(plan, &op); + if (pQueryAttr->distinct) { op = OP_Distinct; taosArrayPush(plan, &op); @@ -651,8 +650,14 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { taosArrayPush(plan, &op); } } + + // outer query order by support + int32_t orderColId = pQueryAttr->order.orderColId; + if (pQueryAttr->vgId == 0 && orderColId != PRIMARYKEY_TIMESTAMP_COL_INDEX && orderColId != INT32_MIN) { + op = OP_Order; + taosArrayPush(plan, &op); + } } - if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) { op = OP_Limit; @@ -693,7 +698,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) { } // fill operator - if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) { + if (pQueryAttr->fillType != TSDB_FILL_NONE && pQueryAttr->interval.interval > 0) { op = OP_Fill; taosArrayPush(plan, &op); } diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index 825b7960de..4cf05dd2c7 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -223,8 +223,11 @@ static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) { static void shrinkBuffer(STSList* ptsData) { // shrink tmp buffer size if it consumes too many memory compared to the pre-defined size if 
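taoscQSort above sorts the ORDER BY column together with each row's original index, then walks every other column and rearranges it using the recorded indices so the block stays row-aligned. The real function works on raw byte columns of differing widths; the trick itself, isolated:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int key; int idx; } KeyIdx;

static int cmpKey(const void *a, const void *b) {
  const KeyIdx *x = a, *y = b;
  return (x->key > y->key) - (x->key < y->key);
}

int main(void) {
  int    key[]   = {30, 10, 20};       /* column used in ORDER BY          */
  double other[] = {3.0, 1.0, 2.0};    /* any other column, kept row-aligned */
  int n = 3;

  KeyIdx *tmp = malloc(n * sizeof(KeyIdx));
  for (int i = 0; i < n; ++i) tmp[i] = (KeyIdx){key[i], i};
  qsort(tmp, n, sizeof(KeyIdx), cmpKey);

  double *copy = malloc(n * sizeof(double));
  memcpy(copy, other, n * sizeof(double));
  for (int i = 0; i < n; ++i) {        /* permute the other column by saved index */
    key[i] = tmp[i].key;
    other[i] = copy[tmp[i].idx];
  }
  for (int i = 0; i < n; ++i) printf("%d %.1f\n", key[i], other[i]);
  free(tmp);
  free(copy);
  return 0;
}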
(ptsData->allocSize >= ptsData->threshold * 2) { - ptsData->rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE); - ptsData->allocSize = MEM_BUF_SIZE; + char* rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE); + if(rawBuf) { + ptsData->rawBuf = rawBuf; + ptsData->allocSize = MEM_BUF_SIZE; + } } } diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index a3d2e424d2..4caf351799 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -30,6 +30,18 @@ typedef struct SCompSupporter { int32_t order; } SCompSupporter; +int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, bool stable) { + if (pQueryAttr && (!stable)) { + for (int16_t i = 0; i < pQueryAttr->numOfOutput; ++i) { + if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_TOP || pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_BOTTOM) { + return (int32_t)pQueryAttr->pExpr1[i].base.param[0].i64; + } + } + } + + return 1; +} + int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr) { int32_t size = 0; diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 0449ecac8b..2549518249 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -397,7 +397,11 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin SThreadObj *pThreadObj = pClientObj->pThreadObj[index]; SOCKET fd = taosOpenTcpClientSocket(ip, port, pThreadObj->ip); +#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32) + if (fd == (SOCKET)-1) return NULL; +#else if (fd <= 0) return NULL; +#endif struct sockaddr_in sin; uint16_t localPort = 0; diff --git a/src/tsdb/inc/tsdbFS.h b/src/tsdb/inc/tsdbFS.h index d63aeb14ac..55db8d56ff 100644 --- a/src/tsdb/inc/tsdbFS.h +++ b/src/tsdb/inc/tsdbFS.h @@ -18,6 +18,9 @@ #define TSDB_FS_VERSION 0 +// ================== TSDB global config +extern bool tsdbForceKeepFile; + // ================== CURRENT file header info typedef struct { uint32_t version; // Current file system version (relating to code) @@ -109,4 +112,4 @@ static FORCE_INLINE int tsdbUnLockFS(STsdbFS* pFs) { return 0; } -#endif /* _TD_TSDB_FS_H_ */ \ No newline at end of file +#endif /* _TD_TSDB_FS_H_ */ diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h index 9a8de01f71..51801c843c 100644 --- a/src/tsdb/inc/tsdbMeta.h +++ b/src/tsdb/inc/tsdbMeta.h @@ -24,8 +24,7 @@ typedef struct STable { tstr* name; // NOTE: there a flexible string here uint64_t suid; struct STable* pSuper; // super table pointer - uint8_t numOfSchemas; - STSchema* schema[TSDB_MAX_TABLE_SCHEMAS]; + SArray* schema; STSchema* tagSchema; SKVRow tagVal; SSkipList* pIndex; // For TSDB_SUPER_TABLE, it is the skiplist index @@ -107,10 +106,9 @@ static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, if (lock) TSDB_RLOCK_TABLE(pDTable); if (_version < 0) { // get the latest version of schema - pTSchema = pDTable->schema[pDTable->numOfSchemas - 1]; + pTSchema = *(STSchema **)taosArrayGetLast(pDTable->schema); } else { // get the schema with version - void* ptr = taosbsearch(&_version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*), - tsdbCompareSchemaVersion, TD_EQ); + void* ptr = taosArraySearch(pDTable->schema, &_version, tsdbCompareSchemaVersion, TD_EQ); if (ptr == NULL) { terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; goto _exit; diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 6c98283189..8f5f885d69 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -722,7 +722,7 @@ static int tsdbInitCommitH(SCommitH *pCommith, STsdbRepo *pRepo) { return -1; } - 
pCommith->pDataCols = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock); + pCommith->pDataCols = tdNewDataCols(0, pCfg->maxRowsPerFileBlock); if (pCommith->pDataCols == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; tsdbDestroyCommitH(pCommith); @@ -920,7 +920,6 @@ int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCo SDataCol * pDataCol = pDataCols->cols + ncol; SBlockCol *pBlockCol = pBlockData->cols + nColsNotAllNull; - // if (isNEleNull(pDataCol, rowsToWrite)) { // all data to commit are NULL, just ignore it if (isAllRowsNull(pDataCol)) { // all data to commit are NULL, just ignore it continue; } @@ -1277,6 +1276,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt if (key1 < key2) { for (int i = 0; i < pDataCols->numOfCols; i++) { + //TODO: dataColAppendVal may fail dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, pTarget->maxPoints); } @@ -1308,6 +1308,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt ASSERT(!isRowDel); for (int i = 0; i < pDataCols->numOfCols; i++) { + //TODO: dataColAppendVal may fail dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, pTarget->maxPoints); } diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c index 62f9e41119..5ccb9e90f2 100644 --- a/src/tsdb/src/tsdbCompact.c +++ b/src/tsdb/src/tsdbCompact.c @@ -296,7 +296,7 @@ static int tsdbCompactMeta(STsdbRepo *pRepo) { return -1; } - pComph->pDataCols = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock); + pComph->pDataCols = tdNewDataCols(0, pCfg->maxRowsPerFileBlock); if (pComph->pDataCols == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; tsdbDestroyCompactH(pComph); diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index e53d2826c7..70a5262138 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -37,6 +37,7 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired); static int tsdbProcessExpiredFS(STsdbRepo *pRepo); static int tsdbCreateMeta(STsdbRepo *pRepo); +// For backward compatibility // ================== CURRENT file header info static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) { int tlen = 0; @@ -1048,6 +1049,26 @@ static int tsdbRestoreMeta(STsdbRepo *pRepo) { return -1; } + if (tsdbForceKeepFile) { + struct stat tfstat; + + // Get real file size + if (fstat(pfs->cstatus->pmf->fd, &tfstat) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbCloseMFile(pfs->cstatus->pmf); + tfsClosedir(tdir); + regfree(®ex); + return -1; + } + + if (pfs->cstatus->pmf->info.size != tfstat.st_size) { + int64_t tfsize = pfs->cstatus->pmf->info.size; + pfs->cstatus->pmf->info.size = tfstat.st_size; + tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pfs->cstatus->pmf), tfsize, pfs->cstatus->pmf->info.size); + } + } + tsdbCloseMFile(pfs->cstatus->pmf); } } else if (code == REG_NOMATCH) { @@ -1212,6 +1233,24 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) { return -1; } + if (tsdbForceKeepFile) { + struct stat tfstat; + + // Get real file size + if (fstat(pDFile->fd, &tfstat) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + taosArrayDestroy(fArray); + return -1; + } + + if (pDFile->info.size != tfstat.st_size) { + int64_t tfsize = pDFile->info.size; + pDFile->info.size = tfstat.st_size; + tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo), + 
TSDB_FILE_FULL_NAME(pDFile), tfsize, pDFile->info.size); + } + } + tsdbCloseDFile(pDFile); index++; } @@ -1314,4 +1353,4 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired) { tsdbCloseDFileSet(&fset); } -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 8bb2d1c44e..e766d97a97 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -702,11 +702,12 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) { } //row1 has higher priority -static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRepo, STSchema **ppSchema1, STSchema **ppSchema2, STable* pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) { +static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRepo, + STSchema **ppSchema1, STSchema **ppSchema2, + STable* pTable, int32_t* pPoints, SMemRow* pLastRow) { //for compatiblity, duplicate key inserted when update=0 should be also calculated as affected rows! if(row1 == NULL && row2 == NULL && pRepo->config.update == TD_ROW_DISCARD_UPDATE) { - (*pAffectedRows)++; (*pPoints)++; return NULL; } @@ -715,7 +716,6 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep void* pMem = tsdbAllocBytes(pRepo, memRowTLen(row1)); if(pMem == NULL) return NULL; memRowCpy(pMem, row1); - (*pAffectedRows)++; (*pPoints)++; *pLastRow = pMem; return pMem; @@ -750,18 +750,16 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep if(pMem == NULL) return NULL; memRowCpy(pMem, tmp); - (*pAffectedRows)++; (*pPoints)++; - *pLastRow = pMem; return pMem; } static void* tsdbInsertDupKeyMergePacked(void** args) { - return tsdbInsertDupKeyMerge(args[0], args[1], args[2], (STSchema**)&args[3], (STSchema**)&args[4], args[5], args[6], args[7], args[8]); + return tsdbInsertDupKeyMerge(args[0], args[1], args[2], (STSchema**)&args[3], (STSchema**)&args[4], args[5], args[6], args[7]); } -static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STable *pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) { +static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STable *pTable, int32_t* pPoints, SMemRow* pLastRow) { if(pSkipList->insertHandleFn == NULL) { tGenericSavedFunc *dupHandleSavedFunc = genericSavedFuncInit((GenericVaFunc)&tsdbInsertDupKeyMergePacked, 9); @@ -769,17 +767,16 @@ static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STa dupHandleSavedFunc->args[3] = NULL; dupHandleSavedFunc->args[4] = NULL; dupHandleSavedFunc->args[5] = pTable; - dupHandleSavedFunc->args[6] = pAffectedRows; - dupHandleSavedFunc->args[7] = pPoints; - dupHandleSavedFunc->args[8] = pLastRow; pSkipList->insertHandleFn = dupHandleSavedFunc; } + pSkipList->insertHandleFn->args[6] = pPoints; + pSkipList->insertHandleFn->args[7] = pLastRow; } static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t *pAffectedRows) { STsdbMeta *pMeta = pRepo->tsdbMeta; - int64_t points = 0; + int32_t points = 0; STable *pTable = NULL; SSubmitBlkIter blkIter = {0}; SMemTable *pMemTable = NULL; @@ -830,9 +827,10 @@ static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t * SMemRow lastRow = NULL; int64_t osize = SL_SIZE(pTableData->pData); - tsdbSetupSkipListHookFns(pTableData->pData, pRepo, pTable, pAffectedRows, &points, &lastRow); + tsdbSetupSkipListHookFns(pTableData->pData, pRepo, pTable, &points, 
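Both tsdbFS.c hunks above are gated on the new tsdbForceKeepFile flag: during restore they fstat() the file that was just opened and, if the size recorded in its header no longer matches the on-disk size, overwrite the recorded value (and log the change) so the file can still be kept. The check itself amounts to the following sketch, with a stand-in struct for the file meta:

#include <sys/stat.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int fd; int64_t size; } FileInfo;   /* stand-in for the file meta */

/* Update info->size from the real on-disk size when the header value is stale. */
static int reconcileSize(FileInfo *info) {
  struct stat st;
  if (fstat(info->fd, &st) < 0) return -1;
  if (info->size != (int64_t)st.st_size) {
    printf("size changed from %lld to %lld\n",
           (long long)info->size, (long long)st.st_size);
    info->size = (int64_t)st.st_size;
  }
  return 0;
}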
&lastRow); tSkipListPutBatchByIter(pTableData->pData, &blkIter, (iter_next_fn_t)tsdbGetSubmitBlkNext); int64_t dsize = SL_SIZE(pTableData->pData) - osize; + (*pAffectedRows) += points; if(lastRow != NULL) { diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 619b32b3d9..96e86a6d99 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -17,7 +17,6 @@ #define TSDB_SUPER_TABLE_SL_LEVEL 5 #define DEFAULT_TAG_INDEX_COLUMN 0 -static int tsdbCompareSchemaVersion(const void *key1, const void *key2); static char * getTagIndexKey(const void *pData); static STable *tsdbNewTable(); static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pSTable); @@ -44,6 +43,8 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable); static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable); static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid); static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema); +static int tsdbAddSchema(STable *pTable, STSchema *pSchema); +static void tsdbFreeTableSchema(STable *pTable); // ------------------ OUTER FUNCTIONS ------------------ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { @@ -723,17 +724,10 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema, STsdbMeta *pMeta = pRepo->tsdbMeta; STable *pCTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable; - ASSERT(schemaVersion(pSchema) > schemaVersion(pCTable->schema[pCTable->numOfSchemas - 1])); + ASSERT(schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pCTable->schema))); TSDB_WLOCK_TABLE(pCTable); - if (pCTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) { - pCTable->schema[pCTable->numOfSchemas++] = pSchema; - } else { - ASSERT(pCTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS); - tdFreeSchema(pCTable->schema[0]); - memmove(pCTable->schema, pCTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1)); - pCTable->schema[pCTable->numOfSchemas - 1] = pSchema; - } + tsdbAddSchema(pCTable, pSchema); if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema); if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema); @@ -829,9 +823,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST TABLE_TID(pTable) = -1; TABLE_SUID(pTable) = -1; pTable->pSuper = NULL; - pTable->numOfSchemas = 1; - pTable->schema[0] = tdDupSchema(pCfg->schema); - if (pTable->schema[0] == NULL) { + if (tsdbAddSchema(pTable, tdDupSchema(pCfg->schema)) < 0) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _err; } @@ -842,7 +834,8 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST } pTable->tagVal = NULL; STColumn *pCol = schemaColAt(pTable->tagSchema, DEFAULT_TAG_INDEX_COLUMN); - pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, SL_ALLOW_DUP_KEY, getTagIndexKey); + pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, + SL_ALLOW_DUP_KEY, getTagIndexKey); if (pTable->pIndex == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _err; @@ -871,9 +864,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST } } else { TABLE_SUID(pTable) = -1; - pTable->numOfSchemas = 1; - pTable->schema[0] = tdDupSchema(pCfg->schema); - if (pTable->schema[0] == NULL) { + if (tsdbAddSchema(pTable, tdDupSchema(pCfg->schema)) < 0) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto 
_err; } @@ -907,9 +898,7 @@ static void tsdbFreeTable(STable *pTable) { TABLE_UID(pTable)); tfree(TABLE_NAME(pTable)); if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { - for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) { - tdFreeSchema(pTable->schema[i]); - } + tsdbFreeTableSchema(pTable); if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { tdFreeSchema(pTable->tagSchema); @@ -1261,9 +1250,10 @@ static int tsdbEncodeTable(void **buf, STable *pTable) { tlen += taosEncodeFixedU64(buf, TABLE_SUID(pTable)); tlen += tdEncodeKVRow(buf, pTable->tagVal); } else { - tlen += taosEncodeFixedU8(buf, pTable->numOfSchemas); - for (int i = 0; i < pTable->numOfSchemas; i++) { - tlen += tdEncodeSchema(buf, pTable->schema[i]); + tlen += taosEncodeFixedU8(buf, (uint8_t)taosArrayGetSize(pTable->schema)); + for (int i = 0; i < taosArrayGetSize(pTable->schema); i++) { + STSchema *pSchema = taosArrayGetP(pTable->schema, i); + tlen += tdEncodeSchema(buf, pSchema); } if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { @@ -1294,9 +1284,12 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) { buf = taosDecodeFixedU64(buf, &TABLE_SUID(pTable)); buf = tdDecodeKVRow(buf, &(pTable->tagVal)); } else { - buf = taosDecodeFixedU8(buf, &(pTable->numOfSchemas)); - for (int i = 0; i < pTable->numOfSchemas; i++) { - buf = tdDecodeSchema(buf, &(pTable->schema[i])); + uint8_t nSchemas; + buf = taosDecodeFixedU8(buf, &nSchemas); + for (int i = 0; i < nSchemas; i++) { + STSchema *pSchema; + buf = tdDecodeSchema(buf, &pSchema); + tsdbAddSchema(pTable, pSchema); } if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { @@ -1458,3 +1451,38 @@ static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema) { return 0; } + +static int tsdbAddSchema(STable *pTable, STSchema *pSchema) { + ASSERT(TABLE_TYPE(pTable) != TSDB_CHILD_TABLE); + + if (pTable->schema == NULL) { + pTable->schema = taosArrayInit(TSDB_MAX_TABLE_SCHEMAS, sizeof(SSchema *)); + if (pTable->schema == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + } + + ASSERT(taosArrayGetSize(pTable->schema) == 0 || + schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pTable->schema))); + + if (taosArrayPush(pTable->schema, &pSchema) == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + return 0; +} + +static void tsdbFreeTableSchema(STable *pTable) { + ASSERT(pTable != NULL); + + if (pTable->schema) { + for (size_t i = 0; i < taosArrayGetSize(pTable->schema); i++) { + STSchema *pSchema = taosArrayGetP(pTable->schema, i); + tdFreeSchema(pSchema); + } + + taosArrayDestroy(pTable->schema); + } +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index b5374ccaed..524ab065d6 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -466,7 +466,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC STsdbMeta* pMeta = tsdbGetMeta(tsdb); assert(pMeta != NULL); - pQueryHandle->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pQueryHandle->pTsdb->config.maxRowsPerFileBlock); + pQueryHandle->pDataCols = tdNewDataCols(pMeta->maxCols, pQueryHandle->pTsdb->config.maxRowsPerFileBlock); if (pQueryHandle->pDataCols == NULL) { tsdbError("%p failed to malloc buf for pDataCols, %"PRIu64, pQueryHandle, pQueryHandle->qId); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -691,6 +691,18 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, 
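The tsdbMeta changes above replace the fixed schema[TSDB_MAX_TABLE_SCHEMAS] array in STable with a growable SArray of STSchema pointers: tsdbAddSchema asserts that versions are appended in increasing order, the latest schema is simply the last element, and a specific version is found by searching the sorted array. The same shape in plain C, assuming an append-only, version-sorted list:

#include <stdlib.h>

typedef struct { int version; /* column definitions ... */ } Schema;

typedef struct { Schema **items; int count; int cap; } SchemaList;

/* Append-only: callers must push strictly increasing versions. */
static int addSchema(SchemaList *l, Schema *s) {
  if (l->count && l->items[l->count - 1]->version >= s->version) return -1;
  if (l->count == l->cap) {
    int cap = l->cap ? l->cap * 2 : 4;
    Schema **p = realloc(l->items, cap * sizeof(Schema *));
    if (p == NULL) return -1;
    l->items = p;
    l->cap = cap;
  }
  l->items[l->count++] = s;
  return 0;
}

/* version < 0 means "latest"; otherwise binary-search the sorted list. */
static Schema *getSchema(const SchemaList *l, int version) {
  if (l->count == 0) return NULL;
  if (version < 0) return l->items[l->count - 1];
  int lo = 0, hi = l->count - 1;
  while (lo <= hi) {
    int mid = (lo + hi) / 2;
    if (l->items[mid]->version == version) return l->items[mid];
    if (l->items[mid]->version < version) lo = mid + 1; else hi = mid - 1;
  }
  return NULL;
}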
uint64_t qId, SMemRef* pRef) { STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); + if (pNew->numOfTables == 0) { + tsdbDebug("update query time range to invalidate time window"); + + assert(taosArrayGetSize(pNew->pGroupList) == 0); + bool asc = ASCENDING_TRAVERSE(pCond->order); + if (asc) { + pCond->twindow.ekey = pCond->twindow.skey - 1; + } else { + pCond->twindow.skey = pCond->twindow.ekey - 1; + } + } + STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qId, pRef); pQueryHandle->loadExternalRow = true; pQueryHandle->currentLoadExternalRows = true; @@ -1446,7 +1458,7 @@ static int doBinarySearchKey(char* pValue, int num, TSKEY key, int order) { return midPos; } -int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end) { +static int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end) { char* pData = NULL; int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1 : -1; @@ -1481,7 +1493,7 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity pData = (char*)pColInfo->pData + (capacity - numOfRows - num) * pColInfo->info.bytes; } - if (pColInfo->info.colId == src->colId) { + if (!isAllRowsNull(src) && pColInfo->info.colId == src->colId) { if (pColInfo->info.type != TSDB_DATA_TYPE_BINARY && pColInfo->info.type != TSDB_DATA_TYPE_NCHAR) { memmove(pData, (char*)src->pData + bytes * start, bytes * num); } else { // handle the var-string @@ -3468,6 +3480,7 @@ void filterPrepare(void* expr, void* param) { SArray *arr = (SArray *)(pCond->arr); for (size_t i = 0; i < taosArrayGetSize(arr); i++) { char* p = taosArrayGetP(arr, i); + strtolower(varDataVal(p), varDataVal(p)); taosHashPut(pObj, varDataVal(p),varDataLen(p), &dummy, sizeof(dummy)); } } else { diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c index 711c32535b..74d41cce19 100644 --- a/src/tsdb/src/tsdbReadImpl.c +++ b/src/tsdb/src/tsdbReadImpl.c @@ -42,14 +42,14 @@ int tsdbInitReadH(SReadH *pReadh, STsdbRepo *pRepo) { return -1; } - pReadh->pDCols[0] = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock); + pReadh->pDCols[0] = tdNewDataCols(0, pCfg->maxRowsPerFileBlock); if (pReadh->pDCols[0] == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; tsdbDestroyReadH(pReadh); return -1; } - pReadh->pDCols[1] = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock); + pReadh->pDCols[1] = tdNewDataCols(0, pCfg->maxRowsPerFileBlock); if (pReadh->pDCols[1] == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; tsdbDestroyReadH(pReadh); @@ -463,7 +463,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat SDataCol *pDataCol = &(pDataCols->cols[dcol]); if (dcol != 0 && ccol >= pBlockData->numOfCols) { // Set current column as NULL and forward - dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints); + dataColReset(pDataCol); dcol++; continue; } @@ -503,7 +503,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat ccol++; } else { // Set current column as NULL and forward - dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints); + dataColReset(pDataCol); dcol++; } } @@ -608,7 +608,7 @@ static int tsdbLoadBlockDataColsImpl(SReadH *pReadh, SBlock *pBlock, SDataCols * } if (pBlockCol == NULL) { - dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints); + dataColReset(pDataCol); continue; } diff --git a/src/util/inc/tcompare.h 
b/src/util/inc/tcompare.h index 612ce7ede0..84346cc79c 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -22,10 +22,10 @@ extern "C" { #include "os.h" -#define TSDB_PATTERN_MATCH 0 -#define TSDB_PATTERN_NOMATCH 1 -#define TSDB_PATTERN_NOWILDCARDMATCH 2 -#define TSDB_PATTERN_STRING_MAX_LEN 20 +#define TSDB_PATTERN_MATCH 0 +#define TSDB_PATTERN_NOMATCH 1 +#define TSDB_PATTERN_NOWILDCARDMATCH 2 +#define TSDB_PATTERN_STRING_DEFAULT_LEN 100 #define FLT_COMPAR_TOL_FACTOR 4 #define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON)) @@ -47,7 +47,7 @@ int WCSPatternMatch(const wchar_t *pattern, const wchar_t *str, size_t size, con int32_t doCompare(const char* a, const char* b, int32_t type, size_t size); -__compar_fn_t getKeyComparFunc(int32_t keyType); +__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order); __compar_fn_t getComparFunc(int32_t type, int32_t optr); diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index d03ce6e0f1..f146ec0b8b 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -81,6 +81,7 @@ typedef struct { extern SGlobalCfg tsGlobalConfig[]; extern int32_t tsGlobalConfigNum; extern char * tsCfgStatusStr[]; +extern bool tsdbForceKeepFile; void taosReadGlobalLogCfg(); bool taosReadGlobalCfg(); diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index e953f4c464..1b980a4a1d 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -16,28 +16,22 @@ #include "os.h" #include "ttype.h" #include "tcompare.h" -#include "tarray.h" #include "hash.h" -int32_t compareInt32Val(const void *pLeft, const void *pRight) { - int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight); - if (left > right) return 1; - if (left < right) return -1; - return 0; +int32_t setCompareBytes1(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 1) ? 1 : 0; } -int32_t compareInt64Val(const void *pLeft, const void *pRight) { - int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight); - if (left > right) return 1; - if (left < right) return -1; - return 0; +int32_t setCompareBytes2(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 2) ? 1 : 0; } -int32_t compareInt16Val(const void *pLeft, const void *pRight) { - int16_t left = GET_INT16_VAL(pLeft), right = GET_INT16_VAL(pRight); - if (left > right) return 1; - if (left < right) return -1; - return 0; +int32_t setCompareBytes4(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 4) ? 1 : 0; +} + +int32_t setCompareBytes8(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 8) ? 
1 : 0; } int32_t compareInt8Val(const void *pLeft, const void *pRight) { @@ -47,27 +41,76 @@ int32_t compareInt8Val(const void *pLeft, const void *pRight) { return 0; } -int32_t compareUint32Val(const void *pLeft, const void *pRight) { - int32_t left = GET_UINT32_VAL(pLeft), right = GET_UINT32_VAL(pRight); +int32_t compareInt8ValDesc(const void *pLeft, const void *pRight) { + return compareInt8Val(pRight, pLeft); +} + +int32_t compareInt16Val(const void *pLeft, const void *pRight) { + int16_t left = GET_INT16_VAL(pLeft), right = GET_INT16_VAL(pRight); if (left > right) return 1; if (left < right) return -1; return 0; } +int32_t compareInt16ValDesc(const void* pLeft, const void* pRight) { + return compareInt16Val(pRight, pLeft); +} + +int32_t compareInt32Val(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt32ValDesc(const void* pLeft, const void* pRight) { + return compareInt32Val(pRight, pLeft); +} + +int32_t compareInt64Val(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64ValDesc(const void* pLeft, const void* pRight) { + return compareInt64Val(pRight, pLeft); +} + +int32_t compareUint32Val(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft), right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32ValDesc(const void* pLeft, const void* pRight) { + return compareUint32Val(pRight, pLeft); +} + int32_t compareUint64Val(const void *pLeft, const void *pRight) { - int64_t left = GET_UINT64_VAL(pLeft), right = GET_UINT64_VAL(pRight); + uint64_t left = GET_UINT64_VAL(pLeft), right = GET_UINT64_VAL(pRight); if (left > right) return 1; if (left < right) return -1; return 0; } +int32_t compareUint64ValDesc(const void* pLeft, const void* pRight) { + return compareUint64Val(pRight, pLeft); +} + int32_t compareUint16Val(const void *pLeft, const void *pRight) { - int16_t left = GET_UINT16_VAL(pLeft), right = GET_UINT16_VAL(pRight); + uint16_t left = GET_UINT16_VAL(pLeft), right = GET_UINT16_VAL(pRight); if (left > right) return 1; if (left < right) return -1; return 0; } +int32_t compareUint16ValDesc(const void* pLeft, const void* pRight) { + return compareUint16Val(pRight, pLeft); +} + int32_t compareUint8Val(const void* pLeft, const void* pRight) { uint8_t left = GET_UINT8_VAL(pLeft), right = GET_UINT8_VAL(pRight); if (left > right) return 1; @@ -75,6 +118,10 @@ int32_t compareUint8Val(const void* pLeft, const void* pRight) { return 0; } +int32_t compareUint8ValDesc(const void* pLeft, const void* pRight) { + return compareUint8Val(pRight, pLeft); +} + int32_t compareFloatVal(const void *pLeft, const void *pRight) { float p1 = GET_FLOAT_VAL(pLeft); float p2 = GET_FLOAT_VAL(pRight); @@ -92,8 +139,12 @@ int32_t compareFloatVal(const void *pLeft, const void *pRight) { } if (FLT_EQUAL(p1, p2)) { return 0; - } - return FLT_GREATER(p1, p2) ? 1: -1; + } + return FLT_GREATER(p1, p2) ? 
1: -1; +} + +int32_t compareFloatValDesc(const void* pLeft, const void* pRight) { + return compareFloatVal(pRight, pLeft); } int32_t compareDoubleVal(const void *pLeft, const void *pRight) { @@ -113,14 +164,18 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight) { } if (FLT_EQUAL(p1, p2)) { return 0; - } - return FLT_GREATER(p1, p2) ? 1: -1; + } + return FLT_GREATER(p1, p2) ? 1: -1; +} + +int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) { + return compareDoubleVal(pRight, pLeft); } int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) { int32_t len1 = varDataLen(pLeft); int32_t len2 = varDataLen(pRight); - + if (len1 != len2) { return len1 > len2? 1:-1; } else { @@ -133,14 +188,18 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) { } } +int32_t compareLenPrefixedStrDesc(const void* pLeft, const void* pRight) { + return compareLenPrefixedStr(pRight, pLeft); +} + int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { int32_t len1 = varDataLen(pLeft); int32_t len2 = varDataLen(pRight); - + if (len1 != len2) { return len1 > len2? 1:-1; } else { - int32_t ret = wcsncmp(varDataVal(pLeft), varDataVal(pRight), len1/TSDB_NCHAR_SIZE); + int32_t ret = memcmp((wchar_t*) pLeft, (wchar_t*) pRight, len1); if (ret == 0) { return 0; } else { @@ -149,6 +208,10 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { } } +int32_t compareLenPrefixedWStrDesc(const void* pLeft, const void* pRight) { + return compareLenPrefixedWStr(pRight, pLeft); +} + /* * Compare two strings * TSDB_MATCH: Match @@ -161,33 +224,33 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { */ int patternMatch(const char *patterStr, const char *str, size_t size, const SPatternCompareInfo *pInfo) { char c, c1; - + int32_t i = 0; int32_t j = 0; - + while ((c = patterStr[i++]) != 0) { if (c == pInfo->matchAll) { /* Match "*" */ - + while ((c = patterStr[i++]) == pInfo->matchAll || c == pInfo->matchOne) { if (c == pInfo->matchOne && (j > size || str[j++] == 0)) { // empty string, return not match return TSDB_PATTERN_NOWILDCARDMATCH; } } - + if (c == 0) { return TSDB_PATTERN_MATCH; /* "*" at the end of the pattern matches */ } - + char next[3] = {toupper(c), tolower(c), 0}; while (1) { size_t n = strcspn(str, next); str += n; - + if (str[0] == 0 || (n >= size)) { break; } - + int32_t ret = patternMatch(&patterStr[i], ++str, size - n - 1, pInfo); if (ret != TSDB_PATTERN_NOMATCH) { return ret; @@ -195,18 +258,18 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat } return TSDB_PATTERN_NOWILDCARDMATCH; } - + c1 = str[j++]; - + if (j <= size) { if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) { continue; } } - + return TSDB_PATTERN_NOMATCH; } - + return (str[j] == 0 || j >= size) ? 
TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH; } @@ -214,13 +277,13 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c wchar_t c, c1; wchar_t matchOne = L'_'; // "_" wchar_t matchAll = L'%'; // "%" - + int32_t i = 0; int32_t j = 0; - + while ((c = patterStr[i++]) != 0) { if (c == matchAll) { /* Match "%" */ - + while ((c = patterStr[i++]) == matchAll || c == matchOne) { if (c == matchOne && (j > size || str[j++] == 0)) { return TSDB_PATTERN_NOWILDCARDMATCH; @@ -229,87 +292,108 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c if (c == 0) { return TSDB_PATTERN_MATCH; } - + wchar_t accept[3] = {towupper(c), towlower(c), 0}; while (1) { size_t n = wcscspn(str, accept); - + str += n; if (str[0] == 0 || (n >= size)) { break; } - + int32_t ret = WCSPatternMatch(&patterStr[i], ++str, size - n - 1, pInfo); if (ret != TSDB_PATTERN_NOMATCH) { return ret; } } - + return TSDB_PATTERN_NOWILDCARDMATCH; } - + c1 = str[j++]; - + if (j <= size) { if (c == c1 || towlower(c) == towlower(c1) || (c == matchOne && c1 != 0)) { continue; } } - + return TSDB_PATTERN_NOMATCH; } return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH; } -static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { +int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; - - char pattern[128] = {0}; + + assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN); + char *pattern = calloc(varDataLen(pRight) + 1, sizeof(char)); memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); - assert(varDataLen(pRight) < 128); size_t sz = varDataLen(pLeft); - char *buf = malloc(sz + 1); - memcpy(buf, varDataVal(pLeft), sz); + char *buf = malloc(sz + 1); + memcpy(buf, varDataVal(pLeft), sz); buf[sz] = 0; int32_t ret = patternMatch(pattern, buf, sz, &pInfo); free(buf); + free(pattern); return (ret == TSDB_PATTERN_MATCH) ? 0 : 1; } int32_t taosArrayCompareString(const void* a, const void* b) { const char* x = *(const char**)a; const char* y = *(const char**)b; - + return compareLenPrefixedStr(x, y); } -//static int32_t compareFindStrInArray(const void* pLeft, const void* pRight) { -// const SArray* arr = (const SArray*) pRight; -// return taosArraySearchString(arr, pLeft, taosArrayCompareString, TD_EQ) == NULL ? 0 : 1; -//} -static int32_t compareFindItemInSet(const void *pLeft, const void* pRight) { - return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0; +int32_t compareFindItemInSet(const void *pLeft, const void* pRight) { + return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0; } -static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { +int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; - wchar_t pattern[128] = {0}; - assert(TSDB_PATTERN_STRING_MAX_LEN < 128); + assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE); + wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t)); memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); - assert(varDataLen(pRight) < 128); - + int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo); + free(pattern); + return (ret == TSDB_PATTERN_MATCH) ? 
0 : 1; } __compar_fn_t getComparFunc(int32_t type, int32_t optr) { __compar_fn_t comparFn = NULL; - + + if (optr == TSDB_RELATION_IN && (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR)) { + switch (type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + return setCompareBytes1; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + return setCompareBytes2; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_FLOAT: + return setCompareBytes4; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + return setCompareBytes8; + default: + assert(0); + } + } + switch (type) { case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: comparFn = compareInt8Val; break; @@ -327,13 +411,15 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { } else { /* normal relational comparFn */ comparFn = compareLenPrefixedStr; } - + break; } - + case TSDB_DATA_TYPE_NCHAR: { if (optr == TSDB_RELATION_LIKE) { comparFn = compareWStrPatternComp; + } else if (optr == TSDB_RELATION_IN) { + comparFn = compareFindItemInSet; } else { comparFn = compareLenPrefixedWStr; } @@ -349,57 +435,57 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { comparFn = compareInt32Val; break; } - + return comparFn; } -__compar_fn_t getKeyComparFunc(int32_t keyType) { +__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) { __compar_fn_t comparFn = NULL; - + switch (keyType) { case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_BOOL: - comparFn = compareInt8Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt8Val:compareInt8ValDesc; break; case TSDB_DATA_TYPE_SMALLINT: - comparFn = compareInt16Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt16Val:compareInt16ValDesc; break; case TSDB_DATA_TYPE_INT: - comparFn = compareInt32Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc; break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - comparFn = compareInt64Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt64Val:compareInt64ValDesc; break; case TSDB_DATA_TYPE_FLOAT: - comparFn = compareFloatVal; + comparFn = (order == TSDB_ORDER_ASC)? compareFloatVal:compareFloatValDesc; break; case TSDB_DATA_TYPE_DOUBLE: - comparFn = compareDoubleVal; + comparFn = (order == TSDB_ORDER_ASC)? compareDoubleVal:compareDoubleValDesc; break; case TSDB_DATA_TYPE_UTINYINT: - comparFn = compareUint8Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint8Val:compareUint8ValDesc; break; case TSDB_DATA_TYPE_USMALLINT: - comparFn = compareUint16Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint16Val:compareUint16ValDesc; break; case TSDB_DATA_TYPE_UINT: - comparFn = compareUint32Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint32Val:compareUint32ValDesc; break; case TSDB_DATA_TYPE_UBIGINT: - comparFn = compareUint64Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint64Val:compareUint64ValDesc; break; case TSDB_DATA_TYPE_BINARY: - comparFn = compareLenPrefixedStr; + comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedStr:compareLenPrefixedStrDesc; break; case TSDB_DATA_TYPE_NCHAR: - comparFn = compareLenPrefixedWStr; + comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedWStr:compareLenPrefixedWStrDesc; break; default: - comparFn = compareInt32Val; + comparFn = (order == TSDB_ORDER_ASC)? 
compareInt32Val:compareInt32ValDesc; break; } - + return comparFn; } @@ -423,8 +509,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { if (t1->len != t2->len) { return t1->len > t2->len? 1:-1; } - - int32_t ret = wcsncmp((wchar_t*) t1->data, (wchar_t*) t2->data, t2->len/TSDB_NCHAR_SIZE); + int32_t ret = memcmp((wchar_t*) t1, (wchar_t*) t2, t2->len); if (ret == 0) { return ret; } @@ -433,7 +518,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { default: { // todo refactor tstr* t1 = (tstr*) f1; tstr* t2 = (tstr*) f2; - + if (t1->len != t2->len) { return t1->len > t2->len? 1:-1; } else { diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index b464519ba6..98fd9c094c 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -54,7 +54,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _ pSkipList->keyFn = fn; pSkipList->seed = rand(); if (comparFn == NULL) { - pSkipList->comparFn = getKeyComparFunc(keyType); + pSkipList->comparFn = getKeyComparFunc(keyType, TSDB_ORDER_ASC); } else { pSkipList->comparFn = comparFn; } diff --git a/src/util/tests/skiplistTest.cpp b/src/util/tests/skiplistTest.cpp index dfbe0f6716..df4c5af5e2 100644 --- a/src/util/tests/skiplistTest.cpp +++ b/src/util/tests/skiplistTest.cpp @@ -70,7 +70,7 @@ void doubleSkipListTest() { } void randKeyTest() { - SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT), + SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT, TSDB_ORDER_ASC), false, getkey); int32_t size = 200000; diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 4774998799..e991bf02aa 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -17,6 +17,7 @@ #define TAOS_RANDOM_FILE_FAIL_TEST #include "os.h" #include "taoserror.h" +#include "taosmsg.h" #include "tchecksum.h" #include "tfile.h" #include "twal.h" @@ -114,7 +115,7 @@ void walRemoveAllOldFiles(void *handle) { #if defined(WAL_CHECKSUM_WHOLE) static void walUpdateChecksum(SWalHead *pHead) { - pHead->sver = 1; + pHead->sver = 2; pHead->cksum = 0; pHead->cksum = taosCalcChecksum(0, (uint8_t *)pHead, sizeof(*pHead) + pHead->len); } @@ -122,7 +123,7 @@ static void walUpdateChecksum(SWalHead *pHead) { static int walValidateChecksum(SWalHead *pHead) { if (pHead->sver == 0) { // for compatible with wal before sver 1 return taosCheckChecksumWhole((uint8_t *)pHead, sizeof(*pHead)); - } else if (pHead->sver == 1) { + } else if (pHead->sver >= 1) { uint32_t cksum = pHead->cksum; pHead->cksum = 0; return taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, cksum); @@ -281,7 +282,7 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, return TSDB_CODE_SUCCESS; } - if (pHead->sver == 1) { + if (pHead->sver >= 1) { if (tfRead(tfd, pHead->cont, pHead->len) < pHead->len) { wError("vgId:%d, read to end of corrupted wal file, offset:%" PRId64, pWal->vgId, pos); return TSDB_CODE_WAL_FILE_CORRUPTED; @@ -306,7 +307,115 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, return TSDB_CODE_WAL_FILE_CORRUPTED; } +// Add SMemRowType ahead of SDataRow +static void expandSubmitBlk(SSubmitBlk *pDest, SSubmitBlk *pSrc, int32_t *lenExpand) { + // copy the header firstly + memcpy(pDest, pSrc, sizeof(SSubmitBlk)); + int32_t nRows = htons(pDest->numOfRows); + int32_t dataLen = htonl(pDest->dataLen); 
+ + if ((nRows <= 0) || (dataLen <= 0)) { + return; + } + + char *pDestData = pDest->data; + char *pSrcData = pSrc->data; + for (int32_t i = 0; i < nRows; ++i) { + memRowSetType(pDestData, SMEM_ROW_DATA); + memcpy(memRowDataBody(pDestData), pSrcData, dataRowLen(pSrcData)); + pDestData = POINTER_SHIFT(pDestData, memRowTLen(pDestData)); + pSrcData = POINTER_SHIFT(pSrcData, dataRowLen(pSrcData)); + ++(*lenExpand); + } + pDest->dataLen = htonl(dataLen + nRows * sizeof(uint8_t)); +} + +// Check SDataRow by comparing the SDataRow len and SSubmitBlk dataLen +static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) { + if ((nRows <= 0) || (dataLen <= 0)) { + return true; + } + int32_t len = 0, kvLen = 0; + for (int i = 0; i < nRows; ++i) { + len += dataRowLen(pBlkData); + if (len > dataLen) { + return false; + } + + /** + * For SDataRow between version [2.1.5.0 and 2.1.6.X], it would never conflict. + * For SKVRow between version [2.1.5.0 and 2.1.6.X], it may conflict in below scenario + * - with 1st type byte 0x01 and sversion 0x0101(257), thus do further check + */ + if (dataRowLen(pBlkData) == 257) { + SMemRow memRow = pBlkData; + SKVRow kvRow = memRowKvBody(memRow); + int nCols = kvRowNCols(kvRow); + uint16_t calcTsOffset = (uint16_t)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nCols); + uint16_t realTsOffset = (kvRowColIdx(kvRow))->offset; + if (calcTsOffset == realTsOffset) { + kvLen += memRowKvTLen(memRow); + } + } + pBlkData = POINTER_SHIFT(pBlkData, dataRowLen(pBlkData)); + } + if (len != dataLen) { + return false; + } + if (kvLen == dataLen) { + return false; + } + return true; +} +// for WAL SMemRow/SDataRow compatibility +static int walSMemRowCheck(SWalHead *pHead) { + if ((pHead->sver < 2) && (pHead->msgType == TSDB_MSG_TYPE_SUBMIT)) { + SSubmitMsg *pMsg = (SSubmitMsg *)pHead->cont; + int32_t numOfBlocks = htonl(pMsg->numOfBlocks); + if (numOfBlocks <= 0) { + return 0; + } + + int32_t nTotalRows = 0; + SSubmitBlk *pBlk = (SSubmitBlk *)pMsg->blocks; + for (int32_t i = 0; i < numOfBlocks; ++i) { + int32_t dataLen = htonl(pBlk->dataLen); + int32_t nRows = htons(pBlk->numOfRows); + nTotalRows += nRows; + if (!walIsSDataRow(pBlk->data, nRows, dataLen)) { + return 0; + } + pBlk = (SSubmitBlk *)POINTER_SHIFT(pBlk, sizeof(SSubmitBlk) + dataLen); + } + ASSERT(nTotalRows >= 0); + SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1); + if (pWalHead == NULL) { + return -1; + } + + memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg)); + + SSubmitMsg *pDestMsg = (SSubmitMsg *)pWalHead->cont; + SSubmitBlk *pDestBlks = (SSubmitBlk *)pDestMsg->blocks; + SSubmitBlk *pSrcBlks = (SSubmitBlk *)pMsg->blocks; + int32_t lenExpand = 0; + for (int32_t i = 0; i < numOfBlocks; ++i) { + expandSubmitBlk(pDestBlks, pSrcBlks, &lenExpand); + pDestBlks = POINTER_SHIFT(pDestBlks, htonl(pDestBlks->dataLen) + sizeof(SSubmitBlk)); + pSrcBlks = POINTER_SHIFT(pSrcBlks, htonl(pSrcBlks->dataLen) + sizeof(SSubmitBlk)); + } + if (lenExpand > 0) { + pDestMsg->header.contLen = htonl(pDestMsg->length) + lenExpand; + pDestMsg->length = htonl(pDestMsg->header.contLen); + pWalHead->len = pWalHead->len + lenExpand; + } + + memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len); + tfree(pWalHead); + } + return 0; +} static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, char *name, int64_t fileId) { int32_t size = WAL_MAX_SIZE; @@ -346,7 +455,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch } #if 
defined(WAL_CHECKSUM_WHOLE) - if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 1) { + if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 2) { wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); @@ -379,7 +488,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch continue; } - if (pHead->sver == 1 && !walValidateChecksum(pHead)) { + if ((pHead->sver >= 1) && !walValidateChecksum(pHead)) { wError("vgId:%d, file:%s, wal whole cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); @@ -431,7 +540,14 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch pWal->version = pHead->version; - //wInfo("writeFp: %ld", offset); + // wInfo("writeFp: %ld", offset); + if (0 != walSMemRowCheck(pHead)) { + wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, + pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); + tfClose(tfd); + tfree(buffer); + return TAOS_SYSTEM_ERROR(errno); + } (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); } diff --git a/tests/gotest/case001/case001.sh b/tests/gotest/case001/case001.sh index 831e9f83ac..94e5bb44e0 100644 --- a/tests/gotest/case001/case001.sh +++ b/tests/gotest/case001/case001.sh @@ -15,7 +15,8 @@ script_dir="$(dirname $(readlink -f $0))" ###### step 3: start build cd $script_dir rm -f go.* -go mod init demotest -go build +go mod init demotest > /dev/null 2>&1 +go mod tidy > /dev/null 2>&1 +go build > /dev/null 2>&1 sleep 1s ./demotest -h $1 -p $2 diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 5b2c860122..68b64fd4e0 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -101,7 +101,14 @@ function runQueryPerfTest { python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT + echo "=========== taosdemo performance: 4 int columns, 10000 tables, 100000 records per table ===========" | tee -a $PERFORMANCE_TEST_REPORT python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT + + echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 records per table ===========" | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT + + echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 records per table ===========" | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT } diff --git a/tests/pytest/alter/alterColMultiTimes.py b/tests/pytest/alter/alterColMultiTimes.py new file mode 100644 index 0000000000..173ca8158d --- /dev/null +++ b/tests/pytest/alter/alterColMultiTimes.py @@ -0,0 +1,67 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def genColList(self): + ''' + generate column list + ''' + col_list = list() + for i in range(1, 18): + col_list.append(f'c{i}') + return col_list + + def genIncreaseValue(self, input_value): + ''' + add ', 1' to end of value every loop + ''' + value_list = list(input_value) + value_list.insert(-1, ", 1") + return ''.join(value_list) + + def insertAlter(self): + ''' + after each alter and insert, when execute 'select * from {tbname};' taosd will coredump + ''' + tbname = ''.join(random.choice(string.ascii_letters.lower()) for i in range(7)) + input_value = '(now, 1)' + tdSql.execute(f'create table {tbname} (ts timestamp, c0 int);') + tdSql.execute(f'insert into {tbname} values {input_value};') + for col in self.genColList(): + input_value = self.genIncreaseValue(input_value) + tdSql.execute(f'alter table {tbname} add column {col} int;') + tdSql.execute(f'insert into {tbname} values {input_value};') + tdSql.query(f'select * from {tbname};') + tdSql.checkRows(18) + + def run(self): + tdSql.prepare() + self.insertAlter() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index 376567b7e8..6c92f33018 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -17742,4 +17742,370 @@ fun:taosGetFqdn fun:taosCheckGlobalCfg fun:taos_init_imp +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/bin/python3.8 + fun:PyObject_GetItem + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:PyCode_NewWithPosOnlyArgs + fun:PyCode_New + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + 
fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/local/lib/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun: malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun: malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + obj:/usr/bin/python3.8 + fun:PyObject_CallFunctionObjArgs + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_GetAttr + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8) + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8) + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8) + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8) + fun:PyTuple_Pack + 
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8) + obj:/usr/bin/python3.8) + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/np_datetime.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/ccalendar.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall +} +{ + + Memcheck:Leak + match-leak-kinds: definite + ... + obj:/usr/local/lib/python3.8/dist-packages/pandas/* + ... 
+} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:PyObject_GetAttr + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall } \ No newline at end of file diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index eb9dbeef79..e444a0a318 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -76,6 +76,7 @@ python3 ./test.py -f tag_lite/set.py python3 ./test.py -f tag_lite/smallint.py python3 ./test.py -f tag_lite/tinyint.py python3 ./test.py -f tag_lite/timestamp.py +python3 ./test.py -f tag_lite/TestModifyTag.py #python3 ./test.py -f dbmgmt/database-name-boundary.py python3 test.py -f dbmgmt/nanoSecondCheck.py @@ -151,6 +152,7 @@ python3 ./test.py -f import_merge/importCSV.py python3 ./test.py -f import_merge/import_update_0.py python3 ./test.py -f import_merge/import_update_1.py python3 ./test.py -f import_merge/import_update_2.py +python3 ./test.py -f update/merge_commit_data.py #======================p1-end=============== #======================p2-start=============== # tools @@ -167,11 +169,11 @@ python3 test.py -f tools/taosdemoTestInterlace.py python3 test.py -f tools/taosdemoTestQuery.py # nano support -#python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py -#python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py -#python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py -#python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py -#python3 test.py -f tools/taosdumpTestNanoSupport.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py # update python3 ./test.py -f update/allow_update.py @@ -179,7 +181,7 @@ python3 ./test.py -f update/allow_update-0.py python3 ./test.py -f update/append_commit_data.py python3 ./test.py -f update/append_commit_last-0.py python3 ./test.py -f update/append_commit_last.py -python3 ./test.py -f update/merge_commit_data.py + python3 ./test.py -f update/merge_commit_data2.py python3 ./test.py -f update/merge_commit_data2_update0.py @@ -338,6 +340,7 @@ python3 ./test.py -f functions/function_twa.py -r 1 python3 ./test.py -f functions/function_twa_test2.py python3 ./test.py -f functions/function_stddev_td2555.py python3 ./test.py -f 
functions/showOfflineThresholdIs864000.py +python3 ./test.py -f functions/function_interp.py python3 ./test.py -f insert/metadataUpdate.py python3 ./test.py -f query/last_cache.py python3 ./test.py -f query/last_row_cache.py @@ -380,9 +383,12 @@ python3 ./test.py -f query/querySession.py python3 test.py -f alter/alter_create_exception.py python3 ./test.py -f insert/flushwhiledrop.py python3 ./test.py -f insert/schemalessInsert.py +python3 ./test.py -f alter/alterColMultiTimes.py +python3 ./test.py -f query/queryWildcardLength.py +python3 ./test.py -f query/queryTbnameUpperLower.py #======================p4-end=============== -# python3 test.py -f tools/taosdemoAllTest/pytest.py + diff --git a/tests/pytest/functions/function_interp.py b/tests/pytest/functions/function_interp.py new file mode 100644 index 0000000000..41215f15eb --- /dev/null +++ b/tests/pytest/functions/function_interp.py @@ -0,0 +1,98 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + tdSql.execute("create table ap1 (ts timestamp, pav float)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.119', 2.90799)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.317', 3.07399)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.517', 0.58117)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.717', 0.16150)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.918', 1.47885)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:56.569', 1.76472)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.381', 2.13722)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.574', 4.10256)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.776', 3.55345)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.976', 1.46624)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.187', 0.17943)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.372', 2.04101)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.573', 3.20924)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.768', 1.71807)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.964', 4.60900)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.155', 4.33907)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.359', 0.76940)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.553', 0.06458)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.742', 4.59857)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.938', 1.55081)") + + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV)") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 
where ts = '2021-07-25 02:19:54' FILL (NEXT)") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR)") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)") + tdSql.checkRows(6) + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT)") + tdSql.checkRows(6) + tdSql.checkData(0,1,2.90799) + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV)") + tdSql.checkRows(7) + tdSql.checkData(1,1,1.47885) + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)") + tdSql.checkRows(7) + + # check desc order + tdSql.error("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV) order by ts desc") + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT) order by ts desc") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR) order by ts desc") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc") + tdSql.checkRows(6) + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT) order by ts desc") + tdSql.checkRows(6) + tdSql.checkData(0,1,4.60900) + tdSql.error("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV) order by ts desc") + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc") + tdSql.checkRows(7) + + # check exception + tdSql.error("select interp(*) from ap1") + tdSql.error("select interp(*) from ap1 FILL(NEXT)") + tdSql.error("select interp(*) from ap1 ts >= '2021-07-25 02:19:54' FILL(NEXT)") + tdSql.error("select interp(*) from ap1 ts <= '2021-07-25 02:19:54' FILL(NEXT)") + tdSql.error("select interp(*) from ap1 where ts >'2021-07-25 02:19:59.938' and ts < now interval(1s) fill(next)") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index b7480fdbd5..f320db43af 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -13,6 +13,8 @@ import sys import subprocess +import random +import math from util.log import * from util.cases import * @@ -56,7 +58,7 @@ class TDTestCase: def td3690(self): tdLog.printNoPrefix("==========TD-3690==========") tdSql.query("show variables") - tdSql.checkData(51, 1, 864000) + tdSql.checkData(53, 1, 864000) def td4082(self): tdLog.printNoPrefix("==========TD-4082==========") @@ -106,6 +108,9 @@ class TDTestCase: tdSql.execute("drop database if exists db1") tdSql.execute("create database if not exists db keep 3650") tdSql.execute("create database if not exists db1 keep 3650") + tdSql.execute("create database if not exists new keep 3650") + tdSql.execute("create database if not exists private keep 3650") + tdSql.execute("create database if not exists 
db2 keep 3650") tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)") @@ -122,6 +127,14 @@ class TDTestCase: # p1 不进入指定数据库 tdSql.query("show create database db") tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.query("show create database db2") + tdSql.checkRows(1) + tdSql.query("show create database new") + tdSql.checkRows(1) + tdSql.query("show create database private") + tdSql.checkRows(1) tdSql.error("show create database ") tdSql.error("show create databases db ") tdSql.error("show create database db.stb1") @@ -255,7 +268,7 @@ class TDTestCase: tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db") tdSql.query("show variables") - tdSql.checkData(36, 1, 3650) + tdSql.checkData(38, 1, 3650) tdSql.query("show databases") tdSql.checkData(0,7,"3650,3650,3650") @@ -283,7 +296,7 @@ class TDTestCase: tdSql.query("show databases") tdSql.checkData(0, 7, "3650,3650,3650") tdSql.query("show variables") - tdSql.checkData(36, 1, 3650) + tdSql.checkData(38, 1, 3650) tdSql.execute("alter database db1 keep 365") tdSql.execute("drop database if exists db1") @@ -340,17 +353,552 @@ class TDTestCase: pass + def td4889(self): + tdLog.printNoPrefix("==========TD-4889==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + + for i in range(1000): + tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})") + for j in range(100): + tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})") + + tdSql.query("show vgroups") + index = tdSql.getData(0,0) + tdSql.checkData(0, 6, 0) + tdSql.execute(f"compact vnodes in({index})") + for i in range(3): + tdSql.query("show vgroups") + if tdSql.getData(0, 6) == 1: + tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1") + break + if i == 3: + tdLog.exit("compacting not occured") + time.sleep(0.5) + + pass + + def td5168insert(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)") + tdSql.execute("create table db.t1 using db.stb1 tags(1)") + + for i in range(5): + c1 = 1001.11 + i*0.1 + c2 = 1001.11 + i*0.1 + 1*0.01 + c3 = 1001.11 + i*0.1 + 2*0.01 + c4 = 1001.11 + i*0.1 + 3*0.01 + tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})") + + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)") + + # for i in range(1000000): + for i in range(1000000): + random1 = random.uniform(1000,1001) + random2 = random.uniform(1000,1001) + random3 = random.uniform(1000,1001) + random4 = random.uniform(1000,1001) + tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, 
{random4})") + + pass + + def td5168(self): + tdLog.printNoPrefix("==========TD-5168==========") + # 插入小范围内的随机数 + tdLog.printNoPrefix("=====step0: 默认情况下插入数据========") + self.td5168insert() + + # 获取五个时间点的数据作为基准数值,未压缩情况下精准匹配 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # tdSql.query("select * from db.t1 limit 100,1") + # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000,1") + # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 10000,1") + # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 100000,1") + # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000000,1") + # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + + # 关闭服务并获取未开启压缩情况下的数据容量 + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + + cfgdir = self.getCfgDir() + cfgfile = self.getCfgFile() + + lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'" + data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'" + dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}") + + ################################################### + float_lossy = "float" + double_lossy = "double" + float_double_lossy = "float|double" + no_loosy = "" + + double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}" + _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8") + + lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} " + lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} " + lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} " + lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} " + + ################################################### + + # 开启有损压缩,参数float,并启动服务插入数据 + tdLog.printNoPrefix("=====step1: lossyColumns设置为float========") + lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为float情况下的数据容量 + tdDnodes.stop(index) + dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = 
subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数double,并启动服务 + tdLog.printNoPrefix("=====step2: lossyColumns设置为double========") + lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为double情况下的数据容量 + tdDnodes.stop(index) + dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数 float&&double ,并启动服务 + tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========") + lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量 + tdDnodes.stop(index) + dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}") + + if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) : + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}") + tdLog.exit("压缩未生效") + else: + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}") + tdLog.printNoPrefix("压缩生效") + + pass + + def td5433(self): + tdLog.printNoPrefix("==========TD-5433==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))") + numtab=2000000 + for i in range(numtab): + sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})") + + tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')") + tdSql.execute("create table db.t02 using db.stb2 tags('2', 
'200')") + tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')") + tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')") + tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')") + + tdSql.query("select distinct t1 from stb1 where t1 != '150'") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 != 150") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 = 150") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1 where t1 = '150'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(numtab) + + tdSql.query("select distinct t0 from stb1 where t0 != '2'") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 != 2") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1") + tdSql.checkRows(128) + + tdSql.query("select distinct t1 from stb2 where t1 != '200'") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 != 200") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 = 200") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2 where t1 = '200'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2") + tdSql.checkRows(5) + + tdSql.query("select distinct t0 from stb2 where t0 != '2'") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 != 2") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2") + tdSql.checkRows(5) + + pass + + def td5798(self): + tdLog.printNoPrefix("==========TD-5798 + TD-5810==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)") + maxRemainderNum=7 + tbnum=101 + for i in range(tbnum-1): + sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})") + tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)") + + tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})") + tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')") + tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)") + tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)") + tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)") + tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)") + 
tdSql.execute(f"insert into db.t100num (ts )values (now-7d)") + tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)") + + #========== TD-5810 suport distinct multi-data-coloumn ========== + tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c2 from stb1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}") + tdSql.checkRows(2) + + tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c2 from t1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c1 from t1 ") + tdSql.checkRows(2) + tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2") + tdSql.checkRows(1) + + tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2") + tdSql.checkRows(2) + + tdSql.error("select distinct c5 from stb1") + tdSql.error("select distinct c5 from t1") + tdSql.error("select distinct c1 from db.*") + tdSql.error("select c2, distinct c1 from stb1") + tdSql.error("select c2, distinct c1 from t1") + tdSql.error("select distinct c2 from ") + tdSql.error("distinct c2 from stb1") + tdSql.error("distinct c2 from t1") + tdSql.error("select distinct c1, c2, c3 from stb1") + tdSql.error("select distinct c1, c2, c3 from t1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1") + tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1") + tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.query("select distinct c1, c2 from stb1 order by ts") + tdSql.checkRows(tbnum*3+1) + tdSql.query("select distinct c1, c2 from t1 order by ts") + tdSql.checkRows(4) + tdSql.error("select distinct c1, ts from stb1 group by c2") + tdSql.error("select distinct c1, ts from t1 group by c2") + tdSql.error("select distinct c1, max(c2) from stb1 ") + tdSql.error("select distinct c1, max(c2) from t1 ") + tdSql.error("select max(c2), distinct c1 from stb1 ") + tdSql.error("select max(c2), distinct c1 from t1 ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)") + 
tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1") + tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ") + tdSql.checkRows(6) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)") + tdSql.checkRows(15) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)") + tdSql.checkRows(3) + + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ") + tdSql.checkRows(4) + tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)") + # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)") + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )") + tdSql.checkRows(1) + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )") + tdSql.checkRows(1) + tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ") + + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)") + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)") + + + + #========== TD-5798 suport distinct multi-tags-coloumn ========== + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t1 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t1, t0 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t1, t2 from stb1") + tdSql.checkRows(maxRemainderNum*2+1) + tdSql.query("select distinct t0, t1, t2 from stb1") + tdSql.checkRows(maxRemainderNum*2+1) + tdSql.query("select distinct t0 t1, t1 t2 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t0, t0 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t1 from t1") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from t100num") + tdSql.checkRows(1) + + tdSql.query("select distinct t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from 
stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t4, t2 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2, t3, t4 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2 t1, t3 t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t3, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from t01") + tdSql.checkRows(1) + tdSql.query("select distinct t3, t4 from t0100num") + tdSql.checkRows(1) + + + ########## should be error ######### + tdSql.error("select distinct from stb1") + tdSql.error("select distinct t3 from stb1") + tdSql.error("select distinct t1 from db.*") + tdSql.error("select distinct t2 from ") + tdSql.error("distinct t2 from stb1") + tdSql.error("select distinct stb1") + tdSql.error("select distinct t0, t1, t2, t3 from stb1") + tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1") + + tdSql.error("select dist t0 from stb1") + tdSql.error("select distinct stb2.t2, stb2.t3 from stb1") + tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1") + + tdSql.error("select distinct t0, t1 from t1 where t0 < 7") + + ########## add where condition ########## + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3") + tdSql.checkRows(3) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2") + tdSql.checkRows(2) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2") + tdSql.checkRows(3) + tdSql.error("select distinct t0, t1 from stb1 where c1 > 2") + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5") + tdSql.checkRows(1) + tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4") + tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + + tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2") + tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") + tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") + tdSql.checkRows(5) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") + tdSql.checkRows(4) + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)") + tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error(" select 
distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3") + tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ") + + pass + + def td5935(self): + tdLog.printNoPrefix("==========TD-5935==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)") + nowtime=int(round((time.time()*1000))) + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})" + tdSql.execute(sql) + for j in range(1000): + tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") + tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + + ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10" + tdSql.query(stddevAndIntervalSql) + tdSql.checkRows(10) + + ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ########## + fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" + tdSql.query(fillsql) + fillResult=False + if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None): + fillResult=True + if fillResult: + tdLog.success(f"sql is :{fillsql}, fill(next) is correct") + else: + tdLog.exit("fill(next) is wrong") + + pass + def run(self): # master branch # self.td3690() # self.td4082() # self.td4288() - self.td4724() + # self.td4724() + self.td5798() + # self.td5935() # develop branch # self.td4097() - + # self.td4889() + # self.td5168() + # self.td5433() def stop(self): tdSql.close() diff --git a/tests/pytest/functions/showOfflineThresholdIs864000.py b/tests/pytest/functions/showOfflineThresholdIs864000.py index a7a1c2bf3f..57d0b1921b 100644 --- a/tests/pytest/functions/showOfflineThresholdIs864000.py +++ b/tests/pytest/functions/showOfflineThresholdIs864000.py @@ -25,7 +25,7 @@ class TDTestCase: def run(self): tdSql.query("show variables") - tdSql.checkData(53, 1, 864000) + tdSql.checkData(54, 1, 864000) def stop(self): tdSql.close() diff --git a/tests/pytest/insert/schemalessInsert.py b/tests/pytest/insert/schemalessInsert.py index 5c93095a1e..88abea477a 100644 --- a/tests/pytest/insert/schemalessInsert.py +++ b/tests/pytest/insert/schemalessInsert.py @@ -705,7 +705,7 @@ class TDTestCase: case no id when stb exist """ self.cleanStb() - input_sql, stb_name = self.genFullTypeSql(t0="f", c0="f") + input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", c0="f") self.resCmp(input_sql, stb_name) input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", c0="f") self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"') diff --git a/tests/pytest/query/filterWithinMultiNchar.py b/tests/pytest/query/filterWithinMultiNchar.py new file mode 100644 index 0000000000..15fcf4e24b --- /dev/null +++ b/tests/pytest/query/filterWithinMultiNchar.py @@ -0,0 +1,51 @@ 
+################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "create stable t6 (ts timestamp,val int,flow nchar(36)) tags(dev nchar(36),dev1 nchar(36),dev2 nchar(36))") + tdSql.execute("insert into t6004 using t6 (dev,dev1,dev2) tags ('b50c79bc-b102-48e6-bda1-4212263e46d0','b50c79bc-b102-48e6-bda1-4212263e46d0', 'b50c79bc-b102-48e6-bda1-4212263e46d0') values(now,1,'b50c79bc-b102-48e6-bda1-4212263e46d0')") + + + print("==============step2") + tdSql.query("select * from t6 where dev='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev1='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev2='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryError.py b/tests/pytest/query/queryError.py index ac78c0518f..e5c468600b 100644 --- a/tests/pytest/query/queryError.py +++ b/tests/pytest/query/queryError.py @@ -65,6 +65,10 @@ class TDTestCase: # TD-2208 tdSql.error("select diff(tagtype),top(tagtype,1) from dev_001") + # TD-6006 + tdSql.error("select * from dev_001 where 'name' is not null") + tdSql.error("select * from dev_001 where \"name\" = 'first'") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/query/queryTbnameUpperLower.py b/tests/pytest/query/queryTbnameUpperLower.py new file mode 100644 index 0000000000..bd4e85c5ca --- /dev/null +++ b/tests/pytest/query/queryTbnameUpperLower.py @@ -0,0 +1,78 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.common import tdCom + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def checkStbWhereIn(self): + ''' + where in ---> upper lower mixed + ''' + tdCom.cleanTb() + table_name = tdCom.getLongName(8, "letters_mixed") + table_name_sub = f'{table_name}_sub' + tb_name_lower = table_name_sub.lower() + tb_name_upper = table_name_sub.upper() + + ## create stb and tb + tdSql.execute(f'CREATE TABLE {table_name} (ts timestamp, id int, bi1 binary(20)) tags (si1 binary(20))') + tdSql.execute(f'create table {table_name_sub}1 using {table_name} tags ("{table_name_sub}1")') + tdSql.execute(f'create table {tb_name_lower}2 using {table_name} tags ("{tb_name_lower}2")') + tdSql.execute(f'create table {tb_name_upper}3 using {table_name} tags ("{tb_name_upper}3")') + + ## insert values + tdSql.execute(f'insert into {table_name_sub}1 values (now-1s, 1, "{table_name_sub}1")') + tdSql.execute(f'insert into {tb_name_lower}2 values (now-2s, 2, "{tb_name_lower}21")') + tdSql.execute(f'insert into {tb_name_lower}2 values (now-3s, 3, "{tb_name_lower}22")') + tdSql.execute(f'insert into {tb_name_upper}3 values (now-4s, 4, "{tb_name_upper}31")') + tdSql.execute(f'insert into {tb_name_upper}3 values (now-5s, 5, "{tb_name_upper}32")') + tdSql.execute(f'insert into {tb_name_upper}3 values (now-6s, 6, "{tb_name_upper}33")') + + ## query where tbname in single + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub}1")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub.upper()}1")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub.lower()}1")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_lower}2")') + tdSql.checkRows(2) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_lower.upper()}2")') + tdSql.checkRows(2) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_upper}3")') + tdSql.checkRows(3) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_upper.lower()}3")') + tdSql.checkRows(3) + + ## query where tbname in multi + tdSql.query(f'select * from {table_name} where id=5 and tbname in ("{table_name_sub}1", "{tb_name_lower.upper()}2", "{tb_name_upper.lower()}3")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub}1", "{tb_name_lower.upper()}2", "{tb_name_upper.lower()}3")') + tdSql.checkRows(6) + + def run(self): + tdSql.prepare() + self.checkStbWhereIn() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/queryWildcardLength.py b/tests/pytest/query/queryWildcardLength.py new file mode 100644 index 0000000000..1fc46fe7d6 --- /dev/null +++ b/tests/pytest/query/queryWildcardLength.py @@ -0,0 +1,207 @@ +################################################################### +# Copyright (c) 2016 by TAOS 
Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +from copy import deepcopy +import string +import random +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def cleanTb(self): + query_sql = "show stables" + res_row_list = tdSql.query(query_sql, True) + stb_list = map(lambda x: x[0], res_row_list) + for stb in stb_list: + tdSql.execute(f'drop table if exists {stb}') + + query_sql = "show tables" + res_row_list = tdSql.query(query_sql, True) + tb_list = map(lambda x: x[0], res_row_list) + for tb in tb_list: + tdSql.execute(f'drop table if exists {tb}') + + def getLongWildcardStr(self, len=None): + """ + generate long wildcard str + """ + maxWildCardsLength = int(tdSql.getVariable('maxWildCardsLength')[0]) + if len: + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len)) + else: + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(maxWildCardsLength+1)) + return chars + + def genTableName(self): + ''' + generate table name + hp_name--->'%str' + lp_name--->'str%' + ul_name--->'st_r' + ''' + table_name = self.getLongWildcardStr() + table_name_list = list(table_name) + table_name_list.pop(-1) + + if len(table_name_list) > 1: + lp_name = deepcopy(table_name_list) + lp_name[-1] = '%' + lp_name = ''.join(lp_name) + + ul_name = list(lp_name) + ul_name[int(len(ul_name)/2)] = '_' + ul_name = ''.join(ul_name) + + table_name_list = list(table_name) + hp_name = deepcopy(table_name_list) + hp_name.pop(1) + hp_name[0] = '%' + hp_name = ''.join(hp_name) + else: + hp_name = '%' + lp_name = '%' + ul_name = '_' + return table_name, hp_name, lp_name, ul_name + + def checkRegularTableWildcardLength(self): + ''' + check regular table wildcard length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + tdSql.execute(f"CREATE TABLE {table_name} (ts timestamp, a1 int)") + sql_list = [f'show tables like "{hp_name}"', f'show tables like "{lp_name}"', f'show tables like "{ul_name}"'] + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + + exceed_sql_list = [f'show tables like "%{hp_name}"', f'show tables like "{lp_name}%"', f'show tables like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def checkSuperTableWildcardLength(self): + ''' + check super table wildcard length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + tdSql.execute(f"CREATE TABLE {table_name} (ts timestamp, c1 int) tags (t1 int)") + sql_list = [f'show stables like "{hp_name}"', f'show stables like "{lp_name}"', f'show stables like "{ul_name}"'] + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + + exceed_sql_list = [f'show stables like "%{hp_name}"', f'show stables like "{lp_name}%"', f'show stables like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def checkRegularWildcardSelectLength(self): + ''' + check 
regular table wildcard select length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + tdSql.execute(f"CREATE TABLE {table_name} (ts timestamp, bi1 binary(200), nc1 nchar(200))") + tdSql.execute(f'insert into {table_name} values (now, "{table_name}", "{table_name}")') + sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}"', + f'select * from {table_name} where bi1 like "{ul_name}"', + f'select * from {table_name} where nc1 like "{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}"', + f'select * from {table_name} where nc1 like "{ul_name}"'] + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + + exceed_sql_list = [f'select * from {table_name} where bi1 like "%{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}%"', + f'select * from {table_name} where bi1 like "{ul_name}%"', + f'select * from {table_name} where nc1 like "%{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}%"', + f'select * from {table_name} where nc1 like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def checkStbWildcardSelectLength(self): + ''' + check stb wildcard select length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + + tdSql.execute(f'CREATE TABLE {table_name} (ts timestamp, bi1 binary(200), nc1 nchar(200)) tags (si1 binary(200), sc1 nchar(200))') + tdSql.execute(f'create table {table_name}_sub1 using {table_name} tags ("{table_name}", "{table_name}")') + tdSql.execute(f'insert into {table_name}_sub1 values (now, "{table_name}", "{table_name}");') + + sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}"', + f'select * from {table_name} where bi1 like "{ul_name}"', + f'select * from {table_name} where nc1 like "{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}"', + f'select * from {table_name} where nc1 like "{ul_name}"', + f'select * from {table_name} where si1 like "{hp_name}"', + f'select * from {table_name} where si1 like "{lp_name}"', + f'select * from {table_name} where si1 like "{ul_name}"', + f'select * from {table_name} where sc1 like "{hp_name}"', + f'select * from {table_name} where sc1 like "{lp_name}"', + f'select * from {table_name} where sc1 like "{ul_name}"'] + + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + exceed_sql_list = [f'select * from {table_name} where bi1 like "%{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}%"', + f'select * from {table_name} where bi1 like "{ul_name}%"', + f'select * from {table_name} where nc1 like "%{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}%"', + f'select * from {table_name} where nc1 like "{ul_name}%"', + f'select * from {table_name} where si1 like "%{hp_name}"', + f'select * from {table_name} where si1 like "{lp_name}%"', + f'select * from {table_name} where si1 like "{ul_name}%"', + f'select * from {table_name} where sc1 like "%{hp_name}"', + f'select * from {table_name} where sc1 like "{lp_name}%"', + f'select * from {table_name} where sc1 like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def run(self): + tdSql.prepare() + self.checkRegularTableWildcardLength() + self.checkSuperTableWildcardLength() + 
self.checkRegularWildcardSelectLength() + self.checkStbWildcardSelectLength() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/pytest/query/tbname.py b/tests/pytest/query/tbname.py index 08416ba3ed..30d90b1f9d 100644 --- a/tests/pytest/query/tbname.py +++ b/tests/pytest/query/tbname.py @@ -53,6 +53,9 @@ class TDTestCase: "select * from cars where id=0 and tbname in ('carzero', 'cartwo')") tdSql.checkRows(1) + tdSql.query("select * from cars where tbname in ('carZero', 'CARONE')") + tdSql.checkRows(2) + """ tdSql.query("select * from cars where tbname like 'car%'") tdSql.checkRows(2) diff --git a/tests/pytest/tag_lite/TestModifyTag.py b/tests/pytest/tag_lite/TestModifyTag.py new file mode 100644 index 0000000000..acf63695f6 --- /dev/null +++ b/tests/pytest/tag_lite/TestModifyTag.py @@ -0,0 +1,125 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1625068800000000000 # this is timestamp "2021-07-01 00:00:00" + self.numberOfTables = 10 + self.numberOfRecords = 100 + + def checkCommunity(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + return False + else: + return True + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + + + def run(self): + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + # basic test for alter tags + tdSql.execute("create database tagdb ") + tdSql.execute(" use tagdb") + tdSql.execute("create table st (ts timestamp , a int) tags (tg1 binary(20), tg2 binary(20), tg3 binary(20))") + tdSql.execute("insert into t using st (tg3, tg2, tg1) tags ('tg3', 'tg2', 'tg1') values (now, 1)") + tdSql.execute("alter table t set tag tg1='newtg1'") + res = tdSql.getResult("select tg1,tg2,tg3 from t") + + if res == [('newtg1', 'tg2', 'tg3')]: + tdLog.info(" alter tag check has pass!") + else: + tdLog.info(" alter tag failed , please check !") + + tdSql.error("alter stable st modify tag tg2 binary(2)") + tdSql.execute("alter stable st modify tag tg2 binary(30) ") + tdSql.execute("alter table t set tag tg2 = 'abcdefghijklmnopqrstuvwxyz1234'") + res = tdSql.getResult("select tg1,tg2,tg3 from t") + if res == [('newtg1', 
'abcdefghijklmnopqrstuvwxyz1234', 'tg3')]: + tdLog.info(" alter tag check has pass!") + else: + tdLog.info(" alter tag failed , please check !") + + # test boundary about tags + tdSql.execute("create stable stb1 (ts timestamp , a int) tags (tg1 binary(16374))") + tdSql.error("create stable stb1 (ts timestamp , a int) tags (tg1 binary(16375))") + bound_sql = "create stable stb2 (ts timestamp , a int) tags (tg1 binary(10)," + for i in range(127): + bound_sql+="tag"+str(i)+" binary(10)," + sql1 = bound_sql[:-1]+")" + tdSql.execute(sql1) + sql2 = bound_sql[:-1]+"tag127 binary(10))" + tdSql.error(sql2) + tdSql.execute("create stable stb3 (ts timestamp , a int) tags (tg1 nchar(4093))") + tdSql.error("create stable stb3 (ts timestamp , a int) tags (tg1 nchar(4094))") + tdSql.execute("create stable stb4 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(8))") + tdSql.error("create stable stb4 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(9))") + tdSql.execute("create stable stb5 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(4),tag3 binary(2))") + tdSql.error("create stable stb5 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(4),tag3 binary(3))") + + tdSql.execute("create table stt (ts timestamp , a binary(100)) tags (tg1 binary(20), tg2 binary(20), tg3 binary(20))") + tdSql.execute("insert into tt using stt (tg3, tg2, tg1) tags ('tg3', 'tg2', 'tg1') values (now, 1)") + tags = "t"*16337 + sql3 = "alter table tt set tag tg1=" +"'"+tags+"'" + tdSql.error(sql3) + tdSql.execute("alter stable stt modify tag tg1 binary(16337)") + tdSql.execute(sql3) + res = tdSql.getResult("select tg1,tg2,tg3 from tt") + if res == [(tags, 'tg2', 'tg3')]: + tdLog.info(" alter tag check has pass!") + else: + tdLog.info(" alter tag failed , please check !") + + os.system("rm -rf ./tag_lite/TestModifyTag.py.sql") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/test.py b/tests/pytest/test.py index 65abd3ef93..97dca6be18 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -87,7 +87,7 @@ if __name__ == "__main__": else: toBeKilled = "valgrind.bin" - killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1" % toBeKilled + killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled processID = subprocess.check_output(psCmd, shell=True) @@ -110,7 +110,6 @@ if __name__ == "__main__": time.sleep(2) tdLog.info('stop All dnodes') - sys.exit(0) tdDnodes.init(deployPath) tdDnodes.setTestCluster(testCluster) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_samples.csv b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_samples.csv new file mode 100644 index 0000000000..5fc779b41b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_samples.csv @@ -0,0 +1,100 @@ +8.855,"binary_str0" ,1626870128248246976 +8.75,"binary_str1" ,1626870128249060032 +5.44,"binary_str2" ,1626870128249067968 +8.45,"binary_str3" ,1626870128249072064 +4.07,"binary_str4" ,1626870128249075904 +6.97,"binary_str5" ,1626870128249078976 +6.86,"binary_str6" ,1626870128249082048 +1.585,"binary_str7" ,1626870128249085120 +1.4,"binary_str8" ,1626870128249087936 +5.135,"binary_str9" ,1626870128249092032 +3.15,"binary_str10" 
,1626870128249095104 +1.765,"binary_str11" ,1626870128249097920 +7.71,"binary_str12" ,1626870128249100992 +3.91,"binary_str13" ,1626870128249104064 +5.615,"binary_str14" ,1626870128249106880 +9.495,"binary_str15" ,1626870128249109952 +3.825,"binary_str16" ,1626870128249113024 +1.94,"binary_str17" ,1626870128249117120 +5.385,"binary_str18" ,1626870128249119936 +7.075,"binary_str19" ,1626870128249123008 +5.715,"binary_str20" ,1626870128249126080 +1.83,"binary_str21" ,1626870128249128896 +6.365,"binary_str22" ,1626870128249131968 +6.55,"binary_str23" ,1626870128249135040 +6.315,"binary_str24" ,1626870128249138112 +3.82,"binary_str25" ,1626870128249140928 +2.455,"binary_str26" ,1626870128249145024 +7.795,"binary_str27" ,1626870128249148096 +2.47,"binary_str28" ,1626870128249150912 +1.37,"binary_str29" ,1626870128249155008 +5.39,"binary_str30" ,1626870128249158080 +5.13,"binary_str31" ,1626870128249160896 +4.09,"binary_str32" ,1626870128249163968 +5.855,"binary_str33" ,1626870128249167040 +0.17,"binary_str34" ,1626870128249170112 +1.955,"binary_str35" ,1626870128249173952 +0.585,"binary_str36" ,1626870128249178048 +0.33,"binary_str37" ,1626870128249181120 +7.925,"binary_str38" ,1626870128249183936 +9.685,"binary_str39" ,1626870128249187008 +2.6,"binary_str40" ,1626870128249191104 +5.705,"binary_str41" ,1626870128249193920 +3.965,"binary_str42" ,1626870128249196992 +4.43,"binary_str43" ,1626870128249200064 +8.73,"binary_str44" ,1626870128249202880 +3.105,"binary_str45" ,1626870128249205952 +9.39,"binary_str46" ,1626870128249209024 +2.825,"binary_str47" ,1626870128249212096 +9.675,"binary_str48" ,1626870128249214912 +9.99,"binary_str49" ,1626870128249217984 +4.51,"binary_str50" ,1626870128249221056 +4.94,"binary_str51" ,1626870128249223872 +7.72,"binary_str52" ,1626870128249226944 +4.135,"binary_str53" ,1626870128249231040 +2.325,"binary_str54" ,1626870128249234112 +4.585,"binary_str55" ,1626870128249236928 +8.76,"binary_str56" ,1626870128249240000 +4.715,"binary_str57" ,1626870128249243072 +0.56,"binary_str58" ,1626870128249245888 +5.35,"binary_str59" ,1626870128249249984 +5.075,"binary_str60" ,1626870128249253056 +6.665,"binary_str61" ,1626870128249256128 +7.13,"binary_str62" ,1626870128249258944 +2.775,"binary_str63" ,1626870128249262016 +5.775,"binary_str64" ,1626870128249265088 +1.62,"binary_str65" ,1626870128249267904 +1.625,"binary_str66" ,1626870128249270976 +8.15,"binary_str67" ,1626870128249274048 +0.75,"binary_str68" ,1626870128249277120 +3.265,"binary_str69" ,1626870128249280960 +8.585,"binary_str70" ,1626870128249284032 +1.88,"binary_str71" ,1626870128249287104 +8.44,"binary_str72" ,1626870128249289920 +5.12,"binary_str73" ,1626870128249295040 +2.58,"binary_str74" ,1626870128249298112 +9.42,"binary_str75" ,1626870128249300928 +1.765,"binary_str76" ,1626870128249304000 +2.66,"binary_str77" ,1626870128249308096 +1.405,"binary_str78" ,1626870128249310912 +5.595,"binary_str79" ,1626870128249315008 +2.28,"binary_str80" ,1626870128249318080 +9.24,"binary_str81" ,1626870128249320896 +9.03,"binary_str82" ,1626870128249323968 +6.055,"binary_str83" ,1626870128249327040 +1.74,"binary_str84" ,1626870128249330112 +5.77,"binary_str85" ,1626870128249332928 +1.97,"binary_str86" ,1626870128249336000 +0.3,"binary_str87" ,1626870128249339072 +7.145,"binary_str88" ,1626870128249342912 +0.88,"binary_str89" ,1626870128249345984 +8.025,"binary_str90" ,1626870128249349056 +4.81,"binary_str91" ,1626870128249351872 +0.725,"binary_str92" ,1626870128249355968 +3.85,"binary_str93" ,1626870128249359040 
+9.455,"binary_str94" ,1626870128249362112 +2.265,"binary_str95" ,1626870128249364928 +3.985,"binary_str96" ,1626870128249368000 +9.375,"binary_str97" ,1626870128249371072 +0.2,"binary_str98" ,1626870128249373888 +6.95,"binary_str99" ,1626870128249377984 diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv new file mode 100644 index 0000000000..18fb855d6d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv @@ -0,0 +1,100 @@ +"string0",7,8.615 +"string1",4,9.895 +"string2",3,2.92 +"string3",3,5.62 +"string4",7,1.615 +"string5",6,1.45 +"string6",5,7.48 +"string7",7,3.01 +"string8",5,4.76 +"string9",10,7.09 +"string10",2,8.38 +"string11",7,8.65 +"string12",5,5.025 +"string13",10,5.765 +"string14",2,4.57 +"string15",2,1.03 +"string16",7,6.98 +"string17",10,0.23 +"string18",7,5.815 +"string19",1,2.37 +"string20",10,8.865 +"string21",3,1.235 +"string22",2,8.62 +"string23",9,1.045 +"string24",8,4.34 +"string25",1,5.455 +"string26",2,4.475 +"string27",1,6.95 +"string28",2,3.39 +"string29",3,6.79 +"string30",7,9.735 +"string31",1,9.79 +"string32",10,9.955 +"string33",1,5.095 +"string34",3,3.86 +"string35",9,5.105 +"string36",10,4.22 +"string37",1,2.78 +"string38",9,6.345 +"string39",1,0.975 +"string40",5,6.16 +"string41",4,7.735 +"string42",5,6.6 +"string43",8,2.845 +"string44",1,0.655 +"string45",3,2.995 +"string46",9,3.6 +"string47",8,3.47 +"string48",3,7.98 +"string49",6,2.225 +"string50",9,5.44 +"string51",4,6.335 +"string52",3,2.955 +"string53",1,0.565 +"string54",6,5.575 +"string55",6,9.905 +"string56",9,6.025 +"string57",8,0.94 +"string58",10,0.15 +"string59",8,1.555 +"string60",4,2.28 +"string61",2,8.29 +"string62",9,6.22 +"string63",6,3.35 +"string64",10,6.7 +"string65",3,9.345 +"string66",7,9.815 +"string67",1,5.365 +"string68",10,3.81 +"string69",1,6.405 +"string70",8,2.715 +"string71",3,8.58 +"string72",8,6.34 +"string73",2,7.49 +"string74",4,8.64 +"string75",3,8.995 +"string76",7,3.465 +"string77",1,7.64 +"string78",6,3.65 +"string79",6,1.4 +"string80",6,5.875 +"string81",2,1.22 +"string82",5,7.87 +"string83",9,8.41 +"string84",9,8.9 +"string85",9,3.89 +"string86",2,5.0 +"string87",2,4.495 +"string88",4,2.835 +"string89",3,5.895 +"string90",7,8.41 +"string91",5,5.125 +"string92",7,9.165 +"string93",5,8.315 +"string94",10,7.485 +"string95",7,4.635 +"string96",2,6.015 +"string97",8,0.595 +"string98",3,8.79 +"string99",4,1.72 diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json new file mode 100644 index 0000000000..8bd5ddbae8 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb3", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + 
"child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json new file mode 100644 index 0000000000..5408a9841a --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb1", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json new file mode 100644 index 0000000000..13eb80f3cf --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": 
"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "us", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py new file mode 100644 index 0000000000..b248eda6b0 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py @@ -0,0 +1,115 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: create one or multiple tables per sql and insert multiple rows per sql + + # check the taosdemo time_step parameter with nanosecond (ns) precision + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json -y " % binPath) + tdSql.execute("use testdb1") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.getData(9, 1) + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:00:00.000099000") + + # check the taosdemo time_step parameter with microsecond (us) precision + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json -y " % binPath) + tdSql.execute("use testdb2") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.getData(9, 1) + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:00:00.099000") + + # check the taosdemo time_step parameter with millisecond (ms) precision + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json -y " % binPath) + tdSql.execute("use testdb3") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:01:39.000") + + + os.system("rm -rf ./res.txt") + os.system("rm -rf ./*.py.sql") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git 
a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json new file mode 100644 index 0000000000..38ac666fac --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdb", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json new file mode 100644 index 0000000000..9ef4a0af66 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json @@ -0,0 +1,84 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + 
"thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "subnsdb", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json new file mode 100644 index 0000000000..a09dec21fa --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdb2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + 
"childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json new file mode 100644 index 0000000000..e99c528c6d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json @@ -0,0 +1,84 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdbcsv", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }] + }] +} diff --git 
a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py new file mode 100644 index 0000000000..f069bb8f70 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py @@ -0,0 +1,169 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + # insert data from a special timestamp + # check stable stb0 + + os.system( + "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % + binPath) + tdSql.execute("use nsdb") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(9, 1, "TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000") + + # check stable stb1 which is insert with disord + + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb1_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 10000) + # check c8 is an nano timestamp + tdSql.query("describe stb1") + tdSql.checkDataType(9, 1, "TIMESTAMP") + # check insert timestamp_step is nano_second + tdSql.query("select last(ts) from stb1") + tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000") + + # insert data from now time + + # check stable stb0 + os.system( + "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " % + binPath) + + tdSql.execute("use nsdb2") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + # check 
c8 is an nano timestamp + tdSql.query("describe stb0") + tdSql.checkDataType(9, 1, "TIMESTAMP") + + # insert by csv files and timetamp is long int , strings in ts and + # cols + + os.system( + "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % + binPath) + tdSql.execute("use nsdbcsv") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(3, 1, "TIMESTAMP") + tdSql.query( + "select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") + tdSql.checkData(0, 0, 5000) + tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") + tdSql.checkData(0, 0, 10000) + + os.system("rm -rf ./insert_res.txt") + os.system( + "rm -rf tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNano*.py.sql") + + # taosdemo test insert with command and parameter , detals show + # taosdemo --help + os.system( + "%staosdemo -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % + binPath) + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, 600) + # check taosdemo -s + + sqls_ls = [ + 'drop database if exists nsdbsql;', + 'create database nsdbsql precision "ns" keep 3600 days 6 update 1;', + 'use nsdbsql;', + 'CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);', + 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);'] + + with open("./taosdemoTestNanoCreateDB.sql", mode="a") as sql_files: + for sql in sqls_ls: + sql_files.write(sql + "\n") + sql_files.close() + + sleep(10) + + os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath) + tdSql.query("select count(*) from nsdbsql.meters") + tdSql.checkData(0, 0, 2) + + os.system("rm -rf ./res.txt") + os.system("rm -rf ./*.py.sql") + os.system("rm -rf ./taosdemoTestNanoCreateDB.sql") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json new file mode 100644 index 0000000000..fff1017588 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json @@ -0,0 +1,92 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "nsdb", + "query_times": 10, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 1, + "concurrent": 2, + "sqls": [ + { + "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;", + "result": "./query_res1.txt" + }, + { + "sql": "select count(*) from stb0 where ts>now-20d ;", + "result": "./query_res2.txt" + }, + { + "sql": "select max(c10) from stb0;", + "result": "./query_res3.txt" + }, + { + "sql": "select min(c1) from stb0;", + "result": "./query_res4.txt" + }, + { + "sql": 
"select avg(c1) from stb0;", + "result": "./query_res5.txt" + }, + { + "sql":"select count(*) from stb0 group by tbname;", + "result":"./query_res6.txt" + } + + ] + }, + "super_table_query": { + "stblname": "stb0", + "query_interval": 0, + "threads": 4, + "sqls": [ + { + "sql": "select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000 \" ;", + "result": "./query_res_tb0.txt" + }, + { + "sql":"select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;", + "result": "./query_res_tb1.txt" + }, + { + "sql":"select first(*) from xxxx ;", + "result": "./query_res_tb2.txt" + }, + { + "sql":"select last(*) from xxxx;", + "result": "./query_res_tb3.txt" + + }, + { + "sql":"select last_row(*) from xxxx ;", + "result": "./query_res_tb4.txt" + + }, + { + "sql":"select max(c10) from xxxx ;", + "result": "./query_res_tb5.txt" + + }, + { + "sql":"select min(c1) from xxxx ;", + "result": "./query_res_tb6.txt" + + }, + { + "sql":"select avg(c10) from xxxx ;", + "result": "./query_res_tb7.txt" + + } + + ] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py new file mode 100644 index 0000000000..6c3e4d6c8a --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py @@ -0,0 +1,157 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # query: query test for nanoSecond with where and max min groupby order + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % binPath) + + tdSql.execute("use nsdb") + + # use where to filter + + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.590000000 \" ") + tdSql.checkData(0, 0, 4000) + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ") + tdSql.checkData(0, 0, 5900) + + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.590000000 \" ;") + tdSql.checkData(0, 0, 40) + tdSql.query("select 
count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ") + tdSql.checkData(0, 0, 59) + + + # select max min avg from special col + tdSql.query("select max(c10) from stb0;") + print("select max(c10) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select max(c10) from tb0_0;") + print("select max(c10) from tb0_0 : " , tdSql.getData(0, 0)) + + + tdSql.query("select min(c1) from stb0;") + print( "select min(c1) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select min(c1) from tb0_0;") + print( "select min(c1) from tb0_0 : " , tdSql.getData(0, 0)) + + tdSql.query("select avg(c1) from stb0;") + print( "select avg(c1) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select avg(c1) from tb0_0;") + print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0)) + + tdSql.query("select count(*) from stb0 group by tbname;") + tdSql.checkData(0, 0, 100) + tdSql.checkData(10, 0, 100) + + # query : query above sqls by taosdemo and continuously + + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json -y " % binPath) + + + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % binPath) + tdSql.execute("use nsdbcsv") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(3, 1, "TIMESTAMP") + tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"") + tdSql.checkData(0, 0, 5000) + tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") + tdSql.checkData(0, 0, 10000) + tdSql.execute('select count(*) from stb0 where c2 > 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 < 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 = 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 != 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 <> 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 > "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 < "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 = "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 != "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 <> "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where ts between "2021-07-01 00:00:00.000000000" and "2021-07-01 00:00:00.990000000"') + tdSql.execute('select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000') + tdSql.query('select avg(c0) from stb0 interval(5000000000b)') + tdSql.checkRows(1) + + tdSql.query('select avg(c0) from stb0 interval(100000000b)') + tdSql.checkRows(10) + + tdSql.error('select avg(c0) from stb0 interval(1b)') + tdSql.error('select avg(c0) from stb0 interval(999b)') + + tdSql.query('select avg(c0) from stb0 interval(1000b)') + tdSql.checkRows(100) + + tdSql.query('select avg(c0) from stb0 interval(1u)') + tdSql.checkRows(100) + + tdSql.query('select avg(c0) from stb0 interval(100000000b) sliding (100000000b)') + tdSql.checkRows(10) + + # query : query above sqls by taosdemo and continuously + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json -y " % binPath) + + os.system("rm -rf ./query_res*.txt*") + os.system("rm -rf tools/taosdemoAllTest/NanoTestCase/*.py.sql") + + + + + def stop(self): + tdSql.close() + tdLog.success("%s 
successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json new file mode 100644 index 0000000000..2323b0f370 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json @@ -0,0 +1,110 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "nsdbcsv", + "query_times": 10, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 1, + "concurrent": 2, + "sqls": [ + { + "sql": "select count(*) from stb0 where ts> \"2021-07-01 00:00:00.490000000\" ;", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb0 where ts < now -22d-1h-3s ;", + "result": "./query_res1.txt" + }, + { + "sql": "select count(*) from stb0 where ts < 1626918583000000000 ;", + "result": "./query_res2.txt" + }, + { + "sql": "select count(*) from stb0 where c2 <> 162687012800000000;", + "result": "./query_res3.txt" + }, + { + "sql": "select count(*) from stb0 where c2 != \"2021-07-21 20:22:08.248246976\";", + "result": "./query_res4.txt" + }, + { + "sql": "select count(*) from stb0 where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\";", + "result": "./query_res5.txt" + }, + { + "sql":"select count(*) from stb0 group by tbname;", + "result":"./query_res6.txt" + }, + { + "sql":"select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000;", + "result":"./query_res7.txt" + }, + { + "sql":"select avg(c0) from stb0 interval(5000000000b);", + "result":"./query_res8.txt" + }, + { + "sql":"select avg(c0) from stb0 interval(100000000b) sliding (100000000b);", + "result":"./query_res9.txt" + } + + ] + }, + "super_table_query": { + "stblname": "stb0", + "query_interval": 0, + "threads": 4, + "sqls": [ + { + "sql": "select count(*) from xxxx where ts > \"2021-07-01 00:00:00.490000000\" ;", + "result": "./query_res_tb0.txt" + }, + { + "sql":"select count(*) from xxxx where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\" ;", + "result": "./query_res_tb1.txt" + }, + { + "sql":"select first(*) from xxxx ;", + "result": "./query_res_tb2.txt" + }, + { + "sql":"select last(*) from xxxx;", + "result": "./query_res_tb3.txt" + + }, + { + "sql":"select last_row(*) from xxxx ;", + "result": "./query_res_tb4.txt" + + }, + { + "sql":"select max(c0) from xxxx ;", + "result": "./query_res_tb5.txt" + + }, + { + "sql":"select min(c0) from xxxx ;", + "result": "./query_res_tb6.txt" + + }, + { + "sql":"select avg(c0) from xxxx ;", + "result": "./query_res_tb7.txt" + + }, + { + "sql":"select avg(c0) from xxxx interval(100000000b) sliding (100000000b) ;", + "result": "./query_res_tb8.txt" + + } + + + ] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json new file mode 100644 index 0000000000..1cc834164e --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json @@ -0,0 +1,32 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + 
"password": "taosdata", + "databases": "subnsdb", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":2, + "mode":"sync", + "interval":10000, + "restart":"yes", + "keepProgress":"yes", + "sqls": [ + { + "sql": "select * from stb0 where ts>= \"2021-07-01 00:00:00.000000000\" ;", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb0 where ts < now -2d-1h-3s ;", + "result": "./subscribe_res1.txt" + }, + { + "sql": "select * from stb0 where ts < 1626918583000000000 ;", + "result": "./subscribe_res2.txt" + }] + + } +} diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py new file mode 100644 index 0000000000..95c1a731bc --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py @@ -0,0 +1,125 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import time +from datetime import datetime +import subprocess + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # get the number of subscriptions + def subTimes(self,filename): + self.filename = filename + command = 'cat %s |wc -l'% filename + times = int(subprocess.getstatusoutput(command)[1]) + return times + + # assert results + def assertCheck(self,filename,subResult,expectResult): + self.filename = filename + self.subResult = subResult + self.expectResult = expectResult + args0 = (filename, subResult, expectResult) + assert subResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0 + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # clear env + os.system("ps -ef |grep 'taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9") + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe_res*") + + + # insert data + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json" % binPath) + os.system("nohup %staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json &" % binPath) + query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json" |grep -v 
"grep"|awk \'{print $2}\'')[1]) + + + # merge result files + sleep(5) + os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt") + os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt") + + + # correct subscribeTimes testcase + subTimes0 = self.subTimes("all_subscribe_res0.txt") + self.assertCheck("all_subscribe_res0.txt",subTimes0 ,200) + + subTimes1 = self.subTimes("all_subscribe_res1.txt") + self.assertCheck("all_subscribe_res1.txt",subTimes1 ,200) + + subTimes2 = self.subTimes("all_subscribe_res2.txt") + self.assertCheck("all_subscribe_res2.txt",subTimes2 ,200) + + + # insert extral data + tdSql.execute("use subnsdb") + tdSql.execute("insert into tb0_0 values(now,100.1000,'subtest1',now-1s)") + sleep(15) + + os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + subTimes0 = self.subTimes("all_subscribe_res0.txt") + print("pass") + self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202) + + + + # correct data testcase + os.system("kill -9 %d" % query_pid) + sleep(3) + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe*") + os.system("rm -rf ./*.py.sql") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py new file mode 100644 index 0000000000..ca8832170b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py @@ -0,0 +1,362 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1625068800000000000 # this is timestamp "2021-07-01 00:00:00" + self.numberOfTables = 10 + self.numberOfRecords = 100 + + def checkCommunity(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + return False + else: + return True + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + + + def createdb(self, precision="ns"): + tb_nums = self.numberOfTables + per_tb_rows = self.numberOfRecords + + def build_db(precision, start_time): + tdSql.execute("drop database if exists timedb1") + tdSql.execute( + "create database timedb1 days 10 keep 365 blocks 8 precision "+"\""+precision+"\"") + + tdSql.execute("use timedb1") + tdSql.execute( + "create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))") + for tb in range(tb_nums): + tbname = "t"+str(tb) + tdSql.execute("create table " + tbname + + " using st tags(1, 'beijing')") + sql = "insert into " + tbname + " values" + currts = start_time + if precision == "ns": + ts_seed = 1000000000 + elif precision == "us": + ts_seed = 1000000 + else: + ts_seed = 1000 + + for i in range(per_tb_rows): + sql += "(%d, %d, 'nchar%d',%d)" % (currts + i*ts_seed, i % + 100, i % 100, currts + i*100) # currts +1000ms (1000000000ns) + tdSql.execute(sql) + + if precision == "ns": + start_time = 1625068800000000000 + build_db(precision, start_time) + + elif precision == "us": + start_time = 1625068800000000 + build_db(precision, start_time) + + elif precision == "ms": + start_time = 1625068800000 + build_db(precision, start_time) + + else: + print("other time precision not valid , please check! 
") + + + def run(self): + + # clear envs + os.system("rm -rf ./taosdumptest/") + tdSql.execute("drop database if exists dumptmp1") + tdSql.execute("drop database if exists dumptmp2") + tdSql.execute("drop database if exists dumptmp3") + + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/dumptmp1") + else: + print("path exist!") + + if not os.path.exists("./taosdumptest/dumptmp2"): + os.makedirs("./taosdumptest/dumptmp2") + + if not os.path.exists("./taosdumptest/dumptmp3"): + os.makedirs("./taosdumptest/dumptmp3") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + # create nano second database + + self.createdb(precision="ns") + + # dump all data + + os.system( + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + + # dump part data with -S -E + os.system( + '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' % + binPath) + os.system( + '%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' % + binPath) + + # replace strings to dump in databases + os.system( + "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + os.system( + "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") + os.system( + "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + + os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + + # dump data and check for taosdump + tdSql.query("select count(*) from dumptmp1.st") + tdSql.checkData(0,0,1000) + + tdSql.query("select count(*) from dumptmp2.st") + tdSql.checkData(0,0,510) + + tdSql.query("select count(*) from dumptmp3.st") + tdSql.checkData(0,0,900) + + # check data + origin_res = tdSql.getResult("select * from timedb1.st") + dump_res = tdSql.getResult("select * from dumptmp1.st") + if origin_res == dump_res: + tdLog.info("test nano second : dump check data pass for all data!" ) + else: + tdLog.info("test nano second : dump check data failed for all data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000") + dump_res = tdSql.getResult("select * from dumptmp2.st") + if origin_res == dump_res: + tdLog.info(" test nano second : dump check data pass for data! " ) + else: + tdLog.info(" test nano second : dump check data failed for data !" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 ") + dump_res = tdSql.getResult("select * from dumptmp3.st") + if origin_res == dump_res: + tdLog.info(" test nano second : dump check data pass for data! " ) + else: + tdLog.info(" test nano second : dump check data failed for data !" 
) + + + # us second support test case + + os.system("rm -rf ./taosdumptest/") + tdSql.execute("drop database if exists dumptmp1") + tdSql.execute("drop database if exists dumptmp2") + tdSql.execute("drop database if exists dumptmp3") + + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/dumptmp1") + else: + print("path exits!") + + if not os.path.exists("./taosdumptest/dumptmp2"): + os.makedirs("./taosdumptest/dumptmp2") + + if not os.path.exists("./taosdumptest/dumptmp3"): + os.makedirs("./taosdumptest/dumptmp3") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + self.createdb(precision="us") + + os.system( + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + + os.system( + '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' % + binPath) + os.system( + '%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' % + binPath) + + os.system( + "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + os.system( + "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") + os.system( + "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + + os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + + + tdSql.query("select count(*) from dumptmp1.st") + tdSql.checkData(0,0,1000) + + tdSql.query("select count(*) from dumptmp2.st") + tdSql.checkData(0,0,510) + + tdSql.query("select count(*) from dumptmp3.st") + tdSql.checkData(0,0,900) + + + origin_res = tdSql.getResult("select * from timedb1.st") + dump_res = tdSql.getResult("select * from dumptmp1.st") + if origin_res == dump_res: + tdLog.info("test us second : dump check data pass for all data!" ) + else: + tdLog.info("test us second : dump check data failed for all data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000") + dump_res = tdSql.getResult("select * from dumptmp2.st") + if origin_res == dump_res: + tdLog.info(" test us second : dump check data pass for data! " ) + else: + tdLog.info(" test us second : dump check data failed for data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 ") + dump_res = tdSql.getResult("select * from dumptmp3.st") + if origin_res == dump_res: + tdLog.info(" test us second : dump check data pass for data! " ) + else: + tdLog.info(" test us second : dump check data failed for data! 
" ) + + + # ms second support test case + + os.system("rm -rf ./taosdumptest/") + tdSql.execute("drop database if exists dumptmp1") + tdSql.execute("drop database if exists dumptmp2") + tdSql.execute("drop database if exists dumptmp3") + + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/dumptmp1") + else: + print("path exits!") + + if not os.path.exists("./taosdumptest/dumptmp2"): + os.makedirs("./taosdumptest/dumptmp2") + + if not os.path.exists("./taosdumptest/dumptmp3"): + os.makedirs("./taosdumptest/dumptmp3") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + self.createdb(precision="ms") + + os.system( + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + + os.system( + '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' % + binPath) + os.system( + '%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' % + binPath) + + os.system( + "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + os.system( + "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") + os.system( + "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + + os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + + + tdSql.query("select count(*) from dumptmp1.st") + tdSql.checkData(0,0,1000) + + tdSql.query("select count(*) from dumptmp2.st") + tdSql.checkData(0,0,510) + + tdSql.query("select count(*) from dumptmp3.st") + tdSql.checkData(0,0,900) + + + origin_res = tdSql.getResult("select * from timedb1.st") + dump_res = tdSql.getResult("select * from dumptmp1.st") + if origin_res == dump_res: + tdLog.info("test ms second : dump check data pass for all data!" ) + else: + tdLog.info("test ms second : dump check data failed for all data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000") + dump_res = tdSql.getResult("select * from dumptmp2.st") + if origin_res == dump_res: + tdLog.info(" test ms second : dump check data pass for data! " ) + else: + tdLog.info(" test ms second : dump check data failed for data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 ") + dump_res = tdSql.getResult("select * from dumptmp3.st") + if origin_res == dump_res: + tdLog.info(" test ms second : dump check data pass for data! " ) + else: + tdLog.info(" test ms second : dump check data failed for data! 
" ) + + + os.system("rm -rf ./taosdumptest/") + os.system("rm -rf ./dump_result.txt") + os.system("rm -rf *.py.sql") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 266a8fa712..c3fdff00ec 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -120,7 +120,7 @@ class TDTestCase: os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") # taosdemo test insert with command and parameter , detals show taosdemo --help - os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + os.system("%staosdemo -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 600) # check taosdemo -s diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 6b5681dfbc..1d28a2708f 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -19,11 +19,16 @@ import json import sys class taosdemoPerformace: - def __init__(self, commitID, dbName, branch, type): + def __init__(self, commitID, dbName, branch, type, numOfTables, numOfRows, numOfInt, numOfDouble, numOfBinary): self.commitID = commitID self.dbName = dbName self.branch = branch self.type = type + self.numOfTables = numOfTables + self.numOfRows = numOfRows + self.numOfInt = numOfInt + self.numOfDouble = numOfDouble + self.numOfBinary = numOfBinary self.host = "127.0.0.1" self.user = "root" self.password = "taosdata" @@ -51,14 +56,14 @@ class taosdemoPerformace: stb = { "name": "meters", "child_table_exists": "no", - "childtable_count": 10000, + "childtable_count": self.numOfTables, "childtable_prefix": "stb_", "auto_create_table": "no", "data_source": "rand", "batch_create_tbl_num": 10, "insert_mode": "taosc", - "insert_rows": 100000, - "interlace_rows": 100, + "insert_rows": self.numOfRows, + "interlace_rows": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, @@ -68,7 +73,9 @@ class taosdemoPerformace: "sample_file": "./sample.csv", "tags_file": "", "columns": [ - {"type": "INT", "count": 4} + {"type": "INT", "count": self.numOfInt}, + {"type": "DOUBLE", "count": self.numOfDouble}, + {"type": "BINARY", "len": 128, "count": self.numOfBinary} ], "tags": [ {"type": "INT", "count": 1}, @@ -76,6 +83,7 @@ class taosdemoPerformace: ] } + stables = [] stables.append(stb) @@ -137,43 +145,43 @@ class taosdemoPerformace: binPath = buildPath + "/build/bin/" os.system( - "%staosdemo -f %s > taosdemoperf.txt 2>&1" % + "%staosdemo -f %s > /dev/null 2>&1" % (binPath, self.generateJson())) self.createTableTime = self.getCMDOutput( - "grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'") + "grep 'Spent' insert_res.txt | awk 'NR==1{print $2}'") self.insertRecordsTime = self.getCMDOutput( - "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'") + "grep 'Spent' insert_res.txt | awk 'NR==2{print $2}'") self.recordsPerSecond = self.getCMDOutput( - "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'") + "grep 'Spent' insert_res.txt | awk 'NR==2{print $16}'") self.commitID = self.getCMDOutput("git rev-parse --short 
HEAD") delay = self.getCMDOutput( - "grep 'delay' taosdemoperf.txt | awk '{print $4}'") + "grep 'delay' insert_res.txt | awk '{print $4}'") self.avgDelay = delay[:-4] delay = self.getCMDOutput( - "grep 'delay' taosdemoperf.txt | awk '{print $6}'") + "grep 'delay' insert_res.txt | awk '{print $6}'") self.maxDelay = delay[:-4] delay = self.getCMDOutput( - "grep 'delay' taosdemoperf.txt | awk '{print $8}'") + "grep 'delay' insert_res.txt | awk '{print $8}'") self.minDelay = delay[:-3] - os.system("[ -f taosdemoperf.txt ] && rm taosdemoperf.txt") + os.system("[ -f insert_res.txt ] && rm insert_res.txt") def createTablesAndStoreData(self): cursor = self.conn2.cursor() cursor.execute("create database if not exists %s" % self.dbName) cursor.execute("use %s" % self.dbName) - cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20))") - print("==================== taosdemo performance ====================") + cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20), numoftables int, numofrows int, numofint int, numofdouble int, numofbinary int)") print("create tables time: %f" % float(self.createTableTime)) print("insert records time: %f" % float(self.insertRecordsTime)) print("records per second: %f" % float(self.recordsPerSecond)) print("avg delay: %f" % float(self.avgDelay)) print("max delay: %f" % float(self.maxDelay)) print("min delay: %f" % float(self.minDelay)) - cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f, '%s', '%s')" % + cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f, '%s', '%s', %d, %d, %d, %d, %d)" % (float(self.createTableTime), float(self.insertRecordsTime), float(self.recordsPerSecond), - self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.branch, self.type)) + self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.branch, + self.type, self.numOfTables, self.numOfRows, self.numOfInt, self.numOfDouble, self.numOfBinary)) cursor.close() cursor1 = self.conn.cursor() @@ -209,8 +217,43 @@ if __name__ == '__main__': default='glibc', type=str, help='build type (default: glibc)') + parser.add_argument( + '-i', + '--num-of-int', + action='store', + default=4, + type=int, + help='num of int columns (default: 4)') + parser.add_argument( + '-D', + '--num-of-double', + action='store', + default=0, + type=int, + help='num of double columns (default: 4)') + parser.add_argument( + '-B', + '--num-of-binary', + action='store', + default=0, + type=int, + help='num of binary columns (default: 4)') + parser.add_argument( + '-t', + '--num-of-tables', + action='store', + default=10000, + type=int, + help='num of tables (default: 10000)') + parser.add_argument( + '-r', + '--num-of-rows', + action='store', + default=100000, + type=int, + help='num of rows (default: 100000)') args = parser.parse_args() - perftest = taosdemoPerformace(args.commit_id, args.database_name, args.git_branch, args.build_type) + perftest = taosdemoPerformace(args.commit_id, args.database_name, args.git_branch, args.build_type, args.num_of_tables, args.num_of_rows, args.num_of_int, args.num_of_double, 
args.num_of_binary) perftest.insertData() perftest.createTablesAndStoreData() diff --git a/tests/pytest/tools/taosdemoTestInterlace.py b/tests/pytest/tools/taosdemoTestInterlace.py index 4c551f327a..30c04729b7 100644 --- a/tests/pytest/tools/taosdemoTestInterlace.py +++ b/tests/pytest/tools/taosdemoTestInterlace.py @@ -49,7 +49,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath + "/build/bin/" - taosdemoCmd = "%staosdemo -f tools/insert-interlace.json -pp 2>&1 | grep sleep | wc -l" % binPath + taosdemoCmd = "%staosdemo -f tools/insert-interlace.json -PP 2>&1 | grep sleep | wc -l" % binPath sleepTimes = subprocess.check_output( taosdemoCmd, shell=True).decode("utf-8") print("sleep times: %d" % int(sleepTimes)) diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py new file mode 100644 index 0000000000..1c7d94a8a4 --- /dev/null +++ b/tests/pytest/util/common.py @@ -0,0 +1,53 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +from util.sql import tdSql + +class TDCom: + def init(self, conn, logSql): + tdSql.init(conn.cursor(), logSql) + + def cleanTb(self): + query_sql = "show stables" + res_row_list = tdSql.query(query_sql, True) + stb_list = map(lambda x: x[0], res_row_list) + for stb in stb_list: + tdSql.execute(f'drop table if exists {stb}') + + query_sql = "show tables" + res_row_list = tdSql.query(query_sql, True) + tb_list = map(lambda x: x[0], res_row_list) + for tb in tb_list: + tdSql.execute(f'drop table if exists {tb}') + + def getLongName(self, len, mode = "mixed"): + """ + generate long name + mode could be numbers/letters/letters_mixed/mixed + """ + if mode == "numbers": + chars = ''.join(random.choice(string.digits) for i in range(len)) + elif mode == "letters": + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len)) + elif mode == "letters_mixed": + chars = ''.join(random.choice(string.ascii_letters.upper() + string.ascii_letters.lower()) for i in range(len)) + else: + chars = ''.join(random.choice(string.ascii_letters.lower() + string.digits) for i in range(len)) + return chars + + def close(self): + self.cursor.close() + +tdCom = TDCom() \ No newline at end of file diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index ae4ba97eb3..0f4919ba96 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -436,7 +436,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + killCmd = "kill -9 %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( @@ -445,7 +445,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + killCmd = "kill -9 %s > /dev/null 2>&1" % 
processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( @@ -556,7 +556,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + killCmd = "kill -9 %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index b42af27d06..dfe1e4a582 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -81,6 +81,22 @@ class TDSql: return self.queryResult return self.queryRows + def getVariable(self, search_attr): + ''' + get variable of search_attr access "show variables" + ''' + try: + sql = 'show variables' + param_list = self.query(sql, row_tag=True) + for param in param_list: + if param[0] == search_attr: + return param[1], param_list + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + def getColNameList(self, sql, col_tag=None): self.sql = sql try: diff --git a/tests/script/general/compute/block_dist.sim b/tests/script/general/compute/block_dist.sim index 51cf903654..5343c1db28 100644 --- a/tests/script/general/compute/block_dist.sim +++ b/tests/script/general/compute/block_dist.sim @@ -84,6 +84,10 @@ if $rows != 1 then return -1 endi +print ============== TD-5998 +sql_error select _block_dist() from (select * from $nt) +sql_error select _block_dist() from (select * from $mt) + print =============== clear sql drop database $db sql show databases @@ -91,4 +95,4 @@ if $rows != 0 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/fill.sim b/tests/script/general/parser/fill.sim index d109dd50f7..3413a0b596 100644 --- a/tests/script/general/parser/fill.sim +++ b/tests/script/general/parser/fill.sim @@ -1050,6 +1050,27 @@ sql_error select min(c3) from m_fl_mt0 interval(10w) fill(value, 20) sql_error select max(c3) from m_fl_mt0 interval(1n) fill(prev) sql_error select min(c3) from m_fl_mt0 interval(1y) fill(value, 20) +sql create table nexttb1 (ts timestamp, f1 int); +sql insert into nexttb1 values ('2021-08-08 1:1:1', NULL); +sql insert into nexttb1 values ('2021-08-08 1:1:5', 3); + +sql select last(*) from nexttb1 where ts >= '2021-08-08 1:1:1' and ts < '2021-08-08 1:1:10' interval(1s) fill(next); +if $rows != 9 then + return -1 +endi +if $data00 != @21-08-08 01:01:01.000@ then + return -1 +endi +if $data01 != @21-08-08 01:01:01.000@ then + return -1 +endi +if $data02 != 3 then + return -1 +endi + + + + print =============== clear #sql drop database $db #sql show databases @@ -1057,4 +1078,4 @@ print =============== clear # return -1 #endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index 5edadad3a6..556292b21b 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -313,6 +313,12 @@ if $rows != 6 then return -1 endi +print =============================> TD-6086 +sql create stable td6086st(ts timestamp, d double) tags(t nchar(50)); +sql create table 
td6086ct1 using td6086st tags("ct1"); +sql create table td6086ct2 using td6086st tags("ct2"); +sql SELECT LAST(d),t FROM td6086st WHERE tbname in ('td6086ct1', 'td6086ct2') and ts>="2019-07-30 00:00:00" and ts<="2021-08-31 00:00:00" interval(1800s) fill(prev) GROUP BY tbname; + print ==================> td-2624 sql create table tm2(ts timestamp, k int, b binary(12)); sql insert into tm2 values('2011-01-02 18:42:45.326', -1,'abc'); @@ -1148,3 +1154,35 @@ endi sql select derivative(test_column_alias_name, 1s, 0) from (select avg(k) test_column_alias_name from t1 interval(1s)); + +sql create table smeters (ts timestamp, current float, voltage int) tags (t1 int); +sql create table smeter1 using smeters tags (1); +sql insert into smeter1 values ('2021-08-08 10:10:10', 10, 2); +sql insert into smeter1 values ('2021-08-08 10:10:12', 10, 2); +sql insert into smeter1 values ('2021-08-08 10:10:14', 20, 1); + +sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10 interval(1000a); +if $rows != 2 then + return -1 +endi +if $data00 != @21-08-08 10:10:10.000@ then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data10 != @21-08-08 10:10:12.000@ then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi + +sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10; +if $rows != 1 then + return -1 +endi +if $data00 != 0.000000000 then + return -1 +endi + diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim index 55c5701985..4654913d1e 100644 --- a/tests/script/general/parser/interp.sim +++ b/tests/script/general/parser/interp.sim @@ -68,4 +68,19 @@ print ================== server restart completed run general/parser/interp_test.sim -#system sh/exec.sh -n dnode1 -s stop -x SIGINT +print ================= TD-5931 +sql create stable st5931(ts timestamp, f int) tags(t int) +sql create table ct5931 using st5931 tags(1) +sql create table nt5931(ts timestamp, f int) + +sql select interp(*) from nt5931 where ts=now + +sql select interp(*) from st5931 where ts=now + +sql select interp(*) from ct5931 where ts=now + +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/interp_test.sim b/tests/script/general/parser/interp_test.sim index 845afb0173..5a2021dcfc 100644 --- a/tests/script/general/parser/interp_test.sim +++ b/tests/script/general/parser/interp_test.sim @@ -930,8 +930,254 @@ if $data44 != @18-11-25 19:06:00.000@ then endi +sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear); +if $rows != 8 then + return -1 +endi +if $data00 != @18-09-17 20:35:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:37:00.000@ then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != @18-09-17 20:38:00.000@ then + return -1 +endi +if $data31 != NULL then + return -1 +endi +if $data40 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data41 != NULL then + return -1 +endi +if $data50 != @18-09-17 20:40:00.000@ then + return -1 +endi +if $data51 != 0 then + return -1 +endi +if $data60 != @18-09-17 20:41:00.000@ then + return -1 +endi +if $data61 != NULL then + return -1 +endi +if 
$data70 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data71 != NULL then + return -1 +endi +sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear) order by ts desc; +if $rows != 8 then + return -1 +endi +if $data00 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:41:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:40:00.000@ then + return -1 +endi +if $data21 != 0 then + return -1 +endi +if $data30 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data31 != NULL then + return -1 +endi +if $data40 != @18-09-17 20:38:00.000@ then + return -1 +endi +if $data41 != NULL then + return -1 +endi +if $data50 != @18-09-17 20:37:00.000@ then + return -1 +endi +if $data51 != NULL then + return -1 +endi +if $data60 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data61 != NULL then + return -1 +endi +if $data70 != @18-09-17 20:35:00.000@ then + return -1 +endi +if $data71 != NULL then + return -1 +endi + +sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(2m) fill(linear) order by ts; +if $rows != 9 then + return -1 +endi +if $data00 != @18-09-17 20:34:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:38:00.000@ then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != @18-09-17 20:40:00.000@ then + return -1 +endi +if $data31 != 0.00000 then + return -1 +endi +if $data40 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data41 != 0.20000 then + return -1 +endi +if $data50 != @18-09-17 20:44:00.000@ then + return -1 +endi +if $data51 != 0.40000 then + return -1 +endi +if $data60 != @18-09-17 20:46:00.000@ then + return -1 +endi +if $data61 != 0.60000 then + return -1 +endi +if $data70 != @18-09-17 20:48:00.000@ then + return -1 +endi +if $data71 != 0.80000 then + return -1 +endi +if $data80 != @18-09-17 20:50:00.000@ then + return -1 +endi +if $data81 != 1.00000 then + return -1 +endi + + +sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts; +if $rows != 6 then + return -1 +endi +if $data00 != @18-09-17 20:33:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data31 != 0.20000 then + return -1 +endi +if $data40 != @18-09-17 20:45:00.000@ then + return -1 +endi +if $data41 != 0.50000 then + return -1 +endi +if $data50 != @18-09-17 20:48:00.000@ then + return -1 +endi +if $data51 != 0.80000 then + return -1 +endi + +sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts desc; +if $rows != 6 then + return -1 +endi +if $data00 != @18-09-17 20:48:00.000@ then + return -1 +endi +if $data01 != 0.80000 then + return -1 +endi +if $data10 != @18-09-17 20:45:00.000@ then + return -1 +endi +if $data11 != 0.50000 then + return -1 +endi +if $data20 != @18-09-17 
+sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts desc;
+if $rows != 6 then
+  return -1
+endi
+if $data00 != @18-09-17 20:48:00.000@ then
+  return -1
+endi
+if $data01 != 0.80000 then
+  return -1
+endi
+if $data10 != @18-09-17 20:45:00.000@ then
+  return -1
+endi
+if $data11 != 0.50000 then
+  return -1
+endi
+if $data20 != @18-09-17 20:42:00.000@ then
+  return -1
+endi
+if $data21 != 0.20000 then
+  return -1
+endi
+if $data30 != @18-09-17 20:39:00.000@ then
+  return -1
+endi
+if $data31 != NULL then
+  return -1
+endi
+if $data40 != @18-09-17 20:36:00.000@ then
+  return -1
+endi
+if $data41 != NULL then
+  return -1
+endi
+if $data50 != @18-09-17 20:33:00.000@ then
+  return -1
+endi
+if $data51 != NULL then
+  return -1
+endi
diff --git a/tests/script/general/parser/limit.sim b/tests/script/general/parser/limit.sim
index 23b85095c5..3af2cb3018 100644
--- a/tests/script/general/parser/limit.sim
+++ b/tests/script/general/parser/limit.sim
@@ -75,4 +75,9 @@ sleep 100
 run general/parser/limit_tb.sim
 run general/parser/limit_stb.sim
 
+print ========> TD-6017
+sql use $db
+sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 order by ts desc limit 3 offset 1)
+sql select * from (select ts, top(c1, 5) from $stb where ts >= $ts0 order by ts desc limit 3 offset 1)
+
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/parser/limit_tb.sim b/tests/script/general/parser/limit_tb.sim
index 0c987d88c9..4a93797d40 100644
--- a/tests/script/general/parser/limit_tb.sim
+++ b/tests/script/general/parser/limit_tb.sim
@@ -355,6 +355,10 @@ sql select top(c1, 1) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset 1
 if $rows != 0 then
   return -1
 endi
+
+print ========> TD-6017
+sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1)
+
 sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
 print select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
 print $data00 $data01
diff --git a/tests/script/general/parser/tbnameIn_query.sim b/tests/script/general/parser/tbnameIn_query.sim
index 65bb89d549..db27886bbf 100644
--- a/tests/script/general/parser/tbnameIn_query.sim
+++ b/tests/script/general/parser/tbnameIn_query.sim
@@ -101,6 +101,30 @@ if $data11 != 2 then
   return -1
 endi
 
+## tbname in can accept Upper case table name
+sql select count(*) from $stb where tbname in ('ti_tb0', 'TI_tb1', 'TI_TB2') group by t1 order by t1
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 10 then
+  return -1
+endi
+if $data01 != 0 then
+  return -1
+endi
+if $data10 != 10 then
+  return -1
+endi
+if $data11 != 1 then
+  return -1
+endi
+if $data20 != 10 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+
 # multiple tbname in is not allowed NOW
 sql_error select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb300') and tbname in ('ti_tb5', 'ti_tb1000') group by t1 order by t1 asc
 #if $rows != 4 then
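NOTE: the tbnameIn_query.sim block above verifies that tbname IN matches table names case-insensitively: each of the three groups (t1 = 0, 1, 2) must still count 10 rows even though two of the names are written in upper case. A rough equivalent driven through the TDengine Python connector, assuming a locally running taosd with the tables created by the tbnameIn test; the database and super-table names below are placeholders:

    # Same check through the TDengine Python connector (taos).
    # Assumptions: taosd runs locally and the ti_tb* tables already exist;
    # "db" and "ti_stb" are placeholder database / super-table names.
    import taos

    conn = taos.connect(host="localhost", database="db")
    cursor = conn.cursor()
    cursor.execute(
        "select count(*) from ti_stb where tbname in ('ti_tb0', 'TI_tb1', 'TI_TB2') "
        "group by t1 order by t1"
    )
    rows = cursor.fetchall()            # one (count, t1) row per matched group
    assert len(rows) == 3               # the $rows != 3 check above
    assert [t1 for _, t1 in rows] == [0, 1, 2]
    assert all(count == 10 for count, _ in rows)   # 10 rows in every group
    conn.close()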
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 6ad6a74eed..b4aad278d8 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -90,6 +90,14 @@ cd ../../../debug; make
 ./test.sh -f general/parser/function.sim
 ./test.sh -f unique/cluster/vgroup100.sim
 
+./test.sh -f unique/http/admin.sim
+./test.sh -f unique/http/opentsdb.sim
+
+./test.sh -f unique/import/replica2.sim
+./test.sh -f unique/import/replica3.sim
+
+./test.sh -f general/alter/cached_schema_after_alter.sim
+
 #======================b1-end===============
 
 #======================b2-start===============
@@ -198,13 +206,7 @@ cd ../../../debug; make
 #======================b3-end===============
 
 #======================b4-start===============
-./test.sh -f unique/http/admin.sim
-./test.sh -f unique/http/opentsdb.sim
 
-./test.sh -f unique/import/replica2.sim
-./test.sh -f unique/import/replica3.sim
-
-./test.sh -f general/alter/cached_schema_after_alter.sim
 ./test.sh -f general/alter/count.sim
 ./test.sh -f general/alter/dnode.sim
 ./test.sh -f general/alter/import.sim
diff --git a/tests/test-all.sh b/tests/test-all.sh
index 6e7963e787..b54857cf88 100755
--- a/tests/test-all.sh
+++ b/tests/test-all.sh
@@ -12,7 +12,7 @@ IN_TDINTERNAL="community"
 
 function stopTaosd {
   echo "Stop taosd"
-  sudo systemctl stop taosd
+  sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail '
   PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
   while [ -n "$PID" ]
   do
@@ -24,9 +24,9 @@ function stopTaosd {
 
 function dohavecore(){
   corefile=`find $corepath -mmin 1`
-  core_file=`echo $corefile|cut -d " " -f2`
-  proc=`echo $corefile|cut -d "_" -f3`
   if [ -n "$corefile" ];then
+    core_file=`echo $corefile|cut -d " " -f2`
+    proc=`file $core_file|awk -F "execfn:" '/execfn:/{print $2}'|tr -d \' |awk '{print $1}'|tr -d \,`
     echo 'taosd or taos has generated core'
     rm case.log
     if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then
@@ -46,7 +46,7 @@ function dohavecore(){
       fi
     fi
     if [[ $1 == 1 ]];then
-      echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit
+      echo '\n'|gdb $proc $core_file -ex "bt 10" -ex quit
       exit 8
     fi
   fi
@@ -179,6 +179,9 @@ function runPyCaseOneByOnefq() {
       start_time=`date +%s`
      date +%F\ %T | tee -a pytest-out.log
      echo -n $case
+      if [[ $1 =~ full ]] ; then
+        line=$line" -s"
+      fi
       $line > case.log 2>&1 && \
         echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \
         echo -e "${RED} failed${NC}" | tee -a pytest-out.log
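NOTE: the dohavecore() change above stops hard-coding /usr/local/taos/bin as the location of the crashed binary; it reads the executable path from the core file itself (the execfn field reported by `file`) and only derives core_file/proc once a core has actually been found. A small Python sketch of the same extraction, with core_path as a placeholder:

    # What the new awk/tr pipeline extracts, expressed in Python.
    # core_path is a placeholder; "file" must report the execfn field for core dumps.
    import re
    import subprocess

    core_path = "/path/to/core"  # placeholder
    out = subprocess.run(["file", core_path], capture_output=True, text=True).stdout
    m = re.search(r"execfn:\s*'([^']+)'", out)
    proc = m.group(1) if m else None
    print(proc)  # e.g. /usr/bin/taosd, then: gdb $proc $core_file -ex "bt 10" -ex quit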